import os
from typing import Dict
import cv2
import numpy as np
import tensorflow as tf
import sys
import glob
import requests
import base64
import time
import argparse

sys.path.append("..")

# CLI: -i/--img selects the input image path (defaults to ./img_92.jpg).
parser = argparse.ArgumentParser(description="Demo of argparse")
parser.add_argument('-i', '--img', default='./img_92.jpg')

# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util


def digitalOCR(imgName):
    """Run Baidu's number-OCR service on the image file ``imgName``.

    Returns the ``words_result`` list from the API response (each entry
    carries ``words`` and ``location`` keys), or ``None`` when the token
    request or the OCR call fails.
    """
    # NOTE(review): API credentials are hard-coded in the URL; consider
    # moving them to environment variables.
    host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=CDBwaIpCk0lU2Gm01Bbg7uDm&client_secret=M5aUvbG9rLnVtizKjYngHRK3i0aZUiAO'
    response = requests.get(host)
    if not response:
        # Token request failed; the original code would hit a NameError on
        # the unbound accessToken here.
        print("=====  Digital Detection Failure =====")
        return None
    access_token = response.json()['access_token']

    # Read and base64-encode the image; 'with' closes the handle the
    # original code leaked.
    with open(imgName, 'rb') as f:
        img = base64.b64encode(f.read())

    request_url = "https://aip.baidubce.com/rest/2.0/ocr/v1/numbers"
    request_url = request_url + "?access_token=" + access_token
    headers = {'content-type': 'application/x-www-form-urlencoded'}
    response = requests.post(request_url, data={"image": img}, headers=headers)
    if not response:
        # The original returned the unbound local result_ (NameError) here.
        print("=====  Digital Detection Failure =====")
        return None
    try:
        return response.json()['words_result']
    except (KeyError, ValueError):
        # API answered but reported an error (no 'words_result' field) or
        # returned a non-JSON body; narrowed from the original bare except.
        print("=====  Digital Detection Failure =====")
        return None


def model_inference(MODEL_NAME, IMAGE_NAME):
    """Load a frozen TF object-detection graph and run it on one image.

    Parameters
    ----------
    MODEL_NAME : str
        Directory (relative to the CWD) holding frozen_inference_graph.pb
        and labelmap.pbtxt.
    IMAGE_NAME : str
        Path of the image to run detection on.

    Returns
    -------
    tuple
        (BGR image array, category index dict,
         (boxes, scores, classes, num_detections)) as produced by sess.run.
    """
    CWD_PATH = os.getcwd()
    PATH_TO_CKPT = os.path.join(
        CWD_PATH, MODEL_NAME, 'frozen_inference_graph.pb')
    # Use MODEL_NAME here as well; the original hard-coded 'inference_graph',
    # silently loading the wrong label map for any other model directory.
    PATH_TO_LABELS = os.path.join(CWD_PATH, MODEL_NAME, 'labelmap.pbtxt')
    PATH_TO_IMAGE = os.path.join(CWD_PATH, IMAGE_NAME)

    NUM_CLASSES = 1  # single class (the bottle)

    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index_ = label_map_util.create_category_index(categories)

    # Load the frozen TensorFlow graph into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.compat.v1.GraphDef()
        # tf.io.gfile is the canonical path; the original mixed compat.v2
        # and compat.v1 APIs.
        with tf.io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            od_graph_def.ParseFromString(fid.read())
            tf.import_graph_def(od_graph_def, name='')

    # Input placeholder and output tensors of the detection graph.
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = detection_graph.get_tensor_by_name(
        'detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    image_ = cv2.imread(PATH_TO_IMAGE)
    # Model expects a batch dimension: (1, H, W, 3).
    image_expanded = np.expand_dims(image_, axis=0)

    time_start_detect = time.time()
    # Run inference; the context manager closes the session, which the
    # original code leaked.
    with tf.compat.v1.Session(graph=detection_graph) as sess:
        (boxes_, scores_, classes_, num_) = sess.run(
            [detection_boxes, detection_scores, detection_classes,
             num_detections],
            feed_dict={image_tensor: image_expanded})

    print("Time dect cost: {}".format(time.time() - time_start_detect))

    return image_, category_index_, (boxes_, scores_, classes_, num_)


def detection_OCR_outcome(image, IMAGE_NAME, boxes, scores) -> Dict:
    """Combine bottle detection and OCR scale readings to estimate the
    milk level in millilitres.

    Parameters
    ----------
    image :
        BGR image the boxes were detected on; only image.shape[0] (height)
        is read here.
    IMAGE_NAME : str
        Path of the same image, forwarded to the OCR service.
    boxes, scores :
        Batched detector outputs: boxes[0][i] is (ymin, xmin, ymax, xmax)
        in normalized coordinates, scores[0][i] its confidence.

    Returns
    -------
    dict
        {'msg': <status>, 'value': <estimated ml, or -1 on failure>}
    """
    # No boxes at all -> nothing to measure.
    if len(boxes[0]) == 0:
        print("========   No bottle detected   ========")
        return {'msg': 'no bottle detected', 'value': -1}

    # Vertical centre (in pixels) of the first box scoring >= 0.5.
    middle_line = None
    for i in range(len(boxes[0])):
        if scores[0][i] >= 0.50:
            middle_line = int(
                0.5 * (boxes[0][i][0] + boxes[0][i][2]) * image.shape[0])
            break

    # Every detection was below the confidence threshold.
    if middle_line is None:
        print("========   No bottle detected   ========")
        return {'msg': 'bottle score low', 'value': -1}

    # OCR the printed scale numbers.
    time_start_ocr = time.time()
    result = digitalOCR(IMAGE_NAME)
    print("Time ocr cost: {}".format(time.time() - time_start_ocr))

    # The original probed result[i] with the stale loop index i from the
    # detection loop above, which could raise IndexError on a perfectly
    # valid OCR result; just check the result itself.
    if not result:
        print("=====  Digital Detection Failure =====")
        return {'msg': 'OCR failure', 'value': -1}

    # Map each recognised scale label (50..250 ml) to its vertical pixel
    # position ('top' of the recognised word).
    known_marks = ('50', '100', '150', '200', '250')
    list_measure = {}
    for item in result:
        if item['words'] in known_marks:
            list_measure[item['words']] = item['location']['top']

    # Need at least three marks, and specifically 100 and 150 to derive the
    # pixels-per-10ml scale; the original only tested len >= 3 and could
    # KeyError when those two marks were missing.
    if (len(list_measure) < 3
            or '100' not in list_measure or '150' not in list_measure):
        print("=====  Digital Detection Failure =====")
        return {'msg': 'OCR value error', 'value': -1}

    # Pixel distance of one 10 ml step (the 100 and 150 marks sit 50 ml
    # apart, i.e. five 10 ml steps).
    scale = (list_measure['100'] - list_measure['150']) // 5
    if scale == 0:
        # Degenerate geometry (marks overlap) -> avoid ZeroDivisionError.
        print("=====  Digital Detection Failure =====")
        return {'msg': 'OCR value error', 'value': -1}

    # Number of 10 ml steps between the liquid line and the 100 ml mark.
    scale2level = abs(int(middle_line) - int(list_measure['100'])) // int(scale)
    if middle_line > list_measure['100']:
        # Image y grows downwards: below the 100 mark means less milk.
        ans = 100 - scale2level * 10
    else:
        ans = 100 + scale2level * 10

    # Clamp to the readable range suggested by how many marks were seen.
    if ans >= 250 and len(list_measure) > 3:
        ans = 250
    if ans >= 150 and len(list_measure) <= 3:
        ans = 150
    if ans <= 0:
        ans = 0

    return {'msg': '', 'value': ans}


def _main():
    """CLI entry point: detect the bottle, OCR the scale, print the level.

    Wrapping the script body in a function keeps its working variables out
    of the module globals; the large commented-out resize block that lived
    here was dead code and has been removed.
    """
    time_start = time.time()
    args = parser.parse_args()

    MODEL_NAME = 'inference_graph'
    IMAGE_NAME = args.img

    # Stage 1: bottle detection with the frozen TF graph.
    image, category_index, (boxes, scores, classes, num) = model_inference(
        MODEL_NAME, IMAGE_NAME)

    # Stage 2: OCR the printed scale and estimate the milk level (ml).
    milk_container = detection_OCR_outcome(image, IMAGE_NAME, boxes, scores)

    # Draw the single best detection (score >= 0.5) onto the image in place.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=4,
        min_score_thresh=0.50,
        max_boxes_to_draw=1)

    print("Time total cost: {}".format(time.time() - time_start))

    cv2.imwrite('{}_output.png'.format(IMAGE_NAME), image)
    # "预测奶量" = predicted milk volume.
    print("预测奶量: {}ml".format(milk_container['value']))


if __name__ == '__main__':
    _main()
