import os
from pickletools import int4
import cv2
import numpy as np
import tensorflow as tf
import sys
import glob
import requests
import base64
import time
import argparse

# Make the parent directory importable so the local `utils` package resolves.
sys.path.append("..")

# Command-line interface: the only option is the image to analyse.
parser = argparse.ArgumentParser(
    description="Detect a milk bottle and estimate the fill volume from its scale marks")
parser.add_argument('-i', '--img', default='./img_92.jpg',
                    help='path to the input image (default: %(default)s)')

# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util



def digitalOCR(imgName):
    """Run Baidu's number-OCR service on an image file.

    Parameters
    ----------
    imgName : str
        Path of the image file to recognize.

    Returns
    -------
    list
        The ``words_result`` list from the Baidu OCR response; each entry
        carries the recognized digits (``'words'``) and a bounding box
        (``'location'``).

    Prints a failure message and exits the process when the token request
    or the OCR request yields no usable result.
    """
    # Exchange the hard-coded API key/secret for a short-lived access token.
    # NOTE(review): credentials embedded in source — should move to config.
    host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=MdaPthBvqDXMBcewAY54tEPL&client_secret=kyOGrSZY9zGmzeV7tSPE8dAF0XBvI7qL'
    token_response = requests.get(host)
    if not token_response:
        # Fix: the original left accessToken unbound here and crashed with a
        # NameError; fail explicitly instead.
        print("=====  Digital Detection Failure 0 =====")
        exit()
    access_token = token_response.json()['access_token']

    # Fix: use a context manager so the file handle is always released
    # (the original open() was never closed).
    with open(imgName, 'rb') as f:
        img = base64.b64encode(f.read())

    params = {"image": img}
    request_url = "https://aip.baidubce.com/rest/2.0/ocr/v1/numbers"
    request_url = request_url + "?access_token=" + access_token
    headers = {'content-type': 'application/x-www-form-urlencoded'}
    response = requests.post(request_url, data=params, headers=headers)
    if not response:
        # Fix: the original returned an unbound result_ (NameError) when the
        # POST failed; fail explicitly instead.
        print("=====  Digital Detection Failure 0 =====")
        exit()
    try:
        # Fix: narrowed from a bare except to the errors a bad/short JSON
        # payload can actually raise.
        result_ = response.json()['words_result']
    except (KeyError, ValueError):
        print("=====  Digital Detection Failure 0 =====")
        exit()
    return result_


def model_inference(MODEL_NAME, IMAGE_NAME):
    """Detect the bottle / liquid-level box in one image with a frozen TF1 graph.

    Parameters
    ----------
    MODEL_NAME : str
        Directory (relative to the CWD) containing ``frozen_inference_graph.pb``.
    IMAGE_NAME : str
        Path (relative to the CWD) of the image to run detection on.

    Returns
    -------
    tuple
        ``(image, category_index, class_id, (boxes, scores, classes, num,
        ymin, xmin, ymax, xmax, middle_line))`` where ``image`` is the
        (possibly resized) BGR image and ``middle_line`` is the pixel row
        later matched against the OCR'd scale marks.

    Prints a message and exits the process when no detection reaches the
    0.25 score threshold.
    """
    CWD_PATH = os.getcwd()
    PATH_TO_CKPT = os.path.join(CWD_PATH, MODEL_NAME, 'frozen_inference_graph.pb')
    PATH_TO_LABELS = os.path.join(CWD_PATH, 'inference_graph', 'labelmap.pbtxt')
    PATH_TO_IMAGE = os.path.join(CWD_PATH, IMAGE_NAME)

    NUM_CLASSES = 1

    # Build the {id: {'id': ..., 'name': ...}} index the visualizer expects.
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index_ = label_map_util.create_category_index(categories)

    # Load the frozen TensorFlow 1.x graph into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            od_graph_def.ParseFromString(fid.read())
            tf.import_graph_def(od_graph_def, name='')

    # Input and output tensors of the detection graph.
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    image_ = cv2.imread(PATH_TO_IMAGE)

    # Normalize the working resolution: upscale small images, downscale very
    # large ones.  NOTE(review): cv2.INTER_AREA is meant for shrinking; the 3x
    # upscale would usually use INTER_LINEAR/INTER_CUBIC — kept as-is to
    # preserve current results.  The height is re-checked after the first
    # resize on purpose (a 3x upscale can push the image over 3500px).
    if int(image_.shape[0]) < 1300:
        image_ = cv2.resize(image_, (0, 0), fx=3, fy=3, interpolation=cv2.INTER_AREA)
    if int(image_.shape[0]) > 3500:
        image_ = cv2.resize(image_, (0, 0), fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)

    print(image_.shape)

    # The model expects a batch dimension: [1, H, W, 3].
    image_expanded = np.expand_dims(image_, axis=0)

    time_start_detect = time.time()
    # Fix: run inference inside a context manager so the tf.Session is always
    # closed (the original session was created and never released).
    with tf.Session(graph=detection_graph) as sess:
        (boxes_, scores_, classes_, num_) = sess.run(
            [detection_boxes, detection_scores, detection_classes, num_detections],
            feed_dict={image_tensor: image_expanded})

    print("Time dect cost: {}".format(time.time() - time_start_detect))

    # No boxes at all -> nothing to measure.
    if len(boxes_[0]) == 0:
        print("========   No bottle detected 1  ========")
        exit()

    count = 0
    print(classes_, scores_)
    # Take the first detection above the confidence threshold; its vertical
    # center (boxes are [ymin, xmin, ymax, xmax] in normalized coordinates)
    # becomes the liquid "middle line".
    for i in range(len(boxes_[0])):
        if scores_[0][i] >= 0.25:
            middle_line = int(0.5 * (boxes_[0][i][0] * image_.shape[0]
                                     + boxes_[0][i][2] * image_.shape[0]))
            ymin = int(boxes_[0][i][0] * image_.shape[0])
            xmin = int(boxes_[0][i][1] * image_.shape[1])
            ymax = int(boxes_[0][i][2] * image_.shape[0])
            xmax = int(boxes_[0][i][3] * image_.shape[1])
            cls_ = classes_[0][i]
            if int(cls_) == 2:
                # Class 2 boxes mark the surface itself, so use the top edge.
                middle_line = ymin
            break
        else:
            count += 1

    # Every detection fell below the threshold -> treat as "not detected".
    if len(boxes_[0]) == count:
        print("========   No bottle detected 2 ========")
        exit()

    return image_, category_index_, cls_, (boxes_, scores_, classes_, num_,
                                           ymin, xmin, ymax, xmax, middle_line)

def detection_OCR_outcome(image, IMAGE_NAME, boxes, scores, middle_line, CLS):
    """Estimate the milk volume (ml) by matching OCR'd scale digits to the level.

    Parameters
    ----------
    image, boxes, scores
        Unused here; kept for interface compatibility with the caller.
    IMAGE_NAME : str
        Path of the preprocessed image handed to the OCR service.
    middle_line : int
        Pixel row of the detected liquid level.
    CLS
        Detected class id; class 3 bottles get fixed answers (360/220 ml).

    Returns
    -------
    int
        Estimated volume in ml.  Prints a message and exits the process
        when the OCR result is unusable.
    """
    time_start_ocr = time.time()
    result = digitalOCR(IMAGE_NAME)
    print("Time ocr cost: {}".format(time.time() - time_start_ocr))

    list_measure = {}            # scale label -> pixel row of the label's center
    list_measure_mid_diff = {}   # scale label -> |center - middle_line|
    try:
        result[0]['words']
    except (IndexError, KeyError, TypeError):
        # Fix: narrowed from a bare except to the errors an empty or
        # malformed OCR result can actually raise.
        print("=====  Digital Detection Failure 1 =====")
        exit()

    temp_250 = []  # every row OCR'd as "250" (a "2 Oz" mark is sometimes misread as 250)
    # Fix: the original repeated the same center/diff computation in five
    # near-identical elif branches — one data-driven loop, same behavior
    # (later occurrences of a label overwrite earlier ones, as before).
    KNOWN_MARKS = ('50', '100', '150', '200', '250')
    for item in result:
        words = item['words']
        if words not in KNOWN_MARKS:
            continue
        center = item['location']['top'] + int(item['location']['height']) // 2
        list_measure[words] = center
        list_measure_mid_diff[words] = abs(center - middle_line)
        if words == '250':
            temp_250.append(center)

    # Keep the topmost "250" when several were read.
    if len(temp_250) > 1:
        list_measure['250'] = min(temp_250)

    print(CLS)
    # Class 3 bottles have fixed answers: full (360) when a 250 mark is
    # visible, otherwise 220.
    if int(CLS) == 3 and len(temp_250) >= 1:
        return 360
    if int(CLS) == 3 and len(temp_250) == 0:
        return 220

    print(list_measure)
    # Need at least two marks to establish the pixels-per-10ml scale.
    if len(list_measure) >= 2:
        sorted_marks = sorted(int(k) for k in list_measure)
        interval = int(sorted_marks[1] - sorted_marks[0]) / 10
        # Pixel distance per 10 ml, measured between the two lowest marks.
        scale = (list_measure[str(sorted_marks[1])]
                 - list_measure[str(sorted_marks[0])]) / interval
    else:
        print("=====  Digital Detection Failure 2 =====")
        exit()

    print("middle_line: ", middle_line)

    # Interpolate outward from the mark whose center is closest to the level.
    nearest_scale = int(min(list_measure_mid_diff, key=list_measure_mid_diff.get))
    scale2level = round(abs(int(middle_line)
                            - list_measure[str(nearest_scale)]) / abs(int(scale)))
    print(nearest_scale, sorted_marks, scale, scale2level)

    # Image rows grow downward, so a larger row means a lower liquid level.
    if middle_line > list_measure[str(nearest_scale)]:
        ans = nearest_scale - scale2level * 10
    else:
        ans = nearest_scale + scale2level * 10

    # Clamp to physically possible readings.
    if ans >= 360 and len(temp_250) >= 1:
        ans = 360
    if ans >= 220 and len(list_measure) <= 3:
        ans = 220
    if ans <= 0:
        ans = 0

    return ans

if __name__ == '__main__':
    # Entry point: detect the bottle and liquid level, OCR the scale digits,
    # then report the estimated milk volume.
    start_ts = time.time()
    cli_args = parser.parse_args()

    MODEL_NAME = 'inference_3'
    IMAGE_NAME = cli_args.img

    image, category_index, CLS, (boxes, scores, classes, num,
                                 ymin, xmin, ymax, xmax, middle_line) = \
        model_inference(MODEL_NAME, IMAGE_NAME)

    # Re-load the image as grayscale and apply the same resize rules used by
    # the detector so OCR coordinates line up with middle_line.  The height
    # is re-checked after the first resize (a 3x upscale may exceed 3500px).
    gray = cv2.imread(IMAGE_NAME, 0)
    if int(gray.shape[0]) < 1300:
        gray = cv2.resize(gray, (0, 0), fx=3, fy=3, interpolation=cv2.INTER_AREA)
    if int(gray.shape[0]) > 3500:
        gray = cv2.resize(gray, (0, 0), fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
    print(gray.shape)

    # The OCR helper reads from disk, so persist the preprocessed frame to a
    # timestamped temp file (removed at the end of the run).
    temp_image_name = 'temp_{}.jpg'.format(start_ts)
    cv2.imwrite(temp_image_name, gray)

    # Draw the single best detection onto the color image for inspection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=4,
        min_score_thresh=0.25,
        max_boxes_to_draw=1)
    annotated_name = '{}_output.jpg'.format(IMAGE_NAME)
    cv2.imwrite(annotated_name, image)

    milk_container = detection_OCR_outcome(
        image, temp_image_name, boxes, scores, middle_line, CLS)

    print("Time total cost: {}".format(time.time() - start_ts))
    print("预测奶量: {}ml".format(milk_container))

    # Tag the annotated output with the predicted volume, then clean up.
    os.rename(annotated_name, '{}_output_{}.jpg'.format(IMAGE_NAME, milk_container))
    os.remove(temp_image_name)
