# coding:utf-8

import time
import cv2
import numpy as np
import tensorflow as tf

#--------------Model preparation----------------
# Path to frozen detection graph. This is the actual model that is used for
# the object detection.
# PATH_TO_CKPT = r'/home/hadoop/redldw/ldw/models-master/research/voc/inference_graph/frozen_inference_graph.pb'
PATH_TO_CKPT = r'F:\Resources\model\Detector\frozen_inference_graph.pb'

# Build a dedicated graph and populate it from the serialized frozen model.
detection_graph = tf.Graph()
with detection_graph.as_default():
    graph_def = tf.GraphDef()
    # Load the .pb file from disk.
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as pb_file:
        # Deserialize the protobuf bytes and merge the ops into this graph.
        graph_def.ParseFromString(pb_file.read())
        tf.import_graph_def(graph_def, name='')
print(detection_graph.get_operations())

# Handles to the model's input and output tensors.
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular
# object was detected.
gboxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents the confidence level for one detection.
# Scores are drawn on the result image together with the class label.
gscores = detection_graph.get_tensor_by_name('detection_scores:0')
gclasses = detection_graph.get_tensor_by_name('detection_classes:0')
gnum_detections = detection_graph.get_tensor_by_name('num_detections:0')

#TODO:Add class names showing in the image
def detect_image_objects(image, sess, detection_graph):
    # Expand dimensions since the model expects images to have
    # shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image, axis=0)
    # boxes:[None,ymin,xmin,ymax,xmax]
    #
    (boxes,scores,classes,num_detections) = sess.run(
        [gboxes,gscores,gclasses,gnum_detections],
        feed_dict={image_tensor:image_np_expanded}
    )
    print('boxes shape:',boxes.shape,'\n')
    print('score shape',scores.shape)
    print('classes shape',classes.shape)
    print('num_detection shape',num_detections.shape)
    # Visualization of the results of a detection.
    # squeeze:把shape中为1的维度去掉
    boxes = np.squeeze(boxes)
    scores = np.squeeze(scores)
    height,width = image.shape[:2]
    # print('boxes shape:',boxes.shape[0],'\n')
    # print('score shape',scores.shape)
    # print('classes shape',classes.shape,classes)
    # print('num_detection shape',num_detections.shape)
    for i in range(2):
        if (scores is None or
             scores[i] > 0.5):
            ymin, xmin, ymax, xmax = boxes[i]
            ymin = int(ymin * height)
            ymax = int(ymax * height)
            xmin = int(xmin * width)
            xmax = int(xmax * width)

            score = None if scores is None else scores[i]
            print(f'predict score: {score}')

            font = cv2.FONT_HERSHEY_SIMPLEX
            text_y = np.max((0,ymin-10))
            text_x = np.max((0,xmin-10))
            cv2.putText(image,f"{score:}",
                        (text_x,text_y),font,0.4,(0,255,0))
            cv2.rectangle(image,(xmin,ymin),(xmax,ymax),
                                            (0,245,0),2)

    return image
def do_detect(image):
    """Read an image from a file path, run detection, and display the result.

    Args:
        image: filesystem path to the image to detect on.

    Raises:
        FileNotFoundError: if the path cannot be read as an image —
            cv2.imread signals failure by returning None, not by raising.
    """
    path = image
    image = cv2.imread(path)
    if image is None:
        # Fail fast with a clear message instead of crashing later inside
        # the detector on a None array.
        raise FileNotFoundError(f'cannot read image: {path}')
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            t_start = time.time()
            detect_image_objects(image, sess, detection_graph)
            t_end = time.time()
            print(f'detect image done\n'
                  f'用时{t_end - t_start}')
            cv2.imshow('detected', image)
            cv2.waitKey(0)
            # cv2.destroyWindow() requires a window-name argument; the
            # original bare call raised TypeError. Close all windows instead.
            cv2.destroyAllWindows()


if __name__ == "__main__":
    # img = r"F:\bigphoto\test_images\image6.jpg"
    test_image_path = r'F:\bigphoto\test_images\A_17.jpg'
    do_detect(test_image_path)
