import os
import cv2
import numpy as np
import time
import threading
from queue import Queue
# from blinker import signal

# Shared state between the capture thread and the detection thread.
# NOTE: `global` declarations at module level are no-ops in Python, so the
# original `global release_rect` / `global diff_flag` lines were removed;
# a plain assignment is what actually defines a module-level global.
release_rect = 0    # set to 1 by detect_thread when boxes are waiting on q_rect
diff_flag = True    # not referenced anywhere else in this file

event_rect = threading.Event()  # not referenced anywhere else in this file
event_img = threading.Event()   # capture loop sets it; detect_thread waits/clears

'''
Parameter notes (describing the files and thresholds used below):
label_path:      path to the class-label file
config_path:     path to the model configuration file
weights_path:    path to the model weights file
confidence_thre: 0-1 confidence (score) threshold; boxes scoring above it are kept (default 0.5)
nms_thre:        non-maximum-suppression threshold (default 0.3)
jpg_quality:     output JPEG quality, 0-100, larger is better (default 80)
'''
def detect_tracker(q_rect, img, net, LABELS, ln):
    """Run one YOLO forward pass on `img` and push every detected
    "person" bounding box, as [x, y, w, h], onto `q_rect`.

    q_rect -- queue receiving detected person rectangles
    img    -- BGR frame (numpy array) to analyze
    net    -- cv2.dnn network loaded from the Darknet YOLOv3 files
    LABELS -- class-name list aligned with the network's class scores
    ln     -- names of the network's unconnected output layers
    """
    conf_threshold = 0.5
    nms_threshold = 0.3
    img_h, img_w = img.shape[:2]

    # Build a 416x416 blob and run a single forward pass through YOLO.
    blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    t0 = time.time()
    outputs = net.forward(ln)
    t1 = time.time()
    # Report the prediction time for this frame.
    print('YOLO3模型花费 {:.2f} 秒来预测一张图片'.format(t1 - t0))

    boxes = []
    confidences = []
    classIDs = []

    # Three output layers; each detection row holds
    # [cx, cy, w, h, objectness, per-class scores...].
    for layer_output in outputs:
        for det in layer_output:
            class_scores = det[5:]
            best_cls = np.argmax(class_scores)
            best_score = class_scores[best_cls]
            # Keep only detections above the confidence threshold.
            if best_score > conf_threshold:
                # YOLO emits normalized center coords; scale back to pixels.
                scale = np.array([img_w, img_h, img_w, img_h])
                centerX, centerY, width, height = (det[0:4] * scale).astype("int")
                # Convert center/size to the top-left corner form.
                top_x = int(centerX - (width / 2))
                top_y = int(centerY - (height / 2))
                boxes.append([top_x, top_y, int(width), int(height)])
                confidences.append(float(best_score))
                classIDs.append(best_cls)

    # Non-maximum suppression removes weak / overlapping boxes.
    keep = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
    if len(keep) > 0:
        for i in keep.flatten():
            x, y = boxes[i][0], boxes[i][1]
            w, h = boxes[i][2], boxes[i][3]
            print(LABELS[classIDs[i]])
            # Only "person" is tracked (cup / cell phone also detectable).
            if LABELS[classIDs[i]] == "person":
                q_rect.put([x, y, w, h])


def detect_thread(q_rect, q_img):
    """Worker thread: load YOLOv3 once, then loop forever taking frames from
    `q_img`, running detection, and signalling completion via `release_rect`.

    q_rect -- queue that detect_tracker fills with person boxes
    q_img  -- queue the capture loop puts frames on
    """
    global release_rect
    label_path = './cfg/coco.names'
    config_path = './cfg/yolov3.cfg'
    weights_path = './cfg/yolov3.weights'

    # Load the class labels (context manager: the original leaked the handle).
    with open(label_path) as f:
        LABELS = f.read().strip().split("\n")
    # Load the model configuration and weights.
    print('从硬盘加载YOLO......')
    net = cv2.dnn.readNetFromDarknet(config_path, weights_path)

    # Resolve the names of YOLO's unconnected output layers.
    # getUnconnectedOutLayers() returns 1-based indices; depending on the
    # OpenCV version they come back flat ([200 227 254]) or nested
    # ([[200], [227], [254]]), so flatten and iterate instead of hard-coding
    # exactly three entries as the original did.
    layer_names = net.getLayerNames()
    ln = [layer_names[i - 1] for i in np.asarray(net.getUnconnectedOutLayers()).flatten()]

    while True:
        event_img.wait()   # block until the capture loop offers a frame
        img = q_img.get()
        detect_tracker(q_rect, img, net, LABELS, ln)
        release_rect = 1   # tell the capture loop the detected boxes are ready
        event_img.clear()


def person_detect_model(q_rect, q_img):
    """Capture loop: read webcam frames, periodically hand one to the YOLO
    thread, turn returned person boxes into KCF trackers, and count people
    whose tracked box rises above the horizontal mid-line.

    q_rect -- queue of [x, y, w, h] person boxes filled by detect_thread
    q_img  -- queue used to send frames to detect_thread
    """
    global release_rect
    num_colors = 80          # was `len`, which shadowed the builtin
    cnt = 0
    boxss = dict()           # tracker id -> detection box it was started from
    tracker = dict()         # tracker id -> cv2 KCF tracker instance
    next_id = 0              # monotonically increasing tracker key; the original
                             # used `i + tracker_num`, which could reuse a live key
                             # after removals decremented tracker_num
    a_adjust = 0.25          # fractions of box height used for the line test
    b_adjust = 0.75
    delay_frame = 60         # run YOLO only every N frames (it is too slow per-frame)
    total = 0
    # Open the camera.
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    COLORS = np.random.randint(0, 255, size=(num_colors, 3), dtype='uint8')
    while True:
        ret, frame = cap.read()
        # BUG FIX: validate the read BEFORE touching frame.shape -- the
        # original dereferenced a possibly-None frame first.
        if not ret or frame is None:
            print("ret", ret)
            break
        wh = frame.shape
        if cnt >= delay_frame:
            q_img.put(frame)
            event_img.set()
            cnt = 0
        cnt += 1

        if release_rect == 1:   # detection results ready (checked non-blocking)
            for _ in range(q_rect.qsize()):
                box = q_rect.get()
                # Drop boxes whose center x is within 20px of an existing
                # target: the same person detected twice.
                duplicate = any(
                    abs(box[0] + box[2] / 2 - old[0] - old[2] / 2) <= 20
                    for old in boxss.values()
                )
                if duplicate:
                    # BUG FIX: the original `break` discarded every remaining
                    # queued box; only this one should be skipped.
                    continue

                # Start tracking only targets still below the mid-line.
                if (box[1] + a_adjust * box[3] > wh[0] / 2) and (box[1] + b_adjust * box[3] > wh[0] / 2):
                    kcf = cv2.legacy.TrackerKCF_create()
                    kcf.init(frame, tuple(box))
                    boxss[next_id] = box
                    tracker[next_id] = kcf
                    next_id += 1   # ids are never reused, so keys cannot collide
                    print(len(tracker))
            release_rect = 0

        # Update every live tracker; iterate a snapshot so popping is safe
        # (the original's index loop broke after a removal, as its own
        # comment admitted).
        for tid, trac in list(tracker.items()):
            success, tbox = trac.update(frame)
            if success:
                x, y, w, h = [int(v) for v in tbox]
                color = [int(c) for c in COLORS[tid % num_colors]]
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                text = '{}'.format(tid + 1)
                cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
                # Count the target once its box has crossed above the line.
                if (tbox[1] + a_adjust * tbox[3] <= wh[0] / 2) and (tbox[1] + b_adjust * tbox[3] <= wh[0] / 2):
                    boxss.pop(tid, None)
                    tracker.pop(tid)
                    total += 1

        # Horizontal counting line across the middle of the frame.
        cv2.line(frame, (0, int(wh[0] / 2)), (int(wh[1]), int(wh[0] / 2)), (0, 0, 255), 1, 8)

        # Display the frame with the running total.
        text = "total = %2d" % total
        cv2.putText(frame, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.imshow("person", frame)
        # Quit when 'q' is pressed.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Finally, release the camera and close all windows.
    cap.release()
    cv2.destroyAllWindows()

def main():
    """Wire up the two queues and launch the capture and detection threads."""
    rect_queue = Queue()
    img_queue = Queue()

    capture_worker = threading.Thread(target=person_detect_model,
                                      args=(rect_queue, img_queue))
    detect_worker = threading.Thread(target=detect_thread,
                                     args=(rect_queue, img_queue))
    capture_worker.start()
    detect_worker.start()


if __name__ == "__main__":
    main()
