# =====================================================
# Import the required packages
from deep_sort.utils.parser import get_config
from deep_sort.deep_sort import DeepSort
from yolox.data.datasets.things_classes import THS_CLASSES
import torch
import cv2
import time
# import differenceDetection
import numpy as np
import global_val

timelist = []
nohadposes = []
nohadpos = []
borderwarn = []
bordercoll = []
speedid = {}
speedlist = {}
detectid = {}
# =====================================================

# ======================================================================================================================================
# 初始化 相关环境
# 获取 DeepSort 的参数
palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
cfg = get_config()
cfg.merge_from_file("deep_sort/configs/deep_sort.yaml")
# ==============================================================

# =================================================================================================================
# 初始化 DeepSort
deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
                    max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
                    nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
                    max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
                    use_cuda=True)


# =================================================================================================================
# def get_speed(point1,point2,interval,posid):
#     center=((point1[0]+point2[0])/2,(point1[1]+point2[1])/2)
#     w=abs(point1[0]-point2[0])
#     ava_cars_length=4.8
#     per_pixeltometer=4.8/w
#     if str(posid) not in speedid.keys():
#         speedid['{}'.format(posid)]=center[0]
#         return None
#     else:
#         m_speed=(abs(center[0]-speedid['{}'.format(posid)]))/interval
#         km_speed=3.6*m_speed
#         speedid['{}'.format(posid)]=center[0]
#         return km_speed


# ==========================================================
# Drawing helper: annotates the original frame according to the network
# output and returns the annotated image.


def plot_bboxes(image, bboxes, videonum, choicement, line_thickness=None, plot=None, speedget=None):
    # Plots one bounding box on image img
    # 如果是第一帧的话，清空上个视频的错误日志
    if plot:
        linelist = plot['linelist']
        pointlist = plot['pointlist']
        draw_index = plot['draw_index']
        typelist = plot['typelist']
    else:
        linelist = None
        pointlist = None
        draw_index = None
        typelist = None
    nohadposes.clear()
    borderwarn.clear()
    typewarn = []
    typewarn.clear()
    if videonum == 1:
        timelist.append(0)
        nohadpos.clear()
        bordercoll.clear()
        speedid.clear()
    # if len(nohadposes)>=10:
    #     nohadpos.clear()
    if len(nohadpos) >= 100:
        nohadpos.clear()
    if len(bordercoll) >= 100:
        bordercoll.clear()
    # if len(borderwarn)>=10:
    #     bordercoll.clear()
    tl = line_thickness or round(
        0.002 * (image.shape[0] + image.shape[1]) / 2) + 1  # line/font thickness
    for (x1, y1, x2, y2, cls_id, pos_id) in bboxes:
        typeflag = 0
        flag = 0
        borderwarnning = 0
        if choicement != 9:
            # and cls_id not in ['cell phone']:

            if str(pos_id) not in detectid.keys():  # speedid={}全局变量在上面初始化，记录读到的每个id的位置和时间
                start_time = time.time()
                detectid['{}'.format(pos_id)] = (start_time)  # 如果该id第一次出现则将其位置和当前时间计入
            else:
                end_time = time.time()
                start_time = detectid['{}'.format(pos_id)]
                interval_time = end_time - start_time

                # if interval_time >= 0:
                #     cv2.putText(image, '{}ID{}'.format(cls_id, pos_id),
                #                 (c1[0], c1[1] - 2), 0, tl / 3,
                #                 [0, 0, 255], thickness=tf, lineType=cv2.LINE_AA)
                # else:
                #     cv2.putText(image,
                #                 '{} ID-{} '.format(cls_id, pos_id, ),
                #                 (c1[0], c1[1] - 2), 0, tl / 3,
                #                 [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
        if choicement == 6:  # 杂物检测的类，检测时用的还是cococlasses，thsclasses是要显示的类，和其他classes在同一个路径
            # if img1 != None:
            #     img2 = image
            # else:
            #     img1 = image
            # img1 = cv2.imread("img1.jpg")
            # img2 = image
            # image = differenceDetection.soundries_detect(img1, img2)
            continue
        elif choicement == 9 and cls_id not in ['bicycle', 'car', 'motorcycle', 'bus', 'truck']:  # 不是这些类型的话不画框
            continue
        else:
            if choicement == 9:  # 速度检测
                color = (0, 255, 0)
                if speedget:  # speedget=speed_get(Dmin,Dmax,H,B0,image,speed_limit)传进来的
                    bottompoint = ((x1 + x2) / 2, y1)  # 取名取的bottom其实取得是框顶部中点作为移动距离的判断点
                    if str(pos_id) not in speedid.keys():  # speedid={}全局变量在上面初始化，记录读到的每个id的位置和时间
                        timestart = time.time()
                        speedid['{}'.format(pos_id)] = (bottompoint, timestart)  # 如果该id第一次出现则将其位置和当前时间计入speedid
                        speedlist['{}'.format(pos_id)] = None  # speedlist存放速度，当前该id第一次出现没有速度
                    else:  # 如果该id不是第一次出现就考虑要计算速度了
                        if (image.shape[1] / 7) < y2 < (image.shape[1] * 6 / 7):  # 防止图片边缘物体出现不完全时导致框变形进而影响速度计算
                            timeend = time.time()
                            timestart = speedid['{}'.format(pos_id)][1]
                            interval = timeend - timestart  # 计算当前时间与上一次该id出现时的时间间隔，如果间隔大于0.7s则计算速度

                            if interval >= 0.7:
                                point1 = speedid['{}'.format(pos_id)][0]  # 上一次位置
                                point2 = bottompoint  # 当前位置
                                speed = speedget.get_speed(point1, point2, interval)  # 计算速度
                                speedid['{}'.format(pos_id)] = (point2, timeend)  # 将该id下一次速度计算的起始位置和时间覆盖为当前的
                                speedlist['{}'.format(pos_id)] = speed  # 记录速度
            else:
                mid_x = (x1 + x2) / 2
                mid_y = (y1 + y2) / 2
                pointArray = global_val.get_pointArray('pointArray')
                max_x = pointArray[3][0]
                min_y = pointArray[0][1]
                min_x = pointArray[0][0]
                max_y = pointArray[3][1]
                if mid_x < max_x and mid_x > min_x and mid_y < max_y and mid_y > min_y:
                    if cls_id in ['sleep', 'other', 'phone', 'fire', 'smoking', 'smoke', 'hat']:
                        # if mid_x < max_x and mid_x > min_x and mid_y < max_y and mid_y > min_y:
                        color = (0, 0, 255)
                        if pos_id in nohadpos:
                            if interval_time <= 2:
                                color = (0, 255, 0)
                        else:
                            if interval_time > 2:
                                nohadposes.append(pos_id)
                                nohadpos.append(pos_id)
                            else:
                                color = (0, 255, 0)
                    else:
                        color = (0, 255, 0)
                        if pos_id in nohadpos:
                            nohadpos.remove(pos_id)
            c1, c2 = (x1, y1), (x2, y2)
            w = abs(x2 - x1)
            bottompoint = ((x1 + x2) / 2, y2)
            if pointlist and linelist:
                for linenum in range(0, len(linelist)):
                    line = linelist[linenum]
                    type = typelist[linenum]
                    pointindex = draw_index[linenum]
                    point1 = pointlist[pointindex]
                    point2 = pointlist[pointindex - 1]
                    if len(line) == 1:
                        distant = abs(line[0] - bottompoint[0])
                        flag = 1
                    elif -0.025 < line[0] < 0.025:
                        distant = abs(line[1] - bottompoint[1])
                        flag = 2
                    else:
                        borderpoint_y = line[0] * bottompoint[0] + line[1]
                        borderpoint_x = (bottompoint[1] - line[1]) / line[0]
                        distant = min(abs(borderpoint_y - bottompoint[1]), abs(borderpoint_x - bottompoint[0]))
                        if distant == abs(borderpoint_x - bottompoint[0]):
                            flag = 1
                        elif distant == abs(borderpoint_y - bottompoint[1]):
                            flag = 2
                    if flag == 1:
                        if min(point1[1], point2[1]) < bottompoint[1] < max(point1[1], point2[1]):
                            if distant < (w / 5):
                                borderwarnning = 1
                                if typeflag == 1 or typeflag == 2:
                                    pass
                                else:
                                    if type == 0:
                                        typeflag = 0
                                    elif type == 1:
                                        typeflag = 1
                                    elif type == 2:
                                        typeflag = 2

                    elif flag == 2:
                        if min(point1[0], point2[0]) < bottompoint[0] < max(point1[0], point2[0]):
                            if distant < (w / 5):
                                borderwarnning = 1
                                if typeflag == 1 or typeflag == 2:
                                    pass
                                else:
                                    if type == 0:
                                        typeflag = 0
                                    elif type == 1:
                                        typeflag = 1
                                    elif type == 2:
                                        typeflag = 2

            if borderwarnning == 1:
                color = (255, 0, 0)
                if pos_id in bordercoll:
                    pass
                else:
                    bordercoll.append(pos_id)
                    borderwarn.append((pos_id, typeflag))
            elif borderwarnning == 0:
                if pos_id in bordercoll:
                    bordercoll.remove(pos_id)

            cv2.rectangle(image, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
            tf = max(tl - 1, 1)  # font thickness
            t_size = cv2.getTextSize(cls_id, 0, fontScale=tl / 3, thickness=tf)[0]
            c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
            cv2.rectangle(image, c1, c2, color, -1, cv2.LINE_AA)  # filled
        if choicement == 9 and speedlist['{}'.format(pos_id)]:
            if (image.shape[1] / 7) < y2 < (image.shape[1] * 6 / 7):
                if speedlist['{}'.format(pos_id)] > speedget.speed_limit:  # 超速标红
                    cv2.putText(image, '{} ID-{} {}km/h'.format(cls_id, pos_id, int(speedlist['{}'.format(pos_id)])),
                                (c1[0], c1[1] - 2), 0, tl / 3,
                                [0, 0, 255], thickness=tf, lineType=cv2.LINE_AA)
                else:
                    cv2.putText(image,
                                '{} ID-{} {}km/h'.format(cls_id, pos_id, int(speedlist['{}'.format(pos_id)])),
                                (c1[0], c1[1] - 2), 0, tl / 3,
                                [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
            else:
                cv2.putText(image, '{} ID-{}'.format(cls_id, pos_id), (c1[0], c1[1] - 2), 0, tl / 3,
                            [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
        else:
            cv2.putText(image, '{} ID-{}'.format(cls_id, pos_id), (c1[0], c1[1] - 2), 0, tl / 3,
                        [0, 255, 255], thickness=tf, lineType=cv2.LINE_AA)

    if pointlist:
        typeindex = 0
        for i in range(1, len(pointlist)):
            if i in draw_index:
                type = typelist[typeindex]
                typeindex = typeindex + 1
                if type == 0:
                    cv2.line(image, pointlist[i], pointlist[i - 1], (255, 0, 0), thickness=3)
                elif type == 1:
                    cv2.line(image, pointlist[i], pointlist[i - 1], (0, 255, 0), thickness=3)
                elif type == 2:
                    cv2.line(image, pointlist[i], pointlist[i - 1], (0, 0, 255), thickness=3)

    return image, nohadposes, borderwarn


# ==========================================================


# ==============================================================================================================================
# 定义检测器
def update_tracker(target_detector, image, vedionum, choicement, plot=None, speedget=None):
    """Detect objects in one frame, track them with DeepSort, and draw results.

    Parameters
    ----------
    target_detector : detector object exposing .detect(image) and a
        .faceTracker dict of per-id liveness counters.
    image : current BGR frame.
    vedionum : frame counter forwarded to plot_bboxes (1 resets its state).
    choicement : detection-mode selector forwarded to plot_bboxes.
    plot : optional border-line description forwarded to plot_bboxes.
    speedget : optional speed-estimation helper forwarded to plot_bboxes.

    Returns
    -------
    (image, new_faces, face_bboxes, nohadposes, borderwarn) : the annotated
    frame plus the warning lists produced by plot_bboxes. new_faces and
    face_bboxes are always empty here and kept for interface compatibility.
    """
    new_faces = []
    face_bboxes = []

    # Run the YOLO detector; each detection is (x1, y1, x2, y2, cls_id, conf).
    _, detections = target_detector.detect(image)

    # Convert corner boxes to center-x / center-y / width / height for DeepSort.
    xywh_rows = [[int((left + right) / 2), int((top + bottom) / 2), right - left, bottom - top]
                 for left, top, right, bottom, _label, _score in detections]
    scores = [det[5] for det in detections]
    labels = [det[4] for det in detections]

    # Hand the detections (as torch tensors) to the DeepSort tracker.
    outputs = deepsort.update(torch.Tensor(xywh_rows), torch.Tensor(scores), labels, image)

    # Re-pack the tracker output as (x1, y1, x2, y2, cls, track_id) tuples
    # and collect the ids that are alive in this frame.
    bboxes2draw = []
    current_ids = []
    for left, top, right, bottom, label, track_id in list(outputs):
        bboxes2draw.append((left, top, right, bottom, label, track_id))
        current_ids.append(track_id)

    # Age out ids not seen in this frame; drop any whose counter falls below -5.
    tracker_ages = target_detector.faceTracker
    stale_ids = []
    for history_id in tracker_ages:
        if history_id not in current_ids:
            tracker_ages[history_id] -= 1
        if tracker_ages[history_id] < -5:
            stale_ids.append(history_id)
    for stale_id in stale_ids:
        tracker_ages.pop(stale_id)
        print('-[INFO] Delete track id:', stale_id)

    # Draw the boxes and warnings onto the frame.
    image, nohadposes, borderwarn = plot_bboxes(image, bboxes2draw, vedionum, choicement=choicement,
                                                line_thickness=None, plot=plot, speedget=speedget)

    return image, new_faces, face_bboxes, nohadposes, borderwarn