# -*- coding:UTF-8 -*-
"""
@author: Louis_Yang
@contact: 302810009@qq.com
@time: 2022/4/1 19:11
@file: track.py
@desc: 采用粒子滤波的目标跟踪算法
"""
import cv2 as cv
from yolo import YOLO
import numpy as np
from os.path import basename
from PIL import Image
import operator
import time
import gc
from particle_filter import get_object_area
from particle_filter import get_hist
from particle_filter import calcu_similar_hist
from particle_filter import init_particle
from particle_filter import particle_transition
from particle_filter import normalize_weights
from particle_filter import resample
from particle_filter import particle_sort
from particle_filter import get_dhash
from particle_filter import calcu_similar_dhash

if __name__ == '__main__':

    # ------------------------------------------------------------------ #
    # Configuration
    # ------------------------------------------------------------------ #
    video_path = "./video/test_video5.mp4"
    video_save_path = "./video_out/{}".format(basename(video_path))
    PARTICLE_NUMBER = 120  # particles spawned for every object in the library
    one_similar_thre = 0.81  # first-pass (histogram-only) similarity threshold
    two_similar_thre = 0.69  # second-pass (particle-filter) similarity threshold
    new_object_condition = 0.58  # a detection is a NEW object only if its similarity to every library member is below this
    track_limit = 40  # frames an object may stay unmatched before it is deleted
    object_library_list = []  # library of tracked objects (list of dicts)
    frame_count = 0  # number of frames processed so far
    video_fps = 22.0  # frame rate of the saved output video
    frame_width = 0  # width of each frame (set after the probe read)
    frame_height = 0  # height of each frame (set after the probe read)
    transition_count = 3  # perturbation steps applied to each particle per frame

    yolo = YOLO()  # initialize the detection network

    # Open the source video and, when a save path is given, the output writer.
    capture = cv.VideoCapture(video_path)
    out = None  # output video stream (None when not saving)
    if video_save_path != "":
        fourcc = cv.VideoWriter_fourcc(*'mp4v')  # output codec
        size = (int(capture.get(cv.CAP_PROP_FRAME_WIDTH)), int(capture.get(cv.CAP_PROP_FRAME_HEIGHT)))
        out = cv.VideoWriter(video_save_path, fourcc, video_fps, size)

    # Probe one frame to make sure the source can actually be read.
    ref, frame = capture.read()
    if not ref:
        raise ValueError("未能正确读取摄像头（视频），请注意是否正确安装摄像头（是否正确填写视频路径）。")
    frame_width = frame.shape[1]
    frame_height = frame.shape[0]
    fps = 0.0  # measured inference fps (hardware dependent), NOT the saved-video fps

    # ------------------------------------------------------------------ #
    # Main loop: process the video frame by frame
    # ------------------------------------------------------------------ #
    while True:
        t1 = time.time()
        ref, frame = capture.read()
        if not ref:
            break
        frame_count += 1
        # BGR -> RGB, then to a PIL Image for the detector.
        frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
        frame = Image.fromarray(np.uint8(frame))
        # Run detection; each entry looks like ['person', 0.986, y1, x1, y2, x2]
        frame, detection_lis = yolo.detect_image(frame)
        # Keep pedestrian detections only.
        detection_lis = [object_data for object_data in detection_lis if object_data[0] == 'person']

        # Back to numpy / BGR so OpenCV can draw and display.
        frame = np.array(frame)
        frame = cv.cvtColor(frame, cv.COLOR_RGB2BGR)

        if frame_count == 1:
            # First frame: seed the object library straight from the detections.
            for step, object_data in enumerate(detection_lis):
                # Crop the detected region and extract its features.
                object_area = get_object_area(frame, object_data[2], object_data[3], object_data[4], object_data[5])
                object_hist = get_hist(object_area)  # colour histogram
                object_dhash = get_dhash(object_area)  # difference hash
                # Each object gets a particle cloud initialized exactly on top
                # of itself; the initial weight is 0.
                particle = init_particle(PARTICLE_NUMBER,
                                         [object_data[3], object_data[2]], [object_data[5], object_data[4]],
                                         [object_data[3], object_data[2]], [object_data[5], object_data[4]], 1.0,
                                         [object_data[3], object_data[2]], [object_data[5], object_data[4]], 1.0,
                                         object_hist, object_dhash, 0)
                object_info = {'ID': step, 'label': object_data[0], 'score': object_data[1], 'find_out': False,
                               'track_limit': track_limit, 'hist': object_hist, 'dhash': object_dhash,
                               'particle': particle,
                               'y1': object_data[2], 'x1': object_data[3],
                               'y2': object_data[4], 'x2': object_data[5]}
                object_library_list.append(object_info)

        # From the second frame on: match detections to the library, then add new objects.
        else:
            for lib_step, each_object in enumerate(object_library_list):
                similar_lis = []  # per-detection similarity values (temporary)
                hist_lis = []  # per-detection colour histograms (temporary)
                dhash_lis = []  # per-detection difference hashes (temporary)
                # --------------------------------------------------------- #
                # 1. Fast pass: histogram-only comparison against every
                #    detection; a highly similar one is accepted immediately.
                # --------------------------------------------------------- #
                for step, object_data in enumerate(detection_lis):
                    object_area = get_object_area(frame, object_data[2], object_data[3], object_data[4], object_data[5])
                    object_hist = get_hist(object_area)
                    object_dhash = get_dhash(object_area)
                    hist_lis.append(object_hist)
                    dhash_lis.append(object_dhash)
                    similar = calcu_similar_hist(object_hist, each_object['hist'])
                    similar_lis.append(similar)
                if not similar_lis:  # no detections this frame
                    continue
                max_index, max_similar = max(enumerate(similar_lis), key=operator.itemgetter(1))

                if max_similar >= one_similar_thre:
                    # Same object: tag the detection with the library ID and refresh
                    # the stored features (the object's size/appearance may change).
                    detection_lis[max_index].append(str(object_library_list[lib_step]['ID']))
                    object_library_list[lib_step]['find_out'] = True
                    object_library_list[lib_step]['label'] = detection_lis[max_index][0]
                    object_library_list[lib_step]['score'] = detection_lis[max_index][1]
                    object_library_list[lib_step]['y1'] = detection_lis[max_index][2]
                    object_library_list[lib_step]['x1'] = detection_lis[max_index][3]
                    object_library_list[lib_step]['y2'] = detection_lis[max_index][4]
                    object_library_list[lib_step]['x2'] = detection_lis[max_index][5]
                    object_library_list[lib_step]['hist'] = hist_lis[max_index]
                    object_library_list[lib_step]['dhash'] = dhash_lis[max_index]

                # --------------------------------------------------------- #
                # 2. Slow pass: no direct match — fall back to the particle
                #    filter prediction for this object.
                # --------------------------------------------------------- #
                else:
                    # Drop the first-pass temporaries before the heavy work.
                    del similar_lis, hist_lis, dhash_lis
                    gc.collect()
                    # Scatter this object's particle cloud.
                    for par_step in range(len(each_object['particle'])):
                        # BUGFIX: chain the transitions through the stored particle
                        # (the original re-transitioned the stale loop variable, so
                        # the transition_count perturbations did not accumulate).
                        for _ in range(transition_count):
                            each_object['particle'][par_step] = particle_transition(
                                each_object['particle'][par_step], frame_width, frame_height)
                        moved = each_object['particle'][par_step]
                        y1 = moved.now_left_corner[1]
                        y2 = moved.now_right_corner[1]
                        x1 = moved.now_left_corner[0]
                        x2 = moved.now_right_corner[0]
                        par_area = frame[y1:y2, x1:x2]  # image patch under the moved particle
                        moved.hist = get_hist(par_area)  # store the patch histogram
                        moved.dhash = get_dhash(par_area)  # store the patch dhash
                        similar1 = calcu_similar_hist(moved.hist, each_object['hist'])
                        similar2 = calcu_similar_dhash(moved.dhash, each_object['dhash'])
                        # Particle weight = mean of histogram and dhash similarity.
                        moved.weight = (similar1 + similar2) / 2.0

                    # Normalize weights, importance-resample, sort by weight (desc).
                    each_object['particle'] = normalize_weights(each_object['particle'])
                    each_object['particle'] = resample(each_object['particle'])
                    each_object['particle'] = particle_sort(each_object['particle'])
                    # After sorting, the first particle is the most likely position.
                    best = each_object['particle'][0]
                    par_area = frame[best.now_left_corner[1]:best.now_right_corner[1],
                                     best.now_left_corner[0]:best.now_right_corner[0]]
                    maxPar_hist = get_hist(par_area)
                    maxPar_dhash = get_dhash(par_area)
                    similar_lis = []
                    hist_lis = []
                    dhash_lis = []
                    # Compare the best particle against the still-unmatched detections.
                    for object_data in detection_lis:
                        # Length 7 means already tagged with an ID; push placeholders
                        # so the max-index still lines up with detection_lis.
                        if len(object_data) == 7:
                            similar_lis.append(0.0)
                            hist_lis.append([])
                            dhash_lis.append([])
                            continue
                        object_area = get_object_area(frame, object_data[2], object_data[3], object_data[4],
                                                      object_data[5])
                        object_hist = get_hist(object_area)
                        object_dhash = get_dhash(object_area)
                        hist_lis.append(object_hist)
                        dhash_lis.append(object_dhash)
                        similar1 = calcu_similar_hist(object_hist, maxPar_hist)
                        similar2 = calcu_similar_dhash(object_dhash, maxPar_dhash)
                        similar_lis.append((similar1 + similar2) / 2.0)
                    if not similar_lis:
                        continue
                    max_index, max_similar = max(enumerate(similar_lis), key=operator.itemgetter(1))
                    if max_similar >= two_similar_thre:
                        # Red box marks a match recovered via the particle filter.
                        frame = cv.rectangle(frame, (detection_lis[max_index][3], detection_lis[max_index][2]),
                                             (detection_lis[max_index][5], detection_lis[max_index][4]),
                                             (0, 0, 255), 5)
                        detection_lis[max_index].append(str(object_library_list[lib_step]['ID']))
                        object_library_list[lib_step]['find_out'] = True
                        object_library_list[lib_step]['hist'] = hist_lis[max_index]
                        object_library_list[lib_step]['dhash'] = dhash_lis[max_index]
                        object_library_list[lib_step]['label'] = detection_lis[max_index][0]
                        object_library_list[lib_step]['score'] = detection_lis[max_index][1]
                        object_library_list[lib_step]['y1'] = detection_lis[max_index][2]
                        object_library_list[lib_step]['x1'] = detection_lis[max_index][3]
                        object_library_list[lib_step]['y2'] = detection_lis[max_index][4]
                        object_library_list[lib_step]['x2'] = detection_lis[max_index][5]
                    del similar_lis, hist_lis, dhash_lis
                    gc.collect()  # free the temporaries

            # --------------------------------------------------------------- #
            # 3. Any detection still unmatched may be a brand-new object: it is
            #    added when it is dissimilar to EVERY library member.
            # --------------------------------------------------------------- #
            for step, object_data in enumerate(detection_lis):
                if len(object_data) == 7:  # already matched to an ID
                    continue
                object_area = get_object_area(frame, object_data[2], object_data[3], object_data[4], object_data[5])
                object_hist = get_hist(object_area)
                object_dhash = get_dhash(object_area)

                count = 0  # number of library members this detection is dissimilar to
                member_num = len(object_library_list)
                for lib_step, each_object in enumerate(object_library_list):
                    similar1 = calcu_similar_hist(object_hist, each_object['hist'])
                    similar2 = calcu_similar_dhash(object_dhash, each_object['dhash'])
                    sum_similar = (similar1 + similar2) / 2.0
                    if sum_similar < new_object_condition:
                        count += 1
                        continue
                    else:
                        break  # similar to an existing member — not a new object
                if count == member_num:  # dissimilar to every member: register it
                    particle = init_particle(PARTICLE_NUMBER,
                                             [object_data[3], object_data[2]], [object_data[5], object_data[4]],
                                             [object_data[3], object_data[2]], [object_data[5], object_data[4]], 1.0,
                                             [object_data[3], object_data[2]], [object_data[5], object_data[4]], 1.0,
                                             object_hist, object_dhash, 0)
                    # BUGFIX: start from ID 0 when the library is empty instead of
                    # raising IndexError on object_library_list[-1:][0].
                    new_ID = object_library_list[-1]['ID'] + 1 if object_library_list else 0
                    object_info = {'ID': new_ID, 'label': object_data[0], 'score': object_data[1], 'find_out': True,
                                   'track_limit': track_limit, 'hist': object_hist, 'particle': particle,
                                   'dhash': object_dhash, 'y1': object_data[2], 'x1': object_data[3],
                                   'y2': object_data[4], 'x2': object_data[5]}
                    object_library_list.append(object_info)
                    print('add new object {}-{}'.format(new_ID, object_data[0]))

            # Library bookkeeping: matched objects get their counter reset; unmatched
            # ones count down and are dropped when the counter reaches zero.
            # BUGFIX: rebuild the list instead of deleting entries while iterating —
            # `del lst[i]` inside enumerate() silently skips the next element.
            surviving_objects = []
            for each_object in object_library_list:
                if each_object['find_out']:
                    each_object['track_limit'] = track_limit  # reset the countdown
                    each_object['find_out'] = False
                    surviving_objects.append(each_object)
                elif each_object['track_limit'] > 0:
                    each_object['track_limit'] -= 1  # one more frame missed
                    surviving_objects.append(each_object)
                else:
                    print("删除 {} 号目标".format(each_object['ID']))
            object_library_list = surviving_objects

            gc.collect()  # free memory

        # BUGFIX: drawing/display/saving moved out of the else-branch so the very
        # first frame is also shown and written to the output video.
        for step, object_data in enumerate(detection_lis):
            if len(object_data) == 7:
                # Tracked object: green box, black label background, yellow ID text.
                frame = cv.rectangle(frame, (object_data[3], object_data[2]), (object_data[5], object_data[4]),
                                     (0, 255, 0), 2)
                frame = cv.rectangle(frame, (object_data[3], object_data[4]),
                                     (object_data[3] + 40, object_data[4] - 30), (0, 0, 0), -1)
                cv.putText(frame, object_data[6], (object_data[3] + 2, object_data[4] - 6), cv.FONT_HERSHEY_SIMPLEX,
                           1, (0, 255, 255), 2)
            elif object_data[0] == 'person':
                # Untracked detection: plain green box.
                frame = cv.rectangle(frame, (object_data[3], object_data[2]), (object_data[5], object_data[4]),
                                     (0, 255, 0), 2)

        # Running average of the measured inference fps.
        fps = (fps + (1. / (time.time() - t1))) / 2
        frame = cv.putText(frame, "fps= %.2f" % fps, (0, 40), cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        frame = cv.putText(frame, "track_member=%d" % len(object_library_list), (0, 75),
                           cv.FONT_HERSHEY_SIMPLEX, 1, (10, 255, 0), 2)
        cv.namedWindow("video", 0)
        cv.imshow("video", frame)

        c = cv.waitKey(1) & 0xff
        if video_save_path != "":
            out.write(frame)

        if c == 27:  # ESC quits
            break

    # BUGFIX: release resources on every exit path — the original released the
    # capture only on ESC and never released the writer (risking a truncated
    # output file) or destroyed the display window.
    capture.release()
    if out is not None:
        out.release()
    cv.destroyAllWindows()
