# -*- coding: utf-8 -*-
"""
yolov8 track推理
测试
"""
import glob
import os.path
import sys
from collections import defaultdict

import cv2
import math
import numpy as np
from ultralytics import YOLO



# # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
#
# # Default Ultralytics settings for BoT-SORT tracker when using mode="track"
# # For documentation and examples see https://docs.ultralytics.com/modes/track/
# # For BoT-SORT source code see https://github.com/NirAharon/BoT-SORT
#
# tracker_type: botsort # tracker type, ['botsort', 'bytetrack']
# track_high_thresh: 0.25 # threshold for the first association (IoU-based)
# track_low_thresh: 0.1 # threshold for the second association (ReID-based)
# new_track_thresh: 0.25 # set a bit higher; threshold for init new track if the detection does not match any tracks
# track_buffer: 30 # buffer to calculate the time when to remove tracks
# match_thresh: 0.8 # threshold for matching tracks
# fuse_score: True # Whether to fuse confidence scores with the iou distances before matching
# # min_box_area: 10  # threshold for min box areas(for tracker evaluation, not used for now)
#
# # BoT-SORT settings
# gmc_method: sparseOptFlow # method of global motion compensation
# # ReID model related thresh
# proximity_thresh: 0.5 # minimum IoU for valid match with ReID
# appearance_thresh: 0.8 # minimum appearance similarity for ReID
# with_reid: False
# model: auto # uses native features if detector is YOLO else yolo11n-cls.pt



# # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
#
# # Default Ultralytics settings for ByteTrack tracker when using mode="track"
# # For documentation and examples see https://docs.ultralytics.com/modes/track/
# # For ByteTrack source code see https://github.com/ifzhang/ByteTrack
#
# tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
# track_high_thresh: 0.25 # threshold for the first association
# track_low_thresh: 0.1 # threshold for the second association
# new_track_thresh: 0.25 # threshold for init new track if the detection does not match any tracks
# track_buffer: 30 # buffer to calculate the time when to remove tracks
# match_thresh: 0.8 # threshold for matching tracks
# fuse_score: True # Whether to fuse confidence scores with the iou distances before matching
# # min_box_area: 10  # threshold for min box areas(for tracker evaluation, not used for now)

def get_track(model_path):
    """
    Build and return a stateful per-frame tracking closure around a YOLO model.

    Args:
        model_path: an ultralytics ``YOLO`` instance, or a path to a weights
            file that one will be loaded from.

    Returns:
        track: callable ``track(frame, ...) -> (show_img, out)``; see its
            docstring. Track history and the color palette live in this
            enclosing scope, so reuse ONE closure for all frames of a video.
    """
    if isinstance(model_path, YOLO):
        model = model_path
    else:
        model = YOLO(model_path) # detection / segmentation / pose weights all work here

    # Center-point trail per track: track_id -> [(cx, cy), (cx, cy), ...]
    track_history = defaultdict(lambda: []) # track_id:[(cx cy) (cx cy) ...]

    # Pre-generate 100 visually distinct random BGR colors: resample until the
    # candidate differs from every chosen color by at least 15 in L1 distance.
    colors = []
    for i in range(100):
        dif = 0
        while dif < 5*3:
            new_color = [np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255)]
            # minimum L1 distance from the candidate to the existing palette
            dif = 255*3
            for sc in colors:
                t = abs(sc[0]-new_color[0]) + abs(sc[1]-new_color[1]) + abs(sc[2]-new_color[2])
                dif = min(dif, t)
        colors.append(new_color)

    def track(frame,
              realtime_conf_thres=0.35,
              iou_thres=0.8,
              roi=None,
              pad=0,
              isMask2Xy=False,
              isRecordTrack=True, # whether to record the track trail
              isDraw=True,
              isShowMask=False,
              show_img=None,
              thickness_rate=0.001,
              ):
        '''
        Run tracking on one frame and optionally draw boxes and trails.

        NOTE(review): realtime_conf_thres, iou_thres, pad, isMask2Xy and
        isShowMask are currently ignored -- conf/iou are hard-coded in the
        model.track() call below. Confirm intent before relying on them.

        Args:
            frame: BGR image (numpy array) as read by cv2.
            roi: optional [x1, y1, x2, y2]; only drawn as a green rectangle.
            isRecordTrack: append this frame's box centers to the per-track
                history (and draw the trails when isDraw is True).
            isDraw: when False, nothing is drawn and show_img is None.
            show_img: optional canvas to draw on; defaults to frame.copy().
            thickness_rate: line thickness as a fraction of the image diagonal.

        Returns:
            show_img: annotated image, or None when isDraw is False.
            out: [[xyxy],cls_ind, clsname,conf,track_id] per detection,
                 types [list(1*4),int,str,float,int]; track_id is -1 when the
                 tracker assigned no id this frame.
        '''
        # results = model.track(frame, persist=True)
        # results = model.track(frame, persist=True, imgsz=5120//2, conf=0.5, iou=0.2, device='0',tracker="bytetrack.yaml")
        # persist=True keeps tracker state across calls (required for video).
        results = model.track(frame, persist=True, imgsz=640, conf=0.5, iou=0.2, device='0',
                              tracker="bytetrack.yaml")
        # Get the boxes and track IDs
        boxes = results[0].boxes.cpu().numpy()
        names = results[0].names

        out = []
        for i in range(boxes.shape[0]):
            xyxy = boxes.xyxy[i, :].tolist()
            conf = boxes.conf[i]
            cls_ind = int(boxes.cls[i])
            cls_name = names[cls_ind]
            # boxes.id is None when the tracker produced no ids for this frame
            track_id = int(boxes.id[i]) if boxes.id is not None else -1
            if isRecordTrack:
                cx, cy = (xyxy[0]+xyxy[2])/2, (xyxy[1]+xyxy[3])/2
                # NOTE(review): this local 'track' shadows the enclosing
                # function's name; harmless here (no recursion) but confusing.
                track = track_history[track_id]
                track.append((cx, cy))
                if len(track) > 1000:  # cap the stored trail length per track
                    track.pop(0)

            out.append([xyxy, cls_ind, cls_name, conf, track_id])

        # draw
        if not isDraw:  # drawing disabled
            show_img = None
        else:
            if show_img is None:
                show_img = frame.copy()

            # image diagonal, used to scale font size and line thickness
            img_diag = np.sqrt(show_img.shape[0] ** 2 + show_img.shape[1] ** 2)
            # print(img_diag)
            for ind, (xyxy, cls_ind, cls_name, conf, track_id) in enumerate(out):

                # track_id == -1 maps to colors[-1] (the last palette entry)
                track_color = colors[track_id % len(colors)]
                px1, py1, px2, py2 = int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])
                # print(show_img.shape)
                cv2.putText(show_img, f'track_{track_id}_{str(np.round(conf, 2))}_{cls_name}', (px1, py1),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, fontScale=max(1, int(img_diag * 0.0005)),
                            color=track_color,
                            thickness=max(1, int(img_diag * thickness_rate)))
                # cv2.rectangle(show_img)
                cv2.rectangle(show_img, (px1, py1), (px2, py2), track_color, max(1, int(img_diag * thickness_rate)))
                if isRecordTrack and track_id != -1:
                    # Draw the tracking lines
                    # points = np.hstack(track_history[track_id]).astype(np.int32).reshape((-1, 1, 2))
                    # cv2.polylines(show_img, [points], isClosed=False, color=(0, 0, 250), thickness=5)
                    # cx,cy = map(int,(track_history[track_id][-1]))
                    pts = track_history[track_id]

                    # NOTE(review): cid is unused (leftover from the
                    # commented-out grayscale color experiment below).
                    cid = min(20,track_id)
                    # cid = track_id
                    # track_color = (cid * 255 // 20, cid * 255 // 20, cid * 255 // 20)
                    # trail: a dot at each recorded center, joined by segments
                    for kp_ind, (kpx,kpy) in enumerate(pts):
                        kpx, kpy = map(int, (kpx, kpy))
                        cv2.circle(show_img, (kpx, kpy), 10, track_color, -1)
                        if kp_ind == 0:
                            continue
                        pre_kpx, pre_kpy = map(int, (pts[kp_ind - 1]))
                        cv2.line(show_img, (pre_kpx, pre_kpy), (kpx, kpy), track_color, 5)

            if roi is not None:
                cv2.rectangle(show_img, (roi[0], roi[1]), (roi[2], roi[3]), (0, 255, 0),
                              max(1, int(img_diag * thickness_rate)))
        return show_img, out

    return track


class test:
    '''
    Test harness: run a detection/tracking engine over videos or single
    images and save the visualized results to disk.
    '''

    def __init__(self, engine):
        '''
        :param engine: callable(frame) -> (show_img, out), e.g. the closure
            returned by get_track().
        '''
        self.engine = engine
        self.DEBUG = True  # when True, also dump each processed frame as a jpg
        self.video_dir = '/data1/xiancai/BABY_DATA/other/test/Video2DeepCam/'

    def test_video_one(self,
                       video_path='/data1/xiancai/FACE_ANGLE_DATA/other/test/face_angle_test1.mp4',
                       save_path='/data1/xiancai/BABY_DATA/other/test/Video2DeepCam/res_10A4BE72856C_monitoringOff_1618593172930.mp4',
                       down_scale=10,
                       test_total=100,
                       ):
        '''
        Detect/track through one video, saving the annotated result.

        Only every `down_scale`-th frame is processed (temporal downsampling),
        and at most `test_total` frames are processed in total.

        :param video_path: input video path.
        :param save_path: output (annotated) video path.
        :param down_scale: process one frame out of every `down_scale`.
        :param test_total: maximum number of frames to process.
        :return: None
        '''
        cap = cv2.VideoCapture(video_path)
        # frame rate / total frames / frame width / frame height of the input
        fps, total = cap.get(cv2.CAP_PROP_FPS), int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        w, h = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        mp4 = cv2.VideoWriter_fourcc(*'mp4v')
        # output fps is reduced to match the frame skipping (never below 1)
        res = cv2.VideoWriter(save_path, mp4, max(1, fps / down_scale), (w, h), True)  # size is (W, H)
        print(f'fps: {fps}, total: {total}, w: {w}, h: {h}')

        numb = 0
        while cap.isOpened() and numb < test_total * down_scale:
            numb += 1
            ret, frame = cap.read()
            if numb % down_scale != 0:
                continue  # skip frames between samples
            if not ret:
                break  # end of stream
            print(f'{numb}/{total},frame.shape:{frame.shape}')
            # detect + draw
            show_img, out = self.engine(frame)
            print('')
            if show_img is None:
                continue  # engine did not draw; nothing to save for this frame
            if self.DEBUG:
                # additionally save this annotated frame as a numbered jpg
                save_dir = os.path.dirname(save_path)
                video_name = os.path.basename(save_path)
                save_frames_path = f'{save_dir}/{video_name}_track_frames'
                os.makedirs(save_frames_path, exist_ok=True)
                cv2.imwrite(f'{save_frames_path}/{str(numb).zfill(6)}.jpg', show_img)
            res.write(show_img)  # append the annotated frame to the mp4

        cap.release()
        res.release()
        print('Done.')

    def test_video_mult(self):
        '''Run test_video_one over every video under self.video_dir.'''
        ls = glob.glob(self.video_dir + '*')
        for ind, path in enumerate(ls):
            print(f'{ind}/{len(ls)} video:')
            # save next to the source, under res/, with a fixed prefix
            save_path = '/'.join(path.split('/')[:-1]) + '/res/320320_320320_res_' + path.split('/')[-1]
            self.test_video_one(path, save_path)

    def test_img_one(self, img_path):
        '''
        Detect one image and save the annotated result next to the input,
        with the model names prefixed to the file name.

        NOTE(review): relies on self.engine exposing rep_inf.pt_path and
        model_path attributes; the closure returned by get_track() does not
        have them -- confirm which engine this path is meant for.
        '''
        rep_model_name = self.engine.rep_inf.pt_path.split('/')[-1]
        det_model_name = self.engine.model_path.split('/')[-1]
        img_name = img_path.split('/')[-1]
        save_path = img_path[:-len(img_name)] + rep_model_name + '_' + det_model_name + '_' + img_name
        img = cv2.imread(img_path)
        # detect
        show_img, out = self.engine(img)
        # save
        cv2.imwrite(save_path, show_img)
        print(f'saved to {save_path}')

    def test_img_mult(self, imgs_glob):
        '''Run test_img_one over every file matching imgs_glob.'''
        # e.g. imgs_glob='/data1/xiancai/FACE_ANGLE_DATA/other/test_04_13/SideFace_OImages/*'
        for img_path in glob.glob(imgs_glob):
            self.test_img_one(img_path)

if __name__ == '__main__':
    # Build a tracking closure from local weights and run it over one video;
    # earlier experiment invocations are kept below as commented-out code.
    # test(
    #     get_track(model_path=r"D:\data\241010test_project_pose\trainV8Pose_hand\models\yolov8nPose_320_hand8\weights\best.pt")
    # ).test_video_one(
    #     video_path=r'D:\data\track\t1.mp4',
    #     save_path=r'D:\data\track\res_t1.mp4')
    #
    # test(
    #     get_track(model_path=rf"D:\CODE\ZXC\project_rbao\yolo11m-pose.pt")
    # ).test_video_one(
    #     # video_path=r"D:\DATA\20250519RENBAO\zxc\24\t23724-2025-05-13_16-48-30.mp4",
    #     video_path=r"D:\DATA\20250519RENBAO\trainV8Pose_closePeople\caitu\64\t23721-2025-06-04_09-57-50.mp4",
    #     save_path=r"D:\DATA\20250519RENBAO\trainV8Pose_closePeople\caitu\64\track_t23721-2025-06-04_09-57-50.mp4")


    test(
        get_track(model_path=rf"D:\DATA\20250611HKBZ\trainV8Seg_allv2\models\yolov8mSeg_640_allv22\weights\best.pt")
    ).test_video_one(
        # video_path=r"D:\DATA\20250519RENBAO\zxc\24\t23724-2025-05-13_16-48-30.mp4",
        video_path=r"D:\DATA\20250611HKBZ\caitu\20250619\action\2\DengJiQiao2JiuWei.mp4",
        save_path=r"D:\DATA\20250611HKBZ\caitu\20250619\action\2\track_DengJiQiao2JiuWei.mp4")

    # test().test_img_one(img_path='/data1/xiancai/FACE_ANGLE_DATA/other/compare/微信图片_20220304170522.png')
    # # #
    # test().test_img_one(img_path='/data1/xiancai/FACE_ANGLE_DATA/other/compare/微信图片_20220304170515.jpg')
    #

    #
    # test().test_img_one(img_path='/data1/xiancai/FACE_ANGLE_DATA/other/compare/微信图片_20220314184240.jpg',)
    #
    # test().test_img_one(img_path='/data1/xiancai/FACE_ANGLE_DATA/other/compare/微信图片_20220323140838.jpg')
    #
    # test().test_img_one(img_path='/data1/xiancai/FACE_ANGLE_DATA/other/test_04_13/0413_1.jpg')

    # test().test_img_mult(imgs_glob='/data1/xiancai/FACE_ANGLE_DATA/other/test_04_13/SideFace_OImages/S*')