import os.path
import time
from collections import defaultdict

import cv2
import numpy as np
import torch
from PySide6.QtCore import QThread, Signal, QMutexLocker, QMutex, QWaitCondition
from pathlib import Path

from ultralytics import YOLO
from ultralytics.cfg import get_save_dir
from ultralytics.data import load_inference_source
from ultralytics.data.augment import classify_transforms
from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS
from ultralytics.engine.predictor import STREAM_WARNING

from ultralytics.utils import LOGGER, callbacks
from ultralytics.utils.checks import check_file, check_imshow, check_imgsz
from ultralytics.utils.files import increment_path

from utils import DEFAULT_CFG, get_cfg


class TrackThread(QThread):
    """Background thread that runs YOLO multi-object tracking for a GUI.

    Raw and annotated frames, status text, FPS and progress values are pushed
    to the UI through Qt signals so the event loop never blocks.  Pausing and
    resuming the worker loop is coordinated with a QMutex + QWaitCondition.
    """

    # Input / output frames and status messages.
    send_input = Signal(np.ndarray)
    send_output = Signal(np.ndarray)
    send_msg = Signal(str)
    # Status-bar data / progress-bar data.
    send_fps = Signal(str)  # fps
    # send_labels = Signal(dict)  # Detected target results (number of each category)
    send_progress = Signal(int)  # Completeness
    send_class_num = Signal(int)  # Number of categories detected
    send_target_num = Signal(int)  # Targets detected

    def __init__(self, cfg=DEFAULT_CFG, overrides=None):
        super().__init__()
        self.args = get_cfg(cfg, overrides)
        self.save_dir = get_save_dir(self.args)  # directory results are saved to
        if self.args.show:
            self.args.show = check_imshow(warn=True)
        # --- GUI-controlled parameters ---
        self.used_model_name = None
        self.new_model_name = None  # model that may be swapped at runtime
        self.source = None  # input source
        self.is_continue = True  # continue/pause flag
        self.save_res = False  # whether to save results
        self.iou_thres = 0.45  # iou
        self.conf_thres = 0.25  # conf
        self.speed_thres = 10  # per-frame delay, ms
        self.labels_dict = {}  # dictionary of detection results
        self.max_progress_value = 0  # progress bar max value
        self.percent = 0  # progress bar percent
        self.res_status = False  # result status
        self.parent_workpath = None  # parent work path
        self.vid_writer = {}
        # --- YOLO parameters ---
        self.model = None
        self.data = self.args.data  # NOTE: rebound to iter(self.dataset) once tracking starts
        self.imgsz = 640
        self.dataset = None
        self.nosave = False
        self.vid_cap = None
        self.name = 'exp'
        self.batch = None
        self.project = 'runs/detect'
        self.exist_ok = False
        self.callbacks = defaultdict(list, callbacks.default_callbacks)  # add callbacks
        callbacks.add_integration_callbacks(self)
        self.track_history = defaultdict(lambda: [])

        self.mutex = QMutex()  # mutex for thread synchronisation
        self.cond = QWaitCondition()  # condition variable for pause/resume
        self.is_file = False
        self.is_url = False
        self.webcam = False  # webcam / network stream flag
        self.screenshot = False
        self.is_folder = False
        self.quit_flag = False
        self.count = 0
        # source is still None here, so every flag resolves to False.
        self.init_source()

    def init_source(self):
        """Classify the current source as file / URL / webcam / screen / folder."""
        source = str(self.source)
        self.is_file = Path(source).suffix[1:] in IMG_FORMATS.union(VID_FORMATS)
        self.is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
        # A numeric string, a *.txt list or a non-file URL is treated as a stream.
        self.webcam = source.isnumeric() or source.endswith('.txt') or (self.is_url and not self.is_file)
        self.screenshot = source.lower().startswith('screen')
        # A list of paths means a folder of inputs.
        self.is_folder = isinstance(self.source, list)
        if self.is_url and self.is_file:
            self.source = check_file(self.source)  # download remote file

    def pause_thread(self):
        """Flag the worker loop to pause at its next iteration."""
        with QMutexLocker(self.mutex):
            self.is_continue = False

    def resume_thread(self):
        """Clear the pause flag and wake the worker loop."""
        with QMutexLocker(self.mutex):
            self.is_continue = True
            self.cond.wakeOne()

    def update_dataset(self, source):
        """Switch to a new input source, releasing the previous one first."""
        if self.source is not None:
            if self.is_continue:
                self.pause_thread()
            self.release_res()
            self.source = source
            self.init_source()
            self.count = 0
            self.percent = 0
            if self.is_folder:
                print("不支持文件夹输入，请输入文件")
            else:
                print("更新输入源为：{}".format(self.source))
                self.setup_source(self.source)
                self.data = iter(self.dataset)

    def run(self):
        """Thread entry point: load the model, set up the source(s) and track."""
        if not self.model:
            self.send_msg.emit("Loading model: {}".format(os.path.basename(self.new_model_name)))
            self.setup_model(self.new_model_name)
            self.used_model_name = self.new_model_name
        if self.save_res:
            self.save_dir = increment_path(Path(self.project) / self.name, exist_ok=self.exist_ok)  # increment run
            (self.save_dir / 'labels').mkdir(parents=True, exist_ok=True)  # make dir
        if self.is_folder:
            # A folder is a list of individual sources; track each in turn.
            for source in self.source:
                self.setup_source(source)
                self.track()
        else:
            print("开始跟踪:{}".format(self.source))
            self.setup_source(self.source)
            self.track()

    def track(self):
        """Main loop: pull batches, run the tracker, emit annotated frames."""
        # Fresh per-ID trail history for this run.
        self.track_history = defaultdict(lambda: [])
        self.run_callbacks("on_track_start")
        self.data = iter(self.dataset)
        start_time = time.time()  # used to calculate the frame rate
        while True:
            with QMutexLocker(self.mutex):
                if not self.is_continue:
                    self.cond.wait(self.mutex)  # parked until resume_thread() wakes us
                if self.percent >= 100:
                    self.percent = 0
                if self.quit_flag:
                    return
                if self.is_continue:
                    if self.is_file:
                        self.send_msg.emit("Tracking File: {}".format(os.path.basename(self.source)))
                    elif self.webcam and not self.is_url:
                        self.send_msg.emit("Tracking Webcam: Camera_{}".format(self.source))
                    elif self.is_folder:
                        self.send_msg.emit("Tracking Folder: {}".format(os.path.dirname(self.source[0])))
                    elif self.is_url:
                        self.send_msg.emit("Tracking URL: {}".format(self.source))
                    else:
                        self.send_msg.emit("Tracking: {}".format(self.source))
                    try:
                        self.batch = next(self.data)
                    except StopIteration:
                        # FIX: the dataset is exhausted.  Previously the loop kept
                        # spinning on StopIteration and printing this message
                        # forever (the delay lives in the else branch).  We
                        # already hold self.mutex here, so set the flag directly
                        # — calling pause_thread() would deadlock on the
                        # non-recursive QMutex.
                        print("no more images")
                        self.is_continue = False
                    else:
                        paths, im0s, s = self.batch
                        self.vid_cap = self.dataset.cap if self.dataset is not None and self.dataset.mode == "video" else None
                        # Push the raw frame to the input view (can stutter the UI).
                        self.send_input.emit(im0s if isinstance(im0s, np.ndarray) else im0s[0])
                        self.count += len(im0s)
                        # Update the progress bar.
                        if self.vid_cap:
                            if self.vid_cap.get(cv2.CAP_PROP_FRAME_COUNT) > 0:
                                self.percent = int(
                                    self.count / self.vid_cap.get(cv2.CAP_PROP_FRAME_COUNT) * self.max_progress_value)
                                self.send_progress.emit(self.percent)
                            else:
                                self.percent = 100
                                self.send_progress.emit(self.percent)
                        else:
                            self.percent = self.max_progress_value
                        if self.count % 5 == 0 and self.count >= 5:  # refresh the frame rate every 5 frames
                            elapsed = time.time() - start_time
                            if elapsed > 0:  # FIX: guard against ZeroDivisionError on coarse clocks
                                self.send_fps.emit(str(int(5 / elapsed)))
                            start_time = time.time()
                        # Batched prediction with the tracker.
                        results = self.model.track(im0s)
                        if torch.is_tensor(results[0].boxes.id):  # only annotate when IDs were assigned
                            annotated_frame = self.postprocess(results)
                            self.send_output.emit(annotated_frame)  # after detection
                        if self.speed_thres != 0:
                            time.sleep(self.speed_thres / 1000)  # delay, ms
                        if self.percent == self.max_progress_value and not self.webcam:
                            self.percent = 0
                            self.send_progress.emit(0)
                            self.send_msg.emit('Finish Detection')
                            self.res_status = True
                            for value in self.vid_writer.values():
                                if isinstance(value, cv2.VideoWriter):
                                    value.release()  # release final video writer

    def postprocess(self, results):
        """Draw tracked boxes plus a per-ID trail on the first result frame.

        Returns the annotated frame (np.ndarray) for the first image in the
        batch; trails are accumulated in self.track_history across calls.
        """
        # Boxes and track IDs from the first image in the batch.
        boxes = results[0].boxes.xywh.cpu()
        track_ids = results[0].boxes.id.int().cpu().tolist()
        # Visualize the results on the frame.
        annotated_frame = results[0].plot()
        # Plot the tracks.
        for box, track_id in zip(boxes, track_ids):
            x, y, w, h = box
            # defaultdict hands back the stored list itself, so the appends
            # below mutate the shared history in place (no copy is made).
            track = self.track_history[track_id]
            track.append((float(x), float(y)))  # x, y center point
            if len(track) > 30:  # keep at most the last 30 center points
                track.pop(0)
            # Draw the tracking trail.
            points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
            cv2.polylines(
                annotated_frame,
                [points],
                isClosed=False,
                color=(230, 230, 230),
                thickness=10,
            )
        return annotated_frame

    def setup_source(self, source):
        """Sets up source and inference mode."""
        # NOTE(review): check_imgsz's `stride` parameter is normally the model
        # stride, not the video frame stride (args.vid_stride) — confirm intent.
        self.imgsz = check_imgsz(self.args.imgsz, stride=self.args.vid_stride, min_dim=2)  # check image size
        self.transforms = (
            getattr(
                self.model.model,
                "transforms",
                classify_transforms(self.imgsz[0], crop_fraction=self.args.crop_fraction),
            )
            if self.args.task == "classify"
            else None
        )
        self.dataset = load_inference_source(
            source=source,
            batch=self.args.batch,
            vid_stride=self.args.vid_stride,
            buffer=self.args.stream_buffer,
        )
        self.source_type = self.dataset.source_type
        if not getattr(self, "stream", True) and (
                self.source_type.stream
                or self.source_type.screenshot
                or len(self.dataset) > 1000  # many images
                or any(getattr(self.dataset, "video_flag", [False]))
        ):  # videos
            LOGGER.warning(STREAM_WARNING)
        self.vid_writer = {}

    def setup_model(self, model):
        """Instantiate the YOLO model from a weights path / name."""
        self.model = YOLO(model=model)

    def release_res(self):
        """Release dataset reader threads, capture devices and video writers."""
        if self.dataset is not None:
            self.dataset.running = False  # stop flag for the dataset's reader Thread
        self.track_history = defaultdict(lambda: [])
        # Join any reader threads the dataset spawned.
        if hasattr(self.dataset, 'threads'):
            for thread in self.dataset.threads:
                if thread.is_alive():
                    thread.join(timeout=5)  # bounded wait so the GUI never hangs
        if hasattr(self.dataset, 'caps'):
            for cap in self.dataset.caps:  # Iterate through the stored VideoCapture objects
                try:
                    cap.release()  # release video capture
                except Exception as e:
                    LOGGER.warning(f"WARNING ⚠️ Could not release VideoCapture object: {e}")
        # FIX: release EVERY writer; the original had a stray `break` that
        # stopped after the first one and leaked the rest.
        for value in self.vid_writer.values():
            if isinstance(value, cv2.VideoWriter):
                value.release()  # release final video writer
        self.dataset = None  # clear dataset
        self.source = None  # clear input source

    def run_callbacks(self, event: str):
        """Runs all registered callbacks for a specific event."""
        for callback in self.callbacks.get(event, []):
            callback(self)
