import os

from config import config, application_path

# Point Ultralytics at a writable settings directory. Set before the
# `from ultralytics import YOLO` import below — presumably Ultralytics reads
# YOLO_CONFIG_DIR at import time, hence the unusual placement (TODO confirm).
os.environ['YOLO_CONFIG_DIR'] = application_path + '/Ultralytics'

import asyncio
import time

# import cv2
import numpy as np
import supervision as sv
import torch
from av import VideoFrame
from av.audio.frame import AudioFrame
from ultralytics import YOLO

# Load the YOLO weights named in config ([DEFAULT] model), falling back to the
# small yolov8n weights; files are resolved under <application_path>/models/.
modelName = config.get('DEFAULT', 'model', fallback='yolov8n.pt')
model = YOLO(application_path + '/models/' + modelName)

# Run inference on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f'use device: {device}')
model.to(device)

# ByteTrack assigns persistent tracker IDs to detections across frames.
tracker = sv.ByteTrack()
annotator = sv.BoundingBoxAnnotator()

# NOTE(review): the three annotators above/below are not used by the active
# detection path — presumably kept for debug visualization; confirm.
label_annotator = sv.LabelAnnotator()
trace_annotator = sv.TraceAnnotator()


def annotate_frame(frame: np.ndarray) -> list[dict]:
    """Run YOLO detection plus ByteTrack tracking on a single BGR frame.

    Args:
        frame: image as an ndarray (e.g. from ``VideoFrame.to_ndarray``).

    Returns:
        One dict per tracked detection with keys:
          - 'tracker_id': int, persistent ID assigned by ByteTrack
          - 'class_name': str, model class label for the detection
          - 'xyxy': [x1, y1, x2, y2] box corners as floats (pixel coords)
          - 'stable_time': always None here; managed later by handle_objects
    """
    results = model(frame)[0]
    detections = sv.Detections.from_ultralytics(results)

    # Associate detections with existing tracks so IDs persist across frames.
    detections = tracker.update_with_detections(detections)

    return [
        {
            'tracker_id': tracker_id.item(),
            'class_name': results.names[class_id],
            'xyxy': xyxy.tolist(),
            'stable_time': None,
        }
        for tracker_id, class_id, xyxy in zip(
            detections.tracker_id, detections.class_id, detections.xyxy
        )
    ]


class VideoReceiver:
    """Consumes frames from an aiortc-style track, runs detection on a
    subsample of frames, and reports object events through an asyncio queue.
    """

    def __init__(self, queue: asyncio.Queue):
        # Outgoing message queue, consumed elsewhere (e.g. a signaling channel).
        self.queue = queue
        # Currently tracked object dict (see annotate_frame) or None.
        self.target = None

    async def handle_track(self, track):
        """Receive frames from ``track`` until repeated timeouts or an error.

        Every ``frame_handle_rate``-th frame is run through the detector; the
        resulting objects, with coordinates normalized to [0, 1], are handed
        to ``handle_objects``.
        """
        print("处理帧")
        frame_count = 0
        timeout = 0
        frame_handle_rate = config.getint('DEFAULT', 'frame_handle_rate', fallback=5)
        while True:
            try:
                print("等待帧...")
                frame = await asyncio.wait_for(track.recv(), timeout=5.0)
                frame_count += 1
                print(f"接收第 {frame_count} 帧")

                if isinstance(frame, VideoFrame):
                    print(f'frame.width: {frame.width}, frame.height: {frame.height}')
                    print(f"帧类型: VideoFrame, pts: {frame.pts}, time_base: {frame.time_base}")
                    frame_ndarray = frame.to_ndarray(format="bgr24")
                elif isinstance(frame, AudioFrame):
                    print("帧类型: 音频")
                    continue
                elif isinstance(frame, np.ndarray):
                    print("帧类型: numpy 数组")
                    frame_ndarray = frame
                else:
                    print(f"意外的帧类型: {type(frame)}")
                    continue

                # Only analyze every frame_handle_rate-th frame to bound CPU/GPU load.
                if frame_count % frame_handle_rate != 0:
                    continue

                objects = annotate_frame(frame_ndarray)

                # BUG FIX: the original read frame.width/frame.height here, which
                # raises AttributeError on the raw-ndarray branch above (ndarrays
                # have no .width), aborting the loop via the broad except. Derive
                # the dimensions from the ndarray itself (shape is (H, W, C)).
                height, width = frame_ndarray.shape[:2]
                for o in objects:
                    for index, coord in enumerate(o['xyxy']):
                        if index % 2 == 0:
                            o['xyxy'][index] = coord / width   # x1 / x2
                        else:
                            o['xyxy'][index] = coord / height  # y1 / y2

                handle_objects(self, objects)
            except asyncio.TimeoutError:
                # NOTE(review): the counter is cumulative — it is never reset
                # after a successful frame, so the 5th timeout ever seen ends
                # the loop. Presumably intentional; confirm with the author.
                timeout += 1
                if timeout == 5:
                    break
                print("等待帧超时，继续等待...")
            except Exception as e:
                print(f"An error occurred: {e}")
                break
        print("退出 handle_track")

def _notify(queue: asyncio.Queue, event_type: str, data=None):
    """Push a standard result envelope onto the outgoing queue.

    The 'data' key is omitted entirely when ``data`` is None (matches the
    NONE_OBJECT_FOUND message shape).
    """
    payload = {"type": event_type}
    if data is not None:
        payload["data"] = data
    queue.put_nowait({"resultCode": 200, "data": payload})


def handle_objects(video_receiver: "VideoReceiver", objects: list[dict]):
    """Compare the latest detections against the receiver's current target
    and emit exactly one event message on its queue.

    Events:
      MULTI_OBJECTS_FOUND  more than one object in frame (target cleared)
      NONE_OBJECT_FOUND    no objects in frame (target cleared)
      OBJECT_FOUND         first sighting of a single object
      OBJECT_CHANGE        the single object has a different tracker id
      OBJECT_MOVE          same object, but its box moved beyond the threshold
      OBJECT_STABLE        same object, stationary for <= 2 s
      OBJECT_FIXED         same object, stationary for > 2 s (re-sent each call)
    """
    if len(objects) > 1:
        video_receiver.target = None
        _notify(video_receiver.queue, "MULTI_OBJECTS_FOUND", objects)
        return
    if len(objects) == 0:
        video_receiver.target = None
        _notify(video_receiver.queue, "NONE_OBJECT_FOUND")
        return

    target = objects[0]
    if video_receiver.target is None:
        video_receiver.target = target
        _notify(video_receiver.queue, "OBJECT_FOUND", target)
        return

    if video_receiver.target['tracker_id'] != target['tracker_id']:
        video_receiver.target = target
        _notify(video_receiver.queue, "OBJECT_CHANGE", target)
        return

    if position_change(video_receiver.target['xyxy'], target['xyxy']):
        video_receiver.target = target
        _notify(video_receiver.queue, "OBJECT_MOVE", target)
        return

    # Same object, not moving: start (or continue) the stability clock on the
    # STORED target — the incoming `target` dict always has stable_time=None.
    now = int(time.time() * 1000)
    if video_receiver.target['stable_time'] is None:
        video_receiver.target['stable_time'] = now

    if now - video_receiver.target['stable_time'] > 2000:
        _notify(video_receiver.queue, "OBJECT_FIXED", target)
    else:
        _notify(video_receiver.queue, "OBJECT_STABLE", target)


def position_change(xyxy1, xyxy2, threshold=5):
    """Return True when any coordinate of the two boxes differs by more than
    ``threshold``.

    Args:
        xyxy1, xyxy2: [x1, y1, x2, y2] box coordinates.
        threshold: maximum per-coordinate difference still counted as "same
            position". Default 5 preserves the original hard-coded value.

    NOTE(review): handle_track normalizes xyxy to [0, 1] before this comparison
    is reached, so the default threshold of 5 can never be exceeded there —
    OBJECT_MOVE will never fire on normalized coordinates. Confirm the intended
    threshold (a small fraction such as 0.05?) with the author.
    """
    return any(abs(a - b) > threshold for a, b in zip(xyxy1, xyxy2))