import cv2  # 导入OpenCV库，用于图像处理
import numpy as np  # 导入NumPy库，用于数组和矩阵操作
import supervision as sv  # 导入supervision库，用于视频帧处理
from tqdm import tqdm  # 导入tqdm库，用于显示进度条
from ultralytics import YOLO  # 导入YOLO模型库
from supervision.assets import VideoAssets, download_assets  # 导入视频资源下载函数
from collections import defaultdict, deque  # 导入默认字典和双端队列
import os

# Work around duplicate OpenMP runtime loads (common when PyTorch and other
# native libs each bundle libiomp); without this the process can abort on start.
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# Download the sample "vehicles" clip used as input.
download_assets(VideoAssets.VEHICLES)

# Source/target video paths, detection thresholds, and model settings.
SOURCE_VIDEO_PATH = "vehicles.mp4"
TARGET_VIDEO_PATH = "vehicles-result1.mp4"
CONFIDENCE_THRESHOLD = 0.3  # minimum detection confidence to keep
IOU_THRESHOLD = 0.5  # IoU threshold used for non-maximum suppression
MODEL_NAME = "yolov8x.pt"
MODEL_RESOLUTION = 1280  # inference image size passed to the model

# Corners of the monitored road region in the source image, in pixels.
# NOTE(review): two corners lie outside the frame (x=5039, x=-550) —
# presumably intentional so the trapezoid covers the full lane width at the
# bottom edge; confirm against the source footage.
SOURCE = np.array([
    [1252, 787],
    [2298, 803],
    [5039, 2159],
    [-550, 2159]
])
# Dimensions of the bird's-eye target plane the region is warped onto
# (presumably meters — verify against the downstream speed computation).
TARGET_WIDTH = 25
TARGET_HEIGHT = 250

# Corners of the target rectangle corresponding to SOURCE, listed in the
# same order (clockwise from top-left).
TARGET = np.array([
    [0, 0],
    [TARGET_WIDTH - 1, 0],
    [TARGET_WIDTH - 1, TARGET_HEIGHT - 1],
    [0, TARGET_HEIGHT - 1],
])

# Generator yielding the source video's frames one at a time.
frame_generator = sv.get_video_frames_generator(source_path=SOURCE_VIDEO_PATH)

# Iterator over the video frames.
frame_iterator = iter(frame_generator)

# Grab the first frame for a preview of the region of interest.
frame = next(frame_iterator)

# Copy the frame so the original stays untouched.
annotated_frame = frame.copy()

# Draw the SOURCE polygon on the copy.
annotated_frame = sv.draw_polygon(scene=annotated_frame, polygon=SOURCE, color=sv.Color.RED, thickness=4)

# Display the annotated preview frame.
sv.plot_image(annotated_frame)
class ViewTransformer:
    """Perspective-warp 2D points from the camera plane onto a target plane."""

    def __init__(self, source: np.ndarray, target: np.ndarray) -> None:
        """Precompute the homography mapping the four `source` corners onto `target`."""
        src = source.astype(np.float32)
        dst = target.astype(np.float32)
        # 3x3 perspective matrix derived from the four point correspondences.
        self.m = cv2.getPerspectiveTransform(src, dst)

    def transform_points(self, points: np.ndarray) -> np.ndarray:
        """Apply the homography to an (N, 2) array of points.

        Returns an (N, 2) float array; an empty input is returned unchanged.
        """
        if points.size == 0:
            return points
        # cv2.perspectiveTransform expects float32 input of shape (N, 1, 2).
        warped = cv2.perspectiveTransform(
            points.reshape(-1, 1, 2).astype(np.float32), self.m
        )
        return warped.reshape(-1, 2)

# Build the point transformer mapping the image plane to the bird's-eye plane.
view_transformer = ViewTransformer(source=SOURCE, target=TARGET)
# Load the YOLO detection model.
model = YOLO(MODEL_NAME)

# Read resolution / fps / frame-count metadata from the source video.
video_info = sv.VideoInfo.from_video_path(video_path=SOURCE_VIDEO_PATH)

# Initialize the ByteTrack multi-object tracker.
# NOTE(review): `track_thresh` was renamed `track_activation_threshold` in
# newer supervision releases — confirm against the installed version.
byte_track = sv.ByteTrack(
    frame_rate=video_info.fps, track_thresh=CONFIDENCE_THRESHOLD
)
# Line thickness scaled to the video resolution.
thickness = sv.calculate_dynamic_line_thickness(
    resolution_wh=video_info.resolution_wh
)

# Text scale likewise adapted to the video resolution.
text_scale = sv.calculate_dynamic_text_scale(
    resolution_wh=video_info.resolution_wh
)

# Bounding-box annotator using the computed line thickness.
bounding_box_annotator = sv.BoundingBoxAnnotator(
    thickness=thickness
)

# Label annotator: text scale/thickness as computed, anchored below each box.
label_annotator = sv.LabelAnnotator(
    text_scale=text_scale,
    text_thickness=thickness,
    text_position=sv.Position.BOTTOM_CENTER
)

# Trace annotator drawing ~2 seconds of each tracker's recent trajectory.
trace_annotator = sv.TraceAnnotator(
    thickness=thickness,
    trace_length=video_info.fps * 2,
    position=sv.Position.BOTTOM_CENTER
)

# Polygon zone used to keep only detections inside the monitored region.
polygon_zone = sv.PolygonZone(
    polygon=SOURCE,
    frame_resolution_wh=video_info.resolution_wh
)

# Per-tracker deque of recent transformed coordinates, capped at one
# second's worth of frames (maxlen = fps).
coordinates = defaultdict(lambda: deque(maxlen=video_info.fps))
# Re-create the frame generator so the loop starts from the first frame —
# the one above was partially consumed for the preview image, which would
# silently drop frame 0 and throw off tqdm's total.
frame_generator = sv.get_video_frames_generator(source_path=SOURCE_VIDEO_PATH)

# Open the target video and write one annotated frame per source frame.
with sv.VideoSink(TARGET_VIDEO_PATH, video_info) as sink:
    for frame in tqdm(frame_generator, total=video_info.total_frames):
        # Run detection on the current frame.
        result = model(frame, imgsz=MODEL_RESOLUTION, verbose=False)[0]
        detections = sv.Detections.from_ultralytics(result)

        # Drop low-confidence detections and class id 0 (COCO "person").
        detections = detections[detections.confidence > CONFIDENCE_THRESHOLD]
        detections = detections[detections.class_id != 0]

        # Keep only detections inside the monitored polygon zone.
        detections = detections[polygon_zone.trigger(detections)]

        # Refine overlapping boxes with non-maximum suppression.
        detections = detections.with_nms(IOU_THRESHOLD)

        # Update the tracker so each detection carries a stable tracker_id.
        detections = byte_track.update_with_detections(detections=detections)

        # Bottom-center anchor of each box, warped into the target plane.
        points = detections.get_anchors_coordinates(
            anchor=sv.Position.BOTTOM_CENTER
        )
        points = view_transformer.transform_points(points=points).astype(int)

        # Store each tracker's position along the road (y in the target
        # plane); the deque keeps at most one second of history.
        for tracker_id, [_, y] in zip(detections.tracker_id, points):
            coordinates[tracker_id].append(y)

        # Build a speed label per tracked vehicle.
        labels = []
        for tracker_id in detections.tracker_id:
            if len(coordinates[tracker_id]) < video_info.fps / 2:
                # Not enough history yet for a stable speed estimate.
                labels.append(f"#{tracker_id}")
            else:
                # Distance covered (target-plane units, presumably meters)
                # over the elapsed time; x3.6 converts m/s to km/h.
                coordinate_start = coordinates[tracker_id][-1]
                coordinate_end = coordinates[tracker_id][0]
                distance = abs(coordinate_start - coordinate_end)
                time = len(coordinates[tracker_id]) / video_info.fps
                speed = distance / time * 3.6
                labels.append(f"#{tracker_id} {int(speed)} km/h")

        # Annotate traces, boxes, and speed labels onto a copy of the frame.
        annotated_frame = frame.copy()
        annotated_frame = trace_annotator.annotate(
            scene=annotated_frame, detections=detections
        )
        annotated_frame = bounding_box_annotator.annotate(
            scene=annotated_frame, detections=detections
        )
        annotated_frame = label_annotator.annotate(
            scene=annotated_frame, detections=detections, labels=labels
        )

        # Write the annotated frame to the output video.
        sink.write_frame(annotated_frame)