import csv
import cv2
import torch
import yaml
import time
import math
import os
import shutil
from tqdm import tqdm
from moviepy.editor import VideoFileClip
from datahub import video_queues
from upload.upload import output_csv
from common.file_utils import output_video_dir
from spotter.spot_key_frame import extract_keyframes


# Load application configuration.
with open('config.yml', 'r') as f:
    config = yaml.safe_load(f)

# Map each front-end "type" value (1, 2, 3, ...) to the set of target
# class names configured for it.
target_map = {
    type_id: set(class_names)
    for type_id, class_names in enumerate(config['targets'], 1)
}

# Load the YOLOv5 model from the local repository with the configured weights.
model = torch.hub.load(
    'recognizer/', 'custom', path=config['weight'], source='local')


def video_processor(file_name, target_type, socketio):
    """Run YOLOv5 detection over a video, annotate frames that contain the
    requested target classes, and emit progress over Socket.IO.

    Writes an annotated output video (only frames with detections) and a CSV
    log of (time, detected names) rows.

    Args:
        file_name: Input video file name, relative to the configured input
            directory (``config['input_video_dir']``).
        target_type: Key into the module-level ``target_map`` (1, 2, 3, ...
            matching the front-end "type" value) selecting target classes.
        socketio: Socket.IO server used to push 'log_message' / 'process_msg'
            events to the front end.
    """
    def get_video_duration(video_path):
        # cv2's reported FPS can be wrong and breaks duration math, so use
        # moviepy instead.  Close the clip to release its ffmpeg reader
        # (the original leaked it).
        clip = VideoFileClip(video_path)
        try:
            return clip.duration
        finally:
            clip.close()

    def format_time(duration):
        # Format a duration in seconds as mm:ss.
        minutes = int(duration // 60)
        seconds = int(duration % 60)
        return f"{minutes:02d}:{seconds:02d}"

    def check_targets_in(target_objs, detection_names) -> bool:
        # True if any wanted class appears among the detected names.
        return any(target_obj in detection_names for target_obj in target_objs)

    # Input video path.
    input_video_path = config['input_video_dir'] + file_name

    socketio.emit('log_message', {'data': '视频压缩中，请耐心等待...'})
    # Compress the uploaded file via keyframe extraction, then replace the
    # original with the compressed result.
    _input_video_path = extract_keyframes(file_name)
    if os.path.exists(input_video_path):
        os.remove(input_video_path)
    shutil.move(_input_video_path, input_video_path)
    socketio.emit('log_message', {'data': '✔ 压缩完成，开始识别'})

    # Output video / CSV destinations.
    output_video = output_video_dir(file_name)
    output_csv_name = output_csv(file_name)

    # Target classes (YOLOv5 names) selected by the front-end type value.
    targets = target_map[target_type]

    # Box/label colors from config.  ``targets`` is a set, so zipping it
    # directly with the color list would pair classes and colors
    # nondeterministically between runs; sort for a stable mapping.
    colors = config['colors']
    color_mapping = dict(zip(sorted(targets), colors))

    video_capture = cv2.VideoCapture(input_video_path)

    # Input geometry and length.
    total_frame = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video_capture.get(cv2.CAP_PROP_FPS)
    width = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    video_length = format_time(get_video_duration(input_video_path))  # mm:ss

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    output_writer = cv2.VideoWriter(output_video, fourcc, fps, (width, height))

    start_time = time.time()

    # Keep the CSV open for the whole run instead of reopening it in append
    # mode on every detected frame.
    with open(output_csv_name, 'w', newline='') as csvfile:
        csv_writer = csv.writer(csvfile)
        csv_writer.writerow(['Time', 'Target'])

        for frame_index in tqdm(range(total_frame), unit='frame'):
            ret, frame = video_capture.read()
            if not ret:
                break

            # YOLOv5 expects RGB; OpenCV delivers BGR.
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            results = model(frame_rgb)

            detections = results.pandas().xyxy[0]
            detected_objects = []

            # Only frames containing at least one target class are kept.
            if check_targets_in(targets, detections['name'].values):
                current_time = math.floor(
                    video_capture.get(cv2.CAP_PROP_POS_MSEC) / 1000)
                current_time = format_time(current_time)

                # Draw a box + label for every detection of a target class.
                for _, row in detections.iterrows():
                    name = row['name']
                    if name in targets:
                        color = color_mapping[name]
                        box = row[['xmin', 'ymin', 'xmax', 'ymax']].tolist()
                        cv2.rectangle(frame, (int(box[0]), int(
                            box[1])), (int(box[2]), int(box[3])), color, 2)
                        cv2.putText(frame, name, (int(box[0]), int(box[1]) - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.9, color, 2)

                # Time indicator ("current/total") in the top-right corner.
                cv2.putText(frame, f"{current_time}/{video_length}", (width - 230, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,
                            cv2.LINE_AA)
                output_writer.write(frame)

                # Log the detected class names as a readable comma-joined
                # string (the original wrote the repr of a numpy array into
                # a single CSV cell).
                csv_writer.writerow(
                    [current_time, ', '.join(detections['name'].tolist())])

                # Per-class counts for the progress log.
                for target in targets:
                    count = int((detections['name'] == target).sum())
                    if count > 0:
                        detected_objects.append((target, count))

            detected_objects_str = (
                "None" if not detected_objects else str(detected_objects))
            # Push progress to the per-video queue and to the front end.
            # NOTE(review): assumes video_queues[file_name] was created by the
            # caller — verify against datahub.
            progress = int(((frame_index + 1) / total_frame) * 100)
            video_queues[file_name].append(progress)
            socketio.emit('process_msg', {'data': progress})
            # frame_index is 0-based; report frame_index + 1 so the log
            # matches the progress percentage (and never says "0 / N").
            socketio.emit('log_message', {
                'data': f"已检测 {frame_index + 1} / {total_frame} 帧，识别出: {detected_objects_str};"})

    video_capture.release()
    output_writer.release()

    # Report total wall-clock processing time.
    end_time = time.time()
    print("Video processing complete in %.2f seconds" %
          (end_time - start_time))
    socketio.emit('log_message', {'data': '✔视频处理完毕，详细结果请下载后查看'})
