import cv2
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
from ultralytics import YOLO
import os
import tempfile
import csv
from tqdm import tqdm
# ======================
# Global constant definitions
# ======================
STATIC_DIR = os.path.join(os.path.dirname(__file__), "static")
MODEL_DIR = "model"
# TrueType font used for rendering Chinese (CJK) labels on images.
SIMHEI_FONT_PATH = os.path.join(STATIC_DIR, "SimHei.ttf")

# Predefined color palette (BGR order, for OpenCV drawing).
PREDEFINED_COLORS = [
    (255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255),
    (0, 255, 255), (128, 0, 0), (0, 128, 0), (0, 0, 128), (128, 128, 0),
    (128, 0, 128), (0, 128, 128), (192, 0, 0), (0, 192, 0), (0, 0, 192),
    (192, 192, 0), (192, 0, 192), (0, 192, 192), (64, 0, 0), (0, 64, 0)
]

# Supported video formats: extension -> (FOURCC codec, MIME type).
SUPPORTED_VIDEO_FORMATS = {
    ".mp4": ("mp4v", "video/mp4"),
    ".avi": ("XVID", "video/x-msvideo"),
    ".mov": ("mp4v", "video/quicktime"),
    ".mkv": ("mp4v", "video/x-matroska"),
}

class ProcessingModel:
    """Wraps a YOLO model with helpers for image/video detection and annotation."""

    def __init__(self):
        # YOLO model instance; None until load_model() succeeds.
        self.model = None
        # Human-readable metadata dict about the loaded model; set by load_model().
        self.model_info = None

    def load_model(self, model_name):
        """
        加载YOLO模型
        
        参数:
            model_name: 模型文件名
            
        返回:
            tuple: (模型对象, 模型信息)
        """
        if not model_name or not model_name.endswith('.pt'):
            return None, "❌ 请选择有效的.pt文件"
        
        model_path = os.path.join(MODEL_DIR, model_name)
        if not os.path.exists(model_path):
            return None, f"❌ 模型文件 '{model_name}' 不存在"
        
        try:
            model = YOLO(model_path)
            if torch.cuda.is_available():
                model.to("cuda")
            
            # 尝试从模型中提取版本信息
            model_type = model.__class__.__name__.lower()
            if hasattr(model, "model") and hasattr(model.model, "yaml"):
                arch = model.model.yaml.get("arch", "").lower()
            else:
                arch = model_type

            if "yolov8" in arch:
                version = "8"
            elif "yolov5" in arch:
                version = "5"
            elif "yolov3" in arch:
                version = "3"
            elif "yolo11" or "yolov11" in arch:
                version = "11"
            else:
                version = "未知"

            # 收集模型信息
            model_info = {
                "名称": model_name,
                "路径": os.path.abspath(model_path),
                "类型": f"YOLOv{version}",
                "任务": model.task,
                "类别数": len(model.names) if hasattr(model, 'names') else "N/A",
                "输入分辨率": f"{model.args['imgsz']}x{model.args['imgsz']}" if hasattr(model, 'args') else "N/A",
                "设备": "GPU" if model.device.type != 'cpu' else "CPU"
            }

            self.model = model
            self.model_info = model_info
            return model, model_info
        except Exception as e:
            return None, f"❌ 加载失败: {str(e)}"

    def get_available_models(self):
        """Return the .pt checkpoint names found in the model directory.

        Returns a single-element placeholder list (a user-facing hint) when the
        directory does not exist or holds no .pt files.
        """
        if not os.path.exists(MODEL_DIR):
            return ["请创建model文件夹并放置.pt文件"]

        checkpoints = []
        for entry in os.listdir(MODEL_DIR):
            if entry.endswith('.pt'):
                checkpoints.append(entry)

        if checkpoints:
            return checkpoints
        return ["请放置模型文件"]

    def read_label_mapping(self, file_path):
        """
        读取标签映射文件
        
        参数:
            file_path: 标签映射文件路径
            
        返回:
            tuple: (映射字典, 映射文本)
        """
        if not file_path:
            return {}, ""
        
        mapping = {}
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    parts = line.split(' ', 1)
                    if len(parts) == 2:
                        try:
                            class_id = int(parts[0])
                            mapping[class_id] = parts[1]
                        except ValueError:
                            continue
        except Exception as e:
            print(f"读取标签映射文件出错: {e}")
        
        return mapping, str(mapping)

    def get_high_contrast_color(self, bg_color):
        """
        根据背景颜色返回高对比度文字颜色（黑色或白色）
        
        参数:
            bg_color: 背景颜色 (R, G, B)
            
        返回:
            tuple: (R, G, B) 的高对比度颜色
        """
        r, g, b = bg_color
        luminance = (0.299 * r + 0.587 * g + 0.114 * b)  # 感知亮度公式
        return (0, 0, 0) if luminance > 180 else (255, 255, 255)

    def add_text_to_image(self, img, text, position, text_color=(0, 255, 0), bg_color=None, text_size=30, font_path=SIMHEI_FONT_PATH):
        """
        Draw text (CJK-capable) with a filled background rectangle onto an image.

        Args:
            img: input image — a BGR numpy array or a PIL image.
            text: string to render.
            position: (x, y) top-left anchor of the text.
            text_color: text color as (R, G, B).
            bg_color: background color as (R, G, B); defaults to the inverse
                of text_color.
            text_size: font size in points.
            font_path: path to a TrueType font file.

        Returns:
            BGR numpy array with the text rendered onto the input image.
        """
        # Normalize the input to a PIL RGB image for text rendering.
        if isinstance(img, np.ndarray):
            canvas = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        else:
            canvas = img.copy()

        painter = ImageDraw.Draw(canvas)

        try:
            font = ImageFont.truetype(font_path, text_size, encoding="utf-8")
        except IOError:
            font = ImageFont.load_default()
            print(f"警告: 无法加载字体文件 {font_path}, 使用默认字体")

        # Default the background to the inverse of the text color.
        if bg_color is None:
            bg_color = tuple(255 - channel for channel in text_color)

        # Measure the rendered text via its bounding box.
        left, top, right, bottom = painter.textbbox(position, text, font=font)

        # Background rectangle, padded slightly beyond the text bounds.
        padding = 4
        x, y = position
        painter.rectangle(
            [
                x - padding,
                y - padding,
                x + (right - left) + padding,
                y + (bottom - top) + padding,
            ],
            fill=bg_color,
        )

        painter.text(position, text, fill=text_color, font=font)

        # Convert back to BGR for OpenCV consumers.
        return cv2.cvtColor(np.array(canvas), cv2.COLOR_RGB2BGR)

    def get_color_for_label(self, label):
        """
        Return a stable BGR color from PREDEFINED_COLORS for a given label.

        BUG FIX: the original used the built-in hash(), which is randomized per
        process for strings (PYTHONHASHSEED), so the "fixed" color for a label
        could differ between runs. CRC32 is deterministic across processes.
        """
        import zlib

        # Lazily created per-instance cache: label -> color.
        if not hasattr(self, "color_map"):
            self.color_map = {}

        if label not in self.color_map:
            color_idx = zlib.crc32(str(label).encode("utf-8")) % len(PREDEFINED_COLORS)
            self.color_map[label] = PREDEFINED_COLORS[color_idx]

        return self.color_map[label]

    def predict_image(self, input_image, label_mapping=None, use_chinese=False, scale_factor=1.0):
        """
        Run object detection on a single image and return an annotated RGB image.

        Args:
            input_image: image file path or a BGR numpy array.
            label_mapping: optional {class_id: name} dict for relabeling.
            use_chinese: if True (and label_mapping is given), draw labels with
                the CJK-capable renderer instead of the default plot().
            scale_factor: downscale factor (e.g. 0.5) applied before inference
                to reduce load; the output is resized back to the original
                resolution. Default 1.0 = no scaling. (Generalizes the
                previously hard-coded local constant.)

        Returns:
            Annotated image as an RGB numpy array (suitable for Gradio).

        Raises:
            ValueError: if no model is loaded or the input is invalid/unreadable.
        """
        if self.model is None:
            raise ValueError("模型未加载")

        # Accept either a path or an in-memory BGR array.
        if isinstance(input_image, str):
            image_bgr = cv2.imread(input_image)
            if image_bgr is None:
                # BUG FIX: cv2.imread returns None (no exception) for unreadable
                # paths; the original crashed later with an opaque error.
                raise ValueError(f"无法读取图像文件: {input_image}")
        elif isinstance(input_image, np.ndarray):
            image_bgr = input_image
        else:
            raise ValueError("输入图像必须是文件路径字符串或 NumPy 数组")

        original_height, original_width = image_bgr.shape[:2]

        if scale_factor != 1.0:
            processing_image = cv2.resize(
                image_bgr,
                (int(original_width * scale_factor), int(original_height * scale_factor))
            )
        else:
            processing_image = image_bgr

        # NOTE(review): half=True requests FP16 inference; assumed to be handled
        # gracefully by ultralytics on CPU-only setups — confirm.
        results = self.model.predict(processing_image, half=True, verbose=False)

        # Default annotation path: let ultralytics draw boxes and labels.
        if not use_chinese or not label_mapping:
            result_img = results[0].plot()
            if scale_factor != 1.0:
                result_img = cv2.resize(result_img, (original_width, original_height))
            return cv2.cvtColor(result_img, cv2.COLOR_BGR2RGB)

        # Chinese-label path: draw boxes and CJK text on a writable copy.
        img_copy = np.array(processing_image, dtype=np.uint8, copy=True)
        img_copy.setflags(write=True)

        for result in results:
            boxes = result.boxes.cpu().numpy()
            for box in boxes:
                r = box.xyxy[0].astype(int)
                cls_id = int(box.cls[0])
                conf = box.conf[0]

                class_name = label_mapping.get(cls_id, f"未知{cls_id}")
                color = self.get_color_for_label(class_name)  # BGR

                # PIL draws in RGB, so flip channel order for the background.
                bg_color = color[::-1]
                text_color = self.get_high_contrast_color(bg_color)

                # Bounding box in BGR space.
                cv2.rectangle(img_copy, (r[0], r[1]), (r[2], r[3]), color, 3)

                # Label text rendered above the box.
                img_copy = self.add_text_to_image(
                    img_copy,
                    f'{class_name} {conf:.2f}',
                    position=(r[0], r[1] - 30),
                    text_color=text_color,
                    bg_color=bg_color,
                    text_size=30
                )

        # Restore the original resolution if we downscaled for inference.
        if scale_factor != 1.0:
            img_copy = cv2.resize(img_copy, (original_width, original_height))

        # Gradio expects RGB.
        return cv2.cvtColor(img_copy, cv2.COLOR_BGR2RGB)

    def predict_video(self, input_video, label_mapping=None, use_chinese=False):
        """
        Run object detection over every frame of a video.

        Args:
            input_video: video file path.
            label_mapping: optional {class_id: name} dict for relabeling.
            use_chinese: if True (and label_mapping given), use the CJK renderer.

        Returns:
            Path of the processed .mp4 file, or None on failure.

        Raises:
            ValueError: if no model is loaded or the video cannot be opened.
        """
        if self.model is None:
            raise ValueError("模型未加载")

        cap = cv2.VideoCapture(input_video)
        if not cap.isOpened():
            raise ValueError("无法打开视频文件")

        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Many codecs require even dimensions.
        width -= width % 2
        height -= height % 2

        # Write to a temporary .mp4 file.
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_file:
            output_path = temp_file.name

        # NOTE: the original also built a timestamp-CSV path here but never
        # wrote to it; that dead code was removed (extract_person_frames is the
        # method that actually emits timestamps).

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

        success = True
        try:
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

            with tqdm(total=total_frames, desc="处理视频帧", unit="帧") as pbar:
                while cap.isOpened():
                    ret, frame = cap.read()
                    if not ret:
                        break

                    if use_chinese and label_mapping:
                        # predict_image returns RGB; convert back to BGR for the
                        # writer (the original used the misleading COLOR_BGR2RGB
                        # constant — numerically the same swap, wrong name).
                        processed_frame = self.predict_image(frame, label_mapping, use_chinese=True)
                        out.write(cv2.cvtColor(processed_frame, cv2.COLOR_RGB2BGR))
                    else:
                        results = self.model.predict(frame, verbose=False)
                        out.write(results[0].plot())
                    pbar.update(1)

        except Exception as e:
            print(f"视频处理出错: {e}")
            success = False
        finally:
            cap.release()
            out.release()

        if not success:
            # BUG FIX: the original leaked the temp output file on failure.
            if os.path.exists(output_path):
                os.remove(output_path)
            return None

        return output_path

    def generate_video_frames(self, video_path, label_mapping_file=None, language="English"):
        """
        Lazily yield detection-annotated frames from a video.

        Args:
            video_path: video file path.
            label_mapping_file: optional path to an "id name" mapping file.
            language: "中文" enables Chinese label rendering; anything else uses
                the default annotation.

        Yields:
            numpy.ndarray: annotated RGB frame.

        Raises:
            ValueError: if the video cannot be opened.
        """
        chinese = (language == "中文")
        mapping, _ = self.read_label_mapping(label_mapping_file)

        capture = cv2.VideoCapture(video_path)
        if not capture.isOpened():
            raise ValueError("无法打开视频文件")

        try:
            while capture.isOpened():
                ok, frame = capture.read()
                if not ok:
                    break
                yield self.predict_image(frame, mapping, chinese)
        finally:
            # Release the capture even if the consumer abandons the generator.
            capture.release()

    def extract_person_frames(self, video_path, selected_persons, label_mapping_file=None):
        """
        Build a new video containing only frames where a selected class appears,
        and record each kept frame's source timestamp in a CSV file.

        Args:
            video_path: video file path.
            selected_persons: list of class names (or numeric class-id strings).
            label_mapping_file: optional path to an "id name" mapping file.

        Returns:
            Path of the filtered .mp4 file, or None on failure. A CSV named
            video_timestamps_<wallclock>.csv is written to ./result/.

        Raises:
            ValueError: if no model is loaded or the video cannot be opened.
        """
        import time

        if self.model is None:
            raise ValueError("模型未加载")

        # Resolve class-id -> name mapping, falling back to the model's labels.
        label_mapping, _ = self.read_label_mapping(label_mapping_file)
        if not label_mapping and hasattr(self.model, 'names'):
            names = self.model.names
            # BUG FIX: ultralytics exposes `names` as a {id: name} dict; the
            # original enumerate() over the dict yielded (index, key) pairs and
            # built an {index: id} mapping instead of {id: name}. List-style
            # `names` (older YOLO) is still handled via enumerate.
            if isinstance(names, dict):
                label_mapping = dict(names)
            else:
                label_mapping = {i: name for i, name in enumerate(names)}

        # Reverse mapping: class name -> class id (avoid shadowing builtin `id`).
        name_to_id = {name: cls_id for cls_id, name in label_mapping.items()}

        # Translate selections to ids; unknown names may be numeric id strings.
        selected_class_ids = []
        for person in selected_persons:
            if person in name_to_id:
                selected_class_ids.append(name_to_id[person])
            else:
                try:
                    selected_class_ids.append(int(person))
                except ValueError:
                    continue

        cap = cv2.VideoCapture(video_path)
        if not cap.isOpened():
            raise ValueError("无法打开视频文件")

        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

        # Many codecs require even dimensions.
        width -= width % 2
        height -= height % 2

        # Temporary output video (.mp4).
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_file:
            output_path = temp_file.name

        # Timestamp CSV goes into ./result/, named uniquely by wall-clock time.
        timestamp = time.strftime("%Y%m%d_%H%M%S")
        result_dir = os.path.join(os.getcwd(), "result")
        os.makedirs(result_dir, exist_ok=True)
        timestamp_csv_path = os.path.join(result_dir, f"video_timestamps_{timestamp}.csv")

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

        frame_count = 0
        saved_frame_count = 0
        timestamps = []

        try:
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

            with tqdm(total=total_frames, desc="提取人物帧", unit="帧") as pbar:
                while cap.isOpened():
                    ret, frame = cap.read()
                    if not ret:
                        break

                    # Source-video timestamp (seconds) of the current frame.
                    current_timestamp = frame_count / fps

                    # verbose=False for consistency with the other predict calls.
                    results = self.model.predict(frame, verbose=False)

                    # Keep the frame if any detection matches a selected class.
                    save_frame = False
                    for result in results:
                        if result.boxes is not None:
                            for box in result.boxes.cpu().numpy():
                                if int(box.cls[0]) in selected_class_ids:
                                    save_frame = True
                                    break
                        if save_frame:
                            break

                    if save_frame:
                        out.write(frame)
                        timestamps.append(current_timestamp)
                        saved_frame_count += 1

                    frame_count += 1
                    pbar.update(1)

            # One CSV row per kept frame: its index in the OUTPUT video paired
            # with the time (seconds) it occurred in the SOURCE video.
            with open(timestamp_csv_path, 'w', newline='') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(['frame_index', 'start_time'])
                for i, ts in enumerate(timestamps):
                    writer.writerow([i, ts])

        except Exception as e:
            print(f"视频处理出错: {e}")
            return None
        finally:
            cap.release()
            out.release()

        print(f"处理完成: 共处理 {frame_count} 帧，保存 {saved_frame_count} 帧")
        print(f"时间戳文件已保存到: {timestamp_csv_path}")
        # Only the video path is returned; the CSV lives alongside in ./result/.
        return output_path

    def get_person_classes(self, label_mapping_file=None):
        """
        获取所有可用的类别名称列表。注意：该方法实际返回所有类别名称，
        而不仅仅是人物类别，这是由于历史命名原因。未来版本可能会
        重命名该方法以更准确地反映其功能。
        
        参数:
            label_mapping_file: 标签映射文件路径
            
        返回:
            list: 所有可用的类别名称列表
        """
        if self.model is None:
            return []
        
        # 读取标签映射
        label_mapping, _ = self.read_label_mapping(label_mapping_file)
        
        # 如果没有提供标签映射，则使用模型自带的标签
        if not label_mapping:
            if hasattr(self.model, 'names'):
                label_mapping = {i: name for i, name in enumerate(self.model.names)}
        
        # 返回所有类别名称
        return list(label_mapping.values()) if label_mapping else []
