# infer_threads.py
from PyQt5.QtCore import pyqtSignal, QThread
from PyQt5.QtGui import QPixmap, QImage
import cv2
import yaml
import time
import os
import numpy as np
# from pathlib import Path # 未使用，可以注释掉

# 可选导入YOLO，如果安装了的话
try:
    from ultralytics import YOLO
    YOLO_AVAILABLE = True
except ImportError:
    YOLO_AVAILABLE = False
    print("Warning: ultralytics not installed. Using mock detection.")

class InferThread(QThread):
    """Worker thread that reads frames from a video source, runs YOLO
    detection (or a mock overlay when ultralytics is unavailable), and
    reports everything to the GUI exclusively through Qt signals.
    """

    # --- Signals ---
    log_signal = pyqtSignal(str)                  # human-readable log line
    frame_signal = pyqtSignal(QPixmap, QPixmap)   # (original frame, annotated frame)
    fps_signal = pyqtSignal(float)                # measured processing FPS
    progress_signal = pyqtSignal(int)             # 0-100 progress (file sources only)
    # Detection statistics per frame:
    #   {'counts': {class_name: count}, 'detections': [{'class': name, 'xywh': [x, y, w, h]}]}
    # A plain {} is emitted when no processing happened (open failure / stream end / crash).
    stats_signal = pyqtSignal(dict)
    # NOTE(review): this shadows QThread's built-in `finished` signal; kept
    # unchanged for backward compatibility with existing connections.
    finished = pyqtSignal()
    # --- End signals ---

    def __init__(self, yaml_path="infer.yaml"):
        """Load inference parameters from *yaml_path* and try to load the model."""
        super().__init__()
        self.yaml_path = yaml_path
        self.input_source = 0        # camera index (int) or video file path (str)
        self._stop_flag = False
        self._pause_flag = False
        self.infer_params = {}
        self.model = None
        self.model_loaded = False
        self.save_results = False
        self.output_writer = None
        self.frame_count = 0         # total frames processed in the current run
        self.total_frames = 0        # frame count of the source (<= 0 for cameras)

        self._load_infer_params()
        self._init_model()

    def _load_infer_params(self):
        """Read the YAML config; fall back to (and persist) defaults when missing."""
        try:
            if os.path.exists(self.yaml_path):
                with open(self.yaml_path, "r", encoding="utf-8") as f:
                    loaded_params = yaml.safe_load(f)
                    # An empty YAML file loads as None; normalize to {}.
                    self.infer_params = loaded_params if loaded_params is not None else {}
            else:
                self.log_signal.emit(f"[警告] 配置文件 {self.yaml_path} 不存在，使用默认参数")
                self.infer_params = self._get_default_params()
                self._save_default_config()

        except Exception as e:
            self.log_signal.emit(f"[错误] 加载推理参数失败: {e}")
            self.infer_params = self._get_default_params()

    def _get_default_params(self):
        """Return the built-in default parameter set (key names match infer.yaml)."""
        return {
            "model": "yolov8n.pt",  # key is "model" to stay consistent with DetectionWidget
            "conf": 0.25,
            "iou": 0.45,
            "show_box": True,
            "show_label": True,
            "show_conf": True,
            "save_results": False,
            "output_path": "output.mp4",
            "device": "cpu",  # "cpu" or "cuda"
            "img_size": 640
        }

    def _save_default_config(self):
        """Persist the current parameters to self.yaml_path, creating parent dirs."""
        try:
            config_dir = os.path.dirname(self.yaml_path)
            if config_dir:
                # exist_ok avoids a race between the exists() check and makedirs().
                os.makedirs(config_dir, exist_ok=True)

            with open(self.yaml_path, "w", encoding="utf-8") as f:
                yaml.dump(self.infer_params, f, allow_unicode=True, default_flow_style=False)
        except Exception as e:
            self.log_signal.emit(f"[错误] 保存默认配置失败: {e}")

    def _get_float_param(self, key, default):
        """Fetch *key* from infer_params as float, logging and defaulting on bad values."""
        value = self.infer_params.get(key, default)
        if value is None:
            return default
        try:
            return float(value)
        except (ValueError, TypeError):
            self.log_signal.emit(f"[警告] 参数 {key} 值无效: {value}，使用默认值 {default}")
            return default

    def _get_bool_param(self, key, default):
        """Fetch *key* from infer_params as bool; accepts common truthy strings."""
        value = self.infer_params.get(key, default)
        if value is None:
            return default
        if isinstance(value, bool):
            return value
        if isinstance(value, str):
            return value.lower() in ['true', '1', 'yes', 'on']
        return bool(value)

    def _get_str_param(self, key, default):
        """Fetch *key* from infer_params as str, defaulting on None/missing."""
        value = self.infer_params.get(key, default)
        if value is None:
            return default
        return str(value)

    def _init_model(self):
        """Try to load the YOLO model; on any failure fall back to mock detection."""
        # NOTE: key name is "model" (not "model_path") for config consistency.
        model_path = self._get_str_param("model", "yolov8n.pt")
        if not YOLO_AVAILABLE:
            self.log_signal.emit(f"[警告] 未安装 ultralytics，使用模拟检测。模型路径: {model_path}")
            self.model_loaded = False
            return

        try:
            # Device is informational only here; YOLO selects the device at predict time.
            device = self._get_str_param("device", "cpu")

            if not os.path.exists(model_path):
                # Retry with just the basename relative to the working directory.
                if os.path.exists(os.path.basename(model_path)):
                    model_path = os.path.basename(model_path)
                else:
                    self.log_signal.emit(f"[警告] 模型文件不存在: {model_path}，使用模拟检测")
                    self.model_loaded = False
                    return

            self.model = YOLO(model_path)
            self.model_loaded = True
            self.log_signal.emit(f"[模型] 成功加载模型: {model_path} (设备: {device})")

        except Exception as e:
            self.log_signal.emit(f"[错误] 模型加载失败 ({model_path}): {e}")
            self.model_loaded = False

    def _setup_video_writer(self, frame_shape, output_path, fps=30.0):
        """Open a VideoWriter for *output_path* sized to *frame_shape*.

        *fps* defaults to 30 for backward compatibility; run() passes the
        source's real FPS. Returns True on success.
        """
        try:
            height, width = frame_shape[:2]
            # The writer expects BGR frames, which is what we feed it.
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            # BUGFIX: honour the source FPS instead of always writing 30 FPS;
            # fall back to 30 when the source reports nothing usable (0 or NaN).
            if not fps or fps <= 0 or fps != fps:
                fps = 30.0
            self.output_writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
            if self.output_writer.isOpened():
                self.log_signal.emit(f"[保存] 视频写入器已初始化: {output_path}")
                return True
            else:
                self.log_signal.emit(f"[错误] 无法初始化视频写入器: {output_path}")
                return False
        except Exception as e:
            self.log_signal.emit(f"[错误] 视频写入器初始化失败: {e}")
            return False

    def _release_video_writer(self):
        """Flush and close the video writer, if one is open. Safe to call twice."""
        if self.output_writer is not None:
            try:
                self.output_writer.release()
                self.output_writer = None
                self.log_signal.emit("[保存] 视频文件已保存")
            except Exception as e:
                self.log_signal.emit(f"[错误] 视频写入器释放失败: {e}")

    def run(self):
        """Thread entry point: capture → detect → emit loop until stop or stream end."""
        cap = None
        try:
            # Resolve all parameters once, up front (key name is "model").
            model_path = self._get_str_param("model", "yolov8n.pt")
            conf_threshold = self._get_float_param("conf", 0.25)
            iou_threshold = self._get_float_param("iou", 0.45)
            show_box = self._get_bool_param("show_box", True)
            show_label = self._get_bool_param("show_label", True)
            show_conf = self._get_bool_param("show_conf", True)
            self.save_results = self._get_bool_param("save_results", False)
            output_path = self._get_str_param("output_path", "output.mp4")
            img_size = int(self._get_float_param("img_size", 640))

            self.log_signal.emit(f"[参数] model={model_path}, conf={conf_threshold}, iou={iou_threshold}, save={self.save_results}")

            cap = cv2.VideoCapture(self.input_source)
            if not cap.isOpened():
                self.log_signal.emit(f"[错误] 无法打开输入源: {self.input_source}")
                # Empty stats: nothing was processed.
                self.stats_signal.emit({})
                return

            self.total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps_source = cap.get(cv2.CAP_PROP_FPS)
            self.log_signal.emit(f"[信息] 视频源FPS: {fps_source:.1f}, 总帧数: {self.total_frames}")

            if self.save_results:
                # Read one frame to learn the output dimensions.
                ret, test_frame = cap.read()
                if ret:
                    # BUGFIX: pass the real source FPS instead of hard-coded 30.
                    if not self._setup_video_writer(test_frame.shape, output_path, fps_source):
                        self.save_results = False
                    # Rewind to the first frame (no-op for live cameras).
                    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                else:
                    self.save_results = False

            # BUGFIX: self.frame_count must be monotonic. The old code copied it
            # from the FPS window counter, which was zeroed every second, so the
            # progress bar (and get_progress) kept snapping back toward 0.
            self.frame_count = 0
            fps_window_count = 0          # frames inside the current ~1s FPS window
            window_start = time.time()
            last_progress_time = window_start

            while not self._stop_flag:
                if self._pause_flag:
                    self.msleep(100)
                    continue

                ret, frame = cap.read()
                if not ret:
                    self.log_signal.emit("[完成] 视频播放结束")
                    # Empty stats: processing finished.
                    self.stats_signal.emit({})
                    break

                self.frame_count += 1
                fps_window_count += 1
                now = time.time()

                # Report FPS roughly once per second.
                if now - window_start >= 1.0:
                    self.fps_signal.emit(fps_window_count / (now - window_start))
                    fps_window_count = 0
                    window_start = now

                # Report progress roughly twice per second (file sources only).
                if now - last_progress_time >= 0.5 and self.total_frames > 0:
                    progress = int((self.frame_count / self.total_frames) * 100)
                    self.progress_signal.emit(min(progress, 100))
                    last_progress_time = now

                orig_pixmap = self._convert_cv_qt(frame)
                det_pixmap = self._perform_detection(frame, conf_threshold, iou_threshold,
                                                     show_box, show_label, show_conf, img_size)

                if self.save_results and self.output_writer is not None and det_pixmap:
                    self._write_pixmap(det_pixmap)

                self.frame_signal.emit(orig_pixmap, det_pixmap)

                # Log every 10th frame to keep the log readable.
                if self.frame_count % 10 == 0:
                    self.log_signal.emit(f"[推理] 处理帧 {self.frame_count}")

        except Exception as e:
            import traceback
            error_details = traceback.format_exc()
            self.log_signal.emit(f"[错误] 推理线程异常: {str(e)}\n详情:\n{error_details}")
            # Empty stats: an error aborted processing.
            self.stats_signal.emit({})
        finally:
            if cap is not None and cap.isOpened():
                cap.release()
            self._release_video_writer()
            self.finished.emit()

    def _write_pixmap(self, pixmap):
        """Convert an annotated QPixmap back to a BGR frame and append it to the output video."""
        try:
            qimage = pixmap.toImage().convertToFormat(QImage.Format_RGB888)
            width = qimage.width()
            height = qimage.height()
            ptr = qimage.bits()
            ptr.setsize(qimage.byteCount())
            # BUGFIX: QImage pads each scanline to a 4-byte boundary, so the raw
            # buffer is height*bytesPerLine, not height*width*3. Reshape via the
            # stride and strip the padding before interpreting as H x W x 3.
            stride = qimage.bytesPerLine()
            raw = np.array(ptr).reshape(height, stride)
            rgb = raw[:, :width * 3].reshape(height, width, 3)
            # QImage is RGB; OpenCV wants BGR.
            bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)

            if bgr is not None and bgr.size > 0:
                self.output_writer.write(bgr)
            else:
                self.log_signal.emit(f"[警告] 要保存的帧为空或无效")
        except Exception as e:
            self.log_signal.emit(f"[错误] 保存帧失败: {e}")

    def _convert_cv_qt(self, cv_img):
        """Convert a BGR OpenCV image to QPixmap; returns an empty pixmap on failure."""
        try:
            if cv_img is None or cv_img.size == 0:
                return QPixmap()

            rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb_image.shape
            bytes_per_line = ch * w
            qt_image = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888)
            # QPixmap.fromImage copies the pixel data, so rgb_image may be freed after this.
            return QPixmap.fromImage(qt_image)
        except Exception as e:
            self.log_signal.emit(f"[错误] 图像转换失败: {e}")
            return QPixmap()

    def _perform_detection(self, frame, conf_threshold, iou_threshold, show_box, show_label, show_conf, img_size):
        """Run one inference pass on *frame* and return the annotated QPixmap.

        Emits stats_signal exactly once per call with
        {'counts': {class_name: count}, 'detections': [{'class', 'xywh'}]}
        (empty on the mock/failure paths). The old code double-emitted on the
        inference-failure path; that is fixed here.
        """
        detailed_stats = {'counts': {}, 'detections': []}
        try:
            # Use the real model when it loaded successfully.
            if YOLO_AVAILABLE and self.model_loaded and self.model is not None:
                try:
                    # verbose=False keeps ultralytics from spamming stdout.
                    results = self.model.predict(source=frame, conf=conf_threshold,
                                                 iou=iou_threshold, imgsz=img_size, verbose=False)
                    self._collect_stats(results, detailed_stats)
                    # plot() draws boxes/labels/confidences onto a copy of the frame.
                    annotated_frame = results[0].plot(
                        conf=show_conf,
                        labels=show_label,
                        boxes=show_box
                    )
                    self.stats_signal.emit(detailed_stats)
                    return self._convert_cv_qt(annotated_frame)
                except Exception as e:
                    self.log_signal.emit(f"[警告] 模型推理失败: {e}")
                    # Fall through to the mock display below.

            # Model unavailable or inference failed: mock display, empty stats.
            simple_pixmap = self._simple_detection_display(frame, show_box, show_label, show_conf)
            self.stats_signal.emit(detailed_stats)
            return simple_pixmap

        except Exception as e:
            self.log_signal.emit(f"[错误] 检测过程异常: {e}")
            self.stats_signal.emit(detailed_stats)
            return self._convert_cv_qt(frame)

    def _collect_stats(self, results, detailed_stats):
        """Accumulate per-class counts and xywh boxes from YOLO *results* into *detailed_stats*."""
        if not results or len(results) == 0:
            return
        boxes = getattr(results[0], 'boxes', None)
        if boxes is None or boxes.cls is None or len(boxes.cls) == 0 or boxes.xywh is None:
            return
        names = getattr(self.model, 'names', None)
        if not names:
            return
        try:
            # Move tensors to CPU once and iterate as numpy.
            class_ids = boxes.cls.cpu().numpy().astype(int)
            xywh = boxes.xywh.cpu().numpy()  # N x 4 array of [x, y, w, h]
            for cls_id, coords in zip(class_ids, xywh):
                cls_name = names.get(int(cls_id), f"Class_{cls_id}")
                detailed_stats['counts'][cls_name] = detailed_stats['counts'].get(cls_name, 0) + 1
                detailed_stats['detections'].append({
                    'class': cls_name,
                    'xywh': coords.tolist()  # JSON-friendly [x, y, w, h]
                })
        except Exception as tensor_e:
            self.log_signal.emit(f"[警告] 处理类别或坐标张量时出错: {tensor_e}")

    def _simple_detection_display(self, frame, show_box, show_label, show_conf):
        """Mock detection overlay used when no real model is available."""
        try:
            det_frame = frame.copy()

            if show_box:
                # A few fixed fake detections: ((x1, y1, x2, y2), label, confidence).
                boxes = [
                    ((50, 50, 150, 150), "person", 0.85),
                    ((200, 100, 300, 200), "car", 0.78),
                    ((400, 150, 500, 250), "dog", 0.92)
                ]

                for (x1, y1, x2, y2), label, conf in boxes:
                    cv2.rectangle(det_frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

                    if show_label or show_conf:
                        text_parts = []
                        if show_label:
                            text_parts.append(label)
                        if show_conf:
                            text_parts.append(f"{conf:.2f}")

                        if text_parts:
                            text = " ".join(text_parts)
                            cv2.putText(det_frame, text, (x1, y1 - 10),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

            return self._convert_cv_qt(det_frame)
        except Exception as e:
            self.log_signal.emit(f"[错误] 检测显示失败: {e}")
            return self._convert_cv_qt(frame)

    def pause(self):
        """Toggle the pause flag; the run loop idles while paused."""
        self._pause_flag = not self._pause_flag
        status = "暂停" if self._pause_flag else "继续"
        self.log_signal.emit(f"[推理] {status}")

    def stop(self):
        """Request the run loop to exit and block until the thread terminates.

        Must be called from another thread (e.g. the GUI thread) — calling it
        from within run() would deadlock on wait().
        """
        self._stop_flag = True
        self.wait()
        self.log_signal.emit("[推理] 已停止")

    def set_input_source(self, source):
        """Set the capture source: camera index (int) or video file path (str)."""
        self.input_source = source

    def update_params(self, params):
        """Merge *params* into infer_params; reload the model if its path changed."""
        # Key name is "model", matching infer.yaml and DetectionWidget.
        old_model_path = self.infer_params.get("model", "")
        self.infer_params.update(params)

        new_model_path = params.get("model", old_model_path)
        if new_model_path != old_model_path:
            self.log_signal.emit(f"[参数] 模型路径已更新: {new_model_path}")
            self._init_model()

    def get_progress(self):
        """Return processing progress as 0-100 (0 when total frames is unknown)."""
        if self.total_frames > 0:
            return min(int((self.frame_count / self.total_frames) * 100), 100)
        return 0

    def is_running(self):
        """True while the thread is alive and no stop has been requested."""
        return self.isRunning() and not self._stop_flag

# --- 修改总结 ---
# 1.  在类定义中添加了 `stats_signal = pyqtSignal(dict)`。
# 2.  在 `_perform_detection` 方法中：
#     *   初始化了 `detailed_stats` 字典（含 'counts' 与 'detections' 两个键）。
#     *   在成功执行 YOLO 推理后，添加了计算 `detailed_stats` 的逻辑。
#     *   在 `results[0].plot` 后，添加了 `self.stats_signal.emit(detailed_stats)`。
#     *   在捕获到推理错误或回退到简单显示时，发射初始化后的空 `detailed_stats` 字典。
#     *   在最外层的 `except` 块中，同样发射空的 `detailed_stats` 以处理未预期的错误。
# 3.  在 `run` 方法中：
#     *   在无法打开视频源或视频播放结束时，也发射了 `self.stats_signal.emit({})`。
# 4.  统一了参数键名，使用 `model` 而不是 `model_path`，以匹配 `infer.yaml` 和 `DetectionWidget`。
# 5.  修正了 `model.predict` 的调用方式，使其更符合 Ultralytics YOLO 的 API。
# 6.  修正了视频保存时的颜色空间转换问题（QImage RGB -> OpenCV BGR）。
# --- 修改结束 ---