import cv2
import os
import datetime
import time
import numpy as np
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtGui import QImage

class CameraThread(QThread):
    """Worker thread that grabs frames from a camera / IP stream / video file
    / still image, runs YOLOv5 inference on them (optionally restricted to a
    ROI mask), draws the detections, and emits annotated frames to the GUI.

    Signals:
        update_frame (QImage, list): annotated frame plus detection dicts.
        update_fps (float): measured processing frame rate.
        detection_finished (): emitted once single-image processing completes.
    """

    update_frame = pyqtSignal(QImage, list)   # annotated frame + detections
    update_fps = pyqtSignal(float)            # measured FPS
    detection_finished = pyqtSignal()         # single-image run completed

    def __init__(self, model, conf_thres=0.5, iou_thres=0.5, selected_classes=None,
                 source_type="camera", source_path=0, roi_mask=None, output_video_path=None,
                 target_width=None, target_height=None, save_dir="./runs/images"):
        """Create the worker.

        Args:
            model: YOLOv5 model callable; ``model(frame)`` must return results
                with an ``xyxy`` attribute (torch.hub style).
            conf_thres: minimum confidence for a detection to be kept.
            iou_thres: IoU threshold (stored for callers; not used here).
            selected_classes: class names to keep; empty list keeps nothing.
            source_type: "camera" | "ip_camera" | "video" | "image".
            source_path: device index, URL, or file path for the source.
            roi_mask: optional uint8 mask restricting the detection region.
            output_video_path: where the annotated video is written (or None).
            target_width / target_height: desired output size (currently unused).
            save_dir: directory where annotated still images are saved.
        """
        super().__init__()
        self.model = model
        self.conf_thres = conf_thres
        self.iou_thres = iou_thres  # kept for interface compatibility
        self.selected_classes = selected_classes if selected_classes else []

        self.source_type = source_type
        self.source_path = source_path
        self.roi_mask = roi_mask                     # detection-region mask
        self.output_video_path = output_video_path   # annotated video target

        self.cap = None            # cv2.VideoCapture, created in run()
        self.current_frame = None  # last raw frame read from the source
        self.video_writer = None   # cv2.VideoWriter, created lazily

        self.target_width = target_width
        self.target_height = target_height
        self.save_dir = save_dir   # destination for saved detection images
        self.running = False

    def update_selected_classes(self, selected_classes):
        """Replace the set of class names kept by the detection filter."""
        self.selected_classes = selected_classes

    def _apply_roi(self, frame):
        """Return ``frame`` masked to the ROI, normalizing the mask first.

        The mask is coerced to uint8 and resized to the frame size in place
        (so later contour drawing uses the same normalized mask). When no
        mask is set, the frame is returned unchanged.
        """
        if self.roi_mask is None:
            return frame
        if self.roi_mask.dtype != np.uint8:
            self.roi_mask = self.roi_mask.astype(np.uint8)
        if self.roi_mask.shape[:2] != frame.shape[:2]:
            self.roi_mask = cv2.resize(self.roi_mask, (frame.shape[1], frame.shape[0]))
        return cv2.bitwise_and(frame, frame, mask=self.roi_mask)

    def _filter_and_draw(self, frame, detections):
        """Filter detections by confidence/class, draw boxes onto ``frame``.

        Returns the list of detection dicts emitted to the GUI.
        """
        detection_list = []
        if detections is None:
            return detection_list
        for det in detections:
            if det['confidence'] > self.conf_thres and det['class'] in self.selected_classes:
                detection_list.append({
                    "class": det['class'],
                    "confidence": f"{det['confidence']:.2f}",
                    "count": 1,
                    "xmin": det['xmin'],
                    "ymin": det['ymin'],
                    "xmax": det['xmax'],
                    "ymax": det['ymax'],
                })
                # Draw the bounding box and its label above the top-left corner.
                label = f"{det['class']} {det['confidence']:.2f}"
                top_left = (int(det['xmin']), int(det['ymin']))
                bottom_right = (int(det['xmax']), int(det['ymax']))
                cv2.rectangle(frame, top_left, bottom_right, (0, 255, 0), 2)
                cv2.putText(frame, label, (top_left[0], top_left[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        return detection_list

    def _draw_roi_outline(self, frame):
        """Outline the ROI mask (if any) in yellow on ``frame``."""
        if self.roi_mask is None:
            return
        contours, _ = cv2.findContours(self.roi_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for contour in contours:
            cv2.drawContours(frame, [contour], -1, (0, 255, 255), 2)

    def _to_qimage(self, frame):
        """Convert a BGR frame to a QImage safe to hand to the GUI thread."""
        rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        # .copy() detaches the QImage from the numpy buffer; without it the
        # buffer may be reused/garbage-collected while the GUI still shows it.
        return QImage(rgb_image.data, w, h, ch * w, QImage.Format_RGB888).copy()

    def run(self):
        """Main loop: read frames, detect, annotate, record, and emit."""
        self.running = True
        # Open the video source (still images are handled synchronously).
        if self.source_type == "camera":
            self.cap = cv2.VideoCapture(int(self.source_path))  # local device index
        elif self.source_type in ("ip_camera", "video"):
            self.cap = cv2.VideoCapture(self.source_path)       # URL or file path
        elif self.source_type == "image":
            self.process_image()
            self.running = False  # single image: stop after one pass
            return
        else:
            # Unknown source type: nothing to capture from.
            self.running = False
            return

        # Create the video writer only when an output path was supplied;
        # cv2.VideoWriter(None, ...) would raise.
        if self.output_video_path:
            frame_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = int(self.cap.get(cv2.CAP_PROP_FPS)) or 30  # fall back when unreported
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # MP4 container
            self.video_writer = cv2.VideoWriter(
                self.output_video_path, fourcc, fps, (frame_width, frame_height))

        # Seed with the current time so the first FPS sample is meaningful
        # (a 0 seed made the first value 1/epoch-seconds, effectively 0).
        prev_time = time.time()
        while self.running:
            ret, frame = self.cap.read()
            if not ret:  # end of stream / device unplugged
                break

            self.current_frame = frame  # keep the raw frame for external use

            masked_frame = self._apply_roi(frame)

            # YOLOv5 inference on the (possibly masked) frame; boxes are
            # drawn on the original frame so the full image stays visible.
            results = self.model(masked_frame)
            detections = self.parse_results(results)
            detection_list = self._filter_and_draw(frame, detections)
            self._draw_roi_outline(frame)

            if self.video_writer is not None:
                self.video_writer.write(frame)

            qt_image = self._to_qimage(frame)

            # FPS over the whole processing step, guarded against a
            # zero-length interval on very fast iterations.
            curr_time = time.time()
            elapsed = curr_time - prev_time
            fps = 1.0 / elapsed if elapsed > 0 else 0.0
            prev_time = curr_time

            self.update_frame.emit(qt_image, detection_list)
            self.update_fps.emit(fps)

        # Release resources once the loop ends.
        if self.video_writer is not None:
            self.video_writer.release()
            self.video_writer = None
        if self.cap is not None:
            self.cap.release()
            self.cap = None

    def process_image(self):
        """Detect on a single image file, emit the result, and save it."""
        image = cv2.imread(self.source_path)
        if image is None:  # unreadable / missing file
            self.running = False
            return

        try:
            masked_image = self._apply_roi(image)

            # YOLOv5 inference; annotations go onto the original image.
            results = self.model(masked_image)
            detections = self.parse_results(results)
            detection_list = self._filter_and_draw(image, detections)
            self._draw_roi_outline(image)

            qt_image = self._to_qimage(image)
            self.update_frame.emit(qt_image, detection_list)
            self.update_fps.emit(0)  # FPS is meaningless for a single image

            self.save_detected_image(image)
        finally:
            # Always notify listeners, even if inference raised.
            self.detection_finished.emit()
            self.running = False

    def parse_results(self, results):
        """Flatten YOLOv5 ``results.xyxy[0]`` rows into detection dicts.

        Returns a list of dicts with keys: class (name string), confidence
        (float), and xmin/ymin/xmax/ymax (floats).
        """
        detections = []
        if results is not None:
            for *xyxy, conf, cls in results.xyxy[0]:
                label = self.model.names[int(cls)]
                detections.append({
                    "class": label,
                    "confidence": float(conf),
                    "xmin": float(xyxy[0]),
                    "ymin": float(xyxy[1]),
                    "xmax": float(xyxy[2]),
                    "ymax": float(xyxy[3]),
                })
        return detections

    def save_detected_image(self, image):
        """Save an annotated image to ``self.save_dir`` with a timestamped name."""
        # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
        os.makedirs(self.save_dir, exist_ok=True)

        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        file_name = f"detected_{timestamp}.jpg"
        save_path = os.path.join(self.save_dir, file_name)

        cv2.imwrite(save_path, image)
        print(f"检测后的图片已保存到: {save_path}")

    def stop(self):
        """Stop the loop, wait for the thread, and release any leftover handles."""
        self.running = False
        self.wait()  # block until run() returns
        # run() normally releases these; this is a safety net for early exits.
        if self.cap is not None:
            self.cap.release()
        if self.video_writer is not None:
            self.video_writer.release()