import os
import cv2
import torch
import torch.backends.cudnn as cudnn
from pathlib import Path
import sys

# Add the YOLOv5 root directory to sys.path
FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

from models.common import DetectMultiBackend
from utils.datasets import LoadImages, LoadStreams
from utils.general import (LOGGER, check_img_size, check_imshow, non_max_suppression, scale_coords, xyxy2xywh)
from utils.plots import Annotator, colors
from utils.torch_utils import select_device, time_sync
from torch.serialization import add_safe_globals
from torch.serialization import safe_globals
from ultralytics.nn.tasks import DetectionModel
from ultralytics import YOLO


class Detector:
    """YOLOv5 detector: loads a DetectMultiBackend model and runs inference
    on single images (`detect_image`) or video/stream sources (`detect_video`)."""

    def __init__(self, weights="runs/train/exp/weights/best.pt", device='cpu'):
        """Initialize the detector and load the model.

        Args:
            weights: path to the model weights file (.pt)
            device: inference device ('cpu' or 'cuda')
        """
        # Store the weights *path*. DetectMultiBackend (in load_model) expects
        # a file path; the previous code wrapped it in an ultralytics YOLO()
        # object, which load_model cannot consume.
        self.weights = weights
        self.device = device
        self.model = None
        self.load_model()

    def load_model(self, half=False, dnn=False):
        """Load the model onto the selected device and warm it up.

        Args:
            half: use FP16 half precision (only effective on CUDA with a
                native PyTorch model)
            dnn: use the OpenCV DNN backend for ONNX inference

        Returns:
            The loaded DetectMultiBackend model (also stored on self.model).
        """
        device = select_device(self.device)
        half &= device.type != 'cpu'  # half precision is CUDA-only
        model = DetectMultiBackend(self.weights, device=device, dnn=dnn)
        stride, names, pt = model.stride, model.names, model.pt

        # FP16 additionally requires a native PyTorch (pt) model.
        half &= pt and device.type != 'cpu'
        if pt:
            model.model.half() if half else model.model.float()

        # Warm up with a dummy forward pass so the first real frame is fast.
        if pt and device.type != 'cpu':
            model(torch.zeros(1, 3, 640, 640).to(device).type_as(next(model.model.parameters())))

        self.model = model
        self.stride = stride
        self.names = names
        self.pt = pt
        self.device = device
        print("模型加载完成!")
        return model

    def detect_image(self, source, output_size=480, conf_thres=0.25, iou_thres=0.45, max_det=1000):
        """Run detection on a single image.

        Args:
            source: image file path
            output_size: target output image height in pixels
            conf_thres: confidence threshold
            iou_thres: IOU threshold for NMS
            max_det: maximum number of detections

        Returns:
            (im0, detections) for the first image in the source, where
            detections contains:
            - type: list of class names
            - box: list of [x1, y1, x2, y2] integer boxes
            - confidence: list of float confidences
            Implicitly returns None if the source yields no images.
        """
        model = self.model
        line_thickness = 3

        # Ensure the inference size is a multiple of the model stride.
        imgsz = check_img_size([640, 640], s=self.stride)

        dataset = LoadImages(source, img_size=imgsz, stride=self.stride, auto=self.pt)

        for path, im, im0s, vid_cap, s in dataset:
            # Preprocess: uint8 HWC -> normalized float tensor with batch dim.
            im = torch.from_numpy(im).to(self.device)
            im = im.float()
            im /= 255
            if len(im.shape) == 3:
                im = im[None]

            # Inference + NMS.
            pred = model(im, augment=False, visualize=False)
            pred = non_max_suppression(pred, conf_thres, iou_thres, None, False, max_det=max_det)

            for det in pred:
                im0 = im0s.copy()
                annotator = Annotator(im0, line_width=line_thickness, example=str(self.names))
                detections = {'type': [], 'box': [], 'confidence': []}

                if len(det):
                    # Rescale boxes from inference size back to the original image.
                    det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()

                    for *xyxy, conf, cls in reversed(det):
                        c = int(cls)
                        label = f'{self.names[c]} {conf:.2f}'
                        annotator.box_label(xyxy, label, color=colors(c, True))

                        detections['type'].append(self.names[c])
                        detections['box'].append([int(x) for x in xyxy])  # integer pixel coords
                        detections['confidence'].append(float(conf))

                im0 = annotator.result()

                # Scale the annotated image so its height equals output_size.
                resize_scale = output_size / im0.shape[0]
                im0 = cv2.resize(im0, (0, 0), fx=resize_scale, fy=resize_scale)

                return im0, detections  # only the first image is processed

    def detect_video(self, source, output_callback, stop_event, output_size=480, conf_thres=0.25, iou_thres=0.45, max_det=1000):
        """Run detection on a video file, camera, or stream, pushing each
        annotated frame and its detections to a callback.

        Args:
            source: video file path, camera index (e.g. '0' or 0), or
                rtsp/rtmp/http(s) stream URL
            output_callback: callable(frame, detections) invoked per frame
            stop_event: threading.Event; set it to stop detection
            output_size: target output frame height in pixels
            conf_thres: confidence threshold
            iou_thres: IOU threshold for NMS
            max_det: maximum number of detections
        """
        model = self.model
        device = self.device
        line_thickness = 3

        # Ensure the inference size is a multiple of the model stride.
        imgsz = check_img_size([640, 640], s=self.stride)

        # Tolerate integer camera indices; the checks below need a string.
        source = str(source)
        webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
            ('rtsp://', 'rtmp://', 'http://', 'https://'))

        # Data loader.
        if webcam:
            cudnn.benchmark = True  # speeds up inference for fixed-size frames
            dataset = LoadStreams(source, img_size=imgsz, stride=self.stride, auto=self.pt)
        else:
            dataset = LoadImages(source, img_size=imgsz, stride=self.stride, auto=self.pt)

        quit_requested = False
        for path, im, im0s, vid_cap, s in dataset:
            # Stop when asked externally or via the 'q' key below.
            if stop_event.is_set() or quit_requested:
                break

            # Preprocess: uint8 HWC -> normalized float tensor with batch dim.
            im = torch.from_numpy(im).to(device)
            im = im.float()
            im /= 255
            if len(im.shape) == 3:
                im = im[None]

            # Inference + NMS.
            pred = model(im, augment=False, visualize=False)
            pred = non_max_suppression(pred, conf_thres, iou_thres, None, False, max_det=max_det)

            for i, det in enumerate(pred):
                if webcam:  # LoadStreams yields a batch of frames
                    im0 = im0s[i].copy()
                else:
                    im0 = im0s.copy()

                annotator = Annotator(im0, line_width=line_thickness, example=str(self.names))
                detections = {'type': [], 'box': [], 'confidence': []}

                if len(det):
                    # Rescale boxes from inference size back to the original frame.
                    det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()

                    for *xyxy, conf, cls in reversed(det):
                        c = int(cls)
                        label = f'{self.names[c]} {conf:.2f}'
                        annotator.box_label(xyxy, label, color=colors(c, True))

                        detections['type'].append(self.names[c])
                        detections['box'].append([int(x) for x in xyxy])  # integer pixel coords
                        detections['confidence'].append(float(conf))

                im0 = annotator.result()

                # Scale the annotated frame so its height equals output_size.
                resize_scale = output_size / im0.shape[0]
                im0 = cv2.resize(im0, (0, 0), fx=resize_scale, fy=resize_scale)

                # Push the result to the caller.
                if output_callback:
                    output_callback(im0, detections)

                # 'q' stops the whole detection run, not just the current
                # batch (the original break only exited the inner loop, so
                # detection silently continued with the next frame).
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    quit_requested = True
                    break