import sys
import os
import cv2
import torch
import numpy as np
from PyQt5.QtWidgets import (QApplication, QMainWindow, QPushButton, QLabel, QVBoxLayout, QHBoxLayout, 
                             QWidget, QFileDialog, QComboBox, QSlider, QStyle, QSizePolicy, QFrame, QMessageBox)
from PyQt5.QtGui import QPixmap, QImage, QFont, QIcon
from PyQt5.QtCore import Qt, QTimer, QSize, QThread, pyqtSignal, QUrl
import time
from pathlib import Path

# Add the YOLOv5 directory to the import path
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'yolov5'))

# Import YOLOv5 inference helpers
from yolov5.models.common import DetectMultiBackend
from yolov5.utils.general import (check_img_size, non_max_suppression, scale_boxes, 
                                  check_imshow, cv2)
from yolov5.utils.torch_utils import select_device
from yolov5.utils.plots import Annotator

# Video-processing worker thread
class VideoThread(QThread):
    """Worker thread that decodes a video source, runs YOLOv5 detection on
    each frame, annotates it, and streams results back to the GUI via Qt
    signals. Stop it by clearing ``running`` and waiting for the thread.
    """

    change_pixmap_signal = pyqtSignal(np.ndarray)  # annotated BGR frame for display
    fps_signal = pyqtSignal(float)                 # measured processing FPS, ~once per second
    finished_signal = pyqtSignal()                 # stream ended or thread was stopped
    
    def __init__(self, source, model, device, imgsz, conf_thres=0.25):
        """Store the capture source and inference settings.

        Args:
            source: camera index (int, or digit string) or a video file path.
            model: loaded YOLOv5 ``DetectMultiBackend`` instance.
            device: torch device the model runs on.
            imgsz: inference size as ``(height, width)``.
            conf_thres: NMS confidence threshold; mutable while running
                (the GUI updates it from the slider).
        """
        super().__init__()
        self.source = source
        self.model = model
        self.device = device
        self.imgsz = imgsz
        self.conf_thres = conf_thres
        self.running = True  # cleared by the GUI to request a stop
        self.stride = self.model.stride
        self.auto = self.model.pt  # minimal-rectangle padding only for PyTorch backends
        self.vid_stride = 1  # process every N-th frame (1 = every frame)
        
    def letterbox(self, im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
        """YOLOv5-style preprocessing: scale keeping aspect ratio, then pad
        to ``new_shape`` (or to the nearest stride multiple when ``auto``).

        Returns ``(image, (rw, rh) scale ratios, (dw, dh) per-side padding)``
        — exactly the geometry ``scale_boxes`` later inverts.
        """
        # Resize and pad image while meeting stride-multiple constraints
        shape = im.shape[:2]  # current shape [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)

        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        if not scaleup:  # only scale down, do not scale up (for better val mAP)
            r = min(r, 1.0)

        # Compute padding
        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
        if auto:  # minimum rectangle
            dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
        elif scaleFill:  # stretch
            dw, dh = 0.0, 0.0
            new_unpad = (new_shape[1], new_shape[0])
            ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

        dw /= 2  # divide padding into 2 sides
        dh /= 2

        if shape[::-1] != new_unpad:  # resize
            im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
        return im, ratio, (dw, dh)
        
    def run(self):
        """Main loop: capture -> letterbox -> infer -> NMS -> annotate -> emit."""
        # Open the video source (camera index or file path)
        if isinstance(self.source, int) or self.source.isdigit():
            cap = cv2.VideoCapture(int(self.source))
            if not cap.isOpened():
                print(f"错误: 无法打开摄像头 {self.source}")
                self.finished_signal.emit()
                return
        else:
            cap = cv2.VideoCapture(self.source)
            if not cap.isOpened():
                print(f"错误: 无法打开视频文件 {self.source}")
                self.finished_signal.emit()
                return
        
        # Source properties (used to configure the output writer)
        fps = cap.get(cv2.CAP_PROP_FPS)
        w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        
        # FPS-measurement state
        prev_time = time.time()
        frame_count = 0
        current_fps = 0
        
        # Create the output folder if it does not exist
        output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'output')
        os.makedirs(output_dir, exist_ok=True)
        
        # For file sources, also save the annotated result as an .mp4
        # NOTE(review): some files report fps == 0 from OpenCV — confirm
        # the writer behaves for such inputs.
        out = None
        if not (isinstance(self.source, int) or self.source.isdigit()):
            output_path = os.path.join(output_dir, f'output_{os.path.basename(self.source)}')
            output_path = str(Path(output_path).with_suffix('.mp4'))
            out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
        
        # Warm up the model once before the loop
        self.model.warmup(imgsz=(1, 3, *self.imgsz))
        
        frame_idx = 0
        while self.running:
            ret, frame = cap.read()
            if not ret:
                break
                
            frame_idx += 1
            if frame_idx % self.vid_stride != 0:
                continue
                
            # Keep the original frame; detections are drawn onto this copy
            im0 = frame.copy()
            
            # Letterbox
            im, ratio, (dw, dh) = self.letterbox(im0, self.imgsz, stride=self.stride, auto=self.auto)
            
            # Convert
            im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
            im = np.ascontiguousarray(im)
            im = torch.from_numpy(im).to(self.device)
            im = im.half() if self.model.fp16 else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            if len(im.shape) == 3:
                im = im[None]  # expand for batch dim
                
            # Inference + non-max suppression
            with torch.no_grad():
                pred = self.model(im, augment=False)
                pred = non_max_suppression(
                    pred,
                    conf_thres=self.conf_thres,
                    iou_thres=0.45,
                    classes=None,
                    agnostic=False,
                    max_det=1000
                )
            
            # Draw detections (Annotator writes onto im0 in place)
            for i, det in enumerate(pred):
                if len(det):
                    # Rescale boxes from the letterboxed size back to im0
                    det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
                    
                    # Draw the results
                    annotator = Annotator(im0, line_width=2, example=str(self.model.names))
                    for *xyxy, conf, cls in reversed(det):
                        c = int(cls)
                        label = f'{self.model.names[c]} {conf:.2f}'
                        
                        # Per-class colour (BGR)
                        if c == 0:  # mask worn
                            color = (0, 255, 0)
                        elif c == 1:  # no mask
                            color = (0, 0, 255)
                        else:  # mask worn incorrectly
                            color = (0, 255, 255)
                            
                        # Draw box and label via the Annotator
                        annotator.box_label(xyxy, label, color=color)
            
            # Update the measured FPS roughly once per second
            frame_count += 1
            current_time = time.time()
            if current_time - prev_time >= 1.0:
                current_fps = frame_count / (current_time - prev_time)
                frame_count = 0
                prev_time = current_time
                self.fps_signal.emit(current_fps)
            
            # Overlay the FPS on the frame itself
            cv2.putText(im0, f'FPS: {current_fps:.1f}', (20, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
            
            # Save to the output video (file sources only)
            if out is not None:
                out.write(im0)
            
            # Hand the annotated frame to the GUI
            self.change_pixmap_signal.emit(im0)
        
        # Release capture/writer resources and notify the GUI
        cap.release()
        if out is not None:
            out.release()
        self.finished_signal.emit()

# Main window class
class MaskDetectionApp(QMainWindow):
    """Main window of the mask-detection system.

    Owns the YOLOv5 model, the control-panel / display widgets, and the
    background ``VideoThread`` used for video and camera sources.
    """

    def __init__(self):
        super().__init__()
        
        # Window title and minimum size
        self.setWindowTitle('口罩检测系统')
        self.setMinimumSize(800, 600)
        
        # Load the detection model (shows an error box and exits on failure)
        self.load_model()
        
        # Build the widgets
        self.init_ui()
        
        # Runtime state
        self.video_thread = None    # active worker thread, if any
        self.current_source = None  # last source handed to detection
        self.timer = QTimer(self)   # 1 s tick driving the elapsed-time label
        self.timer.timeout.connect(self.update_timer)
        self.processing_time = 0
        
    def load_model(self):
        """Load the YOLOv5 weights and prepare the inference settings.

        Sets ``self.model``, ``self.device``, ``self.imgsz`` and
        ``self.conf_thres``. On failure a critical message box is shown and
        the process exits with status 1.
        """
        # Model parameters
        self.weights = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models', 'mask_yolov5.pt')
        self.device = select_device('')  # auto-select CUDA / CPU
        self.imgsz = (640, 640)  # inference size (h, w)
        self.conf_thres = 0.2  # confidence threshold
        
        # Load the model
        try:
            self.model = DetectMultiBackend(self.weights, device=self.device)
            stride, names, pt = self.model.stride, self.model.names, self.model.pt
            self.imgsz = check_img_size(self.imgsz, s=stride)  # round size to a stride multiple
            
            # Warm the model up once so the first real inference is fast
            self.model.warmup(imgsz=(1, 3, *self.imgsz))
            
            print(f"模型加载成功: {self.weights}")
            print(f"类别: {names}")
        except Exception as e:
            print(f"模型加载失败: {e}")
            QMessageBox.critical(self, "错误", f"模型加载失败: {e}")
            sys.exit(1)
    
    def init_ui(self):
        """Build the full widget tree: title, control panel and display area."""
        # Central widget
        main_widget = QWidget()
        self.setCentralWidget(main_widget)
        
        # Main layout
        main_layout = QVBoxLayout(main_widget)
        main_layout.setContentsMargins(20, 20, 20, 20)
        main_layout.setSpacing(20)
        
        # Title
        title_label = QLabel('口罩检测系统')
        title_label.setAlignment(Qt.AlignCenter)
        title_label.setFont(QFont('Arial', 24, QFont.Bold))
        title_label.setStyleSheet('color: #2c3e50;')
        main_layout.addWidget(title_label)
        
        # Content layout
        content_layout = QHBoxLayout()
        content_layout.setSpacing(20)
        
        # Left-hand control panel
        control_panel = QFrame()
        control_panel.setFrameShape(QFrame.StyledPanel)
        control_panel.setStyleSheet('background-color: #f5f5f5; border-radius: 10px;')
        control_panel.setSizePolicy(QSizePolicy.Maximum, QSizePolicy.Preferred)
        control_panel.setMinimumWidth(250)
        control_panel.setMaximumWidth(300)
        
        control_layout = QVBoxLayout(control_panel)
        control_layout.setContentsMargins(15, 15, 15, 15)
        control_layout.setSpacing(15)
        
        # Control-panel title
        panel_title = QLabel('控制面板')
        panel_title.setFont(QFont('Arial', 16, QFont.Bold))
        panel_title.setStyleSheet('color: #2c3e50;')
        panel_title.setAlignment(Qt.AlignCenter)
        control_layout.addWidget(panel_title)
        
        # Separator line
        line = QFrame()
        line.setFrameShape(QFrame.HLine)
        line.setFrameShadow(QFrame.Sunken)
        line.setStyleSheet('background-color: #bdc3c7;')
        control_layout.addWidget(line)
        
        # "Upload image" button
        self.upload_image_btn = QPushButton('上传图片')
        self.upload_image_btn.setFont(QFont('Arial', 12))
        self.upload_image_btn.setStyleSheet(
            'QPushButton {background-color: #3498db; color: white; border-radius: 5px; padding: 10px;}'
            'QPushButton:hover {background-color: #2980b9;}'
        )
        self.upload_image_btn.clicked.connect(self.upload_image)
        control_layout.addWidget(self.upload_image_btn)
        
        # "Upload video" button
        self.upload_video_btn = QPushButton('上传视频')
        self.upload_video_btn.setFont(QFont('Arial', 12))
        self.upload_video_btn.setStyleSheet(
            'QPushButton {background-color: #2ecc71; color: white; border-radius: 5px; padding: 10px;}'
            'QPushButton:hover {background-color: #27ae60;}'
        )
        self.upload_video_btn.clicked.connect(self.upload_video)
        control_layout.addWidget(self.upload_video_btn)
        
        # "Open camera" button
        self.open_camera_btn = QPushButton('打开摄像头')
        self.open_camera_btn.setFont(QFont('Arial', 12))
        self.open_camera_btn.setStyleSheet(
            'QPushButton {background-color: #e74c3c; color: white; border-radius: 5px; padding: 10px;}'
            'QPushButton:hover {background-color: #c0392b;}'
        )
        self.open_camera_btn.clicked.connect(self.open_camera)
        control_layout.addWidget(self.open_camera_btn)
        
        # "Stop detection" button (disabled until a detection is running)
        self.stop_btn = QPushButton('停止检测')
        self.stop_btn.setFont(QFont('Arial', 12))
        self.stop_btn.setStyleSheet(
            'QPushButton {background-color: #95a5a6; color: white; border-radius: 5px; padding: 10px;}'
            'QPushButton:hover {background-color: #7f8c8d;}'
        )
        self.stop_btn.clicked.connect(self.stop_detection)
        self.stop_btn.setEnabled(False)
        control_layout.addWidget(self.stop_btn)
        
        # Second separator line
        line2 = QFrame()
        line2.setFrameShape(QFrame.HLine)
        line2.setFrameShadow(QFrame.Sunken)
        line2.setStyleSheet('background-color: #bdc3c7;')
        control_layout.addWidget(line2)
        
        # Confidence-threshold label
        conf_label = QLabel('置信度阈值:')
        conf_label.setFont(QFont('Arial', 12))
        control_layout.addWidget(conf_label)
        
        # Confidence-threshold slider (maps 1-99 to 0.01-0.99)
        self.conf_slider = QSlider(Qt.Horizontal)
        self.conf_slider.setMinimum(1)
        self.conf_slider.setMaximum(99)
        self.conf_slider.setValue(int(self.conf_thres * 100))
        self.conf_slider.setTickPosition(QSlider.TicksBelow)
        self.conf_slider.setTickInterval(10)
        self.conf_slider.valueChanged.connect(self.update_conf_thres)
        control_layout.addWidget(self.conf_slider)
        
        # Current confidence value read-out
        self.conf_value_label = QLabel(f'当前: {self.conf_thres:.2f}')
        self.conf_value_label.setAlignment(Qt.AlignCenter)
        self.conf_value_label.setFont(QFont('Arial', 10))
        control_layout.addWidget(self.conf_value_label)
        
        # Status line
        self.status_label = QLabel('状态: 就绪')
        self.status_label.setFont(QFont('Arial', 12))
        self.status_label.setStyleSheet('color: #2c3e50;')
        control_layout.addWidget(self.status_label)
        
        # FPS read-out
        self.fps_label = QLabel('FPS: -')
        self.fps_label.setFont(QFont('Arial', 12))
        self.fps_label.setStyleSheet('color: #2c3e50;')
        control_layout.addWidget(self.fps_label)
        
        # Processing-time read-out
        self.time_label = QLabel('处理时间: -')
        self.time_label.setFont(QFont('Arial', 12))
        self.time_label.setStyleSheet('color: #2c3e50;')
        control_layout.addWidget(self.time_label)
        
        # Spring pushes the copyright notice to the bottom
        control_layout.addStretch()
        
        # Copyright notice
        # NOTE(review): a leading "©" appears to have been lost — confirm intended text.
        copyright_label = QLabel(' 2025 口罩检测系统')
        copyright_label.setAlignment(Qt.AlignCenter)
        copyright_label.setFont(QFont('Arial', 10))
        copyright_label.setStyleSheet('color: #7f8c8d;')
        control_layout.addWidget(copyright_label)
        
        # Right-hand panel (display area)
        right_panel = QFrame()
        right_panel.setFrameShape(QFrame.StyledPanel)
        right_panel.setStyleSheet('background-color: #f5f5f5; border-radius: 10px;')
        
        right_layout = QVBoxLayout(right_panel)
        right_layout.setContentsMargins(15, 15, 15, 15)
        
        # Display title
        display_title = QLabel('检测结果')
        display_title.setFont(QFont('Arial', 16, QFont.Bold))
        display_title.setStyleSheet('color: #2c3e50;')
        display_title.setAlignment(Qt.AlignCenter)
        right_layout.addWidget(display_title)
        
        # Image display label
        self.image_label = QLabel()
        self.image_label.setAlignment(Qt.AlignCenter)
        self.image_label.setStyleSheet('background-color: #000000; border-radius: 5px;')
        self.image_label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.image_label.setMinimumSize(640, 480)
        right_layout.addWidget(self.image_label)
        
        # Assemble panels into the content layout
        content_layout.addWidget(control_panel)
        content_layout.addWidget(right_panel)
        
        # Add the content layout to the main layout
        main_layout.addLayout(content_layout)

    def update_conf_thres(self):
        """Slider callback: sync the threshold value to state, label and thread."""
        self.conf_thres = self.conf_slider.value() / 100
        self.conf_value_label.setText(f'当前: {self.conf_thres:.2f}')
        
        # If a video thread is running, update its threshold live
        if self.video_thread and self.video_thread.isRunning():
            self.video_thread.conf_thres = self.conf_thres
    
    def upload_image(self):
        """Ask the user for an image file and run detection on it."""
        file_path, _ = QFileDialog.getOpenFileName(self, '选择图片', '', 'Images (*.png *.jpg *.jpeg *.bmp)')
        if file_path:
            self.process_image(file_path)
    
    def upload_video(self):
        """Ask the user for a video file and start detection on it."""
        file_path, _ = QFileDialog.getOpenFileName(self, '选择视频', '', 'Videos (*.mp4 *.avi *.mov *.mkv)')
        if file_path:
            self.process_video(file_path)
    
    def open_camera(self):
        """Start detection on the default camera."""
        self.process_video(0)  # 0 = default camera
    
    def _letterbox_image(self, im, new_shape):
        """Resize *im* to fit ``new_shape`` (h, w) keeping the aspect ratio,
        padding the borders with YOLOv5's grey (114), so that
        ``scale_boxes`` can later invert the transform exactly.
        """
        shape = im.shape[:2]  # current (height, width)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])  # scale ratio
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw = (new_shape[1] - new_unpad[0]) / 2  # padding split over both sides
        dh = (new_shape[0] - new_unpad[1]) / 2
        if shape[::-1] != new_unpad:  # resize only when needed
            im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        return cv2.copyMakeBorder(im, top, bottom, left, right,
                                  cv2.BORDER_CONSTANT, value=(114, 114, 114))

    def process_image(self, image_path):
        """Run detection on a single image, then draw, save and display it.

        The annotated copy is written to ``output/output_<basename>``; errors
        are reported via a message box and the status label.
        """
        try:
            # Update the status line
            self.status_label.setText('状态: 处理图片中...')
            self.current_source = image_path
            
            # Start timing
            start_time = time.time()
            
            # Read the image
            img = cv2.imread(image_path)
            if img is None:
                raise Exception(f"无法读取图片: {image_path}")
            
            # Create the output folder if it does not exist
            output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'output')
            os.makedirs(output_dir, exist_ok=True)
            
            # BUGFIX: the previous implementation stretched the image with
            # torch.nn.functional.interpolate, but scale_boxes() assumes
            # letterbox geometry (aspect-preserving resize + symmetric
            # padding) — boxes were misplaced on non-square images. Use the
            # same letterbox preprocessing as the video path.
            im = self._letterbox_image(img, self.imgsz)
            im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
            im = np.ascontiguousarray(im)
            img_tensor = torch.from_numpy(im).to(self.device)
            # Match the model's precision (fp16 vs fp32), as the video path does
            img_tensor = img_tensor.half() if self.model.fp16 else img_tensor.float()
            img_tensor /= 255  # 0 - 255 to 0.0 - 1.0
            if len(img_tensor.shape) == 3:
                img_tensor = img_tensor[None]  # add batch dimension
            
            # Inference + non-max suppression
            with torch.no_grad():
                pred = self.model(img_tensor)
                pred = non_max_suppression(pred, self.conf_thres, 0.45, None, False, max_det=1000)
            
            # Process detections
            for i, det in enumerate(pred):
                if len(det):
                    # Rescale boxes from the letterboxed size back to the original image
                    det[:, :4] = scale_boxes(img_tensor.shape[2:], det[:, :4], img.shape).round()
                    
                    # Draw the results
                    for *xyxy, conf, cls in reversed(det):
                        c = int(cls)
                        label = f'{self.model.names[c]} {conf:.2f}'
                        x1, y1, x2, y2 = int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])
                        
                        # Per-class colour (mask: green, no mask: red, incorrect: yellow)
                        if c == 0:  # mask worn
                            color = (0, 255, 0)  # green
                        elif c == 1:  # no mask
                            color = (0, 0, 255)  # red
                        else:  # mask worn incorrectly
                            color = (0, 255, 255)  # yellow
                        
                        # Bounding box
                        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
                        
                        # Label text above the box
                        cv2.putText(img, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
            
            # Save the annotated result to the output folder
            output_path = os.path.join(output_dir, f'output_{os.path.basename(image_path)}')
            cv2.imwrite(output_path, img)
            print(f"结果已保存到: {output_path}")
            
            # Report the processing time
            end_time = time.time()
            self.processing_time = end_time - start_time
            self.time_label.setText(f'处理时间: {self.processing_time:.2f}s')
            
            # Show the result
            self.display_image(img)
            
            # Update the status line
            self.status_label.setText(f'状态: 图片处理完成，已保存到output文件夹')
            self.fps_label.setText('FPS: -')
            
        except Exception as e:
            print(f"处理图片时出错: {e}")
            QMessageBox.critical(self, "错误", f"处理图片时出错: {e}")
            self.status_label.setText('状态: 错误')
    
    def process_video(self, video_source):
        """Start a VideoThread on *video_source* (file path or camera index)."""
        try:
            # Stop any running thread first
            self.stop_detection()
            
            # Update the status line
            self.current_source = video_source
            if isinstance(video_source, int) or (isinstance(video_source, str) and video_source.isdigit()):
                self.status_label.setText('状态: 摄像头检测中...')
            else:
                self.status_label.setText('状态: 视频处理中...')
            
            # Create and start the worker thread
            self.video_thread = VideoThread(video_source, self.model, self.device, self.imgsz, self.conf_thres)
            self.video_thread.change_pixmap_signal.connect(self.update_image)
            self.video_thread.fps_signal.connect(self.update_fps)
            self.video_thread.finished_signal.connect(self.on_video_finished)
            self.video_thread.start()
            
            # Start the elapsed-time timer
            self.processing_time = 0
            self.timer.start(1000)  # tick once per second
            
            # Toggle button availability while a detection is running
            self.stop_btn.setEnabled(True)
            self.upload_image_btn.setEnabled(False)
            self.upload_video_btn.setEnabled(False)
            self.open_camera_btn.setEnabled(False)
            
        except Exception as e:
            print(f"处理视频时出错: {e}")
            QMessageBox.critical(self, "错误", f"处理视频时出错: {e}")
            self.status_label.setText('状态: 错误')
    
    def stop_detection(self):
        """Stop the worker thread (if any) and reset the UI to idle."""
        # Ask the thread to stop, then wait for it
        if self.video_thread and self.video_thread.isRunning():
            self.video_thread.running = False
            self.video_thread.wait()  # block until the thread finishes
            
        # Stop the elapsed-time timer
        self.timer.stop()
        
        # Reset UI state
        self.stop_btn.setEnabled(False)
        self.upload_image_btn.setEnabled(True)
        self.upload_video_btn.setEnabled(True)
        self.open_camera_btn.setEnabled(True)
        self.status_label.setText('状态: 就绪')
        self.processing_time = 0
        self.time_label.setText('处理时间: -')
        self.fps_label.setText('FPS: -')
        
    def on_video_finished(self):
        """Thread-finished slot: restore the idle UI state."""
        # Restore button availability
        self.stop_btn.setEnabled(False)
        self.upload_image_btn.setEnabled(True)
        self.upload_video_btn.setEnabled(True)
        self.open_camera_btn.setEnabled(True)
        self.status_label.setText('状态: 就绪')
        
        # Stop the elapsed-time timer
        self.timer.stop()
        self.processing_time = 0
        self.time_label.setText('处理时间: -')
        self.fps_label.setText('FPS: -')

    def update_image(self, cv_img):
        """Slot for frames coming from the video thread."""
        qt_img = self.convert_cv_qt(cv_img)
        self.image_label.setPixmap(qt_img)
    
    def display_image(self, cv_img):
        """Show a single processed image in the display label."""
        qt_img = self.convert_cv_qt(cv_img)
        self.image_label.setPixmap(qt_img)
    
    def convert_cv_qt(self, cv_img):
        """Convert a BGR OpenCV image to a QPixmap scaled to the display label."""
        rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        convert_to_qt_format = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888)
        
        # Scale to fit the label while preserving the aspect ratio
        p = convert_to_qt_format.scaled(self.image_label.width(), self.image_label.height(), 
                                        Qt.KeepAspectRatio, Qt.SmoothTransformation)
        return QPixmap.fromImage(p)
    
    def update_fps(self, fps):
        """Slot for FPS values emitted by the video thread."""
        self.fps_label.setText(f'FPS: {fps:.1f}')
    
    def update_timer(self):
        """Timer tick: advance the elapsed-time display by one second."""
        self.processing_time += 1
        self.time_label.setText(f'处理时间: {self.processing_time}s')
    
    def closeEvent(self, event):
        """Stop all threads and timers before the window closes."""
        self.stop_detection()
        event.accept()

# Application entry point
if __name__ == "__main__":
    # Build the Qt application, show the main window and enter the event loop.
    qt_app = QApplication(sys.argv)
    qt_app.setStyle('Fusion')  # consistent Fusion look across platforms
    main_window = MaskDetectionApp()
    main_window.show()
    sys.exit(qt_app.exec_())
