import sys
import cv2
import os
import time
from PyQt5.QtWidgets import (
    QApplication, QMainWindow, QLabel, QVBoxLayout, QWidget,
    QPushButton, QHBoxLayout, QSizePolicy
)
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QThread, pyqtSignal, Qt, QTimer, QCoreApplication
from ultralytics import YOLO
from concurrent.futures import ThreadPoolExecutor
import torch


# ------------------------ Configuration (tuned for CPU inference) ------------------------
RTSP_URL = "rtsp://admin:wkkj1234@192.168.200.64:554/Streaming/Channels/102"
# RTSP_URL = "rtsp://admin:wkkj1234@192.168.200.64:554/Streaming/Channels/1"

# BUG FIX: OpenCV's FFmpeg backend does not understand command-line style flags
# appended to the URL.  The previous code concatenated
# "-rtsp_transport tcp -stimeout ..." onto the RTSP URL, which FFmpeg treats as
# a single (invalid) resource name, so the stream could never open.  The
# supported mechanism is the OPENCV_FFMPEG_CAPTURE_OPTIONS environment variable
# with "key;value|key;value" pairs, set before cv2.VideoCapture is created.
os.environ.setdefault(
    "OPENCV_FFMPEG_CAPTURE_OPTIONS",
    "rtsp_transport;tcp"
    "|stimeout;5000000"
    "|fflags;nobuffer+flush_packets"
    "|flags;low_delay"
    "|probesize;32"
    "|analyzeduration;100000",
)
# Kept under the historical name `gst_str` because the capture code reads it;
# it is now just the plain URL.
gst_str = RTSP_URL

OUTPUT_DIR = "output"
MODEL_DETECT_PATH = r"D:\CodeCNN\yolov8-study\runs\detect\train25\weights\best25.pt"
MODEL_CLS_PATH = r"D:\CodeCNN\yolov8-study\runs\classifier\train2\weights\best-c2.pt"

# ------------------------ Input source ------------------------
# Set to None to read the RTSP stream; set to a directory path
# (e.g. "images/" or r"D:\pics") to process every image under that folder.
# INPUT_SOURCE = None  # default: use the RTSP stream
INPUT_SOURCE = r"D:\TestPic\pic"  # example: process a local image folder

# Inference parameters
CONF_THRESHOLD = 0.5      # detector confidence threshold
CLS_CONF_THRESHOLD = 0.5  # classifier confidence threshold
SAVE_INTERVAL = 2         # minimum seconds between disk saves
IOU_THRESHOLD = 0.05      # head/vehicle IoU needed to associate them
VERTICAL_OFFSET = 1.5     # vertical tolerance (in vehicle heights) for association
SKIP_FRAMES = 3           # run inference on 1 frame out of every SKIP_FRAMES + 1
IMG_SIZE_DETECT = 320     # detector input size
IMG_SIZE_CLS = 128        # classifier input size
MAX_SAVE_TASKS = 3        # cap on concurrently pending async save tasks

# Output directories for raw frames, annotated frames, and head crops
os.makedirs(f"{OUTPUT_DIR}/original", exist_ok=True)
os.makedirs(f"{OUTPUT_DIR}/detected", exist_ok=True)
os.makedirs(f"{OUTPUT_DIR}/cropped", exist_ok=True)

# Shared pool for asynchronous image saving
executor = ThreadPoolExecutor(max_workers=3)


# ------------------------ 检测线程 ------------------------
class DetectionThread(QThread):
    """Worker thread: reads frames from an RTSP stream or an image folder,
    runs YOLO detection + helmet classification, annotates frames and emits
    them (plus status text) to the GUI thread via Qt signals.
    """

    frame_ready = pyqtSignal(object)   # carries a QPixmap ready for display
    status_ready = pyqtSignal(str)     # carries a human-readable status line

    def __init__(self):
        super().__init__()
        self.running = True        # cleared by stop() to break the main loop
        self.last_save_time = 0    # wall-clock time of the last disk save
        self.save_feedback = 0     # time of last save; drives the "Saved!" overlay
        self.frame_count = 0       # total frames read, used for frame skipping
        # Futures of this thread's in-flight save tasks.  Only this thread
        # touches the list, so no locking is required.
        self._save_futures = []

    def generate_frame_id(self):
        """Return a timestamp-based id like '20240101_120000_123' (ms suffix)."""
        t = time.strftime("%Y%m%d_%H%M%S")
        ms = int((time.time() % 1) * 1000)
        return f"{t}_{ms:03d}"

    def bb_intersection_over_union(self, boxA, boxB):
        """Intersection-over-union of two (x1, y1, x2, y2) boxes, clamped >= 0."""
        xA = max(boxA[0], boxB[0])
        yA = max(boxA[1], boxB[1])
        xB = min(boxA[2], boxB[2])
        yB = min(boxA[3], boxB[3])
        interArea = max(0, xB - xA) * max(0, yB - yA)
        boxAArea = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
        boxBArea = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
        iou = interArea / float(boxAArea + boxBArea - interArea)
        return max(0, iou)

    def is_head_on_vehicle(self, head_box, vehicle_box):
        """Heuristic: does this head box belong to the rider of this vehicle box?

        Requires the head's horizontal center to lie within the vehicle's
        x-span and the head's bottom not to sit absurdly far below the
        vehicle's top (VERTICAL_OFFSET vehicle-heights).  Then accepts on
        either sufficient IoU or proximity of the head bottom to the
        vehicle top (half a vehicle height).
        """
        hx1, hy1, hx2, hy2 = head_box
        vx1, vy1, vx2, vy2 = vehicle_box
        h_center_x = (hx1 + hx2) / 2
        if not (vx1 <= h_center_x <= vx2):
            return False
        vehicle_height = vy2 - vy1
        if hy2 > vy1 + vehicle_height * VERTICAL_OFFSET:
            return False
        iou = self.bb_intersection_over_union((hx1, hy1, hx2, hy2), (vx1, vy1, vx2, vy2))
        if iou > IOU_THRESHOLD:
            return True
        if abs(hy2 - vy1) < vehicle_height * 0.5:
            return True
        return False

    def save_images_async(self, frame, annotated_frame, nohel_crops, frame_id):
        """Queue an asynchronous disk save of the raw frame, the annotated
        frame, and each no-helmet head crop.

        BUG FIX: the old code throttled by counting *alive pool threads*
        through the private attribute `executor._threads`.  ThreadPoolExecutor
        workers stay alive while idle, so once max_workers (3) threads had
        spawned, every subsequent save was skipped forever.  We now track our
        own pending futures and prune completed ones instead.
        """
        def _save():
            try:
                cv2.imwrite(f"{OUTPUT_DIR}/original/frame_{frame_id}.jpg", frame)
                cv2.imwrite(f"{OUTPUT_DIR}/detected/detected_{frame_id}.jpg", annotated_frame)
                for idx, crop_img in enumerate(nohel_crops):
                    idx_str = f"{idx + 1:02d}"
                    cv2.imwrite(f"{OUTPUT_DIR}/cropped/cropped_{frame_id}_{idx_str}.jpg", crop_img)
                self.status_ready.emit(f"✅ 保存 {len(nohel_crops)} 人未戴头盔")
            except Exception as e:
                self.status_ready.emit(f"❌ 保存失败: {e}")

        # Drop finished futures, then throttle on how many are still pending.
        self._save_futures = [f for f in self._save_futures if not f.done()]
        if len(self._save_futures) < MAX_SAVE_TASKS:
            self._save_futures.append(executor.submit(_save))
        else:
            self.status_ready.emit("⚠️ 保存任务过多，跳过本次保存")

    def run(self):
        """Thread entry point: load both models, resolve the class ids they
        expose, then dispatch to folder or stream processing per INPUT_SOURCE."""
        self.status_ready.emit("🔄 加载模型...")
        torch.cuda.empty_cache()  # no-op on CPU-only / uninitialized CUDA builds

        try:
            self.model_detect = YOLO(MODEL_DETECT_PATH)
            self.model_cls = YOLO(MODEL_CLS_PATH)
            self.status_ready.emit("✅ 模型加载完成")
        except Exception as e:
            self.status_ready.emit(f"❌ 模型加载失败: {e}")
            return

        # Map detector class names to ids: 'hel' is the head/helmet region,
        # 'wheel' marks a vehicle.
        DETECT_CLASSES = self.model_detect.model.names
        HEAD_CLS_ID = None
        VEHICLE_CLS_IDS = []
        for k, v in DETECT_CLASSES.items():
            if v.lower() == 'hel':
                HEAD_CLS_ID = k
            elif v.lower() == 'wheel':
                VEHICLE_CLS_IDS.append(k)

        if HEAD_CLS_ID is None:
            self.status_ready.emit("❌ 未找到 'hel' 类别")
            return
        if not VEHICLE_CLS_IDS:
            self.status_ready.emit("⚠️ 未找到 wheel 类别")

        # Classifier class ids whose label means "no helmet".
        CLS_CLASSES = self.model_cls.model.names
        NOHEL_LABELS = ['nohel']
        NOHEL_IDS = [i for i, name in CLS_CLASSES.items() if name.lower() in NOHEL_LABELS]

        # ========== Choose the input source ==========
        if INPUT_SOURCE and os.path.isdir(INPUT_SOURCE):
            self.status_ready.emit(f"📁 开始处理图片文件夹: {INPUT_SOURCE}")
            image_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.webp'}
            image_files = []
            for root, _, files in os.walk(INPUT_SOURCE):
                for f in files:
                    ext = os.path.splitext(f.lower())[1]
                    if ext in image_extensions:
                        image_files.append(os.path.join(root, f))
            if not image_files:
                self.status_ready.emit("❌ 文件夹中未找到图片")
                return
            self.process_images(image_files, HEAD_CLS_ID, VEHICLE_CLS_IDS, CLS_CLASSES, NOHEL_IDS, NOHEL_LABELS)
        else:
            self.status_ready.emit("🟢 开始处理视频流...")
            self.process_video_stream(HEAD_CLS_ID, VEHICLE_CLS_IDS, CLS_CLASSES, NOHEL_IDS, NOHEL_LABELS)

    def process_video_stream(self, HEAD_CLS_ID, VEHICLE_CLS_IDS, CLS_CLASSES, NOHEL_IDS, NOHEL_LABELS):
        """Read the RTSP stream until stop(); reconnect on dropouts, skip
        frames per SKIP_FRAMES, and run inference on the remainder."""
        cap = cv2.VideoCapture(gst_str, cv2.CAP_FFMPEG)
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # keep latency low: 1-frame buffer

        if not cap.isOpened():
            self.status_ready.emit("❌ 无法打开 RTSP 流")
            return

        prev_time = time.time()
        while self.running:
            ret, frame = cap.read()
            if not ret:
                # Stream dropped: back off briefly, then rebuild the capture.
                self.status_ready.emit("⚠️ 视频流中断，重连中...")
                time.sleep(2)
                cap.release()
                cap = cv2.VideoCapture(gst_str, cv2.CAP_FFMPEG)
                cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
                continue

            self.frame_count += 1
            if self.frame_count % (SKIP_FRAMES + 1) != 0:
                # Skipped frames are still displayed, just not analyzed.
                self.display_frame(frame)
                time.sleep(0.01)
                continue

            self.process_single_frame(
                frame, HEAD_CLS_ID, VEHICLE_CLS_IDS,
                CLS_CLASSES, NOHEL_IDS, NOHEL_LABELS, prev_time
            )
            prev_time = time.time()
            time.sleep(0.01)

        cap.release()
        self.status_ready.emit("⏹️ 视频流处理结束")

    def process_images(self, image_files, HEAD_CLS_ID, VEHICLE_CLS_IDS, CLS_CLASSES, NOHEL_IDS, NOHEL_LABELS):
        """Run inference over a list of image paths, pacing the display;
        unreadable or failing images are reported and skipped."""
        prev_time = time.time()
        for img_path in image_files:
            if not self.running:
                break
            try:
                frame = cv2.imread(img_path)
                if frame is None:
                    self.status_ready.emit(f"⚠️ 无法读取图片: {os.path.basename(img_path)}")
                    continue
                self.status_ready.emit(f"🖼️ 处理图片: {os.path.basename(img_path)}")
                self.process_single_frame(
                    frame, HEAD_CLS_ID, VEHICLE_CLS_IDS,
                    CLS_CLASSES, NOHEL_IDS, NOHEL_LABELS, prev_time
                )
                prev_time = time.time()
                time.sleep(0.5)  # pace the on-screen slideshow
            except Exception as e:
                self.status_ready.emit(f"❌ 处理图片异常: {os.path.basename(img_path)} - {str(e)}")
                continue
        self.status_ready.emit("✅ 所有图片处理完成")
        # Optional: auto-quit 2s after the batch finishes.
        # NOTE(review): QTimer.singleShot is called from this worker thread,
        # which runs no Qt event loop — the callback may never fire here.
        # Consider signaling the GUI thread to schedule the quit instead.
        QTimer.singleShot(2000, QCoreApplication.quit)

    def process_single_frame(self, frame, HEAD_CLS_ID, VEHICLE_CLS_IDS, CLS_CLASSES, NOHEL_IDS, NOHEL_LABELS,
                             prev_time):
        """Detect heads/vehicles in one frame, classify helmet use for heads
        associated with a vehicle, annotate, save violations, and display."""
        current_time = time.time()
        nohel_crops = []   # crops of riders classified as not wearing a helmet
        heads = []         # detected head boxes (x1, y1, x2, y2)
        vehicles = []      # detected vehicle boxes (x1, y1, x2, y2)

        try:
            results = self.model_detect(
                source=frame,
                imgsz=IMG_SIZE_DETECT,
                classes=[HEAD_CLS_ID] + VEHICLE_CLS_IDS,
                conf=CONF_THRESHOLD,
                device='cpu',
                verbose=False,
                max_det=30
            )
        except Exception as e:
            self.status_ready.emit(f"❌ 检测异常: {e}")
            return

        display_frame = frame.copy()
        for result in results:
            boxes = result.boxes.cpu().numpy()
            for box in boxes:
                cls_id = int(box.cls[0])
                x1, y1, x2, y2 = map(int, box.xyxy[0])
                if cls_id == HEAD_CLS_ID:
                    heads.append((x1, y1, x2, y2))
                elif cls_id in VEHICLE_CLS_IDS:
                    vehicles.append((x1, y1, x2, y2))
            display_frame = result.plot()  # annotated copy supplied by YOLO

        # Only classify heads that appear to belong to a rider on a vehicle.
        crops_to_classify = []
        head_boxes = []

        for (hx1, hy1, hx2, hy2) in heads:
            is_associated = any(
                self.is_head_on_vehicle((hx1, hy1, hx2, hy2), (vx1, vy1, vx2, vy2))
                for (vx1, vy1, vx2, vy2) in vehicles
            )
            if not is_associated:
                continue
            crop = frame[hy1:hy2, hx1:hx2]
            if crop.size == 0:
                continue  # degenerate box (zero area after int truncation)
            crop_resized = cv2.resize(crop, (IMG_SIZE_CLS, IMG_SIZE_CLS))
            crops_to_classify.append(crop_resized)
            head_boxes.append((hx1, hy1, hx2, hy2))

        if crops_to_classify:
            try:
                cls_results = self.model_cls(
                    source=crops_to_classify,
                    imgsz=IMG_SIZE_CLS,
                    conf=CLS_CONF_THRESHOLD,
                    device='cpu',
                    verbose=False
                )
                for i, r in enumerate(cls_results):
                    top1_idx = r.probs.top1
                    cls_name = CLS_CLASSES[top1_idx].lower()
                    # Accept either a matching label or a matching class id.
                    is_nohel = (cls_name in [lbl.lower() for lbl in NOHEL_LABELS]) or (top1_idx in NOHEL_IDS)
                    hx1, hy1, hx2, hy2 = head_boxes[i]
                    label = "No Helmet!" if is_nohel else "Helmet"
                    color = (0, 0, 255) if is_nohel else (0, 255, 0)
                    cv2.putText(display_frame, label, (hx1, hy1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
                    if is_nohel:
                        nohel_crops.append(frame[hy1:hy2, hx1:hx2])
            except Exception as e:
                self.status_ready.emit(f"❌ 分类异常: {e}")

        # Persist violations, rate-limited to one batch per SAVE_INTERVAL seconds.
        if nohel_crops and (current_time - self.last_save_time) > SAVE_INTERVAL:
            frame_id = self.generate_frame_id()
            self.save_images_async(frame, display_frame, nohel_crops, frame_id)
            self.last_save_time = current_time
            self.save_feedback = current_time

        # Transient "Saved!" overlay for 0.8 s after a save.
        if current_time - self.save_feedback < 0.8:
            cv2.putText(display_frame, "Saved!", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 255, 0), 3)

        # FPS readout (epsilon guards against division by zero).
        fps = 1.0 / (current_time - prev_time + 1e-6)
        cv2.putText(display_frame, f"FPS: {fps:.1f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 0), 2)

        self.display_frame(display_frame)

    def display_frame(self, frame):
        """Convert a BGR numpy frame to a QPixmap and emit it to the GUI.

        QPixmap.fromImage copies the pixel data, so the temporary numpy
        buffer may be freed safely afterwards.
        """
        rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        qt_image = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888)
        pixmap = QPixmap.fromImage(qt_image)
        self.frame_ready.emit(pixmap)

    def stop(self):
        """Request the processing loop to terminate (checked each iteration)."""
        self.running = False


# ------------------------ 主窗口 ------------------------
class MainWindow(QMainWindow):
    """Top-level window: a stretchy video display area over a status line and
    a stop button.  Spawns the DetectionThread on construction and renders
    the frames it emits."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("电动车未戴头盔检测系统（多分辨率适配版）")

        # Size the window relative to the primary screen (80% wide, 70% tall),
        # with a floor so the controls remain usable.
        screen_dims = QApplication.primaryScreen().size()
        self.resize(int(screen_dims.width() * 0.8), int(screen_dims.height() * 0.7))
        self.setMinimumSize(800, 600)

        # --- video area: expands with the window; scaling is done manually ---
        self.video_label = QLabel("等待视频流...", self)
        self.video_label.setAlignment(Qt.AlignCenter)
        self.video_label.setStyleSheet("background-color: #000000; color: #AAAAAA; border: 1px solid #333333;")
        self.video_label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.video_label.setScaledContents(False)  # we scale pixmaps ourselves
        self.video_label.setMinimumSize(640, 360)  # never smaller than 640x360

        # --- bottom row: stretching status text plus a fixed-width stop button ---
        self.status_label = QLabel("状态：初始化中...")
        self.status_label.setStyleSheet("font-size: 13px; color: #0066cc; padding: 4px;")
        self.status_label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)

        self.stop_btn = QPushButton("停止")
        self.stop_btn.setFixedWidth(120)
        self.stop_btn.setStyleSheet("font-size: 14px; padding: 6px;")
        self.stop_btn.clicked.connect(self.stop_detection)

        controls_row = QHBoxLayout()
        controls_row.addWidget(self.status_label)
        controls_row.addSpacing(20)
        controls_row.addWidget(self.stop_btn)

        # --- assemble the central widget ---
        container = QWidget()
        column = QVBoxLayout(container)
        column.setContentsMargins(10, 10, 10, 10)
        column.setSpacing(10)
        column.addWidget(self.video_label)
        column.addLayout(controls_row)
        self.setCentralWidget(container)

        # --- wire up and launch the detection worker ---
        self.detection_thread = DetectionThread()
        self.detection_thread.frame_ready.connect(self.update_frame)
        self.detection_thread.status_ready.connect(self.update_status)
        self.detection_thread.start()

    def update_frame(self, pixmap):
        """Scale the incoming pixmap to the label's current size, keeping the
        aspect ratio, and show it centered."""
        if pixmap.isNull():
            return

        target_w = self.video_label.width()
        target_h = self.video_label.height()
        if target_w <= 0 or target_h <= 0:
            return  # label not laid out yet

        fitted = pixmap.scaled(
            target_w, target_h,
            Qt.KeepAspectRatio,
            Qt.SmoothTransformation
        )
        self.video_label.setPixmap(fitted)
        self.video_label.setAlignment(Qt.AlignCenter)

    def resizeEvent(self, event):
        """Default handling only: the live stream re-renders at the new size
        on the next frame, so no explicit re-scale is needed here."""
        super().resizeEvent(event)

    def update_status(self, msg):
        """Show a status message emitted by the worker thread."""
        self.status_label.setText(f"状态：{msg}")

    def stop_detection(self):
        """Ask the worker to stop, then quit the application shortly after."""
        self.detection_thread.stop()
        self.update_status("🛑 正在停止...")
        QTimer.singleShot(1500, QCoreApplication.quit)

    def closeEvent(self, event):
        """Stop the worker before the window closes."""
        self.detection_thread.stop()
        event.accept()


# ------------------------ 启动应用 ------------------------
def _run_app():
    """Build the Qt application, show the main window, and enter the event loop."""
    app = QApplication(sys.argv)
    window = MainWindow()
    # Windowed start sized to the screen; call window.showMaximized() instead
    # for a maximized launch.
    window.show()
    sys.exit(app.exec_())


# ------------------------ Application entry point ------------------------
if __name__ == "__main__":
    _run_app()