#MODEL_DETECT_PATH = r"D:\CodeCNN\yolov8-study\runs\detect\train28\weights\best.pt"
#MODEL_CLS_PATH = r"D:\CodeCNN\yolov8-study\runs\classifier\train2\weights\best-c2.pt"
#MODEL_POSE_PATH="weight/yolov8n-pose.pt"

import sys
import cv2
import os
import time
import numpy as np
from PyQt5.QtWidgets import (
    QApplication, QMainWindow, QLabel, QVBoxLayout, QWidget,
    QPushButton, QHBoxLayout, QSizePolicy
)
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import QThread, pyqtSignal, Qt, QTimer, QCoreApplication
from ultralytics import YOLO
from concurrent.futures import ThreadPoolExecutor
import torch
import logging
from logging.handlers import TimedRotatingFileHandler
from threading import Lock

# ------------------------ Critical stability fixes ------------------------
# Disable OpenCL and force single-threaded OpenCV/PyTorch execution to avoid
# thread-contention issues when running inference inside a Qt worker thread.
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(1)
torch.set_num_threads(1)
torch.set_num_interop_threads(1)

# ------------------------ 安全裁剪函数 ------------------------
def safe_crop(frame, x1, y1, x2, y2):
    """Crop ``frame[y1:y2, x1:x2]`` defensively.

    Guards against a missing/empty frame, NaN/inf coordinates, negative or
    out-of-bounds values, and degenerate (zero-area or inverted) boxes.

    Args:
        frame: Image as a numpy array of shape (H, W[, C]), or None.
        x1, y1, x2, y2: Box corners; may be floats straight from a detector.

    Returns:
        A copied crop (numpy array) on success, otherwise None.
    """
    if frame is None or not hasattr(frame, 'shape') or frame.size == 0:
        return None

    h, w = frame.shape[:2]

    # Reject NaN/inf in one pass; int(NaN) would raise further down.
    if not np.all(np.isfinite([x1, y1, x2, y2])):
        return None

    # Clamp to image bounds; clip BEFORE int() so an enormous float cannot
    # overflow or produce a bogus index.
    x1 = int(np.clip(x1, 0, w))
    y1 = int(np.clip(y1, 0, h))
    x2 = int(np.clip(x2, 0, w))
    y2 = int(np.clip(y2, 0, h))

    # Zero-area or inverted box -> nothing to crop.
    if x1 >= x2 or y1 >= y2:
        return None

    try:
        crop = frame[y1:y2, x1:x2].copy()
        return crop if crop.size > 0 else None
    except Exception:  # narrowed from bare except; keep the best-effort contract
        return None


# ------------------------ Configuration ------------------------
RTSP_URL = "rtsp://admin:wkkj1234@192.168.200.64:554/Streaming/Channels/102"
# NOTE(review): despite the name "gst_str", this is the RTSP URL followed by
# FFmpeg command-line flags. OpenCV's FFmpeg backend normally expects a plain
# URL (per-capture options go through the OPENCV_FFMPEG_CAPTURE_OPTIONS env
# var) — confirm VideoCapture actually honors this composite string.
gst_str = (
    f"{RTSP_URL} "
    f"-rtsp_transport tcp "
    f"-stimeout 5000000 "
    f"-fflags nobuffer+flush_packets "
    f"-flags low_delay "
    f"-probesize 32 "
    f"-analyzeduration 100000"
)
OUTPUT_DIR = "output"
# Model weights: wheel/vehicle detector, helmet classifier, person pose model.
MODEL_DETECT_PATH = r"D:\CodeCNN\yolov8-study\runs\detect\train28\weights\best28.pt"
MODEL_CLS_PATH = r"D:\CodeCNN\yolov8-study\runs\classifier\train2\weights\best-c2.pt"
MODEL_POSE_PATH="weight/yolov8n-pose.pt"

# ------------------------ Input source configuration ------------------------
# Set to an image-folder path (e.g. "images/") to process local images.
# Set to None (or comment out) to use the RTSP video stream instead.
# INPUT_SOURCE = None  # default: use the RTSP stream
# Example: to process a local image folder, uncomment the line below:
INPUT_SOURCE = r"D:\TestPic\pic"  # change to your image folder path

# Inference parameters
CONF_THRESHOLD = 0.5      # detector confidence threshold
CLS_CONF_THRESHOLD = 0.5  # classifier confidence threshold
SAVE_INTERVAL = 2         # minimum seconds between evidence-save batches
SKIP_FRAMES = 5           # NOTE(review): defined but unused in this file
IMG_SIZE_DETECT = 320     # detector input size (square)
IMG_SIZE_CLS = 128        # classifier input size (square)
MAX_SAVE_TASKS = 3        # max concurrent async save tasks

# Create output directories up front so save tasks never have to.
os.makedirs(f"{OUTPUT_DIR}/original", exist_ok=True)
os.makedirs(f"{OUTPUT_DIR}/detected", exist_ok=True)
os.makedirs(f"{OUTPUT_DIR}/cropped", exist_ok=True)

# Thread pool used for non-blocking image saving; save_lock serializes writes.
executor = ThreadPoolExecutor(max_workers=3)
save_lock = Lock()

# ------------------------ Logging configuration ------------------------
LOG_DIR = os.path.join(OUTPUT_DIR, "logs")
os.makedirs(LOG_DIR, exist_ok=True)

logger = logging.getLogger("HelmetDetectionSystem")
logger.setLevel(logging.INFO)

# Guard against attaching duplicate handlers if this module is re-imported.
if not logger.handlers:
    formatter = logging.Formatter(
        '%(asctime)s | %(levelname)-8s | %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )

    # Rotate the log file at midnight, keeping the last 7 days.
    file_handler = TimedRotatingFileHandler(
        os.path.join(LOG_DIR, "log"),
        when="midnight",
        interval=1,
        backupCount=7,
        encoding='utf-8'
    )
    # Rotated files are named "log.YYYY-MM-DD.txt".
    file_handler.suffix = "%Y-%m-%d.txt"
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.INFO)

    # Mirror log output to stdout for interactive runs.
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(formatter)
    console_handler.setLevel(logging.INFO)

    logger.addHandler(file_handler)
    logger.addHandler(console_handler)


# ------------------------ 检测线程 ------------------------
class DetectionThread(QThread):
    """Background worker running the detect -> pose -> classify pipeline.

    Per frame:
      1. Detect "wheel" (vehicle) boxes with the detection model.
      2. Run pose estimation on each vehicle crop to find the rider's nose.
      3. Crop a fixed-size head region around the nose and classify it as
         helmet / no-helmet.
      4. Annotate the frame, emit it to the UI, and asynchronously save
         evidence images when a no-helmet rider is found.

    Signals:
        frame_ready (QPixmap): annotated frame for the UI to display.
        status_ready (str): human-readable status message.
    """

    frame_ready = pyqtSignal(object)
    status_ready = pyqtSignal(str)

    def __init__(self):
        super().__init__()
        self.running = True          # cooperative stop flag; cleared by stop()
        self.last_save_time = 0      # wall-clock time of the last save batch
        self.save_feedback = 0       # time the "Saved!" overlay was triggered
        self.frame_count = 0         # reserved; not currently used
        self._pending_saves = 0      # queued/in-flight async save tasks
        self._pending_lock = Lock()  # guards _pending_saves

    def stop(self):
        """Ask the worker loop to exit.

        Fix: MainWindow calls stop(), but QThread defines no such method,
        so this previously raised AttributeError. The method just clears the
        loop flag; callers should follow up with QThread.wait().
        """
        self.running = False

    def generate_frame_id(self):
        """Return a millisecond-resolution timestamp id, e.g. 20240101_120000_123."""
        t = time.strftime("%Y%m%d_%H%M%S")
        ms = int((time.time() % 1) * 1000)
        return f"{t}_{ms:03d}"

    def log_and_emit(self, message, level="info"):
        """Send *message* to the UI and mirror it (emoji-stripped) to the logger."""
        self.status_ready.emit(message)
        # strip() removes leading/trailing status emoji before logging.
        msg_clean = message.strip("✅❌⚠️🟢⏹️🔄🛑")
        if level == "info":
            logger.info(msg_clean)
        elif level == "warning":
            logger.warning(msg_clean)
        elif level == "error":
            logger.error(msg_clean)
        elif level == "debug":
            logger.debug(msg_clean)

    def run(self):
        """Thread entry point: load the models, then process images or a stream."""
        self.log_and_emit("🔄 加载模型...", "info")
        torch.cuda.empty_cache()

        try:
            self.model_detect = YOLO(MODEL_DETECT_PATH)
            self.model_pose = YOLO(MODEL_POSE_PATH)
            self.model_cls = YOLO(MODEL_CLS_PATH)
            self.log_and_emit("✅ 模型加载完成", "info")
        except Exception as e:
            error_msg = f"❌ 模型加载失败: {e}"
            self.log_and_emit(error_msg, "error")
            logger.exception("模型加载失败")
            return

        try:
            DETECT_CLASSES = self.model_detect.model.names
            # Class ids whose label is exactly "wheel" (case-insensitive).
            VEHICLE_CLS_IDS = [k for k, v in DETECT_CLASSES.items() if v.lower() == 'wheel']
            if not VEHICLE_CLS_IDS:
                self.log_and_emit("⚠️ 未找到 wheel 类别", "warning")
                return

            CLS_CLASSES = self.model_cls.model.names
            NOHEL_LABELS = ['nohel']
            NOHEL_IDS = [i for i, name in CLS_CLASSES.items() if name.lower() in NOHEL_LABELS]
        except Exception as e:
            self.log_and_emit(f"❌ 类别解析失败: {e}", "error")
            logger.exception("类别解析失败")
            return

        # ========== Choose the input source ==========
        if INPUT_SOURCE and os.path.isdir(INPUT_SOURCE):
            self.log_and_emit("📁 开始处理图片文件夹...", "info")
            image_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.webp'}
            image_files = []
            for root, _, files in os.walk(INPUT_SOURCE):
                for f in files:
                    ext = os.path.splitext(f.lower())[1]
                    if ext in image_extensions:
                        image_files.append(os.path.join(root, f))
            if not image_files:
                self.log_and_emit("❌ 文件夹中未找到图片", "error")
                return
            self.process_images(image_files, DETECT_CLASSES, VEHICLE_CLS_IDS, CLS_CLASSES, NOHEL_IDS, NOHEL_LABELS)
        else:
            self.log_and_emit("🟢 开始处理视频流...", "info")
            self.process_video_stream(DETECT_CLASSES, VEHICLE_CLS_IDS, CLS_CLASSES, NOHEL_IDS, NOHEL_LABELS)

    def process_video_stream(self, DETECT_CLASSES, VEHICLE_CLS_IDS, CLS_CLASSES, NOHEL_IDS, NOHEL_LABELS):
        """Read frames from the RTSP stream until stopped, reconnecting on dropouts."""
        cap = cv2.VideoCapture(gst_str, cv2.CAP_FFMPEG)
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # keep latency low

        if not cap.isOpened():
            self.log_and_emit("❌ 无法打开 RTSP 流", "error")
            return

        prev_time = time.time()
        while self.running:
            ret, frame = cap.read()
            if not ret:
                # Stream dropped: back off briefly, then rebuild the capture.
                self.log_and_emit("⚠️ 视频流中断，重连中...", "warning")
                time.sleep(2)
                cap.release()
                cap = cv2.VideoCapture(gst_str, cv2.CAP_FFMPEG)
                cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
                continue

            self.process_single_frame(frame, DETECT_CLASSES, VEHICLE_CLS_IDS, CLS_CLASSES, NOHEL_IDS, NOHEL_LABELS,
                                      prev_time)
            prev_time = time.time()
            time.sleep(0.01)  # brief yield so frame emission stays smooth

        cap.release()

    def process_images(self, image_files, DETECT_CLASSES, VEHICLE_CLS_IDS, CLS_CLASSES, NOHEL_IDS, NOHEL_LABELS):
        """Run the pipeline over a list of image paths, then quit the app."""
        prev_time = time.time()
        for img_path in image_files:
            if not self.running:
                break
            try:
                frame = cv2.imread(img_path)
                if frame is None:
                    self.log_and_emit(f"⚠️ 无法读取图片: {img_path}", "warning")
                    continue
                self.log_and_emit(f"🖼️ 处理图片: {os.path.basename(img_path)}", "info")
                self.process_single_frame(
                    frame, DETECT_CLASSES, VEHICLE_CLS_IDS,
                    CLS_CLASSES, NOHEL_IDS, NOHEL_LABELS, prev_time
                )
                prev_time = time.time()
                # Optional pacing so each result is visible in the UI.
                time.sleep(0.5)
            except Exception as e:
                logger.error(f"处理图片 {img_path} 异常: {e}")
                continue
        self.log_and_emit("⏹️ 所有图片处理完成", "info")
        # Auto-exit 2 seconds after the batch completes.
        QTimer.singleShot(2000, QCoreApplication.quit)

    def process_single_frame(self, frame, DETECT_CLASSES, VEHICLE_CLS_IDS, CLS_CLASSES, NOHEL_IDS, NOHEL_LABELS, prev_time):
        """Run the full pipeline on one frame and emit the annotated result."""
        current_time = time.time()
        nohel_crops = []  # head crops of riders classified as no-helmet
        display_frame = frame.copy()

        # Step 1: detect wheel/vehicle boxes.
        try:
            results = self.model_detect(
                source=frame,
                imgsz=IMG_SIZE_DETECT,
                classes=VEHICLE_CLS_IDS,
                conf=CONF_THRESHOLD,
                device='cpu',
                verbose=False,
                max_det=30
            )
        except Exception as e:
            logger.error(f"检测模型推理异常: {e}")
            return

        for result in results:
            try:
                boxes = result.boxes.cpu().numpy()
            except Exception:  # narrowed from bare except
                continue

            for box in boxes:
                try:
                    x1, y1, x2, y2 = map(float, box.xyxy[0])

                    # Safely crop the vehicle region; skip degenerate boxes.
                    crop = safe_crop(frame, x1, y1, x2, y2)
                    if crop is None:
                        continue

                    # Draw the detection box and label on the display copy.
                    cv2.rectangle(display_frame, (int(x1), int(y1)), (int(x2), int(y2)), (255, 0, 0), 2)
                    cv2.putText(display_frame, "Vehicle", (int(x1), int(y1) - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 0), 2)

                    # Step 2: pose estimation on the crop (single rider at most).
                    try:
                        pose_results = self.model_pose(
                            source=crop,
                            imgsz=256,
                            conf=0.5,
                            device='cpu',
                            verbose=False,
                            max_det=1
                        )
                    except Exception as e:
                        logger.warning(f"Pose 推理异常: {e}")
                        continue

                    for r in pose_results:
                        if r.keypoints is None:
                            continue
                        try:
                            kpts = r.keypoints.xy[0].cpu().numpy()
                            confs = r.keypoints.conf[0].cpu().numpy()
                        except Exception:  # narrowed from bare except
                            continue

                        if confs[0] > 0.5:  # keypoint 0 (nose) is confident
                            nose_x, nose_y = kpts[0]
                            # Map nose from crop coordinates back to the frame.
                            nose_x_global = x1 + nose_x
                            nose_y_global = y1 + nose_y

                            # Step 3: fixed-size head box centered on the nose.
                            head_size = 80
                            hx1 = nose_x_global - head_size // 2
                            hy1 = nose_y_global - head_size // 2
                            hx2 = nose_x_global + head_size // 2
                            hy2 = nose_y_global + head_size // 2

                            head_crop = safe_crop(frame, hx1, hy1, hx2, hy2)
                            if head_crop is None:
                                continue

                            head_resized = cv2.resize(head_crop, (IMG_SIZE_CLS, IMG_SIZE_CLS))

                            # Step 4: classify helmet / no-helmet on the head crop.
                            try:
                                cls_results = self.model_cls(
                                    source=[head_resized],
                                    imgsz=IMG_SIZE_CLS,
                                    conf=CLS_CONF_THRESHOLD,
                                    device='cpu',
                                    verbose=False
                                )
                                for cr in cls_results:
                                    top1_idx = cr.probs.top1
                                    cls_name = CLS_CLASSES[top1_idx].lower()
                                    is_nohel = (cls_name in [lbl.lower() for lbl in NOHEL_LABELS]) or (top1_idx in NOHEL_IDS)

                                    label = "No Helmet!" if is_nohel else "Helmet"
                                    color = (0, 0, 255) if is_nohel else (0, 255, 0)
                                    cv2.putText(display_frame, label, (int(nose_x_global), int(nose_y_global) - 10),
                                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
                                    cv2.circle(display_frame, (int(nose_x_global), int(nose_y_global)), 5, color, -1)

                                    if is_nohel:
                                        nohel_crops.append(head_crop.copy())
                            except Exception as e:
                                logger.warning(f"分类异常: {e}")

                except Exception as e:
                    logger.debug(f"单个检测框处理异常: {e}")
                    continue

        # Save evidence only when someone is without a helmet AND enough time
        # has passed since the previous batch (rate limiting).
        if nohel_crops and (current_time - self.last_save_time) > SAVE_INTERVAL:
            frame_copy = frame.copy()
            annotated_frame_copy = display_frame.copy()
            nohel_crops_copy = [crop.copy() for crop in nohel_crops]
            frame_id = self.generate_frame_id()
            self.save_images_async(frame_copy, annotated_frame_copy, nohel_crops_copy, frame_id)
            self.last_save_time = current_time
            self.save_feedback = current_time

        # Flash a "Saved!" overlay for 0.8 s after a save was triggered.
        if current_time - self.save_feedback < 0.8:
            cv2.putText(display_frame, "Saved!", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 255, 0), 3)

        # FPS readout (in image mode this is only indicative).
        fps = 1.0 / (current_time - prev_time + 1e-6)
        cv2.putText(display_frame, f"FPS: {fps:.1f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 0), 2)

        # Hand the annotated frame to the UI thread.
        rgb_image = cv2.cvtColor(display_frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        bytes_per_line = ch * w
        # Fix: .copy() makes the QImage own its pixel buffer. Without it the
        # QImage references rgb_image's memory, which may be garbage-collected
        # before the UI thread paints the pixmap.
        qt_image = QImage(rgb_image.data, w, h, bytes_per_line, QImage.Format_RGB888).copy()
        pixmap = QPixmap.fromImage(qt_image)
        self.frame_ready.emit(pixmap)

    def save_images_async(self, frame, annotated_frame, nohel_crops, frame_id):
        """Save evidence images on the thread pool, bounded by MAX_SAVE_TASKS.

        Fix: the previous implementation inspected ``executor._threads`` — a
        private attribute that counts pool threads (which stay alive between
        tasks), not queued work. A pending-task counter maintained through
        Future.add_done_callback is used instead.
        """
        def _save():
            with save_lock:
                try:
                    cv2.imwrite(f"{OUTPUT_DIR}/original/frame_{frame_id}.jpg", frame)
                    cv2.imwrite(f"{OUTPUT_DIR}/detected/detected_{frame_id}.jpg", annotated_frame)
                    for idx, crop_img in enumerate(nohel_crops):
                        idx_str = f"{idx + 1:02d}"
                        cv2.imwrite(f"{OUTPUT_DIR}/cropped/cropped_{frame_id}_{idx_str}.jpg", crop_img)
                    self.log_and_emit(f"✅ 保存 {len(nohel_crops)} 人未戴头盔", "info")
                except Exception as e:
                    self.log_and_emit(f"❌ 保存失败: {e}", "error")
                    logger.exception("保存失败")

        def _on_done(_future):
            # Runs when the save task finishes (success or failure).
            with self._pending_lock:
                self._pending_saves -= 1

        with self._pending_lock:
            if self._pending_saves >= MAX_SAVE_TASKS:
                self.log_and_emit("⚠️ 保存任务过多，跳过本次保存", "warning")
                return
            self._pending_saves += 1
        executor.submit(_save).add_done_callback(_on_done)


# ------------------------ 主窗口 ------------------------
class MainWindow(QMainWindow):
    """Main application window: live annotated video, status bar, stop button."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("电动车未戴头盔检测系统（最终稳定版）")

        # Size the window to ~80% x 70% of the primary screen.
        screen = QApplication.primaryScreen()
        screen_size = screen.size()
        width, height = screen_size.width(), screen_size.height()

        init_width = int(width * 0.8)
        init_height = int(height * 0.7)
        self.resize(init_width, init_height)
        self.setMinimumSize(800, 600)

        central_widget = QWidget()
        main_layout = QVBoxLayout(central_widget)
        main_layout.setContentsMargins(10, 10, 10, 10)
        main_layout.setSpacing(10)

        # Video display area; frames are scaled manually in update_frame.
        self.video_label = QLabel("等待视频流...", self)
        self.video_label.setAlignment(Qt.AlignCenter)
        self.video_label.setStyleSheet("background-color: #000000; color: #AAAAAA; border: 1px solid #333333;")
        self.video_label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.video_label.setScaledContents(False)
        self.video_label.setMinimumSize(640, 360)

        # Bottom bar: status text on the left, stop button on the right.
        bottom_layout = QHBoxLayout()
        self.status_label = QLabel("状态：初始化中...")
        self.status_label.setStyleSheet("font-size: 13px; color: #0066cc; padding: 4px;")
        self.status_label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)

        self.stop_btn = QPushButton("停止")
        self.stop_btn.setFixedWidth(120)
        self.stop_btn.setStyleSheet("font-size: 14px; padding: 6px;")
        self.stop_btn.clicked.connect(self.stop_detection)

        bottom_layout.addWidget(self.status_label)
        bottom_layout.addSpacing(20)
        bottom_layout.addWidget(self.stop_btn)

        main_layout.addWidget(self.video_label)
        main_layout.addLayout(bottom_layout)
        self.setCentralWidget(central_widget)

        # Start the background detection worker and wire its signals to the UI.
        self.detection_thread = DetectionThread()
        self.detection_thread.frame_ready.connect(self.update_frame)
        self.detection_thread.status_ready.connect(self.update_status)
        self.detection_thread.start()

    def update_frame(self, pixmap):
        """Scale the incoming annotated frame to the label, keeping aspect ratio."""
        if pixmap.isNull():
            return
        label_w = self.video_label.width()
        label_h = self.video_label.height()
        if label_w <= 0 or label_h <= 0:
            return
        scaled_pixmap = pixmap.scaled(
            label_w, label_h,
            Qt.KeepAspectRatio,
            Qt.SmoothTransformation
        )
        self.video_label.setPixmap(scaled_pixmap)
        self.video_label.setAlignment(Qt.AlignCenter)

    def resizeEvent(self, event):
        # The next frame emitted by the worker is rescaled to the new size.
        super().resizeEvent(event)

    def update_status(self, msg):
        """Show a worker status message in the bottom status bar."""
        self.status_label.setText(f"状态：{msg}")

    def stop_detection(self):
        """Handle the stop button: halt the worker, then quit shortly after."""
        # Fix: DetectionThread defines no stop() (QThread has none either),
        # so the original call raised AttributeError. Clear the worker's
        # cooperative loop flag directly instead.
        self.detection_thread.running = False
        self.update_status("🛑 正在停止...")
        logger.info("用户点击停止按钮")
        QTimer.singleShot(1500, QCoreApplication.quit)

    def closeEvent(self, event):
        """Stop and join the worker before the window is destroyed.

        Fix: without wait(), Qt can destroy a still-running QThread and crash
        ("QThread: Destroyed while thread is still running").
        """
        self.detection_thread.running = False
        self.detection_thread.wait(3000)  # join with a 3 s timeout
        logger.info("应用窗口关闭")
        event.accept()


# ------------------------ 启动应用 ------------------------
def _run_app():
    """Build the Qt application and main window, then enter the event loop."""
    qt_app = QApplication(sys.argv)
    main_window = MainWindow()
    main_window.show()
    # main_window stays referenced by this frame for the lifetime of exec_().
    return qt_app.exec_()


if __name__ == "__main__":
    sys.exit(_run_app())