import json
import queue
import sys
import time
from multiprocessing import Queue
from pathlib import Path
from threading import Thread

import cv2
from PyQt5.QtCore import Qt, QThread, pyqtSignal
from PyQt5.QtGui import QPixmap, QImage, QFont
from PyQt5.QtWidgets import (
    QApplication, QMainWindow, QLabel, QPushButton,
    QVBoxLayout, QHBoxLayout, QWidget, QFrame, QGridLayout
)
from ultralytics import YOLO


def load_config():
    """Load and validate ``config.json`` located next to this script.

    When running inside a PyInstaller bundle (``sys.frozen``), the file is
    looked up in the unpack directory (``sys._MEIPASS``) instead.

    Returns:
        dict with keys ``video_source``, ``model_path`` (str),
        ``save_dirs`` and ``alert_classes`` (defaults to ``["nohel"]``).

    Exits the process with a human-readable message on any problem
    (missing file, malformed JSON, missing required key, missing model).
    """
    if getattr(sys, 'frozen', False):
        # Frozen (PyInstaller) build: bundled resources live in _MEIPASS.
        base_dir = Path(sys._MEIPASS)
    else:
        base_dir = Path(__file__).parent

    config_path = base_dir / "config.json"
    try:
        with open(config_path, "r", encoding="utf-8") as f:
            config = json.load(f)
    except FileNotFoundError:
        sys.exit(f"错误：配置文件 {config_path} 不存在")
    except json.JSONDecodeError as e:
        # Include the parser's line/column so the user can find the mistake.
        sys.exit(f"错误：配置文件格式不正确 ({e})")

    try:
        model_path = Path(config["model_path"])
    except KeyError:
        sys.exit("错误：配置文件缺少必需字段 'model_path'")
    if not model_path.exists():
        sys.exit(f"错误：模型文件 {model_path} 不存在")

    try:
        return {
            "video_source": config["video_source"],
            "model_path": str(model_path),
            "save_dirs": config["save_dirs"],
            "alert_classes": config.get("alert_classes", ["nohel"]),
        }
    except KeyError as e:
        # A missing required key exits cleanly instead of raising a traceback.
        sys.exit(f"错误：配置文件缺少必需字段 {e}")


# Save raw (un-annotated) frames asynchronously (bounded queue + worker thread)
class RawFrameSaver:
    """Writes raw camera frames to disk from a background daemon thread.

    Frames are handed over through a bounded queue so the inference loop
    never blocks on disk I/O; when the queue is full, frames are dropped.
    """

    def __init__(self, save_dir="raw_frames"):
        # NOTE: the save_dir parameter is kept for backward compatibility;
        # the effective directory comes from config["save_dirs"]["raw_frames"].
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["raw_frames"])
        self.save_dir.mkdir(parents=True, exist_ok=True)
        # queue.Queue instead of multiprocessing.Queue: only threads are
        # involved, and multiprocessing.Queue.qsize() raises
        # NotImplementedError on macOS.
        self.frame_queue = queue.Queue(maxsize=30)
        self.thread = Thread(target=self._save_worker, daemon=True)
        self.thread.start()

    def add_frame(self, frame, timestamp_str):
        """Enqueue a copy of `frame`; drop it silently if the queue is full.

        put_nowait + except Full is atomic, unlike the qsize()-then-put
        pattern, which can race with the consumer.
        """
        try:
            self.frame_queue.put_nowait((frame.copy(), timestamp_str))
        except queue.Full:
            pass  # prefer dropping a frame over stalling the inference loop

    def _save_worker(self):
        """Worker loop: block on the queue and write each frame as JPEG."""
        while True:
            # Blocking get() replaces the previous empty()/sleep busy-wait.
            frame, timestamp_str = self.frame_queue.get()
            cv2.imwrite(str(self.save_dir / f"{timestamp_str}.jpg"), frame)


# Save full annotated frames (with boxes drawn)
class FrameSaver:
    """Synchronously saves full annotated frames as JPEG files."""

    def __init__(self, save_dir="full_frames"):
        # NOTE: the save_dir parameter is kept for backward compatibility;
        # the effective directory comes from config["save_dirs"]["full_frames"].
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["full_frames"])
        # parents=True so a nested configured path does not crash on startup.
        self.save_dir.mkdir(parents=True, exist_ok=True)

    def save_full_frame(self, frame, timestamp_str):
        """Write `frame` to ``<save_dir>/<timestamp_str>.jpg``."""
        filename = f"{timestamp_str}.jpg"
        save_path = self.save_dir / filename
        cv2.imwrite(str(save_path), frame)


# Save cropped images of detected objects
class ImageSaver:
    """Saves per-detection crops as JPEG, rate-limited per track id."""

    def __init__(self, save_dir="detected_objects"):
        # NOTE: the save_dir parameter is kept for backward compatibility; the
        # effective directory comes from config["save_dirs"]["detected_objects"].
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["detected_objects"])
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.min_interval = 0.5   # minimum seconds between saves per track id
        # track_id -> timestamp of the last save.
        # NOTE(review): grows with every new track id; fine for short sessions,
        # consider pruning for long-running deployments.
        self.last_save_time = {}

    def save_cropped_image(self, frame, box, class_name, conf, track_id):
        """Crop ``box`` (x1, y1, x2, y2) out of ``frame`` and save it.

        Saves at most once per ``min_interval`` seconds for a given track id;
        boxes are clamped to the frame and degenerate crops are skipped.
        """
        current_time = time.time()
        last = self.last_save_time.get(track_id)
        if last is not None and current_time - last < self.min_interval:
            return
        self.last_save_time[track_id] = current_time

        # Clamp to frame bounds: negative coordinates would silently slice
        # from the wrong end of the array.
        h, w = frame.shape[:2]
        x1, y1, x2, y2 = map(int, box)
        x1, y1 = max(0, x1), max(0, y1)
        x2, y2 = min(w, x2), min(h, y2)
        if x2 <= x1 or y2 <= y1:
            return
        cropped = frame[y1:y2, x1:x2]
        if cropped.size == 0:
            return

        timestamp = time.strftime("%Y%m%d_%H%M%S")
        filename = f"{timestamp}_{class_name}_{conf:.2f}_id{track_id}.jpg"
        save_path = self.save_dir / filename

        try:
            # imencode + tofile instead of imwrite: handles non-ASCII paths
            # (e.g. Chinese directory names) on Windows.
            ret, buffer = cv2.imencode('.jpg', cropped, [cv2.IMWRITE_JPEG_QUALITY, 90])
            if ret:
                buffer.tofile(str(save_path))
                print(f"✅ 保存裁剪图: {save_path.name}")
        except Exception as e:
            print(f"❌ 保存失败: {str(e)}")


# Inference processor (tracking, alert dedup, video recording)
class InferenceProcessor:
    """Runs YOLO tracking on frames, draws boxes, raises deduplicated alerts
    for configured classes, and records the annotated stream to a video file.

    Args:
        model_path: path to the YOLO weights file.
        output_video_path: where the annotated recording is written.
        fps: frame rate of the output video (generalizes the previously
            hard-coded 30; default keeps old behavior).
    """

    def __init__(self, model_path, output_video_path="output/detected_video.mp4",
                 fps=30):
        self.model = YOLO(model_path)
        self.saver = ImageSaver()
        self.frame_saver = FrameSaver()
        self.raw_saver = RawFrameSaver()
        # BGR colors used when drawing each class.
        self.class_colors = {
            "hel": (0, 255, 0),
            "nohel": (0, 0, 255),
            "wheel": (255, 0, 0),
        }
        config = load_config()
        self.alert_classes = config["alert_classes"]
        self.alerted_tracks = set()   # (class_name, track_id) pairs already alerted
        self.alert_count = 0          # total number of alerts raised

        # Video recording. The writer is created lazily on the first frame,
        # once the frame size is known.
        self.output_video_path = Path(output_video_path)
        self.output_video_path.parent.mkdir(parents=True, exist_ok=True)
        self.video_writer = None
        self.fps = fps
        self.fourcc = cv2.VideoWriter_fourcc(*'mp4v')

    def process_frame(self, frame):
        """Run tracking on one BGR frame, annotate it in place, handle alerts,
        append it to the output video, and return the annotated frame."""
        raw_frame = frame.copy()  # unmodified copy for the raw-frame archive
        detected_any_alert = False
        timestamp_ms = int(time.time() * 1000)

        results = self.model.track(
            frame,
            imgsz=640,
            conf=0.5,
            iou=0.45,
            persist=True,   # keep track ids across calls
            device='cpu',
            tracker="bytetrack.yaml"
        )

        for result in results:
            # Skip frames where the tracker produced no boxes / no ids.
            if result.boxes is None or result.boxes.id is None:
                continue

            boxes = result.boxes.xyxy.cpu().numpy()
            track_ids = result.boxes.id.cpu().numpy().astype(int)
            class_ids = result.boxes.cls.cpu().numpy().astype(int)
            confidences = result.boxes.conf.cpu().numpy()

            for box, track_id, cls_id, conf in zip(boxes, track_ids, class_ids, confidences):
                class_name = result.names[cls_id]
                x1, y1, x2, y2 = map(int, box)
                color = self.class_colors.get(class_name, (255, 255, 255))

                # Draw box and label.
                cv2.rectangle(frame, (x1, y1), (x2, y2), color, 3)
                label = f"{class_name} ID:{track_id} {conf:.2f}"
                cv2.putText(frame, label, (x1, max(y1 - 10, 10)),
                            cv2.FONT_HERSHEY_DUPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)

                # Alert logic: each (class, track id) pair alerts at most once.
                if class_name in self.alert_classes:
                    obj_key = (class_name, track_id)
                    if obj_key not in self.alerted_tracks:
                        print(f"🚨 报警：{class_name} ID:{track_id} (置信度: {conf:.2f})")
                        self.saver.save_cropped_image(frame, [x1, y1, x2, y2], class_name, conf, track_id)
                        self.raw_saver.add_frame(raw_frame, f"alert_{timestamp_ms}_id{track_id}")
                        self.frame_saver.save_full_frame(frame.copy(), f"alert_{timestamp_ms}_id{track_id}")
                        self.alerted_tracks.add(obj_key)
                        self.alert_count += 1
                        detected_any_alert = True

        # Video recording (lazy writer creation).
        h, w = frame.shape[:2]
        if self.video_writer is None:
            self.video_writer = cv2.VideoWriter(
                str(self.output_video_path), self.fourcc, self.fps, (w, h)
            )
            print(f"📹 开始录制检测视频: {self.output_video_path}")
        self.video_writer.write(frame)

        return frame

    def release(self):
        """Finalize and close the output video writer (idempotent)."""
        if self.video_writer:
            self.video_writer.release()
            self.video_writer = None  # avoid double release / stale handle
            print("✅ 检测视频已保存")


# Video processing thread
class VideoThread(QThread):
    """Reads frames from a video source, runs them through the processor,
    and publishes results to the GUI via Qt signals."""

    frame_ready = pyqtSignal(object)           # (rgb_image, w, h)
    status_update = pyqtSignal(str)            # status text
    stats_update = pyqtSignal(int, float)      # alert count, FPS

    def __init__(self, processor, video_source):
        super().__init__()
        self.processor = processor
        self.video_source = video_source
        self.running = True

    def run(self):
        cap = cv2.VideoCapture(self.video_source)
        if not cap.isOpened():
            self.status_update.emit("❌ 无法打开视频源")
            return

        # FPS bookkeeping.
        frame_count = 0
        start_time = time.time()
        # FIX: previously the end-of-stream branch re-emitted the status
        # signal every second forever; notify only once until frames resume.
        end_notified = False

        while self.running:
            ret, frame = cap.read()
            if not ret:
                if not end_notified:
                    self.status_update.emit("⏹️ 视频结束")
                    end_notified = True
                time.sleep(1)
                continue
            end_notified = False  # stream recovered (e.g. camera reconnected)

            # Process and convert for Qt display.
            processed = self.processor.process_frame(frame)
            rgb = cv2.cvtColor(processed, cv2.COLOR_BGR2RGB)
            h, w, ch = rgb.shape

            # Publish the image to the GUI thread.
            self.frame_ready.emit((rgb, w, h))

            # Publish statistics roughly once per second.
            frame_count += 1
            elapsed = time.time() - start_time
            if elapsed >= 1.0:
                fps = frame_count / elapsed
                alert_count = self.processor.alert_count
                self.stats_update.emit(alert_count, fps)
                frame_count = 0
                start_time = time.time()

        cap.release()

    def stop(self):
        """Request the loop to stop and block until the thread has exited."""
        self.running = False
        self.wait()


# Main window
class MainWindow(QMainWindow):
    """Top-level GUI: video display on the left, control panel on the right.

    Owns one InferenceProcessor for the application lifetime and spawns a
    fresh VideoThread per detection session.
    """

    def __init__(self):
        super().__init__()
        self.setWindowTitle("🛡️ YOLOv8 安全检测系统")
        self.setGeometry(100, 100, 1600, 900)
        self.setStyleSheet("background-color: #f0f0f0; font-family: Arial, sans-serif;")

        # Initialization: make sure every configured save directory exists.
        config = load_config()
        for dir_path in config["save_dirs"].values():
            Path(dir_path).mkdir(exist_ok=True)

        self.processor = InferenceProcessor(
            model_path=config["model_path"],
            output_video_path="output/detected_video.mp4"
        )

        # Created on demand in start_detection(); None while idle.
        self.video_thread = None
        self.setup_ui()
        self.center_window()

    def setup_ui(self):
        """Build the widget tree: video label (left) + control panel (right)."""
        central_widget = QWidget()
        self.setCentralWidget(central_widget)

        layout = QHBoxLayout(central_widget)
        layout.setContentsMargins(15, 15, 15, 15)
        layout.setSpacing(15)

        # Left: video display area.
        self.video_label = QLabel("等待视频输入...")
        self.video_label.setAlignment(Qt.AlignCenter)
        self.video_label.setStyleSheet("background-color: #000; color: #fff; font-size: 18px;")
        self.video_label.setMinimumSize(1280, 720)

        # Right: control panel.
        control_frame = QFrame()
        control_frame.setFrameShape(QFrame.StyledPanel)
        control_frame.setStyleSheet("""
            background-color: white;
            border-radius: 12px;
            padding: 15px;
            border: 1px solid #ddd;
        """)
        control_layout = QVBoxLayout(control_frame)
        control_layout.setSpacing(20)

        # Panel title.
        title = QLabel("控制面板")
        title.setFont(QFont("Arial", 16, QFont.Bold))
        title.setAlignment(Qt.AlignCenter)
        control_layout.addWidget(title)

        # Buttons: start / pause / exit.
        self.start_btn = QPushButton("▶️ 开始检测")
        self.start_btn.setFont(QFont("Arial", 12, QFont.Bold))
        self.start_btn.setStyleSheet(self.button_style("#4CAF50"))
        self.start_btn.clicked.connect(self.start_detection)

        self.pause_btn = QPushButton("⏸️ 暂停")
        self.pause_btn.setFont(QFont("Arial", 12))
        self.pause_btn.setStyleSheet(self.button_style("#FF9800"))
        self.pause_btn.clicked.connect(self.pause_detection)
        self.pause_btn.setEnabled(False)

        self.exit_btn = QPushButton("⏹️ 退出")
        self.exit_btn.setFont(QFont("Arial", 12))
        self.exit_btn.setStyleSheet(self.button_style("#F44336"))
        self.exit_btn.clicked.connect(self.close)

        for btn in [self.start_btn, self.pause_btn, self.exit_btn]:
            btn.setFixedHeight(50)
            control_layout.addWidget(btn)

        # Status / statistics labels.
        self.status_label = QLabel("状态：就绪")
        self.alert_label = QLabel("报警数：0")
        self.fps_label = QLabel("FPS：0")

        for label in [self.status_label, self.alert_label, self.fps_label]:
            label.setFont(QFont("Arial", 11))
            label.setStyleSheet("padding: 5px;")
            control_layout.addWidget(label)

        control_layout.addStretch()

        # Overall layout: video gets 3/4 of the width, panel 1/4.
        layout.addWidget(self.video_label, 3)
        layout.addWidget(control_frame, 1)

    def button_style(self, color):
        """Return the shared button stylesheet with the given background color."""
        return f"padding: 12px; background-color: {color}; color: white; border: none; border-radius: 8px;"

    def center_window(self):
        """Center the window on the available desktop geometry."""
        qr = self.frameGeometry()
        cp = QApplication.desktop().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())

    def start_detection(self):
        """Start a new detection session (no-op if one is already running)."""
        if self.video_thread and self.video_thread.isRunning():
            return

        config = load_config()
        self.video_thread = VideoThread(self.processor, config["video_source"])
        self.video_thread.frame_ready.connect(self.update_frame)
        self.video_thread.status_update.connect(self.update_status)
        self.video_thread.stats_update.connect(self.update_stats)  # statistics signal
        self.video_thread.start()

        self.start_btn.setEnabled(False)
        self.pause_btn.setEnabled(True)
        self.status_label.setText("状态：检测中...")

    def pause_detection(self):
        """Stop the current video thread and flip the buttons back to idle."""
        if self.video_thread:
            self.video_thread.stop()
            self.video_thread = None
        self.start_btn.setEnabled(True)
        self.pause_btn.setEnabled(False)
        self.status_label.setText("状态：已暂停")

    def update_frame(self, data):
        """Display one processed frame: wrap the RGB buffer in a QImage and
        scale the resulting pixmap to the label (QPixmap.fromImage copies the
        data, so the temporary numpy buffer may be freed afterwards)."""
        rgb, w, h = data
        qimg = QImage(rgb.data, w, h, w * 3, QImage.Format_RGB888)
        pixmap = QPixmap.fromImage(qimg)
        self.video_label.setPixmap(pixmap.scaled(
            self.video_label.size(),
            Qt.KeepAspectRatio,
            Qt.SmoothTransformation
        ))

    def update_stats(self, alert_count, fps):
        """Update the alert-count and FPS labels."""
        self.alert_label.setText(f"报警数：{alert_count}")
        self.fps_label.setText(f"FPS：{fps:.1f}")

    def update_status(self, text):
        """Show a status message from the worker thread."""
        self.status_label.setText(f"状态：{text}")

    def closeEvent(self, event):
        """Stop the worker and flush the output video before the window closes."""
        if self.video_thread and self.video_thread.isRunning():
            self.video_thread.stop()
        self.processor.release()
        event.accept()


def main():
    """Application entry point: create the Qt app, show the window, run the loop."""
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()