import json
import queue
import sys
import time
from multiprocessing import Queue
from pathlib import Path
from threading import Thread
from urllib.parse import quote

import cv2
import numpy as np
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QVBoxLayout, QWidget
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import Qt, QThread, pyqtSignal, pyqtSlot
from ultralytics import YOLO


def load_config():
    """Load and validate the application configuration.

    Reads ``config-video.json`` from the script directory (or from the
    PyInstaller bundle directory when running frozen), builds the list of
    video sources, and verifies that the model file exists.

    Returns:
        dict with keys ``sources``, ``model_path``, ``save_dirs``,
        ``center_detection_ratio`` and ``nested_classes``.

    Exits the process with an error message when the config file is missing,
    the JSON is malformed, or the model file does not exist.
    """
    if getattr(sys, 'frozen', False):
        # Running from a PyInstaller bundle: resources live in _MEIPASS.
        base_dir = Path(sys._MEIPASS)
    else:
        base_dir = Path(__file__).parent

    config_path = base_dir / "config-video.json"
    try:
        with open(config_path, "r", encoding="utf-8") as f:
            config = json.load(f)
    except FileNotFoundError:
        sys.exit(f"错误：配置文件 {config_path} 不存在")
    except json.JSONDecodeError as e:
        # Include the parser error so the broken line can be located.
        sys.exit(f"错误：配置文件格式不正确（{e}）")

    sources = []
    for src in config["sources"]:
        if src["type"] == "rtsp":
            # Percent-encode the credentials so special characters
            # (@ : / ...) in the username/password do not corrupt the URL.
            user = quote(src['username'], safe="")
            password = quote(src['password'], safe="")
            url = f"rtsp://{user}:{password}@{src['ip']}:554/Streaming/Channels/{src['channel']}"
            sources.append({"type": "rtsp", "url": url})
        elif src["type"] == "mp4":
            video_path = Path(src["path"])
            if not video_path.exists():
                print(f"警告：视频文件 {video_path} 不存在，跳过")
                continue
            sources.append({"type": "mp4", "url": str(video_path)})
        else:
            print(f"未知输入类型: {src['type']}")
            continue

    model_path = Path(config["model_path"])
    if not model_path.exists():
        sys.exit(f"错误：模型文件 {model_path} 不存在")

    return {
        "sources": sources,
        "model_path": str(model_path),
        "save_dirs": config["save_dirs"],
        "center_detection_ratio": config.get("center_detection_ratio", 0.4),
        "nested_classes": config.get("nested_classes", {"wheel": ["hel", "nohel"]})
    }


# 图像保存模块
class ImageSaver:
    """Saves cropped detection images as JPEG files into the configured directory.

    Saving is rate-limited to at most one image every ``min_interval`` seconds.
    """

    def __init__(self, config=None):
        # Accept an already-loaded config to avoid re-reading the file;
        # falls back to load_config() for backward compatibility.
        if config is None:
            config = load_config()
        self.save_dir = Path(config["save_dirs"]["detected_objects"])
        # parents=True so a nested save path does not raise FileNotFoundError.
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.frame_counter = 0           # monotonically increasing suffix to avoid filename clashes
        self.last_save_time = time.time()
        self.min_interval = 0.5          # minimum seconds between two saves

    def save_cropped_image(self, frame, box, class_name, conf):
        """Crop ``box`` (x1, y1, x2, y2) out of ``frame`` and save it as JPEG.

        Silently skips the save when rate-limited, when the crop is empty,
        or when JPEG encoding fails.
        """
        current_time = time.time()
        if current_time - self.last_save_time < self.min_interval:
            return
        x1, y1, x2, y2 = map(int, box)
        # Clamp to the frame bounds: negative coordinates would otherwise be
        # interpreted as "from the end" by numpy and crop the wrong region.
        h, w = frame.shape[:2]
        x1, x2 = max(0, x1), min(w, x2)
        y1, y2 = max(0, y1), min(h, y2)
        cropped = frame[y1:y2, x1:x2]
        if cropped.size == 0:
            return
        timestamp = time.strftime("%Y%m%d_%H%M%S")
        filename = f"{timestamp}_{class_name}_{conf:.2f}_{self.frame_counter:04d}.jpg"
        save_path = self.save_dir / filename
        try:
            # imencode + tofile instead of cv2.imwrite so non-ASCII paths
            # work on Windows.
            ret, buffer = cv2.imencode('.jpg', cropped, [cv2.IMWRITE_JPEG_QUALITY, 90])
            if ret:
                buffer.tofile(str(save_path))
                self.frame_counter += 1
                self.last_save_time = current_time
        except Exception as e:
            print(f"保存图像时发生错误：{str(e)}")

# 推理工作线程
class InferenceWorker(QThread):
    """Background thread that runs YOLO inference on frames pulled from a queue.

    A producer (VideoThread) pushes raw frames into ``frame_queue``;
    annotated frames are emitted through the ``result_ready`` signal.
    """

    result_ready = pyqtSignal(np.ndarray)  # emits the annotated frame

    def __init__(self, model_path, center_ratio, nested_classes):
        super().__init__()
        self.model = YOLO(model_path)
        self.center_ratio = center_ratio            # fraction of the frame used as the central detection zone
        self.nested_classes_config = nested_classes
        self.saver = ImageSaver()
        # queue.Queue (thread-safe, in-process) instead of multiprocessing.Queue:
        # no pickling overhead per frame, and qsize() works on every platform
        # (multiprocessing.Queue.qsize raises NotImplementedError on macOS).
        self.frame_queue = queue.Queue(maxsize=2)
        self.min_interval = 1.0
        self.last_save_time = 0
        self.running = True

    def run(self):
        """Main loop: pull a frame, run detection, emit the annotated frame."""
        while self.running:
            try:
                frame = self.frame_queue.get(timeout=1)
            except queue.Empty:
                # No frame within 1 s; loop back to re-check the running flag.
                continue

            results = self.model(frame, imgsz=320, conf=0.5, device='cpu')  # switch to '0' to use a GPU
            annotated_frame = self.process_results(frame, results)
            self.result_ready.emit(annotated_frame)

            time.sleep(0.01)  # brief yield so the thread does not hog a core

    def process_results(self, frame, results):
        """Annotate ``frame`` with two-stage detections and return it.

        Stage 1 collects "wheel" boxes whose center lies inside the central
        detection zone. Stage 2 re-runs the model on each wheel ROI looking
        for nested helmet classes ("hel" / "nohel") and draws the results.
        """
        height, width, _ = frame.shape
        base_height = 720.0
        # Scale label text with the frame resolution.
        font_scale = max(0.6, height / base_height * 0.8)
        font_thickness = max(1, int(font_scale * 1.5))

        # Central detection zone (drawn as a yellow rectangle).
        center_x1 = int(width * (0.5 - self.center_ratio / 2))
        center_y1 = int(height * (0.5 - self.center_ratio / 2))
        center_x2 = int(width * (0.5 + self.center_ratio / 2))
        center_y2 = int(height * (0.5 + self.center_ratio / 2))

        helmet_detected = False
        cv2.rectangle(frame, (center_x1, center_y1), (center_x2, center_y2), (0, 255, 255), 2)

        twowheel_boxes = []
        helmet_classes = self.nested_classes_config.get("wheel", [])

        # --- Stage 1: collect "wheel" detections inside the zone ---
        for result in results:
            boxes = result.boxes.xyxy.cpu().numpy()
            confidences = result.boxes.conf.cpu().numpy()
            class_ids = result.boxes.cls.cpu().numpy().astype(int)

            for xyxy, conf, cls_id in zip(boxes, confidences, class_ids):
                if conf < 0.5:
                    continue
                class_name = result.names[cls_id]
                x_center = int((xyxy[0] + xyxy[2]) / 2)
                y_center = int((xyxy[1] + xyxy[3]) / 2)
                in_center = center_x1 <= x_center <= center_x2 and center_y1 <= y_center <= center_y2
                if not in_center:
                    continue
                if class_name == "wheel":
                    twowheel_boxes.append({"box": xyxy.copy(), "conf": conf})

        # --- Stage 2: look for helmet classes inside each wheel ROI ---
        for tw_data in twowheel_boxes:
            tw_box = tw_data["box"]
            tw_conf = tw_data["conf"]
            x1, y1, x2, y2 = map(int, tw_box)
            roi = frame[y1:y2, x1:x2]
            if roi.size == 0:
                continue

            roi_results = self.model(roi, imgsz=320, conf=0.65, iou=0.45, device='cpu')

            # Pick the best helmet detection: "hel" takes priority over
            # "nohel"; within a class the highest confidence wins (the
            # previous code kept whichever box came last).
            best_helmet_class = None
            best_conf = 0.0
            best_box = None

            for r in roi_results:
                for box in r.boxes:
                    conf_val = float(box.conf[0])
                    if conf_val < 0.5:
                        continue
                    class_name = r.names[int(box.cls[0])]
                    if class_name not in helmet_classes:
                        continue
                    if class_name == "hel":
                        if best_helmet_class != "hel" or conf_val > best_conf:
                            best_helmet_class, best_conf, best_box = "hel", conf_val, box
                    elif class_name == "nohel":
                        if best_helmet_class is None or (best_helmet_class == "nohel" and conf_val > best_conf):
                            best_helmet_class, best_conf, best_box = "nohel", conf_val, box

            # Only draw when a helmet-class box was actually found.
            if best_helmet_class and best_box is not None:
                helmet_detected = True
                self.saver.save_cropped_image(frame.copy(), tw_box, "wheel", tw_conf)
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                label = f"wheel {tw_conf:.2f}"
                (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, font_thickness)
                # Semi-transparent background behind the label text.
                overlay = frame.copy()
                cv2.rectangle(overlay, (x1, max(0, y1 - th - 5)), (x1 + tw, y1), (0, 255, 0), -1)
                frame = cv2.addWeighted(overlay, 0.4, frame, 0.6, 0)
                cv2.putText(frame, label, (x1, max(0, y1 - 5)),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, (0, 0, 0), font_thickness, cv2.LINE_AA)

                # Convert the helmet box from ROI-local to full-frame coordinates.
                roi_x1, roi_y1, roi_x2, roi_y2 = map(int, best_box.xyxy[0].cpu().numpy())
                abs_x1, abs_y1, abs_x2, abs_y2 = x1 + roi_x1, y1 + roi_y1, x1 + roi_x2, y1 + roi_y2

                self.saver.save_cropped_image(frame.copy(), [abs_x1, abs_y1, abs_x2, abs_y2], best_helmet_class, best_conf)

                # Blue box for "hel" (wearing helmet), red for "nohel".
                color = (255, 0, 0) if best_helmet_class == "hel" else (0, 0, 255)
                cv2.rectangle(frame, (abs_x1, abs_y1), (abs_x2, abs_y2), color, 2)
                label = f"{best_helmet_class} {best_conf:.2f}"
                (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, font_thickness)
                text_x, text_y = abs_x1, max(th + 5, abs_y1 - 5)
                overlay = frame.copy()
                cv2.rectangle(overlay, (text_x, max(0, text_y - th - 5)), (text_x + tw, text_y), color, -1)
                frame = cv2.addWeighted(overlay, 0.4, frame, 0.6, 0)
                cv2.putText(frame, label, (text_x, text_y),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, (255, 255, 255), font_thickness, cv2.LINE_AA)
            else:
                # Wheel with no helmet-class detection: grey box.
                cv2.rectangle(frame, (x1, y1), (x2, y2), (100, 100, 100), 2)
                label = "No helmet"
                (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, font_thickness)
                overlay = frame.copy()
                cv2.rectangle(overlay, (x1, max(0, y1 - th - 5)), (x1 + tw, y1), (100, 100, 100), -1)
                frame = cv2.addWeighted(overlay, 0.4, frame, 0.6, 0)
                cv2.putText(frame, label, (x1, max(0, y1 - 5)),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, (255, 255, 255), font_thickness, cv2.LINE_AA)

        current_time = time.time()
        # NOTE(review): this only refreshes the timestamp; the actual saving
        # is rate-limited inside ImageSaver. Kept for compatibility.
        if helmet_detected and current_time - self.last_save_time >= self.min_interval:
            self.last_save_time = current_time

        return frame  # annotated frame


# 视频采集线程
class VideoThread(QThread):
    """Reads frames from an RTSP stream or an MP4 file and feeds the inference worker.

    For MP4 sources the annotated frames are additionally collected and
    written out as a new video once the file has been fully processed.
    """

    update_frame = pyqtSignal(QImage)

    def __init__(self, source, model_path, center_ratio, nested_classes, config):
        super().__init__()
        self.source = source
        self.config = config
        self.inference_worker = InferenceWorker(model_path, center_ratio, nested_classes)
        self.inference_worker.result_ready.connect(self.on_inference_done)
        self.inference_worker.start()
        self.running = True
        self.is_mp4_source = self.source["type"] == "mp4"
        # NOTE(review): annotated frames are held in memory until the video
        # ends; very long MP4 files may exhaust RAM.
        self.annotated_frames = []

    @pyqtSlot(np.ndarray)
    def on_inference_done(self, annotated_frame):
        """Convert an annotated BGR frame to QImage and emit it to the UI."""
        rgb_image = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        # Pass the row stride explicitly — without it QImage assumes 32-bit
        # aligned scanlines and shears frames whose width*3 is not a multiple
        # of 4 — and .copy() so the QImage owns its pixel data after
        # rgb_image goes out of scope.
        qt_img = QImage(rgb_image.data, w, h, ch * w, QImage.Format_RGB888).copy()
        self.update_frame.emit(qt_img)

        if self.is_mp4_source:
            self.annotated_frames.append(annotated_frame.copy())

    def run(self):
        """Capture loop: read frames, hand them to the worker, then (MP4 only)
        write the collected annotated frames to a new video file."""
        cap = cv2.VideoCapture(self.source["url"], cv2.CAP_FFMPEG)
        cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)  # keep latency low for live streams

        if self.is_mp4_source:
            fps = cap.get(cv2.CAP_PROP_FPS) or 25  # fall back when FPS metadata is missing/zero
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            video_path = Path(self.source["url"])

            # Unique output name: <stem>_<timestamp>_<millis>_detected.mp4
            timestamp = time.strftime("%Y%m%d_%H%M%S")
            millis = str(int(time.time() * 1000) % 1000).zfill(3)
            unique_name = f"{video_path.stem}_{timestamp}_{millis}_detected.mp4"
            output_video = Path(self.config["save_dirs"]["full_frames"]) / "videos" / unique_name
            output_video.parent.mkdir(parents=True, exist_ok=True)

        while self.running:
            ret, frame = cap.read()
            if not ret:
                if self.is_mp4_source:
                    break  # end of file
                else:
                    # Live stream dropped: wait, then reconnect.
                    time.sleep(2)
                    cap.open(self.source["url"])
                    continue

            # Drop frames when the worker is busy instead of blocking capture.
            if self.inference_worker.frame_queue.qsize() < 2:
                self.inference_worker.frame_queue.put(frame.copy())

        # === Write the annotated video (MP4 sources only) ===
        if self.is_mp4_source and self.annotated_frames:
            success = False
            out = None
            for codec in ['H264', 'X264', 'mp4v']:
                fourcc = cv2.VideoWriter_fourcc(*codec)
                out = cv2.VideoWriter(str(output_video), fourcc, fps, (width, height))
                if out.isOpened():
                    print(f"✅ 使用编码器: {codec}")
                    break
                out.release()  # free the failed writer before trying the next codec
            else:
                print(f"❌ 无法创建视频写入器：{output_video}")
                cap.release()
                self.inference_worker.running = False
                return

            try:
                for annotated_frame in self.annotated_frames:
                    if annotated_frame is None or annotated_frame.size == 0:
                        continue
                    # VideoWriter silently drops frames of the wrong size.
                    if annotated_frame.shape[1] != width or annotated_frame.shape[0] != height:
                        annotated_frame = cv2.resize(annotated_frame, (width, height))
                    out.write(annotated_frame)
                success = True
            except Exception as e:
                print(f"❌ 写入视频失败: {e}")
            finally:
                out.release()
                if success:
                    print(f"🎉 带框视频已成功保存：{output_video}")
                else:
                    # Remove the partially written file.
                    if output_video.exists():
                        output_video.unlink()
                        print(f"🗑️ 已删除损坏文件：{output_video}")

        cap.release()
        # Signal the worker loop to exit.
        self.inference_worker.running = False


# 主窗口
class MainWindow(QMainWindow):
    """Main window: one stacked video label per configured source."""

    def __init__(self, sources, model_path, center_ratio, nested_classes, config):
        super().__init__()
        self.sources = sources
        self.model_path = model_path
        self.center_ratio = center_ratio
        self.nested_classes = nested_classes
        self.config = config
        self.init_ui()
        self.init_video_threads()

    def init_ui(self):
        """Build the window layout with one QLabel per video source."""
        self.setWindowTitle("监控实时画面")
        self.setGeometry(100, 100, 1280, 720)
        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        layout = QVBoxLayout(central_widget)
        self.video_labels = [QLabel(parent=central_widget) for _ in self.sources]
        for label in self.video_labels:
            label.setAlignment(Qt.AlignCenter)
            label.setMinimumSize(640, 480)
            layout.addWidget(label)

    def init_video_threads(self):
        """Start one capture thread (plus its inference worker) per source."""
        self.threads = []
        for index, source in enumerate(self.sources):
            thread = VideoThread(source, self.model_path, self.center_ratio, self.nested_classes, self.config)
            thread.update_frame.connect(self.update_video_frame(index))
            thread.start()
            self.threads.append(thread)

    def closeEvent(self, event):
        """Stop all capture AND inference threads before the window closes."""
        for thread in self.threads:
            thread.running = False
            thread.quit()
            thread.wait()
            # Also shut down the per-thread inference worker; leaving it
            # running causes "QThread: Destroyed while thread is still
            # running" on application exit.
            worker = thread.inference_worker
            worker.running = False
            worker.quit()
            worker.wait()
        event.accept()

    def update_video_frame(self, index):
        """Return a slot that paints incoming frames onto video label ``index``."""
        def handler(image):
            if not hasattr(self, 'video_labels') or index >= len(self.video_labels):
                return
            label = self.video_labels[index]
            scaled_img = image.scaled(label.width(), label.height(), Qt.KeepAspectRatio, Qt.SmoothTransformation)
            label.setPixmap(QPixmap.fromImage(scaled_img))
        return handler


# 主程序入口
# Program entry point: build the Qt application, ensure the output
# directories exist, and show the main window.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    config = load_config()
    # parents=True so nested configured paths (e.g. "output/frames") do not
    # raise FileNotFoundError when their parent directory is missing.
    for dir_path in config["save_dirs"].values():
        Path(dir_path).mkdir(parents=True, exist_ok=True)
    window = MainWindow(
        sources=config["sources"],
        model_path=config["model_path"],
        center_ratio=config["center_detection_ratio"],
        nested_classes=config["nested_classes"],
        config=config
    )
    window.show()
    sys.exit(app.exec_())