import sys
import cv2
import numpy as np
import time
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QVBoxLayout, QWidget
from PyQt5.QtGui import QImage, QPixmap, QFont
from PyQt5.QtCore import Qt, QThread, pyqtSignal, pyqtSlot
from ultralytics import YOLO
from multiprocessing import Queue
from pathlib import Path
import json
from threading import Thread


def load_config():
    """Load and validate ``config.json`` located next to this script.

    When running from a PyInstaller bundle (``sys.frozen``), resources are
    looked up in the unpack directory ``sys._MEIPASS`` instead.

    Returns:
        dict with keys ``rtsp_urls``, ``model_path``, ``save_dirs``,
        ``center_detection_ratio`` and ``nested_classes``.

    Exits the process with a readable message on any configuration error
    (missing file, malformed JSON, missing keys, missing model file).
    """
    if getattr(sys, 'frozen', False):
        base_dir = Path(sys._MEIPASS)
    else:
        base_dir = Path(__file__).parent

    config_path = base_dir / "config.json"

    try:
        with open(config_path, "r", encoding="utf-8") as f:
            config = json.load(f)
    except FileNotFoundError:
        sys.exit(f"错误：配置文件 {config_path} 不存在")
    except json.JSONDecodeError:
        sys.exit(f"错误：配置文件格式不正确")

    rtsp_urls = []
    try:
        for cam in config["cameras"]:
            # Main stream URL; for snapshot use the sub-stream ch01_sub.264.
            # url = f"rtsp://{cam['username']}:{cam['password']}@{cam['ip']}:554/Streaming/Channels/{cam['channel']}"
            url = f"rtsp://{cam['username']}:{cam['password']}@{cam['ip']}:554/ch01.264" # 未戴头盔抓拍地址：ch01_sub.264(子码流)
            rtsp_urls.append(url)

        model_path = Path(config["model_path"])
        save_dirs = config["save_dirs"]
    except KeyError as e:
        # Fail fast with the offending key name instead of a raw traceback.
        sys.exit(f"错误：配置文件缺少字段 {e}")

    if not model_path.exists():
        sys.exit(f"错误：模型文件 {model_path} 不存在")

    return {
        "rtsp_urls": rtsp_urls,
        "model_path": str(model_path),
        "save_dirs": save_dirs,
        "center_detection_ratio": config.get("center_detection_ratio", 0.4),
        "nested_classes": config.get("nested_classes", {"wheel": ["hel", "nohel"]})
    }


# 保存未框选的原始帧
class RawFrameSaver:
    """Asynchronously saves un-annotated ("raw") frames to disk.

    Frames are handed over through a bounded queue and written by a daemon
    thread, rate-limited to one image per ``min_interval`` seconds.
    """

    def __init__(self, save_dir="raw_frames"):
        # NOTE(review): the `save_dir` parameter is ignored; the directory
        # always comes from config.json ("raw_frames") — confirm intended.
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["raw_frames"])
        self.save_dir.mkdir(exist_ok=True)
        self.frame_queue = Queue(maxsize=30)
        self.min_interval = 1.0   # minimum seconds between two saved frames
        self.last_save_time = 0

        self.thread = Thread(target=self._save_worker, daemon=True)
        # **********暂时不保存图片
        self.thread.start()

    def add_frame(self, frame, timestamp_str):
        """Queue a copy of `frame` for saving; silently drop when full."""
        import queue  # local: only needed for the Full exception type
        try:
            # put_nowait avoids the check-then-put race of testing qsize()
            # first (another producer could fill the queue in between).
            self.frame_queue.put_nowait((frame.copy(), timestamp_str))
        except queue.Full:
            pass  # drop the frame rather than block the capture thread

    def _save_worker(self):
        """Daemon loop: pop queued frames and write them, rate-limited.

        Blocks on the queue instead of polling, and sleeps out the
        remainder of the rate-limit interval instead of busy-spinning.
        """
        while True:
            frame, timestamp_str = self.frame_queue.get()  # blocks until work arrives
            # Honor the save interval without burning CPU in a hot loop.
            wait = self.min_interval - (time.time() - self.last_save_time)
            if wait > 0:
                time.sleep(wait)
            filename = f"{timestamp_str}.jpg"
            cv2.imwrite(str(self.save_dir / filename), frame)
            self.last_save_time = time.time()

# 保存框选的完整帧
class FrameSaver:
    """Persists fully annotated frames, throttled to one write per second."""

    def __init__(self, save_dir="full_frames"):
        cfg = load_config()
        self.save_dir = Path(cfg["save_dirs"]["full_frames"])
        self.save_dir.mkdir(exist_ok=True)
        self.last_save_time = 0
        self.min_interval = 1.0  # seconds between two saved frames

    def save_full_frame(self, frame, timestamp_str):
        """Write `frame` as ``<timestamp_str>.jpg`` unless rate-limited."""
        now = time.time()
        if now - self.last_save_time < self.min_interval:
            return
        target = self.save_dir / f"{timestamp_str}.jpg"
        cv2.imwrite(str(target), frame)
        self.last_save_time = now


# 图像保存模块
class ImageSaver:
    """Saves cropped detection regions as JPEGs, throttled to one per 0.5 s."""

    def __init__(self, save_dir="detected_objects"):
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["detected_objects"])
        self.save_dir.mkdir(exist_ok=True)
        self.frame_counter = 0           # monotonically increasing file suffix
        self.last_save_time = time.time()
        self.min_interval = 0.5          # seconds between two saved crops

    def save_cropped_image(self, frame, box, class_name, conf):
        """Crop `box` (x1, y1, x2, y2) out of `frame` and save it as JPEG.

        Coordinates are clamped to the frame bounds: detector boxes can
        slightly exceed the image, and negative indices would otherwise
        wrap around in the numpy slice and produce a wrong crop.
        """
        current_time = time.time()
        if current_time - self.last_save_time < self.min_interval:
            return

        h, w = frame.shape[:2]
        x1, y1, x2, y2 = map(int, box)
        # Clamp into the valid pixel range before slicing.
        x1, x2 = max(0, x1), min(w, x2)
        y1, y2 = max(0, y1), min(h, y2)
        cropped = frame[y1:y2, x1:x2]
        if cropped.size == 0:
            return

        timestamp = time.strftime("%Y%m%d_%H%M%S")
        filename = f"{timestamp}_{class_name}_{conf:.2f}_{self.frame_counter:04d}.jpg"
        save_path = self.save_dir / filename

        try:
            # imencode + tofile handles non-ASCII paths that plain
            # cv2.imwrite cannot write on Windows.
            ret, buffer = cv2.imencode('.jpg', cropped, [cv2.IMWRITE_JPEG_QUALITY, 90])
            if ret:
                buffer.tofile(str(save_path))
                self.frame_counter += 1
                self.last_save_time = current_time
        except Exception as e:
            print(f"保存图像时发生错误：{str(e)}")


# 视频采集线程
class VideoThread(QThread):
    """Reads frames from one RTSP stream and feeds them to an InferenceWorker.

    Emits `update_frame` with the annotated frame converted to a QImage.
    """

    update_frame = pyqtSignal(QImage)

    def __init__(self, rtsp_url, model_path, center_ratio, nested_classes):
        super().__init__()
        self.rtsp_url = rtsp_url
        self.inference_worker = InferenceWorker(model_path, center_ratio, nested_classes)
        self.inference_worker.result_ready.connect(self.on_inference_done)
        self.inference_worker.start()
        self.running = True  # cleared by MainWindow.closeEvent to stop run()

    def run(self):
        print(f"rtsp地址:  {self.rtsp_url}")
        cap = cv2.VideoCapture(self.rtsp_url, cv2.CAP_FFMPEG)
        try:
            while self.running:
                ret, frame = cap.read()
                if not ret:
                    self.reset_connection(cap)
                    continue
                # Drop frames when the worker is backed up so the display
                # stays close to real time instead of lagging.
                if self.inference_worker.frame_queue.qsize() < 2:
                    self.inference_worker.frame_queue.put(frame.copy())
        finally:
            cap.release()  # free the capture handle when the thread stops

    def reset_connection(self, cap):
        """Re-open the RTSP stream after a failed read."""
        cap.release()
        time.sleep(2)
        cap.open(self.rtsp_url)

    def on_inference_done(self, annotated_frame):
        """Convert the BGR result to a QImage and emit it to the UI."""
        rgb_image = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        # Pass bytesPerLine (ch * w) explicitly: without it QImage assumes
        # 32-bit-aligned rows and garbles frames whose width*3 is not a
        # multiple of 4. .copy() detaches the QImage from the numpy buffer,
        # which may be freed while Qt still references it.
        qt_img = QImage(rgb_image.data, w, h, ch * w, QImage.Format_RGB888).copy()
        self.update_frame.emit(qt_img)


# 主窗口
class MainWindow(QMainWindow):
    """Top-level window: one QLabel per camera, each fed by a VideoThread."""

    def __init__(self, rtsp_urls, model_path, center_ratio, nested_classes):
        super().__init__()
        self.rtsp_urls = rtsp_urls
        self.model_path = model_path
        self.center_ratio = center_ratio
        self.nested_classes = nested_classes
        self.init_ui()
        self.init_video_threads()

    def init_ui(self):
        """Build the vertical stack of video display labels."""
        self.setWindowTitle("海康监控实时画面")
        self.setGeometry(100, 100, 1280, 720)

        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        layout = QVBoxLayout(central_widget)

        self.video_labels = [QLabel(parent=central_widget) for _ in self.rtsp_urls]
        for label in self.video_labels:
            label.setAlignment(Qt.AlignCenter)
            label.setMinimumSize(640, 480)
            layout.addWidget(label)

    def init_video_threads(self):
        """Start one VideoThread per RTSP URL and wire it to its label."""
        self.threads = []
        for i, url in enumerate(self.rtsp_urls):
            thread = VideoThread(url, self.model_path, self.center_ratio, self.nested_classes)
            thread.update_frame.connect(self.update_video_frame(i))
            thread.start()
            self.threads.append(thread)

    def closeEvent(self, event):
        """Stop and join all capture threads before the window closes."""
        for thread in self.threads:
            thread.running = False
            thread.quit()
            thread.wait()
        event.accept()

    def update_video_frame(self, index):
        """Return a slot that scales incoming QImages into label `index`.

        This is a slot *factory* called once per thread at connect time.
        It must NOT be decorated with @pyqtSlot(QImage): the decorator
        would mis-declare the factory's (int) signature — it described
        the inner handler, not this method.
        """
        def handler(image):
            scaled_img = image.scaled(
                self.video_labels[index].width(),
                self.video_labels[index].height(),
                Qt.KeepAspectRatio
            )
            self.video_labels[index].setPixmap(QPixmap.fromImage(scaled_img))

        return handler


# 推理工作线程
class InferenceWorker(QThread):
    """YOLO inference thread with two-stage helmet detection.

    Stage 1 detects two-wheelers ("wheel") whose centre lies inside a
    central region of the full frame; stage 2 re-runs the model on each
    wheel crop to decide between "hel" (helmet) and "nohel" (no helmet).
    Annotated frames are emitted through `result_ready`; detection
    evidence is persisted through the saver helpers.
    """

    result_ready = pyqtSignal(np.ndarray)

    def __init__(self, model_path, center_ratio, nested_classes):
        super().__init__()
        self.model = YOLO(model_path)
        self.center_ratio = center_ratio             # side ratio of the centre ROI
        self.nested_classes_config = nested_classes  # e.g. {"wheel": ["hel", "nohel"]}
        self.saver = ImageSaver()
        self.frame_saver = FrameSaver()
        self.raw_saver = RawFrameSaver()
        self.frame_queue = Queue(maxsize=2)          # producer drops frames when full
        self.min_interval = 1.0                      # seconds between full-frame saves
        self.last_save_time = 0

    def run(self):
        """Consume frames forever: infer, annotate, emit."""
        while True:
            frame = self.frame_queue.get()           # blocks until a frame arrives
            results = self.model(frame, imgsz=640, conf=0.5)
            annotated_frame = self.process_results(frame, results)
            self.result_ready.emit(annotated_frame)

    def process_results(self, frame, results):
        """Annotate `frame` with wheel/helmet detections and save evidence.

        Returns the annotated frame (may be a new array because the
        semi-transparent label backgrounds are drawn via addWeighted).
        """
        height, width, _ = frame.shape
        # Scale label text relative to a 720p baseline.
        base_height = 720.0
        font_scale = max(0.6, height / base_height * 0.8)
        font_thickness = max(1, int(font_scale * 1.5))

        # Centre detection region: only boxes whose centre falls inside
        # this rectangle are considered.
        center_x1 = int(width * (0.5 - self.center_ratio / 2))
        center_y1 = int(height * (0.5 - self.center_ratio / 2))
        center_x2 = int(width * (0.5 + self.center_ratio / 2))
        center_y2 = int(height * (0.5 + self.center_ratio / 2))

        helmet_detected = False
        raw_frame = frame.copy()  # pristine copy for the raw-frame saver
        cv2.rectangle(frame, (center_x1, center_y1), (center_x2, center_y2), (0, 255, 255), 2)

        twowheel_boxes = []
        helmet_classes = self.nested_classes_config.get("wheel", [])

        for result in results:
            boxes = result.boxes.xyxy.cpu().numpy()
            confidences = result.boxes.conf.cpu().numpy()
            class_ids = result.boxes.cls.cpu().numpy().astype(int)

            for i, (xyxy, conf, cls_id) in enumerate(zip(boxes, confidences, class_ids)):
                if conf < 0.5:
                    continue
                class_name = result.names[cls_id]

                x_center = int((xyxy[0] + xyxy[2]) / 2)
                y_center = int((xyxy[1] + xyxy[3]) / 2)
                in_center = center_x1 <= x_center <= center_x2 and center_y1 <= y_center <= center_y2
                if not in_center:
                    continue

                if class_name == "wheel":
                    twowheel_boxes.append({"box": xyxy.copy(), "conf": conf, "class_name": class_name})

        for tw_data in twowheel_boxes:
            tw_box = tw_data["box"]
            tw_conf = tw_data["conf"]
            x1, y1, x2, y2 = map(int, tw_box)
            roi = frame[y1:y2, x1:x2]

            if roi.size == 0:
                continue

            # Second-stage inference restricted to the wheel crop.
            roi_results = self.model(roi, imgsz=640, conf=0.65, iou=0.45)

            # Aggregate helmet results so "hel" and "nohel" cannot conflict:
            # "hel" always outranks "nohel", and within a class the
            # highest-confidence detection wins. (The previous logic kept
            # the *last* "hel" seen regardless of confidence, and only the
            # *first* "nohel" — its confidence comparison was unreachable.)
            best_helmet_class = None
            best_conf = 0.0
            best_box = None

            for r in roi_results:
                for box in r.boxes:
                    if box.conf[0] < 0.5:
                        continue
                    cls_id = int(box.cls[0])
                    class_name = r.names[cls_id]
                    if class_name not in helmet_classes:
                        continue

                    if class_name == "hel":
                        # First "hel" always wins over "nohel"; later "hel"
                        # wins only with higher confidence.
                        if best_helmet_class != "hel" or box.conf[0] > best_conf:
                            best_helmet_class = "hel"
                            best_conf = box.conf[0]
                            best_box = box
                    elif best_helmet_class != "hel" and box.conf[0] > best_conf:
                        best_helmet_class = "nohel"
                        best_conf = box.conf[0]
                        best_box = box

            # Draw and persist the winning result.
            if best_helmet_class:
                helmet_detected = True

                self.saver.save_cropped_image(frame.copy(), tw_box, "wheel", tw_conf)
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

                label = f"wheel {tw_conf:.2f}"
                (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, font_thickness)
                overlay = frame.copy()
                cv2.rectangle(overlay, (x1, max(0, y1 - th - 5)), (x1 + tw, y1), (0, 255, 0), -1)
                alpha = 0.4  # label background transparency
                frame = cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0)
                cv2.putText(frame, label, (x1, max(0, y1 - 5)),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, (0, 0, 0), font_thickness, cv2.LINE_AA)

                # Map the helmet box from crop coordinates back to the frame.
                roi_x1, roi_y1, roi_x2, roi_y2 = map(int, best_box.xyxy[0].cpu().numpy())
                abs_x1, abs_y1, abs_x2, abs_y2 = x1 + roi_x1, y1 + roi_y1, x1 + roi_x2, y1 + roi_y2

                self.saver.save_cropped_image(frame.copy(), [abs_x1, abs_y1, abs_x2, abs_y2], best_helmet_class, best_conf)

                # Blue box for helmet, red for no-helmet (BGR colours).
                color = (255, 0, 0) if best_helmet_class == "hel" else (0, 0, 255)
                cv2.rectangle(frame, (abs_x1, abs_y1), (abs_x2, abs_y2), color, 2)

                label = f"{best_helmet_class} {best_conf:.2f}"
                (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, font_thickness)
                text_x, text_y = abs_x1, max(th + 5, abs_y1 - 5)
                overlay = frame.copy()
                cv2.rectangle(overlay, (text_x, max(0, text_y - th - 5)), (text_x + tw, text_y), color, -1)
                frame = cv2.addWeighted(overlay, 0.4, frame, 0.6, 0)
                cv2.putText(frame, label, (text_x, text_y),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, (255, 255, 255), font_thickness, cv2.LINE_AA)

            else:
                # A wheel was found but no head was classified inside it.
                cv2.rectangle(frame, (x1, y1), (x2, y2), (100, 100, 100), 2, cv2.LINE_AA)
                label = "No helmet"
                (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, font_thickness)
                overlay = frame.copy()
                cv2.rectangle(overlay, (x1, max(0, y1 - th - 5)), (x1 + tw, y1), (100, 100, 100), -1)
                frame = cv2.addWeighted(overlay, 0.4, frame, 0.6, 0)
                cv2.putText(frame, label, (x1, max(0, y1 - 5)),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, (255, 255, 255), font_thickness, cv2.LINE_AA)

        # Rate-limited persistence of the evidence pair (raw + annotated).
        current_time = time.time()
        if helmet_detected and current_time - self.last_save_time >= self.min_interval:
            timestamp_str = time.strftime("%Y%m%d_%H%M%S_") + str(int(current_time * 1000))
            self.raw_saver.add_frame(raw_frame, "detect_" + timestamp_str)
            self.frame_saver.save_full_frame(frame.copy(), "detect_" + timestamp_str)
            self.last_save_time = current_time

        return frame


# 主程序入口
def _main():
    """Bootstrap Qt, ensure output directories exist, show the main window."""
    app = QApplication(sys.argv)

    config = load_config()

    # Create every configured output directory up front so the saver
    # threads never race to create them.
    for directory in config["save_dirs"].values():
        Path(directory).mkdir(exist_ok=True)

    window = MainWindow(
        rtsp_urls=config["rtsp_urls"],
        model_path=config["model_path"],
        center_ratio=config["center_detection_ratio"],
        nested_classes=config["nested_classes"],
    )
    window.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    _main()