import sys
from threading import Thread

import cv2
import numpy as np
import time
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QVBoxLayout, QWidget
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import Qt, QThread, pyqtSignal
from multiprocessing import Queue
from pathlib import Path
import json
import logging
import os
import shutil
from logging.handlers import RotatingFileHandler
from datetime import datetime
from openvino.runtime import Core
import yaml


# ==================== 按日期+序号滚动日志 ====================
class DateRotatingFileHandler(RotatingFileHandler):
    """Size-based rotating handler that archives the rolled-over log under a
    date-stamped name (``<stem>_YYYY-MM-DD[-N].log``) instead of the stock
    numeric ``.1``/``.2`` suffixes of RotatingFileHandler.
    """

    def __init__(self, filename, maxBytes=0, backupCount=0, encoding=None, delay=False):
        # Force append mode; size/backup semantics come from RotatingFileHandler.
        super().__init__(filename, mode='a', maxBytes=maxBytes, backupCount=backupCount, encoding=encoding, delay=delay)
        self.base_dir = os.path.dirname(filename) or "."  # directory holding the log and its archives
        self.base_name = os.path.basename(filename)
        self.stem = os.path.splitext(self.base_name)[0]   # filename without extension

    def doRollover(self):
        """Archive the active log as ``<stem>_<today>[-N].log`` and prune old archives.

        Invoked by RotatingFileHandler machinery when ``maxBytes`` is exceeded.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        today_str = datetime.now().strftime("%Y-%m-%d")
        target_base = f"{self.stem}_{today_str}"
        next_index = 0
        # Probe for the first free archive name for today:
        # stem_DATE.log, then stem_DATE-1.log, stem_DATE-2.log, ...
        target_file = os.path.join(self.base_dir, f"{target_base}.log")
        while os.path.exists(target_file):
            next_index += 1
            target_file = os.path.join(self.base_dir, f"{target_base}-{next_index}.log")
        try:
            if os.path.exists(self.baseFilename):
                shutil.move(self.baseFilename, target_file)
        except Exception as e:  # NOTE(review): `e` is unused; handleError reports to stderr
            self.handleError(None)
        if not self.delay:
            self.stream = self._open()
        if self.backupCount > 0:
            import re
            # Match only this handler's date-stamped archives, nothing else in the dir.
            date_pattern = re.compile(rf"^{re.escape(self.stem)}_\d{{4}}-\d{{2}}-\d{{2}}(-\d+)?\.log$")
            archives = [f for f in os.listdir(self.base_dir) if date_pattern.match(f)]
            # Newest first by mtime; keeps the newest backupCount-1 archives
            # (presumably so the active file counts toward backupCount — TODO confirm).
            archives.sort(key=lambda x: os.path.getmtime(os.path.join(self.base_dir, x)), reverse=True)
            for old_file in archives[self.backupCount - 1:]:
                try:
                    os.remove(os.path.join(self.base_dir, old_file))
                except Exception:
                    pass  # best-effort cleanup; a locked/vanished file is not fatal


def setup_logging(log_dir="logs", log_filename="Record.log", level=logging.INFO, max_bytes=2 * 1024 * 1024, backup_count=3):
    """Create (or reconfigure) the shared "InferenceSpeed" logger backed by a
    date-stamped, size-rotating file handler.

    Returns the configured logger instance.
    """
    os.makedirs(log_dir, exist_ok=True)
    logger = logging.getLogger("InferenceSpeed")
    logger.setLevel(level)
    # Drop handlers left over from a previous call so records are not duplicated.
    if logger.hasHandlers():
        logger.handlers.clear()
    file_handler = DateRotatingFileHandler(
        os.path.join(log_dir, log_filename),
        maxBytes=max_bytes,
        backupCount=backup_count,
        encoding='utf-8',
    )
    file_handler.setFormatter(
        logging.Formatter(
            fmt='%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S',
        )
    )
    logger.addHandler(file_handler)
    return logger


# Module-wide logger shared by every class below.
SPEED_LOGGER = setup_logging()


# ==================== 加载配置文件 ====================
def load_config():
    """Load and validate the JSON config living next to the script (or inside
    the PyInstaller bundle).

    Returns a dict with: ``rtsp_urls``, ``model_path`` (str; resolved to the
    ``model.xml`` when a directory was configured), ``save_dirs``,
    ``center_detection_ratio``, ``nested_classes`` and ``classes_to_show``.
    Exits the process with a readable message on any fatal problem.
    """
    if getattr(sys, 'frozen', False):
        # Running from a PyInstaller bundle: resources are unpacked under _MEIPASS.
        base_dir = Path(sys._MEIPASS)
    else:
        base_dir = Path(__file__).parent
    config_path = base_dir / "config-pt-onnx-openVINO.json"
    try:
        with open(config_path, "r", encoding="utf-8") as f:
            config = json.load(f)
    except FileNotFoundError:
        sys.exit(f"错误：配置文件 {config_path} 不存在")
    except json.JSONDecodeError:
        sys.exit(f"错误：配置文件 {config_path} 格式不正确")

    # One Hikvision-style RTSP URL per configured camera.
    rtsp_urls = [
        f"rtsp://{cam['username']}:{cam['password']}@{cam['ip']}:554/Streaming/Channels/{cam['channel']}"
        for cam in config["cameras"]
    ]

    model_path = Path(config["model_path"])
    if model_path.is_dir():
        # A directory was configured: resolve to the model.xml inside it.
        # (The previous code only looked for model.xml when the path did NOT
        # exist, which can never succeed — a child of a missing path cannot exist.)
        candidate = model_path / "model.xml"
        if candidate.exists():
            model_path = candidate
        else:
            sys.exit(f"错误：模型文件 {model_path} 不存在")
    elif not model_path.exists():
        sys.exit(f"错误：模型文件 {model_path} 不存在")

    return {
        "rtsp_urls": rtsp_urls,
        "model_path": str(model_path),
        "save_dirs": config["save_dirs"],
        "center_detection_ratio": config.get("center_detection_ratio", 0.4),
        "nested_classes": config.get("nested_classes", {"wheel": ["hel", "nohel"]}),
        "classes_to_show": config["classes_to_show"]
    }


# ==================== 图像保存模块 ====================
class RawFrameSaver:
    """Asynchronously saves unannotated ("raw") frames to disk, throttled to at
    most one frame per ``min_interval`` seconds.

    Producers queue frames via :meth:`add_frame`; a background daemon thread
    performs the actual disk writes.
    """

    def __init__(self, save_dir="raw_frames"):
        config = load_config()
        # The configured directory wins; the parameter is only a fallback.
        self.save_dir = Path(config["save_dirs"].get("raw_frames", save_dir))
        # parents=True so a nested configured path does not make mkdir fail.
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.frame_queue = Queue(maxsize=30)
        self.min_interval = 1.0   # seconds between two consecutive saves
        self.last_save_time = 0
        self.thread = Thread(target=self._save_worker, daemon=True)
        self.thread.start()

    def add_frame(self, frame, timestamp_str):
        """Queue a copy of ``frame`` for saving; drop it when the queue is full."""
        # NOTE(review): multiprocessing.Queue.qsize() may raise NotImplementedError
        # on macOS; assumed to work on the deployment platform — confirm.
        if self.frame_queue.qsize() < 30:
            self.frame_queue.put((frame.copy(), timestamp_str))

    def _save_worker(self):
        """Daemon loop: drain the queue, writing at most one frame per interval.

        Blocks on the queue and sleeps out the remainder of the interval.  The
        previous implementation looped without sleeping whenever a frame was
        queued but the interval had not yet elapsed, burning a full CPU core.
        """
        while True:
            frame, timestamp_str = self.frame_queue.get()  # blocks until a frame arrives
            remaining = self.min_interval - (time.time() - self.last_save_time)
            if remaining > 0:
                time.sleep(remaining)  # honor the rate limit without spinning
            filename = f"{timestamp_str}.jpg"
            cv2.imwrite(str(self.save_dir / filename), frame)
            self.last_save_time = time.time()
            SPEED_LOGGER.info(f"💾 已保存原始帧: {filename}")


class FrameSaver:
    """Synchronously saves full annotated frames, at most one per ``min_interval`` seconds."""

    def __init__(self, save_dir="full_frames"):
        config = load_config()
        # The configured directory wins; the parameter is only a fallback.
        self.save_dir = Path(config["save_dirs"].get("full_frames", save_dir))
        # parents=True so a nested configured path does not make mkdir fail.
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.last_save_time = 0
        self.min_interval = 1.0  # seconds between two consecutive saves

    def save_full_frame(self, frame, timestamp_str):
        """Write ``frame`` as ``<timestamp_str>.jpg`` unless called again within the interval."""
        current_time = time.time()
        if current_time - self.last_save_time >= self.min_interval:
            filename = f"{timestamp_str}.jpg"
            save_path = self.save_dir / filename
            cv2.imwrite(str(save_path), frame)
            self.last_save_time = current_time
            SPEED_LOGGER.info(f"📸 已保存完整带框图: {filename}")


class ImageSaver:
    """Crops detected objects out of frames and saves them as JPEGs, rate-limited."""

    def __init__(self, save_dir="detected_objects"):
        config = load_config()
        # The configured directory wins; the parameter is only a fallback.
        self.save_dir = Path(config["save_dirs"].get("detected_objects", save_dir))
        # parents=True so a nested configured path does not make mkdir fail.
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.frame_counter = 0        # increasing suffix: avoids same-second name clashes
        self.last_save_time = time.time()
        self.min_interval = 0.5       # seconds between two consecutive crops

    def save_cropped_image(self, frame, box, class_name, conf):
        """Save the region ``box`` (x1, y1, x2, y2) of ``frame`` as a JPEG.

        Silently skips when rate-limited or when the clamped crop is empty.
        """
        current_time = time.time()
        if current_time - self.last_save_time < self.min_interval:
            return
        h, w = frame.shape[:2]
        x1, y1, x2, y2 = map(int, box)
        # Clamp to the frame bounds: negative values would otherwise wrap around
        # via numpy negative indexing and yield a bogus crop.
        x1, x2 = max(0, x1), min(w, x2)
        y1, y2 = max(0, y1), min(h, y2)
        if x2 <= x1 or y2 <= y1:
            return
        cropped = frame[y1:y2, x1:x2]
        if cropped.size == 0:
            return
        timestamp = time.strftime("%Y%m%d_%H%M%S")
        filename = f"{timestamp}_{class_name}_{conf:.2f}_{self.frame_counter:04d}.jpg"
        save_path = self.save_dir / filename
        try:
            # imencode + tofile instead of imwrite: survives non-ASCII paths on Windows.
            ret, buffer = cv2.imencode('.jpg', cropped, [cv2.IMWRITE_JPEG_QUALITY, 90])
            if ret:
                buffer.tofile(str(save_path))
                self.frame_counter += 1
                self.last_save_time = current_time
                SPEED_LOGGER.info(f"✂️ 已裁剪保存: {filename}")
        except Exception as e:
            SPEED_LOGGER.error(f"保存图像失败: {str(e)}")


# ==================== 推理线程 ====================
class InferenceWorker(QThread):
    """QThread that letterboxes incoming frames, runs an OpenVINO detector and
    emits annotated frames via ``result_ready``.

    Expected model: single output of shape (1, 4 + num_classes, num_anchors),
    YOLO-style [cx, cy, w, h, class scores...] per anchor.
    """

    result_ready = pyqtSignal(np.ndarray)

    def __init__(self, model_path, center_ratio, nested_classes, class_names=None):
        super().__init__()
        self.model_path = Path(model_path)
        self.center_ratio = center_ratio              # relative side of the central detection zone
        self.nested_classes_config = nested_classes
        self.class_names = class_names or ["hel", "nohel", "wheel"]
        self.expected_num_classes = len(self.class_names)
        self.saver = ImageSaver()                     # cropped-object writer
        self.frame_saver = FrameSaver()               # annotated full-frame writer
        self.raw_saver = RawFrameSaver()              # raw full-frame writer (async)
        self.frame_queue = Queue(maxsize=2)           # tiny on purpose: prefer fresh frames
        self.min_interval = 1.0                       # seconds between triggered saves
        self.last_save_time = 0
        self.unified_imgsz = 640                      # model input side length
        self.model = self._load_model()

    def _load_model(self):
        """Compile the OpenVINO model on CPU and sanity-check its output layout.

        Raises FileNotFoundError when the xml/bin files are missing and
        ValueError when the class-channel count does not match ``class_names``.
        """
        core = Core()
        # Accept either a direct .xml path or a directory containing model.xml.
        xml_path = self.model_path if self.model_path.suffix == ".xml" else self.model_path / "model.xml"
        bin_path = xml_path.with_suffix(".bin")

        if not xml_path.exists():
            raise FileNotFoundError(f"未找到模型文件: {xml_path}")
        if not bin_path.exists():
            raise FileNotFoundError(f"未找到权重文件: {bin_path}")

        model = core.read_model(model=str(xml_path))
        compiled_model = core.compile_model(model, "CPU")
        out_shape = compiled_model.output(0).partial_shape
        # Channel dim = 4 box coords + one score per class.
        channels = out_shape[1].get_length()
        num_classes = channels - 4
        if num_classes != self.expected_num_classes:
            raise ValueError(f"模型输出维度异常：预期 {4 + self.expected_num_classes}，实际 {channels}")
        SPEED_LOGGER.info(f"✅ 成功加载 OpenVINO 模型: {xml_path.name}")
        return compiled_model

    def run(self):
        """Consume frames from ``frame_queue`` forever, emitting annotated results."""
        while True:
            frame = self.frame_queue.get()
            results = self.predict(frame)
            annotated_frame = self.process_results(frame, results)
            self.result_ready.emit(annotated_frame)

    def _empty_result(self):
        """An empty detection dict carrying the class-name mapping."""
        return {
            'boxes': np.empty((0, 4)),
            'confs': np.empty((0,)),
            'cls_ids': np.empty((0,), dtype=int),
            'names': dict(enumerate(self.class_names))
        }

    def predict(self, frame):
        """Run one inference pass on ``frame``.

        Returns a one-element list holding a dict with ``boxes`` (xyxy in
        original-image pixels), ``confs``, ``cls_ids`` and ``names``.  Every
        error path returns an empty detection set instead of raising, so the
        worker thread never dies.
        """
        try:
            # 1. Make the frame contiguous (crash guard for cv2 / OpenVINO).
            frame = np.ascontiguousarray(frame)
            h, w = frame.shape[:2]

            # 2. Scale by the longest side so both dims fit into 640, keeping aspect ratio.
            scale = self.unified_imgsz / max(h, w)
            new_h = int(h * scale)
            new_w = int(w * scale)

            SPEED_LOGGER.info(f"📐 缩放: {w}x{h} → {new_w}x{new_h}, scale={scale:.3f}")

            # 3. Resize, keeping the output buffer contiguous.
            resized_frame = cv2.resize(frame, (new_w, new_h), interpolation=cv2.INTER_AREA)
            resized_frame = np.ascontiguousarray(resized_frame)

            # 4. Letterbox: pad with black to a square 640x640.  The max-side
            #    scaling above guarantees the padding is non-negative.
            pad_h = self.unified_imgsz - new_h
            pad_w = self.unified_imgsz - new_w
            top = pad_h // 2
            bottom = pad_h - top
            left = pad_w // 2
            right = pad_w - left

            padded_frame = cv2.copyMakeBorder(
                resized_frame,
                top=top, bottom=bottom,
                left=left, right=right,
                borderType=cv2.BORDER_CONSTANT,
                value=(0, 0, 0)
            )
            padded_frame = np.ascontiguousarray(padded_frame)

            # 5. Preprocess: BGR -> RGB, HWC -> CHW, add batch dim, scale to [0, 1].
            rgb_frame = cv2.cvtColor(padded_frame, cv2.COLOR_BGR2RGB)
            input_tensor = np.expand_dims(rgb_frame.transpose(2, 0, 1), axis=0).astype(np.float32) / 255.0

            # 6. Inference.  Expected output shape: (1, 4 + num_classes, num_anchors).
            try:
                output = self.model(input_tensor)[0]
            except Exception as e:
                SPEED_LOGGER.error(f"❌ 推理失败: {str(e)}", exc_info=True)
                return [self._empty_result()]

            # 7. Post-processing.
            if output.shape[0] == 1 and len(output.shape) == 3:
                output = output[0].transpose(1, 0)  # -> (num_anchors, 4 + num_classes)

                # [cx, cy, w, h] per anchor; the rescale math below assumes these
                # are in letterboxed 640x640 pixel coordinates — confirm per model.
                boxes_cxcywh = output[:, :4]
                cls_scores = output[:, 4:]
                final_conf = np.max(cls_scores, axis=1)    # best class score per anchor
                class_ids = np.argmax(cls_scores, axis=1)

                mask = final_conf >= 0.5
                num_detected = mask.sum()
                SPEED_LOGGER.debug(f"置信度≥0.5 的检测数量: {num_detected}, 最高置信度: {final_conf.max():.3f}")

                if num_detected == 0:
                    unified_results = [self._empty_result()]
                else:
                    valid_boxes = boxes_cxcywh[mask]

                    # ---- debug log chain: raw boxes -> xyxy -> original image ----
                    SPEED_LOGGER.info(f"✅ 模型输出 (cxcywh 归一化):")
                    for i, box in enumerate(valid_boxes[:3]):
                        SPEED_LOGGER.info(
                            f"    Box {i}: [cx={box[0]:.3f}, cy={box[1]:.3f}, w={box[2]:.3f}, h={box[3]:.3f}]")

                    boxes_xyxy = self._cxcywh_to_xyxy(valid_boxes)
                    SPEED_LOGGER.info(f"✅ 转换为 xyxy (640x640 上):")
                    for i, (x1, y1, x2, y2) in enumerate(boxes_xyxy[:3]):
                        SPEED_LOGGER.info(f"    Box {i}: [{x1:.1f}, {y1:.1f}, {x2:.1f}, {y2:.1f}]")

                    boxes_original = self._rescale_boxes(boxes_xyxy, (new_h, new_w), (top, left), (h, w))
                    SPEED_LOGGER.info(f"✅ 还原到原始图像 ({w}x{h}):")
                    for i, (x1, y1, x2, y2) in enumerate(boxes_original[:3]):
                        xc = (x1 + x2) / 2
                        yc = (y1 + y2) / 2
                        SPEED_LOGGER.info(
                            f"    Box {i}: [{x1:.1f}, {y1:.1f}, {x2:.1f}, {y2:.1f}] → 中心点=({xc:.1f}, {yc:.1f})")
                    # ---- end debug log chain ----

                    unified_results = [{
                        'boxes': boxes_original,
                        'confs': final_conf[mask],
                        'cls_ids': class_ids[mask].astype(int),
                        'names': dict(enumerate(self.class_names))
                    }]

                    for conf, cls_id in zip(final_conf[mask], class_ids[mask]):
                        SPEED_LOGGER.info(f"✅ 检测到: {self.class_names[cls_id]} | 置信度: {conf:.3f}")
            else:
                SPEED_LOGGER.warning(f"不支持的输出形状: {output.shape}")
                unified_results = [{
                    'boxes': np.empty((0, 4)),
                    'confs': np.empty((0,)),
                    'cls_ids': np.empty((0,), dtype=int),
                    'names': {}
                }]

            return unified_results

        except Exception as e:
            SPEED_LOGGER.error(f"🚨 predict() 函数发生严重错误: {str(e)}", exc_info=True)
            # Return an empty result so the worker thread keeps running.
            return [self._empty_result()]

    def _cxcywh_to_xyxy(self, boxes):
        """Convert boxes of shape (N, 4) from [cx, cy, w, h] to [x1, y1, x2, y2].

        Coordinates stay in whatever space they came in (here: the 640x640
        letterboxed image).
        """
        x_c = boxes[:, 0]
        y_c = boxes[:, 1]
        w = boxes[:, 2]
        h = boxes[:, 3]
        x1 = x_c - w / 2
        y1 = y_c - h / 2
        x2 = x_c + w / 2
        y2 = y_c + h / 2
        return np.stack([x1, y1, x2, y2], axis=-1)

    def _rescale_boxes(self, boxes, resized_shape, padding, original_shape):
        """Map xyxy boxes from the 640x640 letterboxed image back to original pixels.

        The previous version validated the range with ``assert`` on the first
        box only — stripped under ``python -O`` and blind to all other boxes —
        so it was removed; the clip below bounds every coordinate instead.
        """
        new_h, new_w = resized_shape
        pad_top, pad_left = padding
        orig_h, orig_w = original_shape

        boxes_copy = boxes.copy()
        boxes_copy[:, [0, 2]] -= pad_left           # undo horizontal padding
        boxes_copy[:, [1, 3]] -= pad_top            # undo vertical padding
        boxes_copy[:, [0, 2]] *= orig_w / new_w     # scale x back to original width
        boxes_copy[:, [1, 3]] *= orig_h / new_h     # scale y back to original height
        # Clip into the original image bounds.
        boxes_copy[:, :4] = np.clip(boxes_copy[:, :4], 0, [orig_w, orig_h, orig_w, orig_h])
        return boxes_copy

    def process_results(self, frame, results):
        """Draw the central zone and in-zone detections on ``frame``; trigger
        raw/annotated/cropped image saves.  Returns the annotated frame.
        """
        # Work on a contiguous copy so cv2 drawing is guaranteed to take effect.
        frame = np.ascontiguousarray(frame.copy())
        h, w = frame.shape[:2]
        font_scale = max(0.6, h / 720.0 * 0.8)          # scale label text with frame height
        font_thickness = max(1, int(font_scale * 1.5))

        # Central detection zone: only boxes whose center falls inside it count.
        cx1 = int(w * (0.5 - self.center_ratio / 2))
        cy1 = int(h * (0.5 - self.center_ratio / 2))
        cx2 = int(w * (0.5 + self.center_ratio / 2))
        cy2 = int(h * (0.5 + self.center_ratio / 2))
        cv2.rectangle(frame, (cx1, cy1), (cx2, cy2), (0, 255, 255), 2)

        raw_frame = frame.copy()    # snapshot before detection boxes are drawn
        any_target_detected = False
        detected_targets = []

        class_colors = {
            "wheel": (0, 255, 0),
            "hel": (255, 0, 0),
            "nohel": (0, 0, 255),
        }

        for result in results:
            boxes = result.get('boxes', [])
            confs = result.get('confs', [])
            cls_ids = result.get('cls_ids', [])
            names = result.get('names', {})

            for box, conf, cls_id in zip(boxes, confs, cls_ids):
                if conf < 0.5:
                    continue
                class_name = names.get(cls_id, f"class{cls_id}")

                # Keep only detections whose box center lies in the central zone.
                xc = int((box[0] + box[2]) / 2)
                yc = int((box[1] + box[3]) / 2)
                SPEED_LOGGER.info(f"✅ xc: {xc} yc: {yc}")
                SPEED_LOGGER.info(f"✅ cx1: {cx1} cx2: {cx2}")
                SPEED_LOGGER.info(f"✅ cy1: {cy1} cy2: {cy2}")

                if cx1 <= xc <= cx2 and cy1 <= yc <= cy2:
                    detected_targets.append({
                        "box": box.copy(),
                        "conf": conf,
                        "class_name": class_name
                    })
                    any_target_detected = True

        for target in detected_targets:
            x1, y1, x2, y2 = map(int, target["box"])
            self.saver.save_cropped_image(frame, target["box"], target["class_name"], target["conf"])
            color = class_colors.get(target["class_name"], (128, 128, 128))
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
            label = f"{target['class_name']} {target['conf']:.2f}"
            (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, font_thickness)
            # Semi-transparent label background, then white label text on top.
            overlay = frame.copy()
            cv2.rectangle(overlay, (x1, max(0, y1 - th - 5)), (x1 + tw, y1), color, -1)
            alpha = 0.4
            frame = cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0)
            cv2.putText(frame, label, (x1, max(0, y1 - 5)),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, (255, 255, 255), font_thickness, cv2.LINE_AA)

        # Rate-limited save of the raw + annotated full frames on any detection.
        current_time = time.time()
        if any_target_detected and current_time - self.last_save_time >= self.min_interval:
            ts_str = time.strftime("%Y%m%d_%H%M%S_") + str(int(current_time * 1000))
            self.raw_saver.add_frame(raw_frame, "detect_" + ts_str)
            self.frame_saver.save_full_frame(frame.copy(), "detect_" + ts_str)
            self.last_save_time = current_time
            SPEED_LOGGER.info(f"✅ 已触发图像保存: {ts_str}")

        return frame


# ==================== 视频采集与显示线程 ====================
class VideoThread(QThread):
    """Pulls frames from one RTSP stream, feeds them to a dedicated
    InferenceWorker and re-emits annotated frames as QImage via ``update_frame``.
    """

    update_frame = pyqtSignal(QImage)

    def __init__(self, rtsp_url, model_path, center_ratio, nested_classes, class_names):
        super().__init__()
        self.rtsp_url = rtsp_url
        self.inference_worker = InferenceWorker(model_path, center_ratio, nested_classes, class_names)
        self.inference_worker.result_ready.connect(self.on_inference_done)
        self.inference_worker.start()
        self.running = True  # cleared by MainWindow.closeEvent to stop the loop

    def run(self):
        """Capture loop: read frames, reconnect on failure, feed the worker queue."""
        cap = cv2.VideoCapture(self.rtsp_url, cv2.CAP_FFMPEG)
        try:
            while self.running:
                ret, frame = cap.read()
                if not ret:
                    # Stream hiccup: back off briefly, then rebuild the connection.
                    time.sleep(2)
                    cap.release()
                    # Keep the FFMPEG backend on reconnect — the previous plain
                    # open(url) fell back to the default backend.
                    cap.open(self.rtsp_url, cv2.CAP_FFMPEG)
                    continue
                # Drop frames when the worker is busy so inference stays near-realtime.
                if self.inference_worker.frame_queue.qsize() < 2:
                    self.inference_worker.frame_queue.put(frame.copy())
        finally:
            cap.release()  # previously leaked when the loop was stopped

    def on_inference_done(self, annotated_frame):
        """Convert a BGR ndarray to QImage and emit it toward the UI thread."""
        try:
            rgb = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
            rgb = np.ascontiguousarray(rgb)  # QImage requires a contiguous buffer
            h, w, ch = rgb.shape
            bytes_per_line = ch * w
            qt_image = QImage(rgb.data, w, h, bytes_per_line, QImage.Format_RGB888)
            if qt_image.isNull():
                SPEED_LOGGER.error("❌ QImage is Null! 数据不连续")
            else:
                # Deep-copy before emitting: the QImage above only wraps the numpy
                # buffer, which is freed when this function returns, so a queued
                # cross-thread signal would otherwise deliver a dangling image.
                self.update_frame.emit(qt_image.copy())
        except Exception as e:
            SPEED_LOGGER.error(f"图像转换失败: {str(e)}", exc_info=True)


# ==================== 主窗口 ====================
class MainWindow(QMainWindow):
    """Top-level window showing one video label per configured RTSP stream."""

    def __init__(self, rtsp_urls, model_path, center_ratio, nested_classes, class_names):
        super().__init__()
        self.rtsp_urls = rtsp_urls
        self.init_ui()
        self.init_video_threads(rtsp_urls, model_path, center_ratio, nested_classes, class_names)

    def init_ui(self):
        """Build the central widget with one stacked QLabel per stream."""
        self.setWindowTitle("非机动车未戴头盔监控实时检测画面")
        self.setGeometry(100, 100, 1280, 720)
        container = QWidget()
        self.setCentralWidget(container)
        vbox = QVBoxLayout(container)
        self.video_labels = []
        for _ in self.rtsp_urls:
            video_label = QLabel(parent=container)
            video_label.setAlignment(Qt.AlignCenter)
            video_label.setMinimumSize(640, 480)
            vbox.addWidget(video_label)
            self.video_labels.append(video_label)

    def init_video_threads(self, rtsp_urls, model_path, center_ratio, nested_classes, class_names):
        """Start one VideoThread per URL and route its frames to the matching label."""
        self.threads = []
        for idx, url in enumerate(rtsp_urls):
            worker = VideoThread(url, model_path, center_ratio, nested_classes, class_names)
            # Bind the label as a default argument so each lambda keeps its own target.
            worker.update_frame.connect(
                lambda img, lbl=self.video_labels[idx]: self.update_label(lbl, img)
            )
            worker.start()
            self.threads.append(worker)

    def update_label(self, label, qt_img):
        """Scale the incoming QImage to the label and display it."""
        if qt_img.isNull():
            SPEED_LOGGER.warning("尝试显示空图像")
            return
        fitted = qt_img.scaled(label.width(), label.height(), Qt.KeepAspectRatio, Qt.SmoothTransformation)
        label.setPixmap(QPixmap.fromImage(fitted))

    def closeEvent(self, event):
        """Stop every capture thread before letting the window close."""
        for worker in self.threads:
            worker.running = False
            worker.quit()
            worker.wait()
        event.accept()


# ==================== 启动入口 ====================
if __name__ == "__main__":
    app = QApplication(sys.argv)
    config = load_config()
    # Pre-create every configured save directory.  parents=True so a nested
    # path (e.g. "out/raw") does not make the previous mkdir(exist_ok=True) fail.
    for d in config["save_dirs"].values():
        Path(d).mkdir(parents=True, exist_ok=True)
    window = MainWindow(
        rtsp_urls=config["rtsp_urls"],
        model_path=config["model_path"],
        center_ratio=config["center_detection_ratio"],
        nested_classes=config["nested_classes"],
        class_names=config["classes_to_show"],
    )
    window.show()
    sys.exit(app.exec_())