import os
import shutil
import sys
from threading import Thread
import cv2
import numpy as np
import time
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QVBoxLayout, QWidget
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtCore import Qt, QThread, pyqtSignal
from multiprocessing import Queue
from pathlib import Path
import json
import logging
from logging.handlers import RotatingFileHandler
from datetime import datetime
from openvino import Core  # ✅ Recommended import style (OpenVINO >= 2024)

# ==================== Date + sequence-number rolling log ====================
class DateRotatingFileHandler(RotatingFileHandler):
    """Size-based rotating handler that archives rolled files under dated names.

    Instead of the stdlib ``.1``/``.2`` suffixes, a rolled file becomes
    ``<stem>_<YYYY-MM-DD>.log`` (or ``<stem>_<YYYY-MM-DD>-<n>.log`` when the
    date already exists), and at most ``backupCount`` archives are kept.
    """

    def __init__(self, filename, maxBytes=0, backupCount=0, encoding=None, delay=False):
        # Always append; rotation itself is handled by doRollover below.
        super().__init__(filename, mode='a', maxBytes=maxBytes, backupCount=backupCount, encoding=encoding, delay=delay)
        self.base_dir = os.path.dirname(filename) or "."
        self.base_name = os.path.basename(filename)
        self.stem = os.path.splitext(self.base_name)[0]

    def doRollover(self):
        """Move the active log to a dated archive name and prune old archives."""
        if self.stream:
            self.stream.close()
            self.stream = None
        today_str = datetime.now().strftime("%Y-%m-%d")
        target_base = f"{self.stem}_{today_str}"
        # Find the first free dated name: stem_DATE.log, stem_DATE-1.log, ...
        next_index = 0
        target_file = os.path.join(self.base_dir, f"{target_base}.log")
        while os.path.exists(target_file):
            next_index += 1
            target_file = os.path.join(self.base_dir, f"{target_base}-{next_index}.log")
        try:
            if os.path.exists(self.baseFilename):
                shutil.move(self.baseFilename, target_file)
        except Exception:
            # Delegate to logging's standard error handling; a handler must never raise.
            self.handleError(None)
        if not self.delay:
            self.stream = self._open()
        if self.backupCount > 0:
            import re
            date_pattern = re.compile(rf"^{re.escape(self.stem)}_\d{{4}}-\d{{2}}-\d{{2}}(-\d+)?\.log$")
            archives = [f for f in os.listdir(self.base_dir) if date_pattern.match(f)]
            # Newest first. Keep exactly backupCount archives: the original
            # sliced from backupCount - 1, deleting one archive too many.
            archives.sort(key=lambda x: os.path.getmtime(os.path.join(self.base_dir, x)), reverse=True)
            for old_file in archives[self.backupCount:]:
                try:
                    os.remove(os.path.join(self.base_dir, old_file))
                except Exception:
                    pass


def setup_logging(log_dir="logs", log_filename="Record.log", level=logging.INFO, max_bytes=2 * 1024 * 1024, backup_count=3):
    """Create (or reconfigure) the shared 'InferenceSpeed' logger.

    Records go to ``log_dir/log_filename`` via DateRotatingFileHandler, so
    rolled files carry a date suffix. Any existing handlers are cleared first,
    making repeated calls idempotent (no duplicated log lines).
    """
    # The redundant function-local `import os` was removed: os is imported at module level.
    os.makedirs(log_dir, exist_ok=True)
    log_path = os.path.join(log_dir, log_filename)
    logger = logging.getLogger("InferenceSpeed")
    logger.setLevel(level)
    # Drop stale handlers so a second call does not duplicate output.
    if logger.hasHandlers():
        logger.handlers.clear()
    handler = DateRotatingFileHandler(log_path, maxBytes=max_bytes, backupCount=backup_count, encoding='utf-8')
    formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger


SPEED_LOGGER = setup_logging()


# ==================== Configuration loading ====================
def load_config():
    """Read config-pt-onnx-openVINO.json located next to the script/executable.

    Returns a dict with RTSP URLs, the resolved model path, save directories,
    and detection/display parameters (defaults applied for optional keys).
    Exits the process with a message if the file is missing, malformed, or the
    model file cannot be found.
    """
    # When frozen (e.g. PyInstaller), the config sits next to the executable.
    if getattr(sys, 'frozen', False):
        base_dir = Path(sys.executable).parent
    else:
        base_dir = Path(__file__).parent
    config_path = base_dir / "config-pt-onnx-openVINO.json"
    SPEED_LOGGER.info(f"配置文件config_path路径: {config_path}")
    try:
        with open(config_path, "r", encoding="utf-8") as f:
            config = json.load(f)
    except FileNotFoundError:
        sys.exit(f"错误：配置文件 {config_path} 不存在")
    except json.JSONDecodeError:
        sys.exit(f"错误：配置文件格式不正确")

    rtsp_urls = [cam['url'] for cam in config["cameras"]]

    # Accept either a direct path to the .xml file or a directory holding
    # model.xml. (The original only looked for model.xml when the path did not
    # exist at all — a child of a nonexistent path can never exist, so that
    # fallback was unreachable; resolve directories explicitly instead.)
    model_path = Path(config["model_path"])
    if model_path.is_dir():
        model_path = model_path / "model.xml"
    if not model_path.exists():
        sys.exit(f"错误：模型文件 {model_path} 不存在")

    return {
        "rtsp_urls": rtsp_urls,
        "model_path": str(model_path),
        "save_dirs": config["save_dirs"],
        "center_detection_ratio": config.get("center_detection_ratio", 0.4),
        "nested_classes": config.get("nested_classes", {"wheel": ["hel", "nohel"]}),
        "classes_to_show": config["classes_to_show"],
        "image_size": config["image_size"],
        "frame_interval": config.get("frame_interval", 2),
    }


# ==================== Image saving modules ====================
class RawFrameSaver:
    """Asynchronously saves unannotated frames to the configured raw_frames dir.

    Frames are queued by the inference thread and written by a daemon worker,
    throttled to at most one file per ``min_interval`` seconds.
    """

    def __init__(self, save_dir="raw_frames"):
        # NOTE: the directory actually comes from config["save_dirs"]["raw_frames"];
        # the save_dir parameter is kept only for backward compatibility.
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["raw_frames"])
        self.save_dir.mkdir(exist_ok=True)
        self.frame_queue = Queue(maxsize=30)
        self.min_interval = 1.0  # minimum seconds between saved files
        self.last_save_time = 0
        self.thread = Thread(target=self._save_worker, daemon=True)
        self.thread.start()

    def add_frame(self, frame, timestamp_str):
        """Queue a copy of *frame* for saving; drops it when the queue is full."""
        # qsize() is only approximate for a multiprocessing.Queue, but an
        # occasional dropped frame is acceptable here.
        if self.frame_queue.qsize() < 30:
            self.frame_queue.put((frame.copy(), timestamp_str))

    def _save_worker(self):
        """Daemon loop: block for the next frame, honor the save interval, write it.

        The original polled the queue and spun in a tight loop (no sleep) while
        the interval had not yet elapsed; blocking get + sleeping the remaining
        interval removes that busy-wait.
        """
        while True:
            frame, timestamp_str = self.frame_queue.get()
            remaining = self.min_interval - (time.time() - self.last_save_time)
            if remaining > 0:
                time.sleep(remaining)
            filename = f"{timestamp_str}.jpg"
            cv2.imwrite(str(self.save_dir / filename), frame)
            self.last_save_time = time.time()
            # Log the actual filename (the original logged a literal "(unknown)" placeholder).
            SPEED_LOGGER.info(f"💾 已保存原始帧: {filename}")


class FrameSaver:
    """Synchronously saves annotated full frames, at most one per ``min_interval`` seconds."""

    def __init__(self, save_dir="full_frames"):
        # NOTE: the directory comes from config["save_dirs"]["full_frames"];
        # the save_dir parameter is retained only for backward compatibility.
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["full_frames"])
        self.save_dir.mkdir(exist_ok=True)
        self.last_save_time = 0
        self.min_interval = 1.0  # minimum seconds between saved files

    def save_full_frame(self, frame, timestamp_str):
        """Write *frame* as ``<timestamp_str>.jpg`` unless called again too soon."""
        current_time = time.time()
        if current_time - self.last_save_time >= self.min_interval:
            filename = f"{timestamp_str}.jpg"
            save_path = self.save_dir / filename
            cv2.imwrite(str(save_path), frame)
            self.last_save_time = current_time
            # Log the actual path (the original logged a literal "(unknown)" placeholder).
            SPEED_LOGGER.info(f"📸 已保存完整带框图: {save_path}")


class ImageSaver:
    """Saves cropped detection boxes as maximum-quality JPEGs, rate limited."""

    def __init__(self, save_dir="detected_objects"):
        # NOTE: the directory comes from config["save_dirs"]["detected_objects"];
        # the save_dir parameter is retained only for backward compatibility.
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["detected_objects"])
        self.save_dir.mkdir(exist_ok=True)
        self.frame_counter = 0  # monotonically increasing suffix to avoid name clashes
        self.last_save_time = time.time()
        self.min_interval = 0.5  # minimum seconds between saved crops

    def save_cropped_image(self, frame, box, class_name, conf):
        """Crop *box* (x1, y1, x2, y2) out of *frame* and save it to disk.

        Silently skipped when called within ``min_interval`` of the previous
        save or when the crop would be empty.
        """
        current_time = time.time()
        if current_time - self.last_save_time < self.min_interval:
            return
        x1, y1, x2, y2 = map(int, box)
        cropped = frame[y1:y2, x1:x2]
        if cropped.size == 0:
            return
        timestamp = time.strftime("%Y%m%d_%H%M%S")
        filename = f"{timestamp}_{class_name}_{conf:.2f}_{self.frame_counter:04d}.jpg"
        save_path = self.save_dir / filename
        try:
            # imencode + tofile instead of imwrite so non-ASCII paths work (e.g. on Windows).
            ret, buffer = cv2.imencode('.jpg', cropped, [cv2.IMWRITE_JPEG_QUALITY, 100])
            if ret:
                buffer.tofile(str(save_path))
                self.frame_counter += 1
                self.last_save_time = current_time
                # Log the actual filename (the original logged a literal "(unknown)" placeholder).
                SPEED_LOGGER.info(f"✂️ 已裁剪保存: {filename}")
        except Exception as e:
            SPEED_LOGGER.error(f"保存图像失败: {str(e)}")


# ==================== Inference thread ====================
class InferenceWorker(QThread):
    """Background QThread running OpenVINO detection on frames from ``frame_queue``.

    Handles two export flavors of the detector:
      * NMS baked in  -> output shaped like [1, 300, 6]; per row the first four
        values are treated as box corners, index 4 as confidence, index 5 as
        the class id (see predict()).
      * raw head      -> output [1, C, N] with per-class scores; NMS is applied
        here via cv2.dnn.NMSBoxes.
    Each annotated frame is emitted through ``result_ready``.
    """

    # Carries the annotated BGR frame (np.ndarray) to the UI thread.
    result_ready = pyqtSignal(np.ndarray)

    def __init__(self, model_path, center_ratio, nested_classes, class_names=None, image_size=320):
        super().__init__()
        self.model_path = Path(model_path)
        # Fraction of the frame (centered region) inside which detections are acted on.
        self.center_ratio = center_ratio
        self.nested_classes_config = nested_classes
        self.class_names = class_names or ["hel", "nohel", "wheel"]
        self.expected_num_classes = len(self.class_names)
        self.saver = ImageSaver()
        self.frame_saver = FrameSaver()
        self.raw_saver = RawFrameSaver()
        # Shallow queue: producers drop frames instead of letting latency build up.
        self.frame_queue = Queue(maxsize=2)
        self.min_interval = 1.0  # seconds between triggered image saves
        self.last_save_time = 0
        self.unified_imgsz = image_size  # square letterbox size fed to the network
        self.model = self._load_model()

    def _empty_result(self):
        """Return a result list with zero detections, matching predict()'s shape."""
        return [{
            'boxes': np.empty((0, 4)),
            'confs': np.empty((0,)),
            'cls_ids': np.empty((0,), dtype=int),
            'names': dict(enumerate(self.class_names))
        }]

    def _load_model(self):
        """Load and compile the OpenVINO model on CPU; detect whether NMS is baked in.

        Sets ``self.is_nms_model`` and returns the compiled model. Raises
        FileNotFoundError when the .xml/.bin pair is missing, ValueError when
        the raw model's channel count disagrees with the configured class list.
        """
        core = Core()
        # Accept either a direct .xml path or a directory containing model.xml.
        xml_path = self.model_path if self.model_path.suffix == ".xml" else self.model_path / "model.xml"
        bin_path = xml_path.with_suffix(".bin")

        if not xml_path.exists():
            raise FileNotFoundError(f"未找到模型文件: {xml_path}")
        if not bin_path.exists():
            raise FileNotFoundError(f"未找到权重文件: {bin_path}")

        model = core.read_model(model=str(xml_path))
        compiled_model = core.compile_model(model, "CPU")

        output = compiled_model.output(0)
        out_shape = output.partial_shape
        shape_list = [dim.get_length() for dim in out_shape]

        SPEED_LOGGER.info(f"🔧 模型输出形状: {shape_list}")

        # Decide whether NMS is baked in: output like [1, 300, 6] or [1, 300, 7]
        if len(shape_list) == 3 and shape_list[0] == 1 and shape_list[1] >= 100:
            self.is_nms_model = True
            self.nms_output = output
            SPEED_LOGGER.info(f"✅ 已加载【带NMS】的模型，最大输出框数: {shape_list[1]}, 字段数: {shape_list[2]}")
        else:
            # Raw head: channels = 4 box coordinates + one score per class.
            C = out_shape[1].get_length()
            num_classes = C - 4
            if num_classes != self.expected_num_classes:
                raise ValueError(f"模型输出维度异常：预期 {4 + self.expected_num_classes}，实际 {C}")
            self.is_nms_model = False
            self.regular_output = output
            SPEED_LOGGER.info(f"⚠️ 已加载【原始输出】模型，通道数 C={C}")

        return compiled_model

    def run(self):
        """Thread body: forever pull a frame, run detection, emit the annotated frame."""
        while True:
            frame = self.frame_queue.get()
            results = self.predict(frame)
            annotated_frame = self.process_results(frame, results)
            self.result_ready.emit(annotated_frame)

    def predict(self, frame):
        """Run one detection pass on a BGR *frame*.

        Returns a one-element list of dicts with 'boxes' (xyxy, original image
        coordinates), 'confs', 'cls_ids' and 'names'. Never raises: errors are
        logged and an empty result is returned instead.
        """
        try:
            start_total = time.time()
            h, w = frame.shape[:2]

            # -------------------------------
            # 1. Preprocessing: letterbox to unified_imgsz, BGR->RGB, NCHW float in [0, 1]
            # -------------------------------
            start_pre = time.time()
            frame = np.ascontiguousarray(frame)

            scale = self.unified_imgsz / max(h, w)
            new_h, new_w = int(h * scale), int(w * scale)

            resized_frame = cv2.resize(frame, (new_w, new_h), interpolation=cv2.INTER_AREA)
            pad_h = self.unified_imgsz - new_h
            pad_w = self.unified_imgsz - new_w
            top = pad_h // 2
            bottom = pad_h - top
            left = pad_w // 2
            right= pad_w - left

            padded_frame = cv2.copyMakeBorder(resized_frame, top, bottom, left, right,
                                              borderType=cv2.BORDER_CONSTANT, value=(0, 0, 0))
            rgb_frame = cv2.cvtColor(padded_frame, cv2.COLOR_BGR2RGB)
            input_tensor = np.expand_dims(rgb_frame.transpose(2, 0, 1), axis=0).astype(np.float32) / 255.0
            pre_time = time.time() - start_pre

            # -------------------------------
            # 2. Inference
            # -------------------------------
            start_inf = time.time()
            if self.is_nms_model:
                result = self.model(input_tensor)[0]
                result = np.ascontiguousarray(result).astype(np.float32)
                if result.ndim > 2:
                    result = result[0]  # to (N, 6)

                # Row layout: 4 box values, then confidence, then class id.
                boxes_640 = result[:, :4]
                confs = result[:, 4]
                cls_ids = result[:, 5].astype(int)

                valid_mask = confs > 0.5
                if not valid_mask.any():
                    inf_time = time.time() - start_inf
                    post_time = time.time() - start_inf  # NOTE(review): baseline is start_inf, so this duplicates inf_time — confirm intended
                    total_time = time.time() - start_total
                    SPEED_LOGGER.info(f"⏱ 推理耗时 | 总: {total_time * 1000:.1f}ms | 预处理: {pre_time * 1000:.1f}ms | "
                                      f"推理: {inf_time * 1000:.1f}ms | 后处理: {post_time * 1000:.1f}ms | 检测数: 0")
                    return self._empty_result()

                boxes_filtered = boxes_640[valid_mask]
                boxes_original = self._rescale_boxes(boxes_filtered, (new_h, new_w), (top, left), (h, w))

                inf_time = time.time() - start_inf
                # -------------------------------
                # 3. Post-processing
                # -------------------------------
                start_post = time.time()
                res = [{
                    'boxes': boxes_original,
                    'confs': confs[valid_mask],
                    'cls_ids': cls_ids[valid_mask],
                    'names': dict(enumerate(self.class_names))
                }]
                post_time = time.time() - start_post

            else:
                # Raw-output model path: per-class scores, NMS applied here.
                output = self.model(input_tensor)[0]  # (C, 8400)
                C, N = output.shape
                output = output.transpose(1, 0)  # (N, C)
                box_cxcywh = output[:, :4]
                scores = output[:, 4:]
                max_conf = np.max(scores, axis=1)
                cls_ids = np.argmax(scores, axis=1)

                conf_mask = max_conf >= 0.5
                if not conf_mask.any():
                    inf_time = time.time() - start_inf
                    post_time = time.time() - start_inf  # NOTE(review): same start_inf baseline quirk as the NMS branch
                    total_time = time.time() - start_total
                    SPEED_LOGGER.info(f"⏱ 推理耗时 | 总: {total_time * 1000:.1f}ms | 预处理: {pre_time * 1000:.1f}ms | "
                                      f"推理: {inf_time * 1000:.1f}ms | 后处理: {post_time * 1000:.1f}ms | 检测数: 0")
                    return self._empty_result()

                box_xyxy = self._cxcywh_to_xyxy(box_cxcywh[conf_mask])
                confs = max_conf[conf_mask]
                cls_ids = cls_ids[conf_mask]

                # Boxes/scores are converted to Python lists for cv2.dnn.NMSBoxes.
                indices = cv2.dnn.NMSBoxes(
                    boxes=box_xyxy.tolist(),
                    scores=confs.tolist(),
                    score_threshold=0.5,
                    nms_threshold=0.45,
                    eta=1.0,
                    top_k=300
                )
                indices = indices.flatten() if len(indices) > 0 else []

                final_boxes = box_xyxy[indices]
                final_confs = confs[indices]
                final_cls_ids = cls_ids[indices]

                final_boxes_orig = self._rescale_boxes(final_boxes, (new_h, new_w), (top, left), (h, w))

                inf_time = time.time() - start_inf
                # -------------------------------
                # 3. Post-processing
                # -------------------------------
                start_post = time.time()
                res = [{
                    'boxes': final_boxes_orig,
                    'confs': final_confs,
                    'cls_ids': final_cls_ids,
                    'names': dict(enumerate(self.class_names))
                }]
                post_time = time.time() - start_post

            total_time = time.time() - start_total
            num_dets = len(res[0]['boxes'])

            # Log per-stage timings (milliseconds).
            SPEED_LOGGER.info(f"⏱ 推理耗时 | 总: {total_time * 1000:.1f}ms | 预处理: {pre_time * 1000:.1f}ms | "
                              f"推理: {inf_time * 1000:.1f}ms | 后处理: {post_time * 1000:.1f}ms | 检测数: {num_dets}")

            return res

        except Exception as e:
            total_time = time.time() - start_total
            SPEED_LOGGER.error(f"🚨 predict() 发生错误 ({total_time * 1000:.1f}ms): {str(e)}", exc_info=True)
            return self._empty_result()

    def _cxcywh_to_xyxy(self, boxes):
        """Convert (cx, cy, w, h) boxes to corner form (x1, y1, x2, y2)."""
        x_c = boxes[:, 0]
        y_c = boxes[:, 1]
        w = boxes[:, 2]
        h = boxes[:, 3]
        x1 = x_c - w / 2
        y1 = y_c - h / 2
        x2 = x_c + w / 2
        y2 = y_c + h / 2
        return np.stack([x1, y1, x2, y2], axis=-1)

    def _rescale_boxes(self, boxes, resized_shape, padding, original_shape):
        """Map letterboxed-image boxes back to original image coordinates.

        Removes the padding offset, undoes the resize scale, and clips to the
        original frame bounds. Returns a new array; *boxes* is not modified.
        """
        new_h, new_w = resized_shape
        pad_top, pad_left = padding
        orig_h, orig_w = original_shape

        boxes_copy = boxes.copy()
        boxes_copy[:, [0, 2]] -= pad_left
        boxes_copy[:, [1, 3]] -= pad_top
        boxes_copy[:, [0, 2]] *= orig_w / new_w
        boxes_copy[:, [1, 3]] *= orig_h / new_h
        boxes_copy[:, :4] = np.clip(boxes_copy[:, :4], 0, [orig_w, orig_h, orig_w, orig_h])
        return boxes_copy

    def process_results(self, frame, results):
        """Draw the center region and every detection whose center falls inside it.

        Qualifying detections are drawn with class-colored boxes and labels,
        cropped to disk via ImageSaver, and may trigger a throttled save of
        both the raw and the annotated frame. Returns the annotated frame.
        """
        frame = np.ascontiguousarray(frame.copy())
        h, w = frame.shape[:2]
        # Scale label text with frame height so overlays stay legible.
        font_scale = max(0.6, h / 720.0 * 0.8)
        font_thickness = max(1, int(font_scale * 1.5))
        raw_frame = frame.copy()
        # Centered detection rectangle covering center_ratio of each dimension.
        cx1 = int(w * (0.5 - self.center_ratio / 2))
        cy1 = int(h * (0.5 - self.center_ratio / 2))
        cx2 = int(w * (0.5 + self.center_ratio / 2))
        cy2 = int(h * (0.5 + self.center_ratio / 2))
        cv2.rectangle(frame, (cx1, cy1), (cx2, cy2), (0, 255, 255), 2)


        any_target_detected = False
        detected_targets = []

        # BGR colors per class; unknown classes fall back to gray below.
        class_colors = {
            "wheel": (0, 255, 0),
            "hel": (255, 0, 0),
            "nohel": (0, 0, 255),
        }

        for result in results:
            boxes = result.get('boxes', [])
            confs = result.get('confs', [])
            cls_ids = result.get('cls_ids', [])
            names = result.get('names', {})

            for box, conf, cls_id in zip(boxes, confs, cls_ids):
                class_name = names.get(cls_id, f"class{cls_id}")
                # Keep only detections whose box center lies inside the central region.
                xc = int((box[0] + box[2]) / 2)
                yc = int((box[1] + box[3]) / 2)

                if cx1 <= xc <= cx2 and cy1 <= yc <= cy2:
                    detected_targets.append({
                        "box": box.copy(),
                        "conf": conf,
                        "class_name": class_name
                    })
                    any_target_detected = True

        for target in detected_targets:
            x1, y1, x2, y2 = map(int, target["box"])
            self.saver.save_cropped_image(frame, target["box"], target["class_name"], target["conf"])
            color = class_colors.get(target["class_name"], (128, 128, 128))
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
            label = f"{target['class_name']} {target['conf']:.2f}"
            (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, font_thickness)
            # Semi-transparent label background behind the white text.
            overlay = frame.copy()
            cv2.rectangle(overlay, (x1, max(0, y1 - th - 5)), (x1 + tw, y1), color, -1)
            alpha = 0.4
            frame = cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0)
            cv2.putText(frame, label, (x1, max(0, y1 - 5)),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, (255, 255, 255), font_thickness, cv2.LINE_AA)

        current_time = time.time()
        # Throttled save of both the raw and annotated frames when anything was detected.
        if any_target_detected and current_time - self.last_save_time >= self.min_interval:
            ts_str = time.strftime("%Y%m%d_%H%M%S_") + str(int(current_time * 1000))
            self.raw_saver.add_frame(raw_frame, "detect_" + ts_str)
            self.frame_saver.save_full_frame(frame.copy(), "detect_" + ts_str)
            self.last_save_time = current_time
            SPEED_LOGGER.info(f"✅ 已触发图像保存: {ts_str}")

        return frame


# ==================== Video capture and display thread ====================
class VideoThread(QThread):
    """Captures frames from one RTSP stream, feeds the inference worker, and
    relays annotated frames to the UI as QImage via ``update_frame``."""

    update_frame = pyqtSignal(QImage)

    def __init__(self, rtsp_url, model_path, center_ratio, nested_classes, class_names, image_size, frame_interval):
        super().__init__()
        self.frame_interval = frame_interval  # process every Nth frame
        self.rtsp_url = rtsp_url
        self.inference_worker = InferenceWorker(model_path, center_ratio, nested_classes, class_names, image_size)
        self.inference_worker.result_ready.connect(self.on_inference_done)
        self.inference_worker.start()
        self.running = True

    def run(self):
        """Capture loop: reconnect on failure, subsample frames, enqueue for inference."""
        cap = cv2.VideoCapture(self.rtsp_url, cv2.CAP_FFMPEG)
        frame_count = 0  # frame counter used for subsampling
        while self.running:
            ret, frame = cap.read()
            if not ret:
                # Brief back-off, then reconnect with the same FFMPEG backend
                # (the original reopened without it, silently changing backends).
                time.sleep(2)
                cap.release()
                cap.open(self.rtsp_url, cv2.CAP_FFMPEG)
                continue

            frame_count += 1
            # Only process every frame_interval-th frame (tunable: 2, 4, 5, ...).
            if frame_count % self.frame_interval != 0:
                continue

            # qsize() is approximate; dropping frames when backed up is intentional.
            if self.inference_worker.frame_queue.qsize() < 2:
                self.inference_worker.frame_queue.put(frame.copy())
        # Release the capture when the thread is asked to stop (was leaked before).
        cap.release()

    def on_inference_done(self, annotated_frame):
        """Convert an annotated BGR frame to QImage and emit it for display."""
        try:
            rgb = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
            rgb = np.ascontiguousarray(rgb)  # QImage requires contiguous memory
            h, w, ch = rgb.shape
            bytes_per_line = ch * w
            qt_image = QImage(rgb.data, w, h, bytes_per_line, QImage.Format_RGB888)
            if qt_image.isNull():
                SPEED_LOGGER.error("❌ QImage is Null! 数据不连续")
            else:
                self.update_frame.emit(qt_image)
        except Exception as e:
            SPEED_LOGGER.error(f"图像转换失败: {str(e)}", exc_info=True)


# ==================== Main window ====================
class MainWindow(QMainWindow):
    """Top-level window: one stacked QLabel per RTSP stream, each fed by a VideoThread."""

    def __init__(self, rtsp_urls, model_path, center_ratio, nested_classes, class_names, image_size, frame_interval):
        super().__init__()
        self.rtsp_urls = rtsp_urls
        self.init_ui()
        self.init_video_threads(rtsp_urls, model_path, center_ratio, nested_classes, class_names, image_size, frame_interval)

    def init_ui(self):
        """Build the central widget with one video label per configured stream."""
        self.setWindowTitle("非机动车未戴头盔监控实时检测画面")
        self.setGeometry(100, 100, 1150, 600)
        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        layout = QVBoxLayout(central_widget)
        self.video_labels = []
        for _ in self.rtsp_urls:
            video_label = QLabel(parent=central_widget)
            video_label.setAlignment(Qt.AlignCenter)
            video_label.setMinimumSize(640, 480)
            layout.addWidget(video_label)
            self.video_labels.append(video_label)

    def init_video_threads(self, rtsp_urls, model_path, center_ratio, nested_classes, class_names, image_size, frame_interval):
        """Start one capture/inference thread per stream and wire it to its label."""
        self.threads = []
        for url, target_label in zip(rtsp_urls, self.video_labels):
            worker = VideoThread(url, model_path, center_ratio, nested_classes, class_names, image_size, frame_interval)
            # Bind the label as a default argument so each connection keeps its own target.
            worker.update_frame.connect(
                lambda img, lbl=target_label: self.update_label(lbl, img)
            )
            worker.start()
            self.threads.append(worker)

    def update_label(self, label, qt_img):
        """Scale the incoming frame to the label and display it."""
        if qt_img.isNull():
            SPEED_LOGGER.warning("尝试显示空图像")
            return
        scaled = qt_img.scaled(label.width(), label.height(), Qt.KeepAspectRatio, Qt.SmoothTransformation)
        label.setPixmap(QPixmap.fromImage(scaled))

    def closeEvent(self, event):
        """Stop every capture thread cleanly before the window closes."""
        for video_thread in self.threads:
            video_thread.running = False
            video_thread.quit()
            video_thread.wait()
        event.accept()


# ==================== Entry point ====================
def _main():
    """Construct the Qt application, ensure save directories exist, and run the UI loop."""
    # The QApplication must exist before any widgets are created.
    app = QApplication(sys.argv)
    config = load_config()
    # Make sure every configured output directory is present up front.
    for save_dir in config["save_dirs"].values():
        Path(save_dir).mkdir(exist_ok=True)
    window = MainWindow(
        rtsp_urls=config["rtsp_urls"],
        model_path=config["model_path"],
        center_ratio=config["center_detection_ratio"],
        nested_classes=config["nested_classes"],
        class_names=config["classes_to_show"],
        image_size=config["image_size"],
        frame_interval=config["frame_interval"]
    )
    window.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    _main()