import json
import logging
import os
import re
import shutil
import sys
import time
from datetime import datetime
from logging.handlers import RotatingFileHandler
from multiprocessing import Queue
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
from PyQt5.QtCore import Qt, QThread, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap, QFont
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QVBoxLayout, QWidget
from ultralytics import YOLO

from model_wrappers import ONNXModelWrapper, OpenVINOModelWrapper

class DateRotatingFileHandler(RotatingFileHandler):
    """Size-based rotating handler whose archives are named by date plus index.

    On rollover the active file ``Record.log`` is renamed to
    ``Record_YYYY-MM-DD.log``; if that name already exists the suffix ``-1``,
    ``-2``, ... is appended.  At most ``backupCount`` archives are kept
    (counting the one created by the rollover itself).
    """

    def __init__(self, filename, maxBytes=0, backupCount=0, encoding=None, delay=False):
        super().__init__(
            filename,
            mode='a',
            maxBytes=maxBytes,
            backupCount=backupCount,
            encoding=encoding,
            delay=delay
        )
        self.base_dir = os.path.dirname(filename) or "."
        self.base_name = os.path.basename(filename)  # e.g. "Record.log"
        self.stem = os.path.splitext(self.base_name)[0]  # e.g. "Record"
        # Compiled once here instead of on every rollover: matches the
        # "Record_YYYY-MM-DD.log" / "Record_YYYY-MM-DD-N.log" archives
        # produced by doRollover().
        self._archive_pattern = re.compile(
            rf"^{re.escape(self.stem)}_\d{{4}}-\d{{2}}-\d{{2}}(-\d+)?\.log$"
        )

    def doRollover(self):
        """Rotate the current log file.

        - Record.log -> Record_2025-09-17.log
        - If Record_2025-09-17.log exists -> Record_2025-09-17-1.log
        - The index keeps increasing; at most ``backupCount`` archives remain.
        """
        if self.stream:
            self.stream.close()
            self.stream = None

        today_str = datetime.now().strftime("%Y-%m-%d")
        target_base = f"{self.stem}_{today_str}"

        # Find the next free archive name for today.
        next_index = 0
        target_file = os.path.join(self.base_dir, f"{target_base}.log")
        while os.path.exists(target_file):
            next_index += 1
            target_file = os.path.join(self.base_dir, f"{target_base}-{next_index}.log")

        # Rotate: Record.log -> Record_YYYY-MM-DD[-N].log
        try:
            if os.path.exists(self.baseFilename):
                shutil.move(self.baseFilename, target_file)
        except OSError:
            # Report through the logging error machinery; never crash the app.
            self.handleError(None)

        # Re-open a fresh active log file unless opening is deferred.
        if not self.delay:
            self.stream = self._open()

        # Prune old archives so that, counting the one just created,
        # at most ``backupCount`` remain.
        if self.backupCount > 0:
            archives = [
                f for f in os.listdir(self.base_dir)
                if self._archive_pattern.match(f) and f != os.path.basename(target_file)
            ]
            # Newest first by modification time; the tail gets deleted.
            archives.sort(key=lambda x: os.path.getmtime(os.path.join(self.base_dir, x)), reverse=True)

            for old_file in archives[self.backupCount - 1:]:
                try:
                    os.remove(os.path.join(self.base_dir, old_file))
                except OSError:
                    pass  # best-effort cleanup; a leftover archive is harmless


def setup_logging(log_dir="logs", log_filename="Record.log", level=logging.INFO, max_bytes=10*1024*1024, backup_count=3):
    """Configure and return the inference-speed logger.

    Writes to ``logs/Record.log``; rotated archives are renamed by date plus
    index, e.g. ``Record_2025-09-17-1.log``.

    Args:
        log_dir: Directory receiving the log files (created if missing).
        log_filename: Name of the active log file.
        level: Logging level for the returned logger.
        max_bytes: Size threshold that triggers a rollover.
        backup_count: Maximum number of archived log files to keep.

    Returns:
        logging.Logger: The configured "InferenceSpeed" logger.
    """
    # Make sure the log directory exists.
    os.makedirs(log_dir, exist_ok=True)
    log_path = os.path.join(log_dir, log_filename)

    logger = logging.getLogger("InferenceSpeed")
    logger.setLevel(level)
    # Keep records out of the root logger's handlers: console output is
    # deliberately disabled below, so propagating would duplicate messages.
    logger.propagate = False

    # Avoid stacking handlers when setup_logging() is called more than once.
    if logger.hasHandlers():
        logger.handlers.clear()

    # Use the custom date-aware rotating handler.
    handler = DateRotatingFileHandler(
        log_path,
        maxBytes=max_bytes,
        backupCount=backup_count,
        encoding='utf-8'
    )

    # Single-line format with millisecond-precision timestamps.
    formatter = logging.Formatter(
        fmt='%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )
    handler.setFormatter(formatter)

    logger.addHandler(handler)

    # Optional console output (debugging only):
    # console_handler = logging.StreamHandler()
    # console_handler.setFormatter(formatter)
    # logger.addHandler(console_handler)

    return logger


# Module-level logger used throughout for inference-speed records.
SPEED_LOGGER = setup_logging()

def load_config():
    """Load and validate the application configuration.

    Reads ``config-pt-onnx-openVINO.json`` next to the script (or from the
    PyInstaller bundle when frozen), builds the RTSP URL list, and checks
    that the configured model file exists.

    Returns:
        dict: keys ``rtsp_urls``, ``model_path``, ``save_dirs``,
        ``center_detection_ratio``, ``nested_classes``, ``classes_to_show``.

    Exits:
        Via sys.exit() with a message when the config file is missing or
        malformed, or when the model file does not exist.
    """
    if getattr(sys, 'frozen', False):
        # Running from a PyInstaller bundle: resources live in _MEIPASS.
        base_dir = Path(sys._MEIPASS)
    else:
        base_dir = Path(__file__).parent

    config_path = base_dir / "config-pt-onnx-openVINO.json"

    try:
        with open(config_path, "r", encoding="utf-8") as f:
            config = json.load(f)
    except FileNotFoundError:
        sys.exit(f"错误：配置文件 {config_path} 不存在")
    except json.JSONDecodeError as e:
        # Include the path and the parse-error detail (the old message was a
        # placeholder-less f-string carrying neither).
        sys.exit(f"错误：配置文件 {config_path} 格式不正确（{e}）")

    rtsp_urls = []
    for cam in config["cameras"]:
        url = f"rtsp://{cam['username']}:{cam['password']}@{cam['ip']}:554/Streaming/Channels/{cam['channel']}"
        # Alternative capture endpoint (sub-stream: ch01_sub.264):
        # url = f"rtsp://{cam['username']}:{cam['password']}@{cam['ip']}:554/ch01.264"
        rtsp_urls.append(url)

    model_path = Path(config["model_path"])
    if not model_path.exists():
        sys.exit(f"错误：模型文件 {model_path} 不存在")

    return {
        "rtsp_urls": rtsp_urls,
        "model_path": str(model_path),
        "save_dirs": config["save_dirs"],
        "center_detection_ratio": config.get("center_detection_ratio", 0.4),
        "nested_classes": config.get("nested_classes", {"wheel": ["hel", "nohel"]}),
        "classes_to_show": config["classes_to_show"],
    }


# Saves raw (un-annotated) frames on a background thread.
class RawFrameSaver:
    """Queues raw camera frames and writes them to disk asynchronously.

    Note: the ``save_dir`` argument is kept for backward compatibility, but
    the actual directory always comes from ``save_dirs.raw_frames`` in the
    configuration file.
    """

    def __init__(self, save_dir="raw_frames"):
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["raw_frames"])
        self.save_dir.mkdir(exist_ok=True)
        self.frame_queue = Queue(maxsize=30)
        self.min_interval = 1.0  # seconds between two saved frames
        self.last_save_time = 0

        # Daemon thread: dies with the process, no explicit shutdown needed.
        self.thread = Thread(target=self._save_worker, daemon=True)
        self.thread.start()

    def add_frame(self, frame, timestamp_str):
        """Enqueue a copy of *frame*; silently drops it when the queue is full."""
        if self.frame_queue.qsize() < 30:
            self.frame_queue.put((frame.copy(), timestamp_str))

    def _save_worker(self):
        """Background loop: save at most one queued frame per ``min_interval``."""
        while True:
            if self.frame_queue.empty():
                time.sleep(0.1)
                continue

            current_time = time.time()
            if current_time - self.last_save_time >= self.min_interval:
                frame, timestamp_str = self.frame_queue.get()
                filename = f"{timestamp_str}.jpg"
                cv2.imwrite(str(self.save_dir / filename), frame)
                self.last_save_time = current_time
            else:
                # Throttle: without this sleep the loop busy-waits at 100% CPU
                # whenever a frame is queued before the interval has elapsed.
                time.sleep(0.05)


# Saves annotated full frames to disk, rate-limited.
class FrameSaver:
    """Writes complete annotated frames, at most one per ``min_interval`` seconds.

    The ``save_dir`` argument is accepted for compatibility but unused; the
    directory is taken from ``save_dirs.full_frames`` in the config file.
    """

    def __init__(self, save_dir="full_frames"):
        cfg = load_config()
        self.save_dir = Path(cfg["save_dirs"]["full_frames"])
        self.save_dir.mkdir(exist_ok=True)
        self.last_save_time = 0
        self.min_interval = 1.0

    def save_full_frame(self, frame, timestamp_str):
        """Persist *frame* as ``<timestamp_str>.jpg`` unless rate-limited."""
        now = time.time()
        if now - self.last_save_time < self.min_interval:
            return
        cv2.imwrite(str(self.save_dir / f"{timestamp_str}.jpg"), frame)
        self.last_save_time = now


# Saves cropped detection regions to disk.
class ImageSaver:
    """Crops detected boxes out of frames and writes them as JPEGs.

    ``save_dir`` is kept for backward compatibility; the real directory
    comes from ``save_dirs.detected_objects`` in the config file.
    """

    def __init__(self, save_dir="detected_objects"):
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["detected_objects"])
        self.save_dir.mkdir(exist_ok=True)
        self.frame_counter = 0  # monotonically increasing filename suffix
        self.last_save_time = time.time()
        self.min_interval = 0.5  # seconds between two saved crops

    def save_cropped_image(self, frame, box, class_name, conf):
        """Save the region of *frame* delimited by *box* (x1, y1, x2, y2).

        Silently skips the save when rate-limited or when the crop is empty.
        """
        current_time = time.time()
        if current_time - self.last_save_time < self.min_interval:
            return

        x1, y1, x2, y2 = map(int, box)
        cropped = frame[y1:y2, x1:x2]
        if cropped.size == 0:
            return

        timestamp = time.strftime("%Y%m%d_%H%M%S")
        filename = f"{timestamp}_{class_name}_{conf:.2f}_{self.frame_counter:04d}.jpg"
        save_path = self.save_dir / filename

        try:
            # imencode + numpy tofile instead of cv2.imwrite — presumably to
            # cope with non-ASCII paths that imwrite mishandles on Windows.
            ret, buffer = cv2.imencode('.jpg', cropped, [cv2.IMWRITE_JPEG_QUALITY, 90])
            if ret:
                buffer.tofile(str(save_path))
                self.frame_counter += 1
                self.last_save_time = current_time
        except Exception as e:
            # Log at ERROR (was INFO): a failed save is a real fault.
            SPEED_LOGGER.error(f"保存图像时发生错误：{str(e)}")


# Video capture thread: reads RTSP frames and feeds the inference worker.
class VideoThread(QThread):
    """Pulls frames from one RTSP stream and forwards them for inference."""

    # Emitted with each annotated frame converted to QImage.
    update_frame = pyqtSignal(QImage)

    def __init__(self, rtsp_url, model_path, center_ratio, nested_classes, class_names=None):
        super().__init__()
        self.rtsp_url = rtsp_url
        # One dedicated inference worker per camera stream.
        self.inference_worker = InferenceWorker(model_path, center_ratio, nested_classes, class_names)
        self.inference_worker.result_ready.connect(self.on_inference_done)
        self.inference_worker.start()
        self.running = True

    def run(self):
        SPEED_LOGGER.info(f"rtsp地址:  {self.rtsp_url}")
        cap = cv2.VideoCapture(self.rtsp_url, cv2.CAP_FFMPEG)
        try:
            while self.running:
                ret, frame = cap.read()
                if not ret:
                    self.reset_connection(cap)
                    continue
                # Keep at most 2 frames queued; drop the rest to limit latency.
                if self.inference_worker.frame_queue.qsize() < 2:
                    self.inference_worker.frame_queue.put(frame.copy())
        finally:
            # Previously leaked: the capture was never released on shutdown.
            cap.release()

    def reset_connection(self, cap):
        """Re-open the RTSP stream after a short back-off."""
        cap.release()
        time.sleep(2)
        # Re-open with the same FFMPEG backend chosen at construction time.
        cap.open(self.rtsp_url, cv2.CAP_FFMPEG)

    def on_inference_done(self, annotated_frame):
        """Convert a BGR frame to QImage and emit it for the UI thread."""
        rgb_image = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        # Pass the row stride explicitly and deep-copy: the QImage must not
        # reference numpy memory that is freed before the queued slot runs.
        qt_img = QImage(rgb_image.data, w, h, ch * w, QImage.Format_RGB888).copy()
        self.update_frame.emit(qt_img)


# Main window: one QLabel per camera stream.
class MainWindow(QMainWindow):
    """Top-level window showing live annotated video for each RTSP camera."""

    def __init__(self, rtsp_urls, model_path, center_ratio, nested_classes, class_names):
        super().__init__()
        self.rtsp_urls = rtsp_urls
        self.model_path = model_path
        self.center_ratio = center_ratio
        self.nested_classes = nested_classes
        self.class_names = class_names
        self.init_ui()
        self.init_video_threads()

    def init_ui(self):
        """Build the central widget with one video label per camera."""
        self.setWindowTitle("海康监控实时画面")
        self.setGeometry(100, 100, 1280, 720)

        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        layout = QVBoxLayout(central_widget)

        self.video_labels = [QLabel(parent=central_widget) for _ in self.rtsp_urls]
        for label in self.video_labels:
            label.setAlignment(Qt.AlignCenter)
            label.setMinimumSize(640, 480)
            layout.addWidget(label)

    def init_video_threads(self):
        """Start one VideoThread per RTSP URL and wire its frames to a label."""
        self.threads = []
        for i, url in enumerate(self.rtsp_urls):
            thread = VideoThread(url, self.model_path, self.center_ratio, self.nested_classes, self.class_names)
            thread.update_frame.connect(self.update_video_frame(i))
            thread.start()
            self.threads.append(thread)

    def closeEvent(self, event):
        """Stop capture threads cleanly before the window closes."""
        for thread in self.threads:
            thread.running = False
            thread.quit()
            thread.wait()
        event.accept()

    def update_video_frame(self, index):
        """Return a handler that scales incoming frames into label *index*.

        This is a slot *factory* (int -> callable), not a slot, so the former
        @pyqtSlot(QImage) decorator advertised a signature it does not have
        and has been removed; the returned closure is what gets connected.
        """
        def handler(image):
            scaled_img = image.scaled(
                self.video_labels[index].width(),
                self.video_labels[index].height(),
                Qt.KeepAspectRatio
            )
            self.video_labels[index].setPixmap(QPixmap.fromImage(scaled_img))

        return handler


# Inference worker thread
class InferenceWorker(QThread):
    """Runs model inference on queued frames and emits annotated frames.

    Supports three backends selected by model-file suffix: Ultralytics .pt,
    ONNX Runtime .onnx, and OpenVINO .xml (+ .bin). Detection is two-stage:
    "wheel" boxes are found in the frame's central region, then the model is
    re-run on each wheel crop to classify helmets ("hel" / "nohel").
    """

    # Emitted with the annotated BGR frame after each inference pass.
    result_ready = pyqtSignal(np.ndarray)

    def __init__(self, model_path, center_ratio, nested_classes, class_names=None):
        super().__init__()
        self.model_path = model_path
        self.center_ratio = center_ratio
        self.nested_classes_config = nested_classes
        self.class_names = class_names or []  # class names forwarded to the OpenVINO wrapper
        self.saver = ImageSaver()
        self.frame_saver = FrameSaver()
        self.raw_saver = RawFrameSaver()
        self.frame_queue = Queue(maxsize=2)
        self.min_interval = 1.0  # min seconds between saved detection frames
        self.last_save_time = 0
        # Unified input size for every backend (matches the YOLOv8 .pt size).
        self.unified_imgsz = 640  # all models infer at 640x640
        # Pick the inference engine from the model file suffix.
        self.model = self._load_model(model_path)

    def _load_model(self, model_path):
        """Load the inference engine matching the model file's suffix.

        Raises:
            ValueError: for any suffix other than .pt, .onnx or .xml.
        """
        model_path = Path(model_path)
        suffix = model_path.suffix.lower()

        if suffix == ".pt":
            from ultralytics import YOLO
            SPEED_LOGGER.info("Loading YOLO model...")
            return YOLO(model_path)

        elif suffix == ".onnx":
            import onnxruntime as ort
            SPEED_LOGGER.info("Loading ONNX model...")
            # CUDA first, CPU fallback — onnxruntime silently skips
            # unavailable providers.
            ort_session = ort.InferenceSession(model_path, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
            return ONNXModelWrapper(ort_session)

        elif suffix == ".xml":  # an OpenVINO model is a .xml + .bin pair
            from openvino.runtime import Core
            SPEED_LOGGER.info("Loading OpenVINO model...")
            core = Core()
            model = core.read_model(model_path)
            #compiled_model = core.compile_model(model, "AUTO")
            # Compile for a fixed CPU target instead of "AUTO".
            compiled_model = core.compile_model(
                model=model,
                device_name="CPU",  # force CPU inference (no-GPU environment)
            )
            return OpenVINOModelWrapper(compiled_model, class_names=self.class_names)

        else:
            raise ValueError(f"Unsupported model format: {suffix}. Supported: .pt, .onnx, .xml")

    def run(self):
        # Blocking consumer loop: waits on the frame queue forever
        # (the thread is expected to live for the whole process).
        while True:
            frame = self.frame_queue.get()
            results = self.predict(frame)
            annotated_frame = self.process_results(frame, results)
            self.result_ready.emit(annotated_frame)

    def predict(self, frame):
        """Run one inference pass on *frame* and return unified results.

        Returns a list of dicts with keys 'boxes', 'confs', 'cls_ids' and
        'names' regardless of the backend (for the ONNX/OpenVINO wrappers the
        shape is whatever predict_with_timing returns — assumed to match;
        TODO confirm against model_wrappers).
        """
        t0 = time.time()
        # Letterbox the input to 640x640 (shared preprocessing for all
        # backends): scale keeping aspect ratio, then pad with black.
        h, w = frame.shape[:2]
        scale = self.unified_imgsz / max(h, w)
        new_h, new_w = int(h * scale), int(w * scale)
        # INTER_AREA suits downscaling (INTER_LINEAR would suit upscaling).
        resized_frame = cv2.resize(frame, (new_w, new_h), interpolation=cv2.INTER_AREA)
        # Pad to a square so the input matches the YOLO-style 640x640 shape.
        pad_h = self.unified_imgsz - new_h
        pad_w = self.unified_imgsz - new_w
        padded_frame = cv2.copyMakeBorder(
            resized_frame,
            top=pad_h // 2, bottom=pad_h - pad_h // 2,
            left=pad_w // 2, right=pad_w - pad_w // 2,
            borderType=cv2.BORDER_CONSTANT, value=(0, 0, 0)
        )

        if isinstance(self.model, YOLO):
            # NOTE(review): the .pt path passes the raw frame and lets
            # Ultralytics do its own letterboxing — padded_frame is unused
            # on this branch.
            t_pre = time.time()
            results = self.model(frame, imgsz=self.unified_imgsz, conf=0.5)
            t_inf = time.time()

            # Normalize Ultralytics results into the unified dict format.
            unified_results = []
            for result in results:
                boxes = result.boxes.xyxy.cpu().numpy()
                confs = result.boxes.conf.cpu().numpy()
                cls_ids = result.boxes.cls.cpu().numpy().astype(int)
                names = result.names
                unified_results.append({
                    'boxes': boxes,
                    'confs': confs,
                    'cls_ids': cls_ids,
                    'names': names
                })

            t_post = time.time()

            # Prefer Ultralytics' internal timings (v8.0+); fall back to the
            # wall-clock deltas measured above.
            if hasattr(results[0], 'speed'):
                speed = results[0].speed
                preprocess_ms = speed.get('preprocess', (t_inf - t_pre) * 1000)
                inference_ms = speed.get('inference', (t_inf - t_pre) * 1000)
                postprocess_ms = speed.get('postprocess', (t_post - t_inf) * 1000)
            else:
                preprocess_ms = (t_inf - t_pre) * 1000
                inference_ms = (t_inf - t_pre) * 1000
                postprocess_ms = (t_post - t_inf) * 1000

            h, w = frame.shape[:2]
            model_name = "YOLOv8"
            # NOTE(review): "(no detections)" is logged unconditionally here,
            # even when detections exist — the log text is misleading.
            SPEED_LOGGER.info(f"{model_name}: {h}x{w} (no detections), {inference_ms:.1f}ms")
            SPEED_LOGGER.info(f"Speed: {preprocess_ms:.1f}ms preprocess, {inference_ms:.1f}ms inference, {postprocess_ms:.1f}ms postprocess per image at shape (1, 3, {h}, {w})")

            return unified_results

        else:
            # OpenVINO/ONNX: infer on the letterboxed padded_frame so the
            # network input is exactly 640x640.
            result, preprocess_ms, inference_ms, postprocess_ms = self.model.predict_with_timing(padded_frame)
            # Log the unified size to avoid misleading dimensions.
            h, w = padded_frame.shape[:2]  # h = w = 640 at this point
            model_name = self.model.__class__.__name__.replace("ModelWrapper", "")
            SPEED_LOGGER.info(f"{model_name}: {h}x{w} (no detections), {inference_ms:.1f}ms")
            SPEED_LOGGER.info(
                f"Speed: {preprocess_ms:.1f}ms preprocess, {inference_ms:.1f}ms inference, {postprocess_ms:.1f}ms postprocess per image at shape (1, 3, {h}, {w})")

            return result

    def process_results(self, frame, results):
        """Annotate *frame* with wheel/helmet detections and trigger saves.

        Stage 1: collect "wheel" boxes whose centers fall inside the central
        detection window. Stage 2: re-run predict() on each wheel crop and
        pick a single helmet verdict ("hel" beats "nohel"). Returns the
        annotated frame.
        """
        height, width, _ = frame.shape
        base_height = 720.0
        # Scale label text with frame height so it stays legible.
        font_scale = max(0.6, height / base_height * 0.8)
        font_thickness = max(1, int(font_scale * 1.5))

        # Central detection window: a center_ratio-sized box around the middle.
        center_x1 = int(width * (0.5 - self.center_ratio / 2))
        center_y1 = int(height * (0.5 - self.center_ratio / 2))
        center_x2 = int(width * (0.5 + self.center_ratio / 2))
        center_y2 = int(height * (0.5 + self.center_ratio / 2))

        helmet_detected = False
        raw_frame = frame.copy()  # pristine copy kept for RawFrameSaver
        # Visualize the central detection window in yellow.
        cv2.rectangle(frame, (center_x1, center_y1), (center_x2, center_y2), (0, 255, 255), 2)

        twowheel_boxes = []
        # Helmet class names nested under "wheel" (e.g. ["hel", "nohel"]).
        helmet_classes = self.nested_classes_config.get("wheel", [])

        # Stage 1: gather wheel boxes centered inside the detection window.
        for result in results:
            boxes = result['boxes']
            confidences = result['confs']
            class_ids = result['cls_ids']
            names = result['names']

            for i, (xyxy, conf, cls_id) in enumerate(zip(boxes, confidences, class_ids)):
                if conf < 0.5:
                    continue
                class_name = names[cls_id]

                x_center = int((xyxy[0] + xyxy[2]) / 2)
                y_center = int((xyxy[1] + xyxy[3]) / 2)
                in_center = center_x1 <= x_center <= center_x2 and center_y1 <= y_center <= center_y2
                if not in_center:
                    continue

                if class_name == "wheel":
                    twowheel_boxes.append({"box": xyxy.copy(), "conf": conf, "class_name": class_name})

        # Stage 2: classify helmets inside each wheel crop.
        for tw_data in twowheel_boxes:
            tw_box = tw_data["box"]
            tw_conf = tw_data["conf"]
            x1, y1, x2, y2 = map(int, tw_box)
            roi = frame[y1:y2, x1:x2]

            if roi.size == 0:
                continue

            roi_results = self.predict(roi)

            # === Aggregate helmet results so "hel" and "nohel" cannot conflict ===
            best_helmet_class = None
            best_conf = 0.0
            best_box = None

            for r in roi_results:
                for j, (box, conf, cls_id) in enumerate(zip(r['boxes'], r['confs'], r['cls_ids'])):
                    if conf < 0.5:
                        continue
                    class_name = r['names'][cls_id]
                    if class_name not in helmet_classes:
                        continue

                    # "hel" always wins; "nohel" only counts while nothing has
                    # been picked yet. NOTE(review): a later "hel" overwrites
                    # an earlier one without comparing confidences —
                    # presumably biased toward fewer false alarms; confirm.
                    if class_name == "hel":
                        best_helmet_class = "hel"
                        best_conf = conf
                        best_box = box
                    elif class_name == "nohel" and best_helmet_class is None:
                        best_helmet_class = "nohel"
                        if conf > best_conf:
                            best_conf = conf
                            best_box = box

            # === Draw and save the winning result ===
            if best_helmet_class:
                helmet_detected = True

                # Save the wheel crop, then draw its box and label.
                self.saver.save_cropped_image(frame.copy(), tw_box, "wheel", tw_conf)
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)

                label = f"wheel {tw_conf:.2f}"
                (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, font_thickness)
                # Semi-transparent label background via weighted overlay blend.
                overlay = frame.copy()
                cv2.rectangle(overlay, (x1, max(0, y1 - th - 5)), (x1 + tw, y1), (0, 255, 0), -1)
                alpha = 0.4
                frame = cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0)
                cv2.putText(frame, label, (x1, max(0, y1 - 5)),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, (0, 0, 0), font_thickness, cv2.LINE_AA)

                # Helmet box: map ROI-local coordinates back to the full frame.
                roi_x1, roi_y1, roi_x2, roi_y2 = map(int, best_box)
                abs_x1, abs_y1, abs_x2, abs_y2 = x1 + roi_x1, y1 + roi_y1, x1 + roi_x2, y1 + roi_y2

                self.saver.save_cropped_image(frame.copy(), [abs_x1, abs_y1, abs_x2, abs_y2], best_helmet_class, best_conf)

                # Blue for helmet worn, red for helmet missing.
                color = (255, 0, 0) if best_helmet_class == "hel" else (0, 0, 255)
                cv2.rectangle(frame, (abs_x1, abs_y1), (abs_x2, abs_y2), color, 2)

                label = f"{best_helmet_class} {best_conf:.2f}"
                (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, font_thickness)
                text_x, text_y = abs_x1, max(th + 5, abs_y1 - 5)
                overlay = frame.copy()
                cv2.rectangle(overlay, (text_x, max(0, text_y - th - 5)), (text_x + tw, text_y), color, -1)
                frame = cv2.addWeighted(overlay, 0.4, frame, 0.6, 0)
                cv2.putText(frame, label, (text_x, text_y),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, (255, 255, 255), font_thickness, cv2.LINE_AA)

            else:
                # No helmet-class detection in this wheel crop: gray box.
                cv2.rectangle(frame, (x1, y1), (x2, y2), (100, 100, 100), 2, cv2.LINE_AA)
                label = "No helmet"
                (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, font_thickness)
                overlay = frame.copy()
                cv2.rectangle(overlay, (x1, max(0, y1 - th - 5)), (x1 + tw, y1), (100, 100, 100), -1)
                frame = cv2.addWeighted(overlay, 0.4, frame, 0.6, 0)
                cv2.putText(frame, label, (x1, max(0, y1 - 5)),
                            cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, (255, 255, 255), font_thickness, cv2.LINE_AA)

        # Persist raw + annotated frames, rate-limited to one per min_interval.
        current_time = time.time()
        if helmet_detected and current_time - self.last_save_time >= self.min_interval:
            timestamp_str = time.strftime("%Y%m%d_%H%M%S_") + str(int(current_time * 1000))
            self.raw_saver.add_frame(raw_frame, "detect_" + timestamp_str)
            self.frame_saver.save_full_frame(frame.copy(), "detect_" + timestamp_str)
            self.last_save_time = current_time

        return frame


# Application entry point
if __name__ == "__main__":
    qt_app = QApplication(sys.argv)

    cfg = load_config()

    # Make sure every configured output directory exists before threads start.
    for directory in cfg["save_dirs"].values():
        Path(directory).mkdir(exist_ok=True)

    main_window = MainWindow(
        rtsp_urls=cfg["rtsp_urls"],
        model_path=cfg["model_path"],
        center_ratio=cfg["center_detection_ratio"],
        nested_classes=cfg["nested_classes"],
        class_names=cfg["classes_to_show"],
    )
    main_window.show()
    sys.exit(qt_app.exec_())