import json
import logging
import os
import re
import shutil
import sys
import time
from datetime import datetime
from logging.handlers import RotatingFileHandler
from multiprocessing import Queue
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
from PyQt5.QtCore import Qt, QThread, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QImage, QPixmap, QFont
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QVBoxLayout, QWidget
from ultralytics import YOLO


class DateRotatingFileHandler(RotatingFileHandler):
    """Size-based rotating file handler that names archives by date + sequence.

    Instead of the stock ``Record.log.1`` scheme, a rolled-over file is renamed
    to ``<stem>_YYYY-MM-DD.log`` (then ``<stem>_YYYY-MM-DD-1.log`` and so on
    when several rollovers happen on the same day).
    """

    def __init__(self, filename, maxBytes=0, backupCount=0, encoding=None, delay=False):
        """
        :param filename: path of the active log file, e.g. ``logs/Record.log``
        :param maxBytes: rotate once the file exceeds this size (0 = never)
        :param backupCount: limits how many archived files are kept
        :param encoding: text encoding for the log file
        :param delay: postpone opening the file until the first emit
        """
        super().__init__(
            filename,
            mode='a',
            maxBytes=maxBytes,
            backupCount=backupCount,
            encoding=encoding,
            delay=delay
        )
        self.base_dir = os.path.dirname(filename) or "."
        self.base_name = os.path.basename(filename)      # e.g. "Record.log"
        self.stem = os.path.splitext(self.base_name)[0]  # e.g. "Record"
        # Matches archives produced by doRollover: "Record_2024-01-31.log",
        # "Record_2024-01-31-2.log", ...  Compiled once instead of per rollover.
        self._archive_re = re.compile(
            rf"^{re.escape(self.stem)}_\d{{4}}-\d{{2}}-\d{{2}}(-\d+)?\.log$"
        )

    def doRollover(self):
        """Rotate the active log into a date+sequence named archive file."""
        if self.stream:
            self.stream.close()
            self.stream = None

        today_str = datetime.now().strftime("%Y-%m-%d")
        target_base = f"{self.stem}_{today_str}"

        # Find the first free archive name for today.
        next_index = 0
        target_file = os.path.join(self.base_dir, f"{target_base}.log")
        while os.path.exists(target_file):
            next_index += 1
            target_file = os.path.join(self.base_dir, f"{target_base}-{next_index}.log")

        # Move the active log aside.  BUGFIX: the original called
        # handleError(None), which itself raises because handleError
        # dereferences the record; rotation is best-effort, so on failure we
        # simply keep logging to the current file.
        try:
            if os.path.exists(self.baseFilename):
                shutil.move(self.baseFilename, target_file)
        except OSError:
            pass

        # Re-open a fresh active log file.
        if not self.delay:
            self.stream = self._open()

        # Prune old archives: keep today's archive plus the newest
        # (backupCount - 1) earlier ones, matching the original quota.
        if self.backupCount > 0:
            archives = [
                f for f in os.listdir(self.base_dir)
                if self._archive_re.match(f) and f != os.path.basename(target_file)
            ]
            # Newest first; everything past the quota gets deleted.
            archives.sort(key=lambda x: os.path.getmtime(os.path.join(self.base_dir, x)), reverse=True)
            for old_file in archives[self.backupCount - 1:]:
                try:
                    os.remove(os.path.join(self.base_dir, old_file))
                except OSError:
                    pass  # deletion is best-effort


def setup_logging(log_dir="logs", log_filename="Record.log", level=logging.INFO, max_bytes=10 * 1024 * 1024,
                  backup_count=3):
    """Configure and return the "InferenceSpeed" logger.

    Records go to ``<log_dir>/<log_filename>`` through a
    DateRotatingFileHandler that rotates at *max_bytes* and keeps
    *backup_count* files.
    """
    os.makedirs(log_dir, exist_ok=True)

    logger = logging.getLogger("InferenceSpeed")
    logger.setLevel(level)

    # Re-running setup must not stack duplicate handlers.
    if logger.hasHandlers():
        logger.handlers.clear()

    file_handler = DateRotatingFileHandler(
        os.path.join(log_dir, log_filename),
        maxBytes=max_bytes,
        backupCount=backup_count,
        encoding='utf-8'
    )
    file_handler.setFormatter(
        logging.Formatter(
            fmt='%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S'
        )
    )
    logger.addHandler(file_handler)
    return logger


# Module-level logger, created once at import time and shared by all threads.
SPEED_LOGGER = setup_logging()


def load_config():
    """Load and validate ``config-pt-onnx-openVINO.json`` next to the script.

    Returns a dict with keys: ``rtsp_urls``, ``model_path``, ``save_dirs``,
    ``center_detection_ratio``, ``nested_classes``, ``classes_to_show``.
    Exits the process with a message when the config file is missing or
    malformed, or when the configured model file does not exist.
    """
    # Under PyInstaller the bundled data files live in the unpack directory.
    if getattr(sys, 'frozen', False):
        base_dir = Path(sys._MEIPASS)
    else:
        base_dir = Path(__file__).parent

    config_path = base_dir / "config-pt-onnx-openVINO.json"

    try:
        with open(config_path, "r", encoding="utf-8") as f:
            config = json.load(f)
    except FileNotFoundError:
        sys.exit(f"错误：配置文件 {config_path} 不存在")
    except json.JSONDecodeError:
        # BUGFIX: the original f-string had no placeholder; include the path.
        sys.exit(f"错误：配置文件 {config_path} 格式不正确")

    rtsp_urls = [cam['url'] for cam in config["cameras"]]

    model_path = Path(config["model_path"])
    if not model_path.exists():
        sys.exit(f"错误：模型文件 {model_path} 不存在")

    return {
        "rtsp_urls": rtsp_urls,
        "model_path": str(model_path),
        "save_dirs": config["save_dirs"],
        "center_detection_ratio": config.get("center_detection_ratio", 0.4),
        "nested_classes": config.get("nested_classes", {"wheel": ["hel", "nohel"]}),
        "classes_to_show": config["classes_to_show"]
    }


# Saves un-annotated ("raw") frames from a background thread.
class RawFrameSaver:
    def __init__(self, save_dir="raw_frames"):
        """
        :param save_dir: ignored, kept for backward compatibility — the
            directory comes from ``save_dirs["raw_frames"]`` in the config.
        """
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["raw_frames"])
        self.save_dir.mkdir(exist_ok=True)
        self.frame_queue = Queue(maxsize=30)
        self.min_interval = 1.0   # save at most one frame per second
        self.last_save_time = 0

        # Daemon thread: dies with the process, no explicit shutdown needed.
        self.thread = Thread(target=self._save_worker, daemon=True)
        self.thread.start()

    def add_frame(self, frame, timestamp_str):
        """Queue a copy of *frame* for saving; silently drops when backlogged."""
        if self.frame_queue.qsize() < 30:
            self.frame_queue.put((frame.copy(), timestamp_str))

    def _save_worker(self):
        """Consume queued frames forever, writing at most one per min_interval.

        BUGFIX: the original only dequeued when the interval had elapsed, so
        the loop spun at 100% CPU on a non-empty queue and stale frames piled
        up.  Every queued frame is now drained (blocking get); frames arriving
        faster than min_interval are discarded instead of backing up.
        """
        while True:
            frame, timestamp_str = self.frame_queue.get()  # blocks while idle
            current_time = time.time()
            if current_time - self.last_save_time >= self.min_interval:
                cv2.imwrite(str(self.save_dir / f"{timestamp_str}.jpg"), frame)
                self.last_save_time = current_time


# Saves annotated full frames, throttled to one per interval.
class FrameSaver:
    def __init__(self, save_dir="full_frames"):
        # NOTE: save_dir is unused; the directory comes from the JSON config.
        cfg = load_config()
        self.save_dir = Path(cfg["save_dirs"]["full_frames"])
        self.save_dir.mkdir(exist_ok=True)
        self.last_save_time = 0
        self.min_interval = 1.0  # seconds between saved frames

    def save_full_frame(self, frame, timestamp_str):
        """Write *frame* as ``<timestamp_str>.jpg``, at most once per interval."""
        now = time.time()
        if now - self.last_save_time < self.min_interval:
            return
        cv2.imwrite(str(self.save_dir / f"{timestamp_str}.jpg"), frame)
        self.last_save_time = now


# Saves cropped detection patches, throttled to one per interval.
class ImageSaver:
    def __init__(self, save_dir="detected_objects"):
        # NOTE: save_dir is unused; the directory comes from the JSON config.
        config = load_config()
        self.save_dir = Path(config["save_dirs"]["detected_objects"])
        self.save_dir.mkdir(exist_ok=True)
        self.frame_counter = 0            # monotonically increasing filename suffix
        self.last_save_time = time.time()
        self.min_interval = 0.5           # seconds between saved crops

    def save_cropped_image(self, frame, box, class_name, conf):
        """Crop *box* (x1, y1, x2, y2) out of *frame* and save it as a JPEG.

        Throttled to one crop per min_interval; empty crops are skipped.
        """
        current_time = time.time()
        if current_time - self.last_save_time < self.min_interval:
            return

        # BUGFIX: clamp to the frame so negative / out-of-range detector
        # coordinates cannot wrap around via Python negative indexing.
        h, w = frame.shape[:2]
        x1, y1, x2, y2 = map(int, box)
        x1, x2 = max(0, x1), min(w, x2)
        y1, y2 = max(0, y1), min(h, y2)
        cropped = frame[y1:y2, x1:x2]
        if cropped.size == 0:
            return

        timestamp = time.strftime("%Y%m%d_%H%M%S")
        filename = f"{timestamp}_{class_name}_{conf:.2f}_{self.frame_counter:04d}.jpg"
        save_path = self.save_dir / filename

        try:
            # imencode + tofile handles non-ASCII paths that cv2.imwrite cannot.
            ret, buffer = cv2.imencode('.jpg', cropped, [cv2.IMWRITE_JPEG_QUALITY, 90])
            if ret:
                buffer.tofile(str(save_path))
                self.frame_counter += 1
                self.last_save_time = current_time
        except Exception as e:
            SPEED_LOGGER.info(f"保存图像时发生错误：{str(e)}")


# Capture thread: pulls RTSP frames and feeds them to an inference worker.
class VideoThread(QThread):
    update_frame = pyqtSignal(QImage)

    def __init__(self, rtsp_url, model_path, center_ratio, nested_classes, class_names=None):
        super().__init__()
        self.rtsp_url = rtsp_url
        # Each camera gets its own inference worker thread.
        self.inference_worker = InferenceWorker(model_path, center_ratio, nested_classes, class_names)
        self.inference_worker.result_ready.connect(self.on_inference_done)
        self.inference_worker.start()
        self.running = True  # cleared by MainWindow.closeEvent to stop the loop

    def run(self):
        """Read frames until stopped, dropping frames when the worker is busy."""
        SPEED_LOGGER.info(f"rtsp地址:  {self.rtsp_url}")
        cap = cv2.VideoCapture(self.rtsp_url, cv2.CAP_FFMPEG)
        try:
            while self.running:
                ret, frame = cap.read()
                if not ret:
                    self.reset_connection(cap)
                    continue
                # Bounded queue keeps end-to-end latency low under load.
                if self.inference_worker.frame_queue.qsize() < 2:
                    self.inference_worker.frame_queue.put(frame.copy())
        finally:
            # BUGFIX: the capture was never released on shutdown.
            cap.release()

    def reset_connection(self, cap):
        """Re-open the RTSP stream after a read failure."""
        cap.release()
        time.sleep(2)
        # BUGFIX: re-open with the same FFMPEG backend used at construction.
        cap.open(self.rtsp_url, cv2.CAP_FFMPEG)

    def on_inference_done(self, annotated_frame):
        """Convert the annotated BGR frame to a QImage and publish it."""
        rgb_image = cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
        h, w, ch = rgb_image.shape
        # BUGFIX: pass bytesPerLine explicitly, and copy so the QImage does not
        # borrow a numpy buffer that may be freed before the UI paints it.
        qt_img = QImage(rgb_image.data, w, h, ch * w, QImage.Format_RGB888).copy()
        self.update_frame.emit(qt_img)


# Main application window: one video label per camera stream.
class MainWindow(QMainWindow):
    def __init__(self, rtsp_urls, model_path, center_ratio, nested_classes, class_names):
        super().__init__()
        self.rtsp_urls = rtsp_urls
        self.model_path = model_path
        self.center_ratio = center_ratio
        self.nested_classes = nested_classes
        self.class_names = class_names
        self.init_ui()
        self.init_video_threads()

    def init_ui(self):
        """Build the central widget with one QLabel per RTSP stream."""
        self.setWindowTitle("海康监控实时画面")
        self.setGeometry(100, 100, 1280, 720)

        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        layout = QVBoxLayout(central_widget)

        self.video_labels = [QLabel(parent=central_widget) for _ in self.rtsp_urls]
        for label in self.video_labels:
            label.setAlignment(Qt.AlignCenter)
            label.setMinimumSize(640, 480)
            layout.addWidget(label)

    def init_video_threads(self):
        """Start one VideoThread per stream, each wired to its own label."""
        self.threads = []
        for i, url in enumerate(self.rtsp_urls):
            thread = VideoThread(url, self.model_path, self.center_ratio, self.nested_classes, self.class_names)
            thread.update_frame.connect(self.update_video_frame(i))
            thread.start()
            self.threads.append(thread)

    def closeEvent(self, event):
        """Stop capture threads cleanly before the window closes."""
        for thread in self.threads:
            thread.running = False
            thread.quit()
            thread.wait()
        event.accept()

    def update_video_frame(self, index):
        """Return a per-label slot that scales and displays incoming frames.

        BUGFIX: the original decorated this factory with @pyqtSlot(QImage),
        but it is called with an int index and returns the real handler; the
        misleading decorator is removed.
        """
        def handler(image):
            label = self.video_labels[index]
            scaled_img = image.scaled(label.width(), label.height(), Qt.KeepAspectRatio)
            label.setPixmap(QPixmap.fromImage(scaled_img))

        return handler


# ==============================================================================
#          InferenceWorker: queued YOLO inference thread (emits annotated frames)
# ==============================================================================

class InferenceWorker(QThread):
    """Runs YOLO inference on queued frames and emits annotated frames."""

    result_ready = pyqtSignal(np.ndarray)

    def __init__(self, model_path, center_ratio, nested_classes, class_names=None):
        """
        :param model_path: path to a ``.pt`` YOLO weights file
        :param center_ratio: fraction of the frame (centered) that forms the
            detection region of interest
        :param nested_classes: container-class -> nested-classes mapping from
            the config (stored but not used by this worker)
        :param class_names: optional list of class names from the config
        """
        super().__init__()
        self.model_path = model_path
        self.center_ratio = center_ratio
        self.nested_classes_config = nested_classes
        self.class_names = class_names or []
        self.saver = ImageSaver()
        self.frame_saver = FrameSaver()
        self.raw_saver = RawFrameSaver()
        self.frame_queue = Queue(maxsize=2)   # small queue keeps latency bounded
        self.min_interval = 1.0               # seconds between saved full frames
        self.last_save_time = 0
        self.unified_imgsz = 320              # model input size (imgsz argument)
        self.model = self._load_model(model_path)

    def _load_model(self, model_path):
        """Load the detection model; only ``.pt`` (ultralytics YOLO) is supported."""
        model_path = Path(model_path)
        suffix = model_path.suffix.lower()
        if suffix == ".pt":
            SPEED_LOGGER.info("Loading YOLO model...")
            return YOLO(model_path)
        raise ValueError(f"Unsupported model format: {suffix}")

    def run(self):
        """Consume frames from the queue forever: predict, annotate, emit."""
        while True:
            frame = self.frame_queue.get()
            results = self.predict(frame)
            annotated_frame = self.process_results(frame, results)
            self.result_ready.emit(annotated_frame)

    def predict(self, frame):
        """Run the model on *frame* and return a list of unified result dicts.

        Each dict has keys ``boxes`` (xyxy, original-frame coords), ``confs``,
        ``cls_ids`` and ``names``.
        """
        t0 = time.time()
        h, w = frame.shape[:2]
        # NOTE: a manual letterbox (resize + pad) used to be computed here but
        # its output was never fed to the model; ultralytics letterboxes
        # internally via the imgsz argument, so the dead preprocessing was
        # removed.

        if isinstance(self.model, YOLO):
            t_pre = time.time()
            results = self.model(frame, imgsz=self.unified_imgsz, conf=0.5)
            t_inf = time.time()

            # Normalize ultralytics results into plain numpy dicts.
            unified_results = []
            for result in results:
                unified_results.append({
                    'boxes': result.boxes.xyxy.cpu().numpy(),
                    'confs': result.boxes.conf.cpu().numpy(),
                    'cls_ids': result.boxes.cls.cpu().numpy().astype(int),
                    'names': result.names
                })
            t_post = time.time()

            # BUGFIX: preprocess time was computed from the same timestamps as
            # inference time (always identical); each stage now gets its own
            # interval.
            preprocess_ms = (t_pre - t0) * 1000
            inference_ms = (t_inf - t_pre) * 1000
            postprocess_ms = (t_post - t_inf) * 1000

            SPEED_LOGGER.info(f"YOLOv8: {h}x{w}, {inference_ms:.1f}ms")
            SPEED_LOGGER.info(f"Speed: {preprocess_ms:.1f}ms pre, {inference_ms:.1f}ms inf, {postprocess_ms:.1f}ms post")

            return unified_results

    def process_results(self, frame, results):
        """Draw ROI + detections on *frame*; save crops/frames (throttled).

        Only detections whose center falls inside the central ROI (sized by
        center_ratio) are kept.  Returns the annotated frame.
        """
        height, width, _ = frame.shape
        # Scale label text relative to a 720p baseline.
        base_height = 720.0
        font_scale = max(0.6, height / base_height * 0.8)
        font_thickness = max(1, int(font_scale * 1.5))
        raw_frame = frame.copy()  # pristine copy, taken before any drawing

        # Central region of interest, drawn in yellow.
        center_x1 = int(width * (0.5 - self.center_ratio / 2))
        center_y1 = int(height * (0.5 - self.center_ratio / 2))
        center_x2 = int(width * (0.5 + self.center_ratio / 2))
        center_y2 = int(height * (0.5 + self.center_ratio / 2))
        cv2.rectangle(frame, (center_x1, center_y1), (center_x2, center_y2), (0, 255, 255), 2)

        any_target_detected = False
        detected_targets = []

        class_colors = {
            "wheel": (0, 255, 0),
            "hel": (255, 0, 0),
            "nohel": (0, 0, 255),
        }

        # Collect detections whose center lies inside the ROI.
        for result in results:
            names = result['names']
            for xyxy, conf, cls_id in zip(result['boxes'], result['confs'], result['cls_ids']):
                if conf < 0.5:
                    continue
                class_name = names.get(cls_id, f"class{cls_id}")

                x_center = int((xyxy[0] + xyxy[2]) / 2)
                y_center = int((xyxy[1] + xyxy[3]) / 2)
                if not (center_x1 <= x_center <= center_x2 and center_y1 <= y_center <= center_y2):
                    continue

                detected_targets.append({
                    "box": xyxy.copy(),
                    "conf": conf,
                    "class_name": class_name
                })
                any_target_detected = True

        for target in detected_targets:
            box = target["box"]
            conf = target["conf"]
            class_name = target["class_name"]
            x1, y1, x2, y2 = map(int, box)

            # BUGFIX: crop from the pristine frame, not the annotated one, so
            # saved crops do not contain the ROI rectangle or earlier boxes.
            self.saver.save_cropped_image(raw_frame, box, class_name, conf)

            color = class_colors.get(class_name, (128, 128, 128))
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)

            # Semi-transparent label background, then white label text on top.
            label = f"{class_name} {conf:.2f}"
            (tw, th), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, font_thickness)
            overlay = frame.copy()
            cv2.rectangle(overlay, (x1, max(0, y1 - th - 5)), (x1 + tw, y1), color, -1)
            alpha = 0.4
            frame = cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0)
            cv2.putText(frame, label, (x1, max(0, y1 - 5)),
                        cv2.FONT_HERSHEY_COMPLEX_SMALL, font_scale, (255, 255, 255), font_thickness, cv2.LINE_AA)

        # Throttled raw/full frame saves when anything was detected in the ROI.
        current_time = time.time()
        if any_target_detected and current_time - self.last_save_time >= self.min_interval:
            timestamp_str = time.strftime("%Y%m%d_%H%M%S_") + str(int(current_time * 1000))
            self.raw_saver.add_frame(raw_frame, "detect_" + timestamp_str)
            self.frame_saver.save_full_frame(frame.copy(), "detect_" + timestamp_str)
            self.last_save_time = current_time

        return frame


# Application entry point.
if __name__ == "__main__":
    app = QApplication(sys.argv)

    cfg = load_config()

    # Ensure every configured output directory exists before any saver runs.
    for save_dir in cfg["save_dirs"].values():
        Path(save_dir).mkdir(exist_ok=True)

    main_window = MainWindow(
        rtsp_urls=cfg["rtsp_urls"],
        model_path=cfg["model_path"],
        center_ratio=cfg["center_detection_ratio"],
        nested_classes=cfg["nested_classes"],
        class_names=cfg["classes_to_show"],
    )
    main_window.show()
    sys.exit(app.exec_())
