from datetime import datetime

from ultralytics import YOLO
import cv2
import time
from threading import Thread, Lock, Event
import logging
from concurrent.futures import ThreadPoolExecutor
import torch
import atexit
import os
import gc
from celery_app.data_sms.tasks import send_alarm_task

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Raise OpenCV's FFmpeg read-attempt limit (helps with flaky RTMP streams)
os.environ["OPENCV_FFMPEG_READ_ATTEMPTS"] = "50000"

# Detection configuration (includes independent per-class confidence thresholds)
class_config = {
    'classes': ['持械', '火患', '打斗', '人员', '动物'],
    'model_paths': {
        '持械': "/workspace/test/models/armed11m.pt",
        '火患': "/workspace/test/models/fire11m.pt",
        '打斗': "/workspace/test/models/fighting11m.pt",
        '人员': "/workspace/test/models/person11m.pt",
        '动物': "/workspace/test/models/dac11m.pt"
    },
    'priority_order': ['持械', '火患', '打斗', '人员', '动物'],
    # Model index (position in priority_order) -> preferred CUDA device
    'gpu_mapping': {
        0: 'cuda:0',
        1: 'cuda:0',
        2: 'cuda:0',
        3: 'cuda:1',
        4: 'cuda:1'
    },
    # Independent per-class confidence thresholds, applied as a second
    # filter on top of whatever the model itself returns
    'conf_thresholds': {
        '持械': 0.4,
        '火患': 0.3,
        '打斗': 0.6,
        '人员': 0.5,
        '动物': 0.4
    },
    'min_detection_duration': 8,      # seconds: window for repeated detections
    'freeze_duration': 60,            # seconds a class is muted after an alarm
    'max_retries': 3,                 # consecutive frame-read failures before reconnect
    'reconnect_interval': 3,          # seconds: cooldown between processing passes
    'frame_size': (640, 640),         # model input size (width, height)
    'max_workers_per_gpu': 6,         # thread-pool workers per detected GPU
    'cpu_max_workers': 12,            # thread-pool workers on CPU-only hosts
    'cache_clean_interval': 300,      # seconds between cache/GC sweeps
    'record_clean_interval': 30,      # seconds between detection-record prunes
    'connection_check_interval': 15,  # seconds between connection health checks
    'max_frame_queue_size': 8         # NOTE(review): not referenced in this file — confirm use elsewhere
}

# Global run flag checked by every worker loop
is_running = True
# NOTE(review): never set/waited on in this file — confirm it is used elsewhere
global_cleanup_event = Event()

# Probe available CUDA devices once at import time
available_gpus = torch.cuda.device_count()
logging.info(f"检测到可用GPU数量: {available_gpus}")

# Load one YOLO model per class, spreading them across the available GPUs
models = {}
for idx, cls in enumerate(class_config['priority_order']):
    # Pick a device: honour the explicit mapping on multi-GPU hosts,
    # otherwise use the first GPU (or CPU if CUDA is unavailable).
    if available_gpus >= 2:
        device = class_config['gpu_mapping'].get(idx, 'cuda:0')
    else:
        device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    # Validate the chosen CUDA index; fall back to CPU on any problem.
    try:
        if 'cuda' in device:
            gpu_id = int(device.split(':')[-1])
            if gpu_id >= available_gpus:
                raise ValueError(f"GPU {gpu_id} 不可用")
    except Exception as e:
        logging.warning(f"设备配置错误: {str(e)}, 回退到CPU")
        device = 'cpu'

    model_path = class_config['model_paths'][cls]
    models[cls] = YOLO(model_path).to(device)
    # Baseline confidence threshold.
    # NOTE(review): assigning `.conf` as an attribute on an ultralytics YOLO
    # object may not affect inference (confidence is normally passed per
    # predict() call); the effective filtering in this file is the per-class
    # threshold applied in _process_detection — confirm this is honoured.
    models[cls].conf = 0.25  # YOLO default / baseline value
    logging.info(f"已加载模型: {cls} | 设备: {device} | 基准阈值: {models[cls].conf}")

# Camera RTMP endpoints, keyed by area id
CAMERAS = {
    "area1": "rtmp://192.168.12.20/live/34020000002000001031",
    "area2": "rtmp://192.168.12.20/live/34020000002000001017",
    "area3": "rtmp://192.168.12.20/live/34020000002000001016",
}

class CameraProcessor:
    """Per-camera stream worker.

    Reads frames from an RTMP stream, submits one detection task per
    (non-frozen) class to a shared thread pool, and fires an alarm via
    Celery when a class produces 3 valid detections inside
    ``min_detection_duration`` seconds.  Construction starts two daemon
    threads: the frame-processing loop and a maintenance loop.

    Fixes relative to the previous revision:
      * maintenance health-check no longer crashes when ``self.cap`` is None
      * frames stay in BGR until a single BGR->RGB conversion before
        inference (the old double conversion fed the model BGR data and
        saved colour-swapped snapshots)
      * a successful detection pass returns instead of re-running on the
        remaining retry iterations
      * detection records are read under the lock when checking the alarm
        window
    """

    def __init__(self, url, area_id):
        self.area_id = area_id
        self.url = url
        self.lock = Lock()   # guards detection_records and capture resets
        self.active = True
        self.cap = None      # cv2.VideoCapture; stays None until connected

        # Size the worker pool from the hardware actually present.
        if torch.cuda.is_available():
            total_gpus = torch.cuda.device_count()
            max_workers = class_config['max_workers_per_gpu'] * total_gpus
        else:
            max_workers = class_config['cpu_max_workers']

        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        logging.info(f"[{self.area_id}] 初始化线程池 (workers={max_workers})")

        self.status = "disconnected"
        self.error_count = 0
        self.last_valid_time = time.time()

        # Detection state: timestamps of valid hits per class, plus a
        # per-class "frozen until" deadline set after an alarm fires.
        self.detection_records = {cls: [] for cls in class_config['priority_order']}
        self.freeze_until = {cls: 0 for cls in class_config['priority_order']}
        self.last_alarm_time = 0

        # FFmpeg read-attempt env var is refreshed periodically
        # (see _reset_ffmpeg_attempts).
        self.last_ffmpeg_reset = time.time()
        self.ffmpeg_reset_interval = 3600  # once per hour

        self.frame_queue = []
        self.last_cache_clean = time.time()
        self.last_record_clean = time.time()
        self.last_connection_check = time.time()
        self.resource_lock = Lock()  # guards frame_queue / cache cleanup

        logging.info(f"[{self.area_id}] 初始化摄像头处理器...")
        self._init_capture()
        Thread(target=self._process_stream, daemon=True).start()
        Thread(target=self._maintenance_task, daemon=True).start()

    def _maintenance_task(self):
        """Periodic housekeeping: prune detection records, clear caches,
        health-check the connection and refresh the FFmpeg env var."""
        while self.active and is_running:
            try:
                current_time = time.time()

                # Prune expired detection timestamps.
                if current_time - self.last_record_clean > class_config['record_clean_interval']:
                    with self.lock:
                        for cls in class_config['priority_order']:
                            cutoff = current_time - class_config['min_detection_duration']
                            self.detection_records[cls] = [t for t in self.detection_records[cls] if t > cutoff]
                    self.last_record_clean = current_time
                    logging.debug(f"[{self.area_id}] 已清理过期检测记录")

                # Drop cached frames and release GPU memory.
                if current_time - self.last_cache_clean > class_config['cache_clean_interval']:
                    with self.resource_lock:
                        self.frame_queue.clear()
                        if torch.cuda.is_available():
                            torch.cuda.empty_cache()
                        gc.collect()
                    self.last_cache_clean = current_time
                    logging.debug(f"[{self.area_id}] 已执行缓存清理")

                # Health-check the capture.
                # FIX: self.cap is None when the initial connect failed;
                # the old check raised AttributeError here.
                if current_time - self.last_connection_check > class_config['connection_check_interval']:
                    if self.cap is None or not self.cap.isOpened() or self.status != "connected":
                        self._reset_connection()
                    self.last_connection_check = current_time

                # Periodically refresh the FFmpeg read-attempt limit.
                if current_time - self.last_ffmpeg_reset > self.ffmpeg_reset_interval:
                    self._reset_ffmpeg_attempts()
                    self.last_ffmpeg_reset = current_time

                time.sleep(1)

            except Exception as e:
                logging.error(f"[{self.area_id}] 维护任务异常: {str(e)}")

    def _reset_ffmpeg_attempts(self):
        """Re-set the OpenCV/FFmpeg read-attempt environment variable.

        NOTE(review): env vars are read when a capture is opened, so this
        likely only affects captures opened afterwards — confirm it has any
        effect on the already-open stream.
        """
        os.environ["OPENCV_FFMPEG_READ_ATTEMPTS"] = "50000"
        logging.info(f"[{self.area_id}] 已重置 FFmpeg 读取尝试次数为 50000")

    def _init_capture(self):
        """Open the RTMP stream via FFmpeg; return True on success."""
        try:
            self.cap = cv2.VideoCapture(self.url, cv2.CAP_FFMPEG)
            if self.cap.isOpened():
                self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
                self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, class_config['frame_size'][0])
                self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, class_config['frame_size'][1])
                self._update_status("connected")
                logging.info(f"[{self.area_id}] 摄像头连接成功")
                return True
            self._update_status("disconnected")
            logging.error(f"[{self.area_id}] 摄像头连接失败")
            return False
        except Exception as e:
            # FIX: mark the processor disconnected on an open exception too,
            # so the maintenance health-check knows to retry.
            self._update_status("disconnected")
            logging.error(f"[{self.area_id}] 摄像头初始化异常: {str(e)}")
            return False

    def _update_status(self, status):
        """Record a status transition (logged only when it changes)."""
        if self.status != status:
            self.status = status
            logging.debug(f"[{self.area_id}] 状态更新: {status}")

    def _process_stream(self):
        """Main loop: read a frame and fan detection out per class."""
        logging.info(f"[{self.area_id}] 开始处理视频流...")
        while self.active and is_running:
            try:
                # NOTE(review): this "reconnect cooldown" compares against
                # the time of the *last successful frame*, so in practice it
                # throttles processing to roughly one frame every
                # reconnect_interval seconds — confirm that is intended.
                if time.time() - self.last_valid_time < class_config['reconnect_interval']:
                    time.sleep(1)
                    continue

                # Fetch a frame (BGR, padded square, frame_size).
                ret, frame = self._read_frame()
                if not ret:
                    self.error_count += 1
                    logging.warning(f"[{self.area_id}] 读取帧失败，错误计数: {self.error_count}")
                    if self.error_count > class_config['max_retries']:
                        logging.warning(f"[{self.area_id}] 达到最大重试次数，尝试重置连接...")
                        self._reset_connection()
                    continue

                current_time = time.time()
                active_gpus = self._get_available_devices()
                logging.debug(f"[{self.area_id}] 可用设备: {active_gpus}")

                for cls in class_config['priority_order']:
                    # Skip classes still frozen after a recent alarm.
                    if current_time < self.freeze_until[cls]:
                        logging.debug(f"[{self.area_id}] {cls} 类别处于冻结状态，跳过检测")
                        continue

                    # Resolve a usable device (with automatic fallback).
                    device = self._get_safe_device(cls)
                    if not device:
                        logging.warning(f"[{self.area_id}] {cls} 无可用设备，跳过检测")
                        continue

                    logging.debug(f"[{self.area_id}] 提交检测任务: {cls} | 设备: {device}")
                    self.executor.submit(
                        self._process_detection,
                        cls,
                        frame.copy(),  # each task owns its copy of the frame
                        time.time(),   # capture timestamp for the staleness check
                        device
                    )

                self.error_count = 0
                self.last_valid_time = current_time
                time.sleep(0.05)

            except Exception as e:
                logging.error(f"[{self.area_id}] 视频流处理异常: {str(e)}")
                self._reset_connection()
                time.sleep(1)

    def _read_frame(self):
        """Grab one frame, pad it square and scale to frame_size.

        Returns:
            (True, frame) with the frame in OpenCV-native BGR order, or
            (False, None) on any failure.
        """
        try:
            if not self.cap or not self.cap.isOpened():
                logging.warning(f"[{self.area_id}] 摄像头未打开，尝试重新初始化...")
                if not self._init_capture():
                    return False, None

            ret = self.cap.grab()
            if not ret:
                logging.error(f"[{self.area_id}] 抓取帧失败")
                return False, None

            ret, frame = self.cap.retrieve()
            if ret and frame is not None and frame.size > 0:
                # FIX: keep the frame in BGR here.  The previous revision
                # converted to RGB here AND again in _process_detection;
                # the double channel swap handed the model BGR data and
                # produced colour-swapped saved snapshots.  The single
                # BGR->RGB conversion now lives in _process_detection, and
                # cv2.imwrite receives BGR as it expects.

                # Pad to a square with black borders, then scale.
                h, w = frame.shape[:2]
                if h != w:
                    size = max(h, w)
                    pad_h = (size - h) // 2
                    pad_w = (size - w) // 2
                    frame = cv2.copyMakeBorder(frame, pad_h, pad_h, pad_w, pad_w, cv2.BORDER_CONSTANT, value=(0, 0, 0))
                # FIX: resize unconditionally — an already-square frame of
                # the wrong size used to pass through unscaled.
                frame = cv2.resize(frame, class_config['frame_size'])

                # Demoted from INFO to DEBUG: this fired for every frame.
                logging.debug(f"[{self.area_id}] 成功读取帧，尺寸: {frame.shape}")
                return True, frame
            logging.warning(f"[{self.area_id}] 读取帧失败，帧无效")
            return False, None
        except Exception as e:
            logging.error(f"[{self.area_id}] 读取帧异常: {str(e)}")
            return False, None

    def _process_detection(self, cls, frame, frame_timestamp, device):
        """Run one class's model on one frame and handle alarm bookkeeping.

        Args:
            cls: class name (key into models / class_config thresholds).
            frame: BGR frame copy from _read_frame.
            frame_timestamp: capture time, used for the staleness check.
            device: device string the model should run on.

        Retries up to max_retries times on exception only; a successful
        pass returns immediately.  (FIX: the original had no return after a
        successful pass, so a frame with valid detections was re-run — and
        re-recorded — on every remaining retry iteration.)
        """
        max_retries = 3
        for attempt in range(max_retries):
            try:
                processing_time = time.time()

                # Drop frames that waited more than 1.5 s in the pool.
                # (The original comment claimed 1 ms; the code checks 1.5 s.)
                if processing_time - frame_timestamp > 1.5:
                    logging.debug(f"[{self.area_id}] 丢弃过期帧 | 延迟: {processing_time - frame_timestamp:.2f}s")
                    return

                logging.debug(
                    f"[{self.area_id}] 开始检测: {cls} | 设备: {device} | 帧延迟: {processing_time - frame_timestamp:.3f}s")

                # Per-class confidence threshold for the second-stage filter.
                current_threshold = class_config['conf_thresholds'][cls]

                # Scale to the model input size.
                frame_resized = cv2.resize(frame, (640, 640))

                # Single BGR -> RGB conversion (frames arrive in BGR from
                # _read_frame).
                frame_rgb = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2RGB)

                # HWC uint8 -> normalised NCHW float tensor on the device.
                tensor_frame = torch.from_numpy(frame_rgb).permute(2, 0, 1).float() / 255.0
                tensor_frame = tensor_frame.unsqueeze(0).to(device)

                # Run inference.
                results = models[cls](tensor_frame, verbose=False)

                if len(results[0].boxes) == 0:
                    logging.info(f"[{self.area_id}] {cls} 检测结果: 无目标")
                    return

                # Second-stage filter: keep only boxes above the class threshold.
                valid_boxes = []
                for box in results[0].boxes:
                    box_conf = box.conf.item()
                    if box_conf >= current_threshold:
                        valid_boxes.append(box)
                        logging.info(f"[{self.area_id}] {cls} 有效检测 | 置信度: {box_conf:.4f}")

                if not valid_boxes:
                    return

                # Save an annotated snapshot (frame_resized is BGR, as
                # cv2.imwrite expects).
                self._save_detected_frame(frame_resized, valid_boxes, cls, processing_time)

                # Record the hit and snapshot the records while holding the
                # lock (FIX: the original read the record list outside it).
                with self.lock:
                    self.detection_records[cls].append(processing_time)
                    cutoff = processing_time - class_config['min_detection_duration']
                    self.detection_records[cls] = [t for t in self.detection_records[cls] if t > cutoff]
                    records = list(self.detection_records[cls])

                # Alarm when the last 3 valid hits fall inside the window.
                if len(records) >= 3:
                    recent_detections = records[-3:]
                    time_window = recent_detections[-1] - recent_detections[0]

                    if time_window <= class_config['min_detection_duration']:
                        logging.info(f"[{self.area_id}] 触发报警: {cls} | 时间窗口: {time_window:.2f}s")
                        self._trigger_alarm(cls, processing_time)
                        with self.lock:
                            self.detection_records[cls].clear()

                return  # success — do not fall through to another attempt

            except Exception as e:
                logging.error(f"[{self.area_id}] {cls} 检测异常: {str(e)}")
                if attempt == max_retries - 1:
                    logging.error(f"[{self.area_id}] {cls} 检测失败已达最大重试次数")

    def _save_detected_frame(self, frame, boxes, cls, timestamp):
        """Save the detected frame with bounding boxes drawn on it.

        Args:
            frame (numpy.ndarray): BGR frame to annotate and save.
            boxes (list): detected boxes (ultralytics Boxes elements).
            cls (str): detected class name.
            timestamp (float): detection timestamp (used in the filename).
        """
        try:
            # Directory layout: detected_frames/<area>/<class>/
            save_dir = os.path.join("detected_frames", self.area_id, cls)
            os.makedirs(save_dir, exist_ok=True)

            # Timestamped filename.
            file_name = f"{datetime.fromtimestamp(timestamp).strftime('%Y%m%d_%H%M%S')}_{cls}.jpg"
            file_path = os.path.join(save_dir, file_name)

            # Draw each detection box plus a class/confidence label.
            for box in boxes:
                x1, y1, x2, y2 = map(int, box.xyxy[0].tolist())
                confidence = box.conf.item()
                label = f"{cls} {confidence:.2f}"
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

            cv2.imwrite(file_path, frame)
            logging.info(f"[{self.area_id}] 已保存检测到的帧图片: {file_path}")

        except Exception as e:
            logging.error(f"[{self.area_id}] 保存检测帧图片时发生异常: {str(e)}")

    def _trigger_alarm(self, cls, timestamp):
        """Freeze the class for freeze_duration seconds and enqueue the alarm."""
        self.freeze_until[cls] = timestamp + class_config['freeze_duration']
        logging.info(f"[{self.area_id}] 触发报警: {cls} | 冻结至: {self.freeze_until[cls]} | 设备: {models[cls].device}")

        alarm_data = {
            "area_id": self.area_id,
            "detected_class": cls,
            "timestamp": timestamp,
            "freeze_until": self.freeze_until[cls]
        }
        # Fire-and-forget: delivery is handled by the Celery worker.
        send_alarm_task.delay(alarm_data)

    def _reset_connection(self):
        """Release the current capture (if any) and reopen the stream."""
        with self.lock:
            if self.cap:
                self.cap.release()
            self._init_capture()
            logging.info(f"[{self.area_id}] 连接已重置")

    def _get_available_devices(self):
        """Return the list of currently usable device strings."""
        devices = []
        if torch.cuda.is_available():
            for i in range(torch.cuda.device_count()):
                try:
                    # Probing properties raises if the GPU is unusable.
                    torch.cuda.get_device_properties(i)
                    devices.append(f'cuda:{i}')
                except Exception:  # FIX: no bare except
                    continue
        else:
            devices.append('cpu')
        return devices

    def _get_safe_device(self, cls):
        """Return a usable device for cls, with automatic fallback.

        Returns the model's own device when it is healthy, another CUDA
        device or 'cpu' as fallback, or None when nothing is usable.
        """
        original_device = str(models[cls].device)

        # FIX: a CPU-hosted model needs no probing (the original fell
        # through to the fallback path and logged a spurious
        # "fallback to CPU" on every frame).
        if 'cuda' not in original_device:
            return original_device

        # Probe the model's own GPU first.
        try:
            gpu_id = int(original_device.split(':')[-1])
            torch.cuda.get_device_properties(gpu_id)
            return original_device
        except Exception:  # FIX: no bare except
            logging.warning(f"[{self.area_id}] {cls} 原始设备 {original_device} 不可用，尝试回退")

        # Fallback: prefer another CUDA device, else CPU.
        available_devices = self._get_available_devices()
        if available_devices:
            for dev in available_devices:
                if 'cuda' in dev:
                    logging.info(f"[{self.area_id}] {cls} 回退到 {dev}")
                    return dev
            logging.info(f"[{self.area_id}] {cls} 回退到 CPU")
            return 'cpu'
        else:
            logging.error(f"[{self.area_id}] 无可用设备")
            return None

    def release(self):
        """Stop the worker threads, close the capture and drain the pool."""
        self.active = False
        with self.lock:
            if self.cap:
                self.cap.release()
        self.executor.shutdown(wait=True)
        logging.info(f"[{self.area_id}] 资源已释放")


# Build one processor per camera; each constructor immediately starts its
# own daemon threads (stream loop + maintenance loop)
camera_processors = {aid: CameraProcessor(url, aid) for aid, url in CAMERAS.items()}


def cleanup():
    """Stop every camera processor and release GPU/host resources.

    Registered with atexit and also called from the __main__ finally
    block, so it may run more than once; the individual release steps
    tolerate repetition.
    """
    logging.info("\n正在执行全局清理...")
    global is_running
    is_running = False  # signal every worker loop to exit

    # Shut the camera processors down first so no new GPU work arrives.
    for processor in list(camera_processors.values()):
        processor.release()

    # Then flush the CUDA caching allocator on every device.
    if torch.cuda.is_available():
        gpu_total = torch.cuda.device_count()
        for i in range(gpu_total):
            torch.cuda.set_device(i)
            torch.cuda.empty_cache()
            logging.info(f"已清理 GPU {i} 缓存")

    # Finally force a host-side collection pass.
    gc.collect()
    logging.info("全局清理完成")


atexit.register(cleanup)

if __name__ == '__main__':
    # All real work happens in the daemon threads each CameraProcessor
    # started; the main thread only needs to stay alive.
    try:
        logging.info("程序启动，开始检测...")
        while True:
            time.sleep(1)  # keep the main thread running
    except KeyboardInterrupt:
        logging.info("用户中断程序")
    finally:
        cleanup()