import os
import cv2 as cv
import requests
from enum import Enum
from ultralytics import YOLO
from collections import deque, defaultdict
import torch
import numpy as np
from torchvision.ops import nms


class VideoType(Enum):
    """Kinds of video source a :class:`VideoHandle` can open."""

    LOCAL = "local"  # file on the local filesystem
    HTTP = "http"    # video served over HTTP(S)
    RTSP = "rtsp"    # RTSP network stream


class VideoHandle:
    """Pairs a video source (local file, HTTP URL, or RTSP stream) with a
    YOLO model weights path and knows how to open both."""

    def __init__(self, video_type: "VideoType", source: str, model: str):
        self.type = video_type
        self.source = source
        self.model = model

    def checkout_url(self, url):
        """Return True when *url* answers a HEAD request (redirects followed)
        with HTTP 200 within 5 seconds, False on any request failure."""
        try:
            response = requests.head(url, timeout=5, allow_redirects=True)
        except requests.RequestException:
            return False
        return response.status_code == 200

    def get_video(self):
        """Open the configured source with OpenCV and return the capture.

        Raises FileNotFoundError for a missing local file and ValueError for
        an unreachable HTTP URL / RTSP stream or an unknown type.
        """
        if self.type == VideoType.LOCAL:
            if not os.path.exists(self.source):
                raise FileNotFoundError(f"本地文件不存在: {self.source}")
            return cv.VideoCapture(self.source)

        if self.type == VideoType.HTTP:
            if not self.checkout_url(self.source):
                raise ValueError(f"HTTP 视频 URL 无法访问: {self.source}")
            return cv.VideoCapture(self.source)

        if self.type == VideoType.RTSP:
            capture = cv.VideoCapture(self.source)
            if not capture.isOpened():
                raise ValueError(f"RTSP 流无法连接: {self.source}")
            return capture

        raise ValueError(f"未知视频类型: {self.type}")

    def load_model(self):
        """Instantiate and return the YOLO model from the weights path."""
        return YOLO(self.model)

    def visual_identity(self):
        """Return the name of the configured video type, e.g. ``"LOCAL"``."""
        return self.type.name


class VideoVisual:
    """Grid-based person counting over a video stream with live display.

    Each frame is split into ``grid_rows`` x ``grid_cols`` blocks and every
    block is run through the YOLO model independently.  After the first
    processed frame, blocks whose person count is below 10% of the busiest
    block are skipped for the rest of the run.  Detection boxes whose centre
    barely moves over several frames are treated as static objects and
    subtracted from the raw count, which is then smoothed over a short
    sliding window.
    """

    # Configuration constants (immutable, safe as class attributes).
    fps = 30                # assumed frame rate for the per-second statistics
    grid_rows, grid_cols = 6, 6

    def __init__(self, video_handle: "VideoHandle"):
        self.video_handle = video_handle
        # Bug fix: all of the mutable per-run state below used to live on the
        # class, so every VideoVisual instance shared the same counters,
        # queue, skip set, and track memory.  It now lives on the instance.
        self.frame_count = 0
        self.second_count = 0
        self.person_counts_in_second = []          # smoothed counts of the current second
        self.person_count_queue = deque(maxlen=5)  # sliding window for smoothing
        self.initial_counts = {}                   # block_id -> person count on the first frame
        self.skip_blocks = set()                   # blocks excluded from further detection
        # NOTE(review): keys are (row, col, NMS index); the NMS index is not a
        # stable track id across frames — confirm whether real tracking (e.g.
        # model.track) is intended here.
        self.track_memory = defaultdict(list)      # key -> list of (cx, cy) centres
        self.initialized = False                   # True once the skip set is built

    def visual(self):
        """Main loop: read frames, count people per grid block, draw overlay.

        Returns when the stream ends or the user presses ``q``.
        """
        video_capture = self.video_handle.get_video()
        model = self.video_handle.load_model()

        while True:
            ret, frame = video_capture.read()
            if not ret:
                break

            self.frame_count += 1
            h, w = frame.shape[:2]
            block_h = h // self.grid_rows
            block_w = w // self.grid_cols

            block_person_counts = []
            total_person_count = 0

            for i in range(self.grid_rows):
                for j in range(self.grid_cols):
                    block_id = (i, j)
                    if block_id in self.skip_blocks:
                        continue

                    # Last row/column absorbs the remainder pixels so the
                    # whole frame is covered.
                    x1, y1 = j * block_w, i * block_h
                    x2 = (j + 1) * block_w if j != self.grid_cols - 1 else w
                    y2 = (i + 1) * block_h if i != self.grid_rows - 1 else h
                    block = frame[y1:y2, x1:x2]

                    results = model(block, conf=0.01, iou=0.7, imgsz=640)[0]

                    person_count = 0
                    if results.boxes is not None and len(results.boxes) > 0:
                        boxes = results.boxes.xyxy.cpu().numpy()
                        scores = results.boxes.conf.cpu().numpy()
                        classes = results.boxes.cls.cpu().numpy()

                        # Second NMS pass with a stricter IoU (0.3) than the
                        # model's own (0.7) to merge duplicate boxes.
                        keep = nms(torch.tensor(boxes, dtype=torch.float32),
                                   torch.tensor(scores, dtype=torch.float32),
                                   iou_threshold=0.3)

                        for idx in keep.cpu().numpy():
                            if int(classes[idx]) != 0:  # COCO class 0 == person
                                continue
                            person_count += 1

                            # Translate block-local box to frame coordinates.
                            bx1, by1, bx2, by2 = map(int, boxes[idx])
                            global_x1, global_y1 = x1 + bx1, y1 + by1
                            global_x2, global_y2 = x1 + bx2, y1 + by2

                            # Simple position tracking via the box centre.
                            cx = (global_x1 + global_x2) // 2
                            cy = (global_y1 + global_y2) // 2
                            self.track_memory[(i, j, idx)].append((cx, cy))

                            # Draw the detection box.
                            cv.rectangle(frame, (global_x1, global_y1),
                                         (global_x2, global_y2), (0, 255, 0), 2)

                    # Bug fix: record every scanned block and draw its border
                    # unconditionally.  The original only did this for blocks
                    # with detections, so empty (lowest-density) blocks never
                    # entered the counts and could never be skipped.
                    block_person_counts.append((block_id, person_count))
                    total_person_count += person_count
                    cv.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 1)

            # One-time initialisation of the skip set.
            # Bug fix: the original read/wrote a bare local ``initialized``,
            # which raised UnboundLocalError on the first frame; the flag is
            # instance state (``self.initialized``).
            if not self.initialized and block_person_counts:
                max_count = max(count for _, count in block_person_counts)
                threshold = max_count * 0.1  # 10% of the busiest block

                for block_id, count in block_person_counts:
                    self.initial_counts[block_id] = count
                    if count < threshold:
                        self.skip_blocks.add(block_id)
                self.initialized = True
                print(f"[初始化完成] 跳过 {len(self.skip_blocks)} 个低密度块（人数 < 最高人数的10%）")

            # Boxes whose centre moved < 5 px on average over >= 5 samples are
            # considered static objects and removed from the count.
            still_track_ids = 0
            for key, positions in list(self.track_memory.items()):
                if len(positions) >= 5:
                    movements = [np.linalg.norm(np.array(positions[k]) - np.array(positions[k - 1]))
                                 for k in range(1, len(positions))]
                    if sum(movements) / len(movements) < 5:
                        still_track_ids += 1
                        del self.track_memory[key]

            total_person_count = max(0, total_person_count - still_track_ids)

            # Smooth over the last few frames to damp detection jitter.
            self.person_count_queue.append(total_person_count)
            smoothed_count = round(sum(self.person_count_queue) / len(self.person_count_queue))

            # Overlay raw and smoothed counts.
            cv.putText(frame, f"Raw Persons: {total_person_count}", (10, 30),
                       cv.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
            cv.putText(frame, f"Smoothed Persons: {smoothed_count}", (10, 70),
                       cv.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

            # Per-second statistics (assumes a constant self.fps frame rate).
            self.person_counts_in_second.append(smoothed_count)
            if self.frame_count % self.fps == 0:
                self.second_count += 1
                max_count = max(self.person_counts_in_second) if self.person_counts_in_second else 0
                print(f"=== 第 {self.second_count} 秒最大人数: {max_count} ===")
                self.person_counts_in_second.clear()

            cv.imshow(f"Grid Detection ({self.grid_rows}x{self.grid_cols})", frame)
            if cv.waitKey(1) & 0xFF == ord('q'):
                break

        video_capture.release()
        cv.destroyAllWindows()
