import os
import cv2
import torch
import numpy as np
from torchvision import transforms
from config import cfg
from model import make_model
from ultralytics import YOLO
import ultralytics
from deep_sort_realtime.deepsort_tracker import DeepSort
import argparse
from yacs.config import CfgNode  # 确保导入 CfgNode
# Silence TensorFlow's oneDNN notice and reduce its C++ log verbosity.
# These must be set before any dependency imports TensorFlow, which is why
# they sit at module top level right after the imports.
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"


def load_reid_model(cfg_path, model_path, num_class=751, camera_num=6, view_num=1):
    """Load the TransReID model, adapting checkpoint dimensions when needed.

    Merges the YAML config at ``cfg_path`` into the global project ``cfg``,
    builds the model via ``make_model``, loads the checkpoint at ``model_path``
    (resizing ``base.pos_embed`` if its sequence length differs from the
    model's), and returns the model in eval mode.

    Args:
        cfg_path: Path to a TransReID YAML config file (read as UTF-8).
        model_path: Path to the ReID checkpoint (.pth).
        num_class: Number of identity classes the head was trained with
            (751 matches Market-1501 — TODO confirm against the checkpoint).
        camera_num: Camera count passed to ``make_model`` for SIE embedding.
        view_num: View count passed to ``make_model`` for SIE embedding.

    Returns:
        Tuple of (model, device, cfg) — the merged, frozen config is returned
        so callers can reuse its preprocessing parameters.

    Raises:
        FileNotFoundError: If either input path does not exist.
        Exception: Any underlying load/merge error is printed and re-raised.
    """
    try:
        if not os.path.exists(cfg_path):
            raise FileNotFoundError(f"Config file not found: {cfg_path}")
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model weights not found: {model_path}")

        print("正在加载配置...")
        # Re-import the global cfg here; this is the same singleton as the
        # module-level import, merged in place below.
        from config import cfg

        print(f"使用 UTF-8 编码读取配置文件: {cfg_path}")
        with open(cfg_path, 'r', encoding='utf-8') as f:
            user_cfg = CfgNode.load_cfg(f)
            cfg.merge_from_other_cfg(user_cfg)
            print("✓ 配置加载并合并成功!")

        # Keep the original SIE (side-information embedding) settings from the
        # merged config; only report them, do not override.
        print("保持原始SIE配置:")
        print(f"  SIE_CAMERA: {cfg.MODEL.SIE_CAMERA}")
        print(f"  SIE_VIEW: {cfg.MODEL.SIE_VIEW}")
        print(f"  使用 camera_num: {camera_num}, view_num: {view_num}")

        # NOTE(review): freezing the global cfg means a second call to this
        # function would fail on merge_from_other_cfg — confirm single-use.
        cfg.freeze()
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # Build the model with the merged config.
        model = make_model(cfg, num_class=num_class, camera_num=camera_num, view_num=view_num)

        # Load the checkpoint; unwrap a {'model': state_dict} container if present.
        weights = torch.load(model_path, map_location=device)
        if 'model' in weights:
            weights = weights['model']

        # Dynamically adapt the positional-embedding sequence length so a
        # checkpoint trained at a different input size can still be loaded.
        if 'base.pos_embed' in weights:
            weight_shape = weights['base.pos_embed'].shape
            model_shape = model.state_dict()['base.pos_embed'].shape

            print(f"pos_embed 维度: 权重 {weight_shape} -> 模型 {model_shape}")

            if weight_shape != model_shape:
                print("动态调整 pos_embed 维度...")
                from torch.nn import functional as F

                # Only the sequence-length axis (dim 1) is reconciled.
                weight_seq_len = weight_shape[1]
                model_seq_len = model_shape[1]

                if weight_seq_len < model_seq_len:
                    # Checkpoint is shorter: expand via 1-D linear interpolation.
                    # NOTE(review): this interpolates across ALL tokens,
                    # including the class token — standard ViT resizing keeps
                    # the cls token fixed and interpolates only the spatial
                    # grid. Verify this matches how the model was trained.
                    weights['base.pos_embed'] = F.interpolate(
                        weights['base.pos_embed'].permute(0, 2, 1),
                        size=model_seq_len,
                        mode='linear'
                    ).permute(0, 2, 1)
                    print(f"扩展 pos_embed 从 {weight_seq_len} 到 {model_seq_len}")
                else:
                    # Checkpoint is longer: truncate trailing positions.
                    weights['base.pos_embed'] = weights['base.pos_embed'][:, :model_seq_len, :]
                    print(f"截取 pos_embed 从 {weight_seq_len} 到 {model_seq_len}")

        # Non-strict load so shape-adapted / head-mismatched keys don't abort.
        print("以非严格模式加载权重...")
        missing_keys, unexpected_keys = model.load_state_dict(weights, strict=False)

        if missing_keys:
            print(f"缺失的键: {missing_keys}")
        if unexpected_keys:
            print(f"意外的键: {unexpected_keys}")

        print("✓ 权重加载成功!")

        model.to(device)
        model.eval()

        # Return the config too so preprocessing can reuse its parameters.
        return model, device, cfg

    except Exception as e:
        print(f"加载 ReID 模型时出错: {e}")
        raise


# Cache of preprocessing pipelines keyed by (size, mean, std). Building a
# transforms.Compose per crop per frame (the original behavior) is wasted work
# on the hot path; the pipeline is stateless, so one instance per parameter
# set can be reused indefinitely.
_REID_TRANSFORM_CACHE = {}


def preprocess_reid_image(image, cfg=None):
    """Preprocess a BGR/uint8 person crop into a ReID input batch of size 1.

    Args:
        image: HxWxC uint8 array (an OpenCV crop) — TODO confirm the model
            expects no BGR->RGB conversion here, as none is applied.
        cfg: Optional TransReID config; when given, its INPUT.SIZE_TEST,
            INPUT.PIXEL_MEAN and INPUT.PIXEL_STD drive resizing/normalization.
            When omitted, defaults of 256x128 and 0.5 mean/std are used —
            these must match the values used at training time.

    Returns:
        A float tensor of shape (1, C, H, W) ready for the ReID model.
    """
    if cfg:
        size = tuple(cfg.INPUT.SIZE_TEST)
        pixel_mean = cfg.INPUT.PIXEL_MEAN
        pixel_std = cfg.INPUT.PIXEL_STD
    else:
        # Fallback values; supplying cfg is strongly recommended so these
        # stay consistent with training.
        size = (256, 128)
        pixel_mean = [0.5, 0.5, 0.5]
        pixel_std = [0.5, 0.5, 0.5]

    # Reuse one pipeline per parameter set instead of rebuilding it per call.
    cache_key = (size, tuple(pixel_mean), tuple(pixel_std))
    transform = _REID_TRANSFORM_CACHE.get(cache_key)
    if transform is None:
        transform = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(size),
            transforms.ToTensor(),
            transforms.Normalize(mean=pixel_mean, std=pixel_std),
        ])
        _REID_TRANSFORM_CACHE[cache_key] = transform

    # unsqueeze(0) adds the batch dimension expected by the model.
    return transform(image).unsqueeze(0)


def main(args):
    """Run YOLO detection + TransReID features + DeepSORT tracking on a video.

    Reads frames from ``args.video_path`` (or the default webcam when empty),
    detects people with YOLO, extracts ReID features in one batched forward
    pass per frame, feeds detections to DeepSORT, and optionally displays
    (``args.show``) and/or writes (``args.output``) the annotated stream.
    """
    # Load the YOLO detector.
    yolo_model = YOLO(args.yolo_weights)
    yolo_model.to('cuda' if torch.cuda.is_available() else 'cpu')

    # Load the ReID model; the merged config is returned so preprocessing
    # uses the same input size / normalization as training.
    reid_model, device, reid_cfg = load_reid_model(args.config_file, args.reid_weights)

    # DeepSORT tracker. max_age: frames a track survives without a match
    # before deletion; n_init: consecutive hits before a track is confirmed.
    tracker = DeepSort(max_age=60, n_init=5, nn_budget=200)

    # Open the video source (file path, or device 0 when the path is empty).
    cap = cv2.VideoCapture(args.video_path if args.video_path else 0)
    if not cap.isOpened():
        print("Error: Cannot open video source")
        return

    # Video parameters for the optional writer.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # BUGFIX: CAP_PROP_FPS is 0 for many webcams / malformed containers,
    # which would make VideoWriter produce an unplayable file. Fall back
    # to a sane default in that case.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
    out = None
    if args.output:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(args.output, fourcc, fps, (width, height))

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # YOLO detection, restricted to the person class (class 0).
        results = yolo_model(frame, conf=args.conf_thres, iou=args.iou_thres, classes=[0])
        detections = []
        bboxes = results[0].boxes.xyxy.cpu().numpy()  # [x1, y1, x2, y2] per box
        scores = results[0].boxes.conf.cpu().numpy()  # confidence per box

        person_crops = []
        valid_bboxes = []
        valid_scores = []

        # 1. Collect and preprocess every non-empty person crop.
        for i, bbox in enumerate(bboxes):
            x1, y1, x2, y2 = map(int, bbox)
            person_img = frame[y1:y2, x1:x2]
            if person_img.size > 0:
                person_tensor = preprocess_reid_image(person_img, reid_cfg)
                person_crops.append(person_tensor)
                valid_bboxes.append(bbox)
                valid_scores.append(scores[i])

        # 2. Extract ReID features for all crops in a single batched pass.
        if person_crops:
            batch_tensor = torch.cat(person_crops, dim=0).to(device)

            # Camera / view IDs for the SIE embedding; all zeros because a
            # single camera stream is processed here.
            num_persons = len(person_crops)
            cam_ids = torch.zeros(num_persons, dtype=torch.long).to(device)
            view_ids = torch.zeros(num_persons, dtype=torch.long).to(device)

            with torch.no_grad():
                features = reid_model(batch_tensor, cam_label=cam_ids, view_label=view_ids).cpu().numpy()

            # 3. Re-pair features with their boxes. DeepSORT expects
            # [x, y, width, height] boxes.
            for i in range(len(valid_bboxes)):
                x1, y1, x2, y2 = map(int, valid_bboxes[i])
                detections.append(([x1, y1, x2 - x1, y2 - y1], valid_scores[i], features[i]))

        # Update the tracker with this frame's detections.
        tracks = tracker.update_tracks(detections, frame=frame)

        # Draw confirmed tracks only.
        for track in tracks:
            if not track.is_confirmed():
                continue
            track_id = track.track_id
            bbox = track.to_tlbr()  # [x1, y1, x2, y2]
            x1, y1, x2, y2 = map(int, bbox)
            cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
            cv2.putText(frame, f"ID: {track_id}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 0, 0), 2)

        # Display and/or persist the annotated frame.
        if args.show:
            cv2.imshow("Tracking", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        if out:
            out.write(frame)

    # Release all resources.
    cap.release()
    if out:
        out.release()
    cv2.destroyAllWindows()

def _parse_args():
    """Define the command-line interface and parse sys.argv."""
    parser = argparse.ArgumentParser(description="Video Stream Person Tracking with YOLOv11 and ReID")
    parser.add_argument('--yolo-weights', type=str, default=r"C:\Users\xuboyang\Desktop\train5\weights\best.pt", help='Path to YOLOv11 weights (.pt)')
    parser.add_argument('--reid-weights', type=str, default=r"C:\Users\xuboyang\Desktop\TransReID-main\logs\trained\vit_transreid_market.pth", help='Path to ReID weights (.pth)')
    parser.add_argument('--config-file', type=str, default='configs/Market/vit_transreid.yml', help='Path to TransReID config file')
    parser.add_argument('--video-path', type=str, default=r"C:\Users\xuboyang\Desktop\9月25日.mp4", help='Path to input video file (empty for webcam)')
    parser.add_argument('--output', type=str, default='videos/output_video8.mp4', help='Path to output video file')
    parser.add_argument('--conf-thres', type=float, default=0.4, help='YOLOv11 confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.4, help='YOLOv11 NMS IoU threshold')
    parser.add_argument('--show', action='store_true', help='Show video stream')
    return parser.parse_args()


if __name__ == '__main__':
    main(_parse_args())