import os
import cv2
import torch
import numpy as np
from torchvision import transforms
from config import cfg
from model import make_model
from ultralytics import YOLO
import ultralytics
from deep_sort_realtime.deepsort_tracker import DeepSort
import argparse
from yacs.config import CfgNode  # ensure CfgNode is imported
# Silence TensorFlow oneDNN notices and reduce TF log verbosity; these env
# vars must be set before any library that loads TensorFlow is imported.
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"


def load_reid_model(cfg_path: str, model_path: str, num_class: int = 751, camera_num: int = 0, view_num: int = 0):
    """Load the TransReID model with full error handling.

    Args:
        cfg_path: path to the TransReID YML config file (read as UTF-8).
        model_path: path to the ReID weights (.pth) file.
        num_class: number of identity classes (751 = Market-1501 train IDs).
        camera_num: NOTE(review): currently ignored — the model is always
            built with camera_num=6 (see below). Kept for interface stability.
        view_num: viewpoint count forwarded to make_model.

    Returns:
        (model, device) — the model in eval mode, moved to CUDA if available.

    Raises:
        FileNotFoundError: if the config or weights file does not exist.
        Exception: re-raises anything that fails during config merge or
            weight loading, after printing a diagnostic message.

    Side effects: mutates and freezes the global `config.cfg` node, so this
    function is effectively single-use per process (a second call would try
    to merge into a frozen config).
    """
    try:
        if not os.path.exists(cfg_path):
            raise FileNotFoundError(f"Config file not found: {cfg_path}")
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model weights not found: {model_path}")

        # ==================== Manually load and merge the configuration ====================
        print("正在加载配置...")
        # Step 1: load the base config (defaults.py)
        from config import cfg

        # Step 2: read the YML file safely with UTF-8 encoding
        print(f"使用 UTF-8 编码读取配置文件: {cfg_path}")
        with open(cfg_path, 'r', encoding='utf-8') as f:
            # Step 3: load the YML content as a temporary config node
            user_cfg = CfgNode.load_cfg(f)

            # Step 4: merge (overlay) the user config onto the base config.
            # merge_from_file is avoided because it has encoding problems here.
            cfg.merge_from_other_cfg(user_cfg)
            print("✓ 配置加载并合并成功!")

        # The base config should already provide this key; keep a defensive
        # check for RE_ARRANGE just in case the defaults are incomplete.
        if not hasattr(cfg.MODEL, 'RE_ARRANGE'):
            cfg.MODEL.RE_ARRANGE = False
            print("设置默认 RE_ARRANGE: False")

        cfg.freeze()

        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # Debug output: print the model-related config keys that affect the
        # network architecture (and thus weight compatibility).
        print("最终模型配置:")
        for key in ['NAME', 'SIE_CAMERA', 'SIE_VIEW', 'JPM']:
            if hasattr(cfg.MODEL, key):
                print(f"  {key}: {getattr(cfg.MODEL, key)}")
        # Market-1501 has 6 cameras. This number must be supplied so that
        # make_model builds the SIE module with the shape the pretrained
        # weights expect. For Market-1501: camera_num=6, view_num=1
        # (view_num does not matter for this application).
        print("为 Market-1501 权重构建模型，使用摄像头数量: 6")
        model = make_model(cfg, num_class=num_class, camera_num=6, view_num=view_num)

        # Load the checkpoint; unwrap a {'model': state_dict} container if present.
        weights = torch.load(model_path, map_location=device)
        if 'model' in weights:
            weights = weights['model']

        # ==================== No dimension surgery needed ====================
        # With the config correctly loaded, the model structure should match
        # the checkpoint; only report a pos_embed mismatch if one remains.
        if 'base.pos_embed' in weights:
            print(
                f"检测到 pos_embed 维度: 权重 {weights['base.pos_embed'].shape} -> 模型 {model.state_dict()['base.pos_embed'].shape}")
            # The two shapes should be identical at this point.
            if weights['base.pos_embed'].shape != model.state_dict()['base.pos_embed'].shape:
                print("警告: pos_embed 维度仍然不匹配! 请检查 YML 配置文件中的 SIE_CAMERA/SIE_VIEW 设置是否正确。")
        # =============================================================

        # Try strict loading first (model and weights should now match exactly);
        # fall back to non-strict loading and report the key differences.
        try:
            model.load_state_dict(weights, strict=True)
            print("✓ 权重以严格模式加载成功!")
        except RuntimeError as e:
            print(f"严格模式加载失败: {e}")
            print("尝试非严格模式加载...")
            missing_keys, unexpected_keys = model.load_state_dict(weights, strict=False)
            if missing_keys:
                print(f"缺失的键: {missing_keys}")
            if unexpected_keys:
                print(f"意外的键: {unexpected_keys}")
            print("✓ 非严格模式加载成功")

        model.to(device)
        model.eval()
        return model, device

    except Exception as e:
        print(f"加载 ReID 模型时出错: {e}")
        raise


def preprocess_reid_image(image, size=(224, 224)):
    """Preprocess an OpenCV (BGR) person crop for the ReID model.

    Args:
        image: HxWx3 uint8 numpy array in BGR channel order, as produced by
            cv2.VideoCapture frame crops.
        size: target (height, width) for the model input.

    Returns:
        A (1, 3, H, W) float tensor normalized with ImageNet statistics.
    """
    # BUG FIX: OpenCV frames are BGR, but ToPILImage and the ImageNet
    # normalization statistics below (and the pretrained backbone) expect RGB.
    # Reverse the channel axis; ascontiguousarray is required because
    # ToPILImage cannot handle the negative stride produced by [::-1].
    rgb = np.ascontiguousarray(image[:, :, ::-1])
    transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize(size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    return transform(rgb).unsqueeze(0)


def main(args):
    """Detect people with YOLO, extract TransReID features, and track them
    with DeepSORT over a video file or webcam stream.

    Args:
        args: argparse namespace with yolo_weights, reid_weights, config_file,
            video_path, output, conf_thres, iou_thres and show attributes.
    """
    # Detector
    yolo_model = YOLO(args.yolo_weights)
    yolo_model.to('cuda' if torch.cuda.is_available() else 'cpu')

    # ReID feature extractor
    reid_model, device = load_reid_model(args.config_file, args.reid_weights)

    # DeepSORT tracker
    tracker = DeepSort(max_age=30, n_init=3, nn_budget=100)

    # Video input (file path or webcam 0)
    cap = cv2.VideoCapture(args.video_path if args.video_path else 0)
    if not cap.isOpened():
        print("Error: Cannot open video source")
        return

    # Video parameters / optional writer
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    out = None
    if args.output:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        # Webcams often report fps == 0; fall back to 30 so the writer
        # does not produce a broken file.
        out = cv2.VideoWriter(args.output, fourcc, fps if fps > 0 else 30, (width, height))

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # YOLO detection, persons only (class 0)
        results = yolo_model(frame, conf=args.conf_thres, iou=args.iou_thres, classes=[0])
        bboxes = results[0].boxes.xyxy.cpu().numpy()   # [x1, y1, x2, y2]
        scores = results[0].boxes.conf.cpu().numpy()   # confidences

        detections = []   # ([left, top, w, h], confidence, class) for DeepSORT
        embeddings = []   # one ReID feature vector per kept detection
        for bbox, score in zip(bboxes, scores):
            x1, y1, x2, y2 = map(int, bbox)
            # Clamp to frame bounds so the crop below is always valid.
            x1, y1 = max(x1, 0), max(y1, 0)
            x2, y2 = min(x2, frame.shape[1]), min(y2, frame.shape[0])
            person_img = frame[y1:y2, x1:x2]
            if person_img.size == 0:
                continue
            person_tensor = preprocess_reid_image(person_img).to(device)
            with torch.no_grad():
                # flatten (1, dim) -> (dim,) as expected for an embedding
                feature = reid_model(person_tensor).cpu().numpy().flatten()
            # BUG FIX: deep_sort_realtime expects raw detections as
            # ([l, t, w, h], confidence, detection_class) — the third element
            # is the CLASS, not the embedding. Passing the feature there made
            # the tracker fall back to its built-in embedder and silently
            # ignore the TransReID features. Supply them via `embeds=` instead.
            detections.append(([x1, y1, x2 - x1, y2 - y1], float(score), 'person'))
            embeddings.append(feature)

        # Update tracker with our own ReID embeddings (bypasses the built-in embedder)
        tracks = tracker.update_tracks(detections, embeds=embeddings, frame=frame)

        # Draw confirmed tracks with their IDs
        for track in tracks:
            if not track.is_confirmed():
                continue
            track_id = track.track_id
            x1, y1, x2, y2 = map(int, track.to_tlbr())
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.putText(frame, f"ID: {track_id}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

        # Show and/or save the annotated frame
        if args.show:
            cv2.imshow("Tracking", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        if out:
            out.write(frame)

    # Release resources
    cap.release()
    if out:
        out.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    # Command-line interface; defaults point at local weight/video paths.
    arg_parser = argparse.ArgumentParser(
        description="Video Stream Person Tracking with YOLOv11 and ReID")
    arg_parser.add_argument('--yolo-weights', type=str,
                            default=r"C:\Users\xuboyang\Desktop\train4\weights\best.pt",
                            help='Path to YOLOv11 weights (.pt)')
    arg_parser.add_argument('--reid-weights', type=str,
                            default=r"C:\Users\xuboyang\Desktop\TransReID-main\logs\trained\vit_transreid_market.pth",
                            help='Path to ReID weights (.pth)')
    arg_parser.add_argument('--config-file', type=str,
                            default='configs/Market/vit_base.yml',
                            help='Path to TransReID config file')
    arg_parser.add_argument('--video-path', type=str,
                            default=r"C:\Users\xuboyang\Desktop\9月25日 (2).mp4",
                            help='Path to input video file (empty for webcam)')
    arg_parser.add_argument('--output', type=str,
                            default='videos/output_video5.mp4',
                            help='Path to output video file')
    arg_parser.add_argument('--conf-thres', type=float, default=0.5,
                            help='YOLOv11 confidence threshold')
    arg_parser.add_argument('--iou-thres', type=float, default=0.5,
                            help='YOLOv11 NMS IoU threshold')
    arg_parser.add_argument('--show', action='store_true',
                            help='Show video stream')

    main(arg_parser.parse_args())