import os
import cv2
import torch
import numpy as np
import glob
from torchvision import transforms
from yacs.config import CfgNode
from ultralytics import YOLO

# 确保 TransReID 的模型和配置代码在 Python 路径中
# 如果你的文件结构如上所述，这些导入应该可以正常工作
from config import cfg
from model import make_model

# 禁用 TensorFlow oneDNN 警告
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"


# --- 模型加载和预处理函数 (与您之前的代码类似，稍作调整) ---

def load_reid_model(cfg_path, model_path):
    """Load the TransReID model.

    Merges the user YAML into the global `cfg`, builds the network, and
    restores its weights from the checkpoint.

    Args:
        cfg_path: path to the YAML config file.
        model_path: path to the trained checkpoint (.pth).

    Returns:
        Tuple of (model in eval mode on the chosen device, torch.device,
        the frozen global cfg).

    Raises:
        Re-raises any exception after printing a diagnostic message.
    """
    try:
        print("正在加载 ReID 配置...")
        with open(cfg_path, 'r', encoding='utf-8') as cfg_file:
            cfg.merge_from_other_cfg(CfgNode.load_cfg(cfg_file))
        cfg.freeze()
        print("✓ ReID 配置加载成功!")

        use_cuda = torch.cuda.is_available()
        device = torch.device('cuda' if use_cuda else 'cpu')
        print(f"使用设备: {device}")

        # Build the network with the dataset's default head parameters.
        model = make_model(cfg, num_class=751, camera_num=6, view_num=1)

        print(f"正在从 '{model_path}' 加载 ReID 权重...")
        checkpoint = torch.load(model_path, map_location=device)
        # Some checkpoints wrap the state dict under a 'model' key.
        state_dict = checkpoint['model'] if 'model' in checkpoint else checkpoint

        # Non-strict loading tolerates harmless key mismatches.
        model.load_state_dict(state_dict, strict=False)
        print("✓ ReID 权重加载成功!")

        model.to(device)
        model.eval()
        return model, device, cfg
    except Exception as e:
        print(f"加载 ReID 模型时出错: {e}")
        raise


def preprocess_image(image, cfg):
    """Convert a BGR (OpenCV) image into a normalized 4-D tensor for ReID.

    Args:
        image: HxWx3 BGR ndarray as produced by cv2.
        cfg: config providing INPUT.SIZE_TEST / PIXEL_MEAN / PIXEL_STD.

    Returns:
        Float tensor of shape (1, C, H, W), resized and normalized.
    """
    # OpenCV delivers BGR; the ReID model expects RGB input.
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    pipeline = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize(tuple(cfg.INPUT.SIZE_TEST)),
        transforms.ToTensor(),
        transforms.Normalize(mean=cfg.INPUT.PIXEL_MEAN,
                             std=cfg.INPUT.PIXEL_STD),
    ])
    return pipeline(rgb).unsqueeze(0)


@torch.no_grad()
def extract_features(model, device, image_tensor):
    """Extract L2-normalized ReID features for a batch of images.

    Args:
        model: TransReID model (in eval mode) accepting cam/view labels.
        device: torch.device the model lives on.
        image_tensor: float tensor of shape (N, C, H, W).

    Returns:
        CPU tensor of shape (N, feat_dim) with unit-norm rows.
    """
    # BUGFIX: the decorator @torch.no_grad() was applied twice (with a
    # leftover editing comment between the copies); one application suffices.
    image_tensor = image_tensor.to(device)

    # TransReID's SIE module requires cam_label and view_label even at
    # inference time; supply dummy zeros matching the batch size.
    batch_size = image_tensor.shape[0]
    cam_ids = torch.zeros(batch_size, dtype=torch.long, device=device)
    view_ids = torch.zeros(batch_size, dtype=torch.long, device=device)

    features = model(image_tensor, cam_label=cam_ids, view_label=view_ids)

    # L2-normalize so that dot products equal cosine similarity.
    features = torch.nn.functional.normalize(features, p=2, dim=1)
    return features.cpu()


# --- 核心功能函数 ---

def create_target_gallery(reid_model, device, reid_cfg, target_dir):
    """Build a gallery of averaged ReID features for the target persons.

    Images are grouped by person via the filename: the trailing "_<index>"
    suffix is stripped from the stem (e.g. "elon_musk_01.jpg" ->
    "elon_musk"); a filename without an underscore maps to its bare stem.

    Args:
        reid_model: loaded TransReID model.
        device: torch.device used for feature extraction.
        reid_cfg: ReID config (input size / normalization constants).
        target_dir: directory containing *.jpg / *.png target images.

    Returns:
        dict mapping person name -> (1, feat_dim) L2-normalized mean
        feature tensor; empty dict when no images are found.
    """
    print(f"\n--- 正在创建聚合的目标人物特征库 ---")

    # Temporary store: person name -> list of per-image feature tensors.
    person_features = {}

    image_paths = glob.glob(os.path.join(target_dir, '*.jpg')) + \
                  glob.glob(os.path.join(target_dir, '*.png'))

    if not image_paths:
        print(f"警告: 在 '{target_dir}' 目录中没有找到任何目标图片。")
        return {}

    # 1. Extract a feature per image and group the features by person.
    for path in image_paths:
        stem = os.path.splitext(os.path.basename(path))[0]
        # BUGFIX: the old `basename.split('_')[0]` could never raise
        # IndexError (the fallback branch was dead code), kept the file
        # extension for underscore-free names, and truncated
        # "elon_musk_01" to "elon" instead of the documented "elon_musk".
        # Strip only the trailing "_<index>" segment from the stem.
        person_name = stem.rsplit('_', 1)[0] if '_' in stem else stem

        print(f"处理目标: {os.path.basename(path)} (属于: {person_name})")
        image = cv2.imread(path)
        if image is None:
            # Unreadable / corrupt image: skip it rather than crash.
            continue

        image_tensor = preprocess_image(image, reid_cfg)
        features = extract_features(reid_model, device, image_tensor)

        person_features.setdefault(person_name, []).append(features)

    # 2. Average each person's features and re-normalize to build the gallery.
    final_gallery = {}
    for person_name, features_list in person_features.items():
        if features_list:
            all_features = torch.cat(features_list, dim=0)
            mean_feature = torch.mean(all_features, dim=0, keepdim=True)
            # Re-normalization is essential: the mean of unit vectors is not
            # unit length, and the matcher assumes unit-norm gallery rows.
            mean_feature = torch.nn.functional.normalize(mean_feature, p=2, dim=1)
            final_gallery[person_name] = mean_feature
            print(f"  ✓ 为 '{person_name}' 创建了聚合特征 (来自 {len(features_list)} 张图片)。")

    print(f"--- 特征库创建完毕，共 {len(final_gallery)} 个独立目标 ---")
    return final_gallery


def main():
    """Run YOLO person detection + TransReID matching over a video file."""
    # --- Configuration ---
    YOLO_WEIGHTS = r"C:\Users\xuboyang\Desktop\train5\weights\best.pt"
    REID_WEIGHTS = r"C:\Users\xuboyang\Desktop\TransReID-main\logs\trained\vit_transreid_market.pth"
    REID_CONFIG = 'configs/Market/vit_transreid.yml'
    TARGET_DIR = 'targets/'  # folder holding the target-person images
    VIDEO_PATH = r"C:\Users\xuboyang\Desktop\9月25日.mp4"
    OUTPUT_PATH = 'videos/output_person_search_4.mp4'

    CONF_THRES = 0.43  # YOLO detection confidence threshold
    SIMILARITY_THRES = 0.75  # ReID cosine-similarity threshold (key tuning knob)
    SHOW_VIDEO = True  # display frames while processing

    # --- 1. Load models ---
    print("--- 正在加载模型 ---")
    yolo_model = YOLO(YOLO_WEIGHTS)
    reid_model, device, reid_cfg = load_reid_model(REID_CONFIG, REID_WEIGHTS)
    print("--- 所有模型加载完毕 ---\n")

    # --- 2. Build the target-person feature gallery ---
    target_gallery = create_target_gallery(reid_model, device, reid_cfg, TARGET_DIR)
    if not target_gallery:
        print("错误: 目标特征库为空，程序退出。请在 'targets' 文件夹中添加图片。")
        return

    # Stack the gallery features into one tensor for efficient batched matching.
    gallery_names = list(target_gallery.keys())
    gallery_features = torch.cat(list(target_gallery.values()), dim=0).to(device)

    # --- 3. Video processing ---
    print("\n--- 开始处理视频 ---")
    cap = cv2.VideoCapture(VIDEO_PATH)
    if not cap.isOpened():
        print(f"错误: 无法打开视频 '{VIDEO_PATH}'")
        return

    # Output writer setup.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    # ROBUSTNESS: some containers/streams report 0 FPS, which would yield
    # an unplayable output file; fall back to a sane default.
    if fps <= 0:
        fps = 25
    # ROBUSTNESS: cv2.VideoWriter fails silently when the output directory
    # does not exist; create it up front.
    out_dir = os.path.dirname(OUTPUT_PATH)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(OUTPUT_PATH, fourcc, fps, (width, height))

    frame_count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        frame_count += 1
        print(f"处理第 {frame_count} 帧...")

        # --- a. YOLO detection (class 0 = person only) ---
        results = yolo_model(frame, conf=CONF_THRES, classes=[0], verbose=False)

        person_crops = []
        valid_bboxes = []

        for bbox in results[0].boxes.xyxy.cpu().numpy():
            x1, y1, x2, y2 = map(int, bbox)
            person_img = frame[y1:y2, x1:x2]
            if person_img.size > 0:  # skip degenerate / empty crops
                person_crops.append(preprocess_image(person_img, reid_cfg))
                valid_bboxes.append((x1, y1, x2, y2))

        # --- b. Batched feature extraction ---
        if person_crops:
            batch_tensor = torch.cat(person_crops, dim=0)
            current_features = extract_features(reid_model, device, batch_tensor).to(device)

            # --- c. Feature matching ---
            # One matmul computes cosine similarity between every detection
            # and every gallery target: shape [N_detected, N_gallery].
            # (Both sides are L2-normalized, so dot product == cosine.)
            similarity_matrix = torch.matmul(current_features, gallery_features.T)

            # Best-matching gallery target for each detection.
            best_matches = torch.max(similarity_matrix, dim=1)
            best_match_scores = best_matches.values.cpu().numpy()
            best_match_indices = best_matches.indices.cpu().numpy()

            # --- d. Draw the results ---
            for i, bbox in enumerate(valid_bboxes):
                score = best_match_scores[i]

                # Above-threshold similarity => this detection is a target.
                if score > SIMILARITY_THRES:
                    match_name = gallery_names[best_match_indices[i]]
                    label = f"Target: {os.path.splitext(match_name)[0]} ({score:.2f})"

                    # Highlighted bounding box and label.
                    x1, y1, x2, y2 = bbox
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 3)  # thick red box
                    cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

        # Append the annotated frame to the output video.
        out.write(frame)

        # Optional live preview; pressing 'q' aborts processing early.
        if SHOW_VIDEO:
            cv2.imshow("Target Person Search", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    # --- 4. Release resources ---
    cap.release()
    out.release()
    cv2.destroyAllWindows()
    print(f"--- 处理完成，结果已保存至 '{OUTPUT_PATH}' ---")


if __name__ == '__main__':
    main()