import sys
import os
from pathlib import Path

# Absolute path of this file.
current_file = Path(__file__).resolve()
# Project root: two levels up from the file (assumes this script lives in scripts/).
project_root = current_file.parent.parent
# Prepend the project root to sys.path so the project-local modules
# (Veri776Dataset, VehicleTransformer) imported below can be resolved.
sys.path.insert(0, str(project_root))


import torch
import time
from PIL import Image
import argparse
from tqdm import tqdm
from torch.utils.data import Dataset
from torchvision import transforms
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import vit_b_16
from einops import rearrange, repeat
from collections import defaultdict
from torch.utils.data import DataLoader  # 新增导入
from torch.optim import AdamW
from pathlib import Path
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.metrics import average_precision_score  # 新增导入
import numpy as np
from torch.optim.lr_scheduler import CosineAnnealingLR
from src.datasets.veri776_dataset import Veri776Dataset
from src.models.vehicle_transformer import VehicleTransformer
import warnings
from torch.optim.lr_scheduler import LambdaLR


def evaluate(model, gallery_loader, query_loader):
    """Evaluate a vehicle re-ID model with cross-camera mAP and CMC.

    Args:
        model: network whose forward pass returns a dict holding the
            retrieval embedding under key "bn_feature".
        gallery_loader: iterable of batches, each a dict with "image",
            "vehicle_id" and "camera_id" entries.
        query_loader: same batch layout as gallery_loader.

    Returns:
        Tuple (map_score, cmc): mAP as a percentage, and a length-50 float
        tensor of cumulative match rates (percent) for ranks 1..50.

    Raises:
        ValueError: if the gallery is empty, or no query has a
            cross-camera positive in the gallery.
    """
    model.eval()
    device = next(model.parameters()).device

    def extract_features(loader):
        # Run the model over a loader and collect L2-normalized embeddings
        # plus vehicle/camera IDs (the ID tensors never leave the CPU).
        features, ids, cam_ids = [], [], []
        with torch.no_grad():
            for batch in tqdm(loader, desc="特征提取"):
                outputs = model(batch["image"].to(device))
                features.append(F.normalize(outputs["bn_feature"], dim=1))
                ids.append(batch["vehicle_id"])
                cam_ids.append(batch["camera_id"])
        if not features:
            # Empty loader: the real feature width is unknowable here, so use
            # a placeholder. (The original ternary could never read
            # outputs["bn_feature"] on this path — `features` is empty — and
            # would have raised NameError if it had tried.)
            feat_dim = 128
            return (
                torch.empty(0, feat_dim),
                torch.empty(0, dtype=torch.long),
                torch.empty(0, dtype=torch.long),
            )
        return torch.cat(features), torch.cat(ids), torch.cat(cam_ids)

    gallery_feats, gallery_ids, gallery_cams = extract_features(gallery_loader)
    if gallery_feats.numel() == 0:
        raise ValueError("图库数据为空，无交集车辆ID")

    query_feats, query_ids, query_cams = extract_features(query_loader)

    # Features are unit-norm, so the dot product equals cosine similarity.
    sim_matrix = torch.mm(query_feats, gallery_feats.T)

    print(f"Query特征范数: {torch.norm(query_feats, dim=1).mean():.4f}")  # expected ~1.0
    print(f"Gallery特征范数: {torch.norm(gallery_feats, dim=1).mean():.4f}")

    # Do all metric computation on the CPU with one copy of each tensor.
    sim_matrix = sim_matrix.cpu()
    gallery_ids = gallery_ids.cpu()
    query_ids = query_ids.cpu()
    gallery_cams = gallery_cams.cpu()
    query_cams = query_cams.cpu()

    # --- mAP: mean average precision over queries that have at least one
    # cross-camera positive in the gallery.
    aps = []
    for i in range(len(query_ids)):
        # A positive is the same vehicle ID seen by a *different* camera;
        # same-camera matches are excluded (standard cross-camera protocol).
        y_true = ((gallery_ids == query_ids[i]) & (gallery_cams != query_cams[i])).numpy()
        y_score = sim_matrix[i].numpy()
        if y_true.sum() == 0:
            continue  # no valid positives — skip this query
        aps.append(average_precision_score(y_true, y_score))

    if len(aps) == 0:
        raise ValueError("所有查询样本均无跨摄像头正样本，请检查数据集划分！")
    map_score = np.mean(aps)

    # --- CMC: cumulative match characteristic for ranks 1..max_rank.
    max_rank = 50
    cmc = torch.zeros(max_rank, dtype=torch.float)
    valid_queries = 0  # queries that have at least one cross-camera gallery sample

    # Reuse the CPU similarity matrix instead of a second matmul on the
    # feature device (the original recomputed cos_dist on the GPU and then
    # indexed it with CPU masks — a device mismatch under CUDA).
    # Cosine distance = 1 - cosine similarity for unit-norm features.
    cos_dist = 1 - sim_matrix

    for i in range(len(query_ids)):
        q_id = int(query_ids[i])
        cross_cam_mask = (gallery_cams != query_cams[i])
        valid_dist = cos_dist[i][cross_cam_mask]
        valid_gallery_ids = gallery_ids[cross_cam_mask]

        if valid_dist.numel() == 0:
            continue  # no cross-camera gallery samples for this query

        valid_queries += 1
        # topk raises when k exceeds the number of candidates, so clamp it
        # (the original crashed whenever a query had < max_rank cross-camera
        # gallery samples).
        k = min(max_rank, valid_dist.numel())
        _, indices = valid_dist.topk(k, dim=0, largest=False)  # ascending distance
        ranked_ids = valid_gallery_ids[indices].tolist()

        # Rank of the first correct match within the top-k, if any.
        try:
            first_hit = ranked_ids.index(q_id)
        except ValueError:
            continue
        # A hit at rank r counts as a hit for every rank >= r.
        cmc[first_hit:] += 1

    # Average over valid queries only.
    if valid_queries > 0:
        cmc = (cmc / valid_queries) * 100
    else:
        cmc = torch.zeros(max_rank)  # no query had cross-camera samples

    return map_score * 100, cmc


if __name__ == "__main__":
    # Locate the checkpoint directory relative to this script.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    # NOTE(review): this goes TWO directories up (e.g. e:\codes), while the
    # module-level project_root goes only one level above scripts/
    # (e.g. e:\codes\project). Confirm which directory actually holds
    # checkpoints/ — behavior kept exactly as originally written.
    checkpoints_root = os.path.dirname(os.path.dirname(current_dir))

    # Evaluation configuration.
    config = {
        "model_path": os.path.join(checkpoints_root, "checkpoints", "best_model.pth"),
        "batch_size": 16,
        "num_workers": 4,
        "input_size": (224, 224),
        "device": "cuda:0" if torch.cuda.is_available() else "cpu"
    }

    # Standard ImageNet preprocessing (resize → center crop → normalize).
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # Build gallery/query datasets and loaders (no shuffling for evaluation).
    try:
        gallery_set = Veri776Dataset(mode="test", transform=transform)
        query_set = Veri776Dataset(mode="query", transform=transform)

        gallery_loader = DataLoader(
            gallery_set,
            batch_size=config["batch_size"],
            shuffle=False,
            num_workers=config["num_workers"],
            pin_memory=True
        )

        query_loader = DataLoader(
            query_set,
            batch_size=config["batch_size"],
            shuffle=False,
            num_workers=config["num_workers"],
            pin_memory=True
        )

    except Exception as e:
        # Chain the original exception so the root cause stays in the traceback.
        raise RuntimeError(f"数据加载失败: {str(e)}") from e

    # Sanity-check the checkpoint file before attempting to deserialize it.
    model_path = config["model_path"]
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"模型文件不存在: {model_path}")

    file_size = os.path.getsize(model_path)
    print(f"模型文件大小: {file_size/1024/1024:.2f} MB")
    if file_size < 1024*1024:  # anything under 1 MB is treated as a broken download
        raise ValueError("模型文件异常，可能下载不完整")

    try:
        print(f"\n=== 模型加载调试 ===")
        print(f"加载路径: {os.path.abspath(model_path)}")
        print(f"文件大小: {os.path.getsize(model_path)/1024/1024:.2f} MB")

        # SECURITY: weights_only=False unpickles arbitrary objects from the
        # checkpoint — only load files from a trusted source. (The original
        # comment labelled this "safe mode"; it is the opposite.)
        checkpoint = torch.load(
            model_path,
            map_location=config["device"],
            weights_only=False
        )

        # Instantiate the network with the architecture used at training time.
        model = VehicleTransformer(
            num_classes=776,
            img_size=(224, 224),
            patch_sizes=[16, 8],
            local_parts=7,
            embed_dim=128,
            depth=4,
            num_heads=4,
            mlp_ratio=4
        ).to(config["device"])

        # Lenient load so partially-mismatched checkpoints still restore.
        # The original passed strict=True, which contradicted its own
        # "lenient load" comment and made the missing/unexpected report
        # below always empty (strict=True raises on any mismatch).
        missing, unexpected = model.load_state_dict(checkpoint['state_dict'], strict=False)

        print(f"缺失参数: {missing}")
        print(f"意外参数: {unexpected}")

        model.eval()
        print(f"成功加载模型: {config['model_path']}")

        # Run retrieval evaluation.
        print("\n=== 开始评估 ===")
        map_score, cmc = evaluate(model, gallery_loader, query_loader)
        print(f"\n评估结果 mAP: {map_score:.2f}%")
        print("CMC指标:")
        print("Rank-1 : {:.2f}%".format(cmc[0]))
        print("Rank-2 : {:.2f}%".format(cmc[1]))
        print("Rank-5 : {:.2f}%".format(cmc[4]))
        print("Rank-10: {:.2f}%".format(cmc[9]))

    except Exception as model_load_error:
        print(f"\n!!! 加载失败分析 !!!")
        print(f"错误类型: {type(model_load_error).__name__}")
        print(f"错误详情: {str(model_load_error)}")

        # Dump the first bytes of the file to help diagnose corruption
        # (read only 100 bytes — the original slurped the whole checkpoint).
        try:
            with open(model_path, "rb") as f:
                header = f.read(100)
            print(f"文件头信息: {header[:100]}")
        except Exception as file_error:
            print(f"文件读取失败: {str(file_error)}")

        raise RuntimeError("模型加载失败，请检查文件完整性") from model_load_error