{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "db801b1d",
   "metadata": {},
   "outputs": [],
   "source": [
    "#新增eval.py\n",
    "import sys\n",
    "import os\n",
    "from pathlib import Path\n",
    "\n",
    "# 获取当前文件的绝对路径\n",
    "current_file = Path(__file__).resolve()\n",
    "# 计算项目根目录：上溯两级（假设train.py在scripts/目录下）\n",
    "project_root = current_file.parent.parent\n",
    "# 将项目根目录添加到系统路径\n",
    "sys.path.insert(0, str(project_root))  #作用：确保能正确导入项目内自定义模块（Veri776Dataset、VehicleTransformer）\n",
    "\n",
    "# 打印验证路径\n",
    "#print(f\"\\n=== 当前工作目录: {os.getcwd()}\")\n",
    "#print(f\"=== 项目根目录: {project_root}\")\n",
    "#print(f\"=== 系统路径: {sys.path[:3]}\\n\")\n",
    "\n",
    "\n",
    "import torch\n",
    "import time\n",
    "from PIL import Image\n",
    "import argparse\n",
    "from tqdm import tqdm\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import transforms\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision.models import vit_b_16\n",
    "from einops import rearrange, repeat\n",
    "from collections import defaultdict\n",
    "from torch.utils.data import DataLoader  # 新增导入\n",
    "from torch.optim import AdamW\n",
    "from pathlib import Path\n",
    "import matplotlib.pyplot as plt\n",
    "from collections import Counter\n",
    "from sklearn.metrics import average_precision_score  # 新增导入\n",
    "import numpy as np\n",
    "from torch.optim.lr_scheduler import CosineAnnealingLR\n",
    "from src.datasets.veri776_dataset import Veri776Dataset\n",
    "from src.models.vehicle_transformer import VehicleTransformer\n",
    "import warnings\n",
    "from torch.optim.lr_scheduler import LambdaLR\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def evaluate(model, gallery_loader, query_loader):\n",
    "    \"\"\"Evaluate re-ID retrieval quality.\n",
    "\n",
    "    Args:\n",
    "        model: network whose forward returns a dict containing a\n",
    "            \"bn_feature\" tensor of shape (batch, dim).\n",
    "        gallery_loader / query_loader: DataLoaders yielding dicts with\n",
    "            \"image\", \"vehicle_id\" and \"camera_id\" keys.\n",
    "\n",
    "    Returns:\n",
    "        (mAP in percent, CMC curve in percent as a length-50 float tensor).\n",
    "\n",
    "    A gallery sample only counts as a match when it shares the query's\n",
    "    vehicle ID but was captured by a different camera (cross-camera protocol).\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    device = next(model.parameters()).device\n",
    "\n",
    "    def extract_features(loader):\n",
    "        # Forward every batch once; collect L2-normalized features and labels.\n",
    "        features, ids, cam_ids = [], [], []\n",
    "        with torch.no_grad():\n",
    "            for batch in tqdm(loader, desc=\"特征提取\"):\n",
    "                outputs = model(batch[\"image\"].to(device))\n",
    "                features.append(F.normalize(outputs[\"bn_feature\"], dim=1))\n",
    "                ids.append(batch[\"vehicle_id\"])\n",
    "                cam_ids.append(batch[\"camera_id\"])\n",
    "        return torch.cat(features), torch.cat(ids), torch.cat(cam_ids)\n",
    "\n",
    "    gallery_feats, gallery_ids, gallery_cams = extract_features(gallery_loader)\n",
    "    query_feats, query_ids, query_cams = extract_features(query_loader)\n",
    "\n",
    "    # Features are L2-normalized, so the dot product equals cosine similarity.\n",
    "    sim_matrix = torch.mm(query_feats, gallery_feats.T)\n",
    "\n",
    "    print(f\"Query特征范数: {torch.norm(query_feats, dim=1).mean():.4f}\")  # expected ~1.0\n",
    "    print(f\"Gallery特征范数: {torch.norm(gallery_feats, dim=1).mean():.4f}\")\n",
    "\n",
    "    # ---- mAP: mean of per-query average precision ----\n",
    "    aps = []\n",
    "    for i in range(len(query_ids)):\n",
    "        # Valid positives: same vehicle ID seen from a different camera.\n",
    "        y_true = ((gallery_ids == query_ids[i]) & (gallery_cams != query_cams[i])).cpu().numpy()\n",
    "        y_score = sim_matrix[i].cpu().numpy()\n",
    "        if y_true.sum() == 0:\n",
    "            continue  # no cross-camera positive exists for this query\n",
    "        aps.append(average_precision_score(y_true, y_score))\n",
    "\n",
    "    if len(aps) == 0:\n",
    "        raise ValueError(\"所有查询样本均无跨摄像头正样本，请检查数据集划分！\")\n",
    "    map_score = np.mean(aps)\n",
    "\n",
    "    # ---- CMC: top-k hit rate ----\n",
    "    # Move everything to the CPU so the indexing below never mixes devices.\n",
    "    sim_matrix = sim_matrix.cpu()\n",
    "    gallery_ids = gallery_ids.cpu()\n",
    "    query_ids = query_ids.cpu()\n",
    "    gallery_cams = gallery_cams.cpu()\n",
    "    query_cams = query_cams.cpu()\n",
    "\n",
    "    # Rank the gallery by descending similarity — the same ordering as\n",
    "    # ascending cosine distance, without recomputing the matrix. Clamp k so\n",
    "    # galleries with fewer than 50 entries do not crash topk.\n",
    "    max_rank = min(50, sim_matrix.size(1))\n",
    "    _, indices = sim_matrix.topk(max_rank, dim=1, largest=True)\n",
    "\n",
    "    # NOTE(review): same-camera gallery entries still occupy ranking slots and\n",
    "    # are only excluded as matches; the strict VeRi protocol removes same-ID/\n",
    "    # same-camera junk images before ranking — TODO confirm desired protocol.\n",
    "    query_cams_expanded = query_cams.view(-1, 1).expand(-1, indices.size(1))\n",
    "    gallery_cams_expanded = gallery_cams[indices]\n",
    "\n",
    "    # A hit requires both: matching vehicle ID and a different camera.\n",
    "    correct_id = (gallery_ids[indices] == query_ids.view(-1, 1))\n",
    "    different_cam = (gallery_cams_expanded != query_cams_expanded)\n",
    "    valid_matches = correct_id & different_cam\n",
    "\n",
    "    # cummax turns \"matched at rank <= k\" into a vectorized running any().\n",
    "    hit_at_rank = valid_matches.cummax(dim=1).values.float().mean(dim=0)\n",
    "    cmc = torch.zeros(50, dtype=torch.float)\n",
    "    cmc[:max_rank] = hit_at_rank\n",
    "    if max_rank < 50:\n",
    "        cmc[max_rank:] = hit_at_rank[-1]  # CMC is flat past the last rank\n",
    "    cmc = cmc * 100  # percent\n",
    "\n",
    "    return map_score * 100, cmc\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# Purpose: evaluate the model's retrieval performance on the validation split.\n",
    "# Key metrics:\n",
    "#   mAP (mean average precision): overall retrieval accuracy\n",
    "#   CMC (cumulative matching characteristic): Top-K hit rate (Rank-1/5/10)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "     # Resolve the project root from this file's location\n",
    "    current_dir = os.path.dirname(os.path.abspath(__file__))  # directory of this file → e:\\codes\\project\\scripts\n",
    "    project_root =  os.path.dirname(os.path.dirname(current_dir))  # two levels up → e:\\codes\n",
    "\n",
    "    # Configuration\n",
    "    config = {\n",
    "        \"model_path\": os.path.join(project_root,\"checkpoints\", \"best_model.pth\"),  # → e:\\codes\\checkpoints\\best_model.pth\n",
    "        \"batch_size\": 24,\n",
    "        \"num_workers\": 4,\n",
    "        \"input_size\": (256, 256),\n",
    "        \"device\": \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n",
    "    }\n",
    "    \n",
    "    # ========== Data preprocessing ==========\n",
    "    transform = transforms.Compose([\n",
    "         transforms.Resize(288),\n",
    "    transforms.CenterCrop(256),\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
    "    ])\n",
    "\n",
    " \n",
    "    \n",
    "    # ========== Load data ==========\n",
    "    try:\n",
    "        gallery_set = Veri776Dataset(mode=\"test\",  transform=transform)  # gallery (test) split\n",
    "\n",
    "    \n",
    "        query_set = Veri776Dataset(mode=\"query\", transform=transform)    # query split\n",
    "        \n",
    "        gallery_loader = DataLoader(\n",
    "            gallery_set,\n",
    "            batch_size=config[\"batch_size\"],\n",
    "            shuffle=False,\n",
    "            num_workers=config[\"num_workers\"],\n",
    "            pin_memory=True\n",
    "        )\n",
    "        \n",
    "        query_loader = DataLoader(\n",
    "            query_set,\n",
    "            batch_size=config[\"batch_size\"],\n",
    "            shuffle=False,\n",
    "            num_workers=config[\"num_workers\"],\n",
    "            pin_memory=True\n",
    "        )\n",
    "\n",
    "    except Exception as e:\n",
    "        raise RuntimeError(f\"数据加载失败: {str(e)}\")\n",
    "    \n",
    "\n",
    "    \n",
    "\n",
    "    # ========== Load model ==========\n",
    "\n",
    "    # Validate the checkpoint path before loading\n",
    "\n",
    "    \n",
    "\n",
    "    model_path = config[\"model_path\"]\n",
    "    if not os.path.exists(model_path):\n",
    "        raise FileNotFoundError(f\"模型文件不存在: {model_path}\")\n",
    "    \n",
    "    # Sanity-check the file size (a healthy checkpoint is expected around 300 MB)\n",
    "    file_size = os.path.getsize(model_path)\n",
    "    print(f\"模型文件大小: {file_size/1024/1024:.2f} MB\")\n",
    "    if file_size < 1024*1024:  # files under 1 MB are treated as corrupt/incomplete\n",
    "        raise ValueError(\"模型文件异常，可能下载不完整\")\n",
    "\n",
    "\n",
    "\n",
    "    try:\n",
    "        # Debug information about the checkpoint\n",
    "        print(f\"\\n=== 模型加载调试 ===\")\n",
    "        print(f\"加载路径: {os.path.abspath(model_path)}\")\n",
    "        print(f\"文件大小: {os.path.getsize(model_path)/1024/1024:.2f} MB\")\n",
    "    \n",
    "        # NOTE(review): weights_only=False is NOT a safe mode — torch.load will\n",
    "        # unpickle arbitrary objects; only load trusted checkpoint files.\n",
    "        checkpoint = torch.load(\n",
    "            model_path,\n",
    "            map_location=config[\"device\"],\n",
    "            weights_only=False  # full unpickling — trusted files only\n",
    "    )\n",
    "\n",
    "\n",
    "\n",
    "    # Initialize the model\n",
    "        # hyper-parameters must match the training configuration\n",
    "        model = VehicleTransformer(\n",
    "     num_classes=776,\n",
    "     img_size=(256, 256),\n",
    "     patch_size=16,\n",
    "     local_parts=4,\n",
    "     embed_dim=128,\n",
    "     depth=4,\n",
    "     num_heads=4,\n",
    "     mlp_ratio=4).to(config[\"device\"])\n",
    "        \n",
    "        \n",
    "        # NOTE(review): an earlier comment claimed lenient loading, but\n",
    "        # strict=True makes any key mismatch raise; the lists below are then\n",
    "        # always empty when loading succeeds.\n",
    "        missing, unexpected = model.load_state_dict(checkpoint['state_dict'], strict=True)  # raises on mismatch\n",
    "    \n",
    "        print(f\"缺失参数: {missing}\")\n",
    "        print(f\"意外参数: {unexpected}\")\n",
    "\n",
    "\n",
    "\n",
    "        model.eval()\n",
    "        print(f\"成功加载模型: {config['model_path']}\")\n",
    "\n",
    "        # === Run evaluation ===\n",
    "        print(\"\\n=== 开始评估 ===\")\n",
    "        map_score, cmc = evaluate(model, gallery_loader, query_loader)\n",
    "        print(f\"\\n评估结果 mAP: {map_score:.2f}%\")\n",
    "        print(\"CMC指标:\")\n",
    "        print(\"Rank-1 : {:.2f}%\".format(cmc[0]))\n",
    "        print(\"Rank-5 : {:.2f}%\".format(cmc[4]))\n",
    "        print(\"Rank-10: {:.2f}%\".format(cmc[9]))\n",
    "\n",
    "        \n",
    "        \n",
    "    except Exception as model_load_error:\n",
    "        print(f\"\\n!!! 加载失败分析 !!!\")\n",
    "        print(f\"错误类型: {type(model_load_error).__name__}\")\n",
    "        print(f\"错误详情: {str(model_load_error)}\")\n",
    "    \n",
    "        # Diagnostic: inspect the raw file header bytes\n",
    "        try:\n",
    "            with open(model_path, \"rb\") as f:\n",
    "                data = f.read()\n",
    "            print(f\"文件头信息: {data[:100]}\")  # first 100 bytes\n",
    "        except Exception as file_error:\n",
    "            print(f\"文件读取失败: {str(file_error)}\")\n",
    "    \n",
    "        raise RuntimeError(\"模型加载失败，请检查文件完整性\") from model_load_error\n",
    "\n",
    "\n",
    "\n",
    "# Execution flow:\n",
    "#   Environment setup: configure paths, load dependencies\n",
    "#   Data loading: standardized gallery/query dataset handling\n",
    "#   Model validation: load the pretrained model, extract feature vectors\n",
    "#   Similarity matching: compute the query-vs-gallery similarity matrix\n",
    "#   Evaluation: report mAP and CMC key metrics\n",
    "#   Error handling: checkpoint validation and load-failure diagnostics\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# This script implements the full evaluation pipeline for the vehicle re-ID model, reporting standard benchmark metrics that guide model iteration."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6e778bc7",
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "import os\n",
    "from pathlib import Path\n",
    "\n",
    "# 获取当前文件的绝对路径\n",
    "current_file = Path(__file__).resolve()\n",
    "# 计算项目根目录：上溯两级（假设train.py在scripts/目录下）\n",
    "project_root = current_file.parent.parent\n",
    "# 将项目根目录添加到系统路径\n",
    "sys.path.insert(0, str(project_root))  # 确保能正确导入项目内自定义模块（Veri776Dataset、VehicleTransformer）\n",
    "\n",
    "\n",
    "import torch\n",
    "import time\n",
    "from PIL import Image\n",
    "import argparse\n",
    "from tqdm import tqdm\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import transforms\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision.models import vit_b_16\n",
    "from einops import rearrange, repeat\n",
    "from collections import defaultdict\n",
    "from torch.utils.data import DataLoader  # 新增导入\n",
    "from torch.optim import AdamW\n",
    "from pathlib import Path\n",
    "import matplotlib.pyplot as plt\n",
    "from collections import Counter\n",
    "from sklearn.metrics import average_precision_score  # 新增导入\n",
    "import numpy as np\n",
    "from torch.optim.lr_scheduler import CosineAnnealingLR\n",
    "from src.datasets.veri776_dataset import Veri776Dataset\n",
    "from src.models.vehicle_transformer import VehicleTransformer\n",
    "import warnings\n",
    "from torch.optim.lr_scheduler import LambdaLR\n",
    "\n",
    "\n",
    "def evaluate(model, gallery_loader, query_loader):\n",
    "    \"\"\"Compute cross-camera retrieval metrics for a re-ID model.\n",
    "\n",
    "    Returns (mAP in percent, length-50 CMC curve in percent). A gallery\n",
    "    sample only counts as a hit when it has the query's vehicle ID AND a\n",
    "    different camera ID (standard cross-camera re-ID protocol).\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    device = next(model.parameters()).device\n",
    "\n",
    "    # Feature extraction: one forward pass per batch, collecting the\n",
    "    # L2-normalized \"bn_feature\" vectors used for retrieval.\n",
    "    def extract_features(loader):\n",
    "        features, ids, cam_ids = [], [], []\n",
    "        for batch in tqdm(loader, desc=\"特征提取\"):\n",
    "            with torch.no_grad():\n",
    "                outputs = model(batch[\"image\"].to(device))\n",
    "                features.append(F.normalize(outputs[\"bn_feature\"], dim=1))\n",
    "                ids.append(batch[\"vehicle_id\"])\n",
    "                cam_ids.append(batch[\"camera_id\"])\n",
    "        if not features:\n",
    "            # Loader yielded no batches: the true feature width is unknowable\n",
    "            # without a forward pass, so use a placeholder dimension. (The old\n",
    "            # `... if features else 128` ternary always chose 128 here anyway.)\n",
    "            feat_dim = 128\n",
    "            return (\n",
    "                torch.empty(0, feat_dim),\n",
    "                torch.empty(0, dtype=torch.long),\n",
    "                torch.empty(0, dtype=torch.long),\n",
    "            )\n",
    "        return torch.cat(features), torch.cat(ids), torch.cat(cam_ids)\n",
    "\n",
    "    gallery_feats, gallery_ids, gallery_cams = extract_features(gallery_loader)\n",
    "\n",
    "    # Abort early when the gallery is empty — nothing can be retrieved.\n",
    "    if gallery_feats.numel() == 0:\n",
    "        raise ValueError(\"图库数据为空，无交集车辆ID\")\n",
    "\n",
    "    query_feats, query_ids, query_cams = extract_features(query_loader)\n",
    "\n",
    "    # Dot products of L2-normalized vectors == cosine similarity.\n",
    "    sim_matrix = torch.mm(query_feats, gallery_feats.T)\n",
    "\n",
    "    print(f\"Query特征范数: {torch.norm(query_feats, dim=1).mean():.4f}\")  # should be ~1.0\n",
    "    print(f\"Gallery特征范数: {torch.norm(gallery_feats, dim=1).mean():.4f}\")\n",
    "\n",
    "    # mAP over all queries that have at least one cross-camera positive.\n",
    "    aps = []\n",
    "    for i in range(len(query_ids)):\n",
    "        # Positives: same vehicle ID captured by a different camera.\n",
    "        y_true = ((gallery_ids == query_ids[i]) & (gallery_cams != query_cams[i])).cpu().numpy()\n",
    "        y_score = sim_matrix[i].cpu().numpy()\n",
    "        if y_true.sum() == 0:\n",
    "            continue  # no valid positive for this query\n",
    "        aps.append(average_precision_score(y_true, y_score))\n",
    "\n",
    "    if len(aps) == 0:\n",
    "        raise ValueError(\"所有查询样本均无跨摄像头正样本，请检查数据集划分！\")\n",
    "    map_score = np.mean(aps)\n",
    "\n",
    "    # CMC: ensure every tensor used for indexing lives on the CPU.\n",
    "    sim_matrix = sim_matrix.cpu()\n",
    "    gallery_ids = gallery_ids.cpu()\n",
    "    query_ids = query_ids.cpu()\n",
    "    gallery_cams = gallery_cams.cpu()\n",
    "    query_cams = query_cams.cpu()\n",
    "\n",
    "    # Rank by descending similarity (identical ordering to ascending cosine\n",
    "    # distance); clamp k so galleries with fewer than 50 entries don't crash.\n",
    "    max_rank = min(50, sim_matrix.size(1))\n",
    "    _, indices = sim_matrix.topk(max_rank, dim=1, largest=True)\n",
    "\n",
    "    query_cams_expanded = query_cams.view(-1, 1).expand(-1, indices.size(1))\n",
    "    gallery_cams_expanded = gallery_cams[indices]\n",
    "\n",
    "    # Hit condition: same vehicle ID AND different camera.\n",
    "    correct_id = (gallery_ids[indices] == query_ids.view(-1, 1))\n",
    "    different_cam = (gallery_cams_expanded != query_cams_expanded)\n",
    "    valid_matches = correct_id & different_cam\n",
    "\n",
    "    # Running \"matched by rank k\" via cummax instead of a Python loop.\n",
    "    hit_at_rank = valid_matches.cummax(dim=1).values.float().mean(dim=0)\n",
    "    cmc = torch.zeros(50, dtype=torch.float)\n",
    "    cmc[:max_rank] = hit_at_rank\n",
    "    if max_rank < 50:\n",
    "        cmc[max_rank:] = hit_at_rank[-1]  # curve is flat after the last rank\n",
    "    cmc = cmc * 100  # percent\n",
    "\n",
    "    return map_score * 100, cmc\n",
    "\n",
    "# Known limitation: when computing CMC, ALL gallery samples are ranked\n",
    "# (including same-camera ones); same-camera entries are only filtered when\n",
    "# judging matches. If the best same-ID match at Rank-1 comes from the same\n",
    "# camera it is excluded by `different_cam`, yet it still occupies that rank,\n",
    "# pushing genuine cross-camera positives further down the list.\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Resolve the project root from this file's location\n",
    "    current_dir = os.path.dirname(os.path.abspath(__file__))  # directory of this file → e:\\codes\\project\\scripts\n",
    "    project_root = os.path.dirname(os.path.dirname(current_dir))  # two levels up → e:\\codes\n",
    "\n",
    "    # Configuration\n",
    "    config = {\n",
    "        \"model_path\": os.path.join(project_root, \"checkpoints\", \"best_model.pth\"),\n",
    "        \"batch_size\": 16,\n",
    "        \"num_workers\": 4,\n",
    "        \"input_size\": (224, 224),\n",
    "        \"device\": \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n",
    "    }\n",
    "    \n",
    "    # Data preprocessing\n",
    "    transform = transforms.Compose([\n",
    "        transforms.Resize(256),\n",
    "        transforms.CenterCrop(224),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
    "    ])\n",
    "\n",
    "    # Load data\n",
    "    try:\n",
    "        gallery_set = Veri776Dataset(mode=\"test\", transform=transform)  # gallery (test) split\n",
    "        query_set = Veri776Dataset(mode=\"query\", transform=transform)    # query split\n",
    "        \n",
    "        gallery_loader = DataLoader(\n",
    "            gallery_set,\n",
    "            batch_size=config[\"batch_size\"],\n",
    "            shuffle=False,\n",
    "            num_workers=config[\"num_workers\"],\n",
    "            pin_memory=True\n",
    "        )\n",
    "        \n",
    "        query_loader = DataLoader(\n",
    "            query_set,\n",
    "            batch_size=config[\"batch_size\"],\n",
    "            shuffle=False,\n",
    "            num_workers=config[\"num_workers\"],\n",
    "            pin_memory=True\n",
    "        )\n",
    "\n",
    "    except Exception as e:\n",
    "        raise RuntimeError(f\"数据加载失败: {str(e)}\")\n",
    "    \n",
    "    # Load model — validate the checkpoint path first\n",
    "    model_path = config[\"model_path\"]\n",
    "    if not os.path.exists(model_path):\n",
    "        raise FileNotFoundError(f\"模型文件不存在: {model_path}\")\n",
    "    \n",
    "    # Sanity-check file size\n",
    "    file_size = os.path.getsize(model_path)\n",
    "    print(f\"模型文件大小: {file_size/1024/1024:.2f} MB\")\n",
    "    if file_size < 1024*1024:  # files under 1 MB are treated as corrupt/incomplete\n",
    "        raise ValueError(\"模型文件异常，可能下载不完整\")\n",
    "\n",
    "    try:\n",
    "        print(f\"\\n=== 模型加载调试 ===\")\n",
    "        print(f\"加载路径: {os.path.abspath(model_path)}\")\n",
    "        print(f\"文件大小: {os.path.getsize(model_path)/1024/1024:.2f} MB\")\n",
    "    \n",
    "        # NOTE(review): weights_only=False is NOT a safe mode — torch.load will\n",
    "        # unpickle arbitrary objects; only load trusted checkpoint files.\n",
    "        checkpoint = torch.load(\n",
    "            model_path,\n",
    "            map_location=config[\"device\"],\n",
    "            weights_only=False  # full unpickling — trusted files only\n",
    "        )\n",
    "\n",
    "        # Initialize the model (hyper-parameters must match training config)\n",
    "        model = VehicleTransformer(\n",
    "            num_classes=776,\n",
    "            img_size=(224, 224),\n",
    "            patch_sizes=[16, 8], \n",
    "            local_parts=7,\n",
    "            embed_dim=128,\n",
    "            depth=4,\n",
    "            num_heads=4,\n",
    "            mlp_ratio=4\n",
    "        ).to(config[\"device\"])\n",
    "        \n",
    "        # NOTE(review): an earlier comment claimed lenient loading, but\n",
    "        # strict=True makes any key mismatch raise; the lists below are then\n",
    "        # always empty when loading succeeds.\n",
    "        missing, unexpected = model.load_state_dict(checkpoint['state_dict'], strict=True)\n",
    "    \n",
    "        print(f\"缺失参数: {missing}\")\n",
    "        print(f\"意外参数: {unexpected}\")\n",
    "\n",
    "        model.eval()\n",
    "        print(f\"成功加载模型: {config['model_path']}\")\n",
    "\n",
    "        # Run evaluation\n",
    "        print(\"\\n=== 开始评估 ===\")\n",
    "        map_score, cmc = evaluate(model, gallery_loader, query_loader)\n",
    "        print(f\"\\n评估结果 mAP: {map_score:.2f}%\")\n",
    "        print(\"CMC指标:\")\n",
    "        print(\"Rank-1 : {:.2f}%\".format(cmc[0]))\n",
    "        print(\"Rank-2 : {:.2f}%\".format(cmc[1]))\n",
    "        print(\"Rank-5 : {:.2f}%\".format(cmc[4]))\n",
    "        print(\"Rank-10: {:.2f}%\".format(cmc[9]))\n",
    "\n",
    "    except Exception as model_load_error:\n",
    "        print(f\"\\n!!! 加载失败分析 !!!\")\n",
    "        print(f\"错误类型: {type(model_load_error).__name__}\")\n",
    "        print(f\"错误详情: {str(model_load_error)}\")\n",
    "    \n",
    "        # Diagnostic: inspect the raw file header bytes\n",
    "        try:\n",
    "            with open(model_path, \"rb\") as f:\n",
    "                data = f.read()\n",
    "            print(f\"文件头信息: {data[:100]}\")  # first 100 bytes\n",
    "        except Exception as file_error:\n",
    "            print(f\"文件读取失败: {str(file_error)}\")\n",
    "    \n",
    "        raise RuntimeError(\"模型加载失败，请检查文件完整性\") from model_load_error"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch2.0",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.12.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
