{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "5344a7f0",
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name '__file__' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mNameError\u001b[39m                                 Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[7]\u001b[39m\u001b[32m, line 448\u001b[39m\n\u001b[32m    441\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m map_score * \u001b[32m100\u001b[39m, cmc\n\u001b[32m    446\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[34m__name__\u001b[39m == \u001b[33m\"\u001b[39m\u001b[33m__main__\u001b[39m\u001b[33m\"\u001b[39m:\n\u001b[32m    447\u001b[39m      \u001b[38;5;66;03m# 获取项目根目录\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m448\u001b[39m     current_dir = os.path.dirname(os.path.abspath(\u001b[34m__file__\u001b[39m))  \u001b[38;5;66;03m# 当前文件所在目录 → e:\\codes\\project\\scripts\u001b[39;00m\n\u001b[32m    449\u001b[39m     project_root = os.path.dirname(current_dir)  \u001b[38;5;66;03m# 上溯一级 → e:\\codes\\project\u001b[39;00m\n\u001b[32m    451\u001b[39m     \u001b[38;5;66;03m# 配置参数\u001b[39;00m\n",
      "\u001b[31mNameError\u001b[39m: name '__file__' is not defined"
     ]
    }
   ],
   "source": [
    "# 新增eval.py\n",
    "import torch.distributed as dist\n",
    "import os\n",
    "import time\n",
    "import torch\n",
    "from PIL import Image\n",
    "from tqdm import tqdm\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import transforms\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision.models import vit_b_16\n",
    "from einops import rearrange, repeat\n",
    "from collections import defaultdict\n",
    "from torch.utils.data import DataLoader  # 新增导入\n",
    "import numpy as np\n",
    "from pathlib import Path\n",
    "import matplotlib.pyplot as plt\n",
    "from collections import Counter\n",
    "from sklearn.metrics import average_precision_score  # 新增导入\n",
    "\n",
    "\n",
    "class Veri776Dataset(Dataset):\n",
    "    \"\"\"VeRi-776 dataset for the train / test / query splits.\n",
    "\n",
    "    Resolves the dataset root relative to the project layout, loads the\n",
    "    file-name list and camera-ID mapping for the requested split, and maps\n",
    "    raw vehicle IDs to contiguous class indices.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, mode=\"test\", transform=None):\n",
    "        super().__init__()\n",
    "        start_time = time.time()\n",
    "\n",
    "        # ========== Resolve the dataset root dynamically ==========\n",
    "        # __file__ is undefined in notebooks / the REPL, so fall back to the\n",
    "        # working directory instead of letting NameError propagate.  (The\n",
    "        # original re-evaluated __file__ unconditionally right after this\n",
    "        # fallback, defeating it.)\n",
    "        try:\n",
    "            current_dir = os.path.dirname(os.path.abspath(__file__))\n",
    "        except NameError:\n",
    "            current_dir = os.getcwd()\n",
    "\n",
    "        # two levels up -> project root (adjust if the layout changes)\n",
    "        project_root = os.path.dirname(os.path.dirname(current_dir))\n",
    "        self.root_dir = os.path.join(project_root, \"datas\", \"VeRi-776\")\n",
    "\n",
    "        # ========== Validate the resolved path ==========\n",
    "        if not os.path.exists(self.root_dir):\n",
    "            raise FileNotFoundError(f\"数据集路径不存在: {self.root_dir}\")\n",
    "\n",
    "        self.mode = mode\n",
    "        self.transform = transform\n",
    "        # one entry per usable image, kept index-aligned\n",
    "        self.image_paths = []\n",
    "        self.vehicle_ids = []\n",
    "        self.camera_ids = []\n",
    "\n",
    "        # ========== split -> (name-list file, image folder) ==========\n",
    "        mode_config = {\n",
    "            \"train\": (\"name_train.txt\", \"image_train\"),\n",
    "            \"test\": (\"name_test.txt\", \"image_test\"),\n",
    "            \"query\": (\"name_query.txt\", \"image_query\")\n",
    "        }\n",
    "\n",
    "        # ========== Load the split ==========\n",
    "        try:\n",
    "            txt_file, img_dir = mode_config[mode]\n",
    "            with open(os.path.join(self.root_dir, txt_file)) as f:\n",
    "                file_names = [line.strip() for line in f.readlines()]\n",
    "\n",
    "            # camera-ID lookup: image file name -> camera id (string)\n",
    "            camera_dict = {}\n",
    "            with open(os.path.join(self.root_dir, \"camera_ID.txt\")) as f:\n",
    "                for line in f:\n",
    "                    parts = line.strip().split()\n",
    "                    if len(parts) >= 2:\n",
    "                        camera_dict[parts[0]] = parts[1]\n",
    "\n",
    "            # build records; skip files that are missing or unparsable\n",
    "            image_folder = os.path.join(self.root_dir, img_dir)\n",
    "            for fname in tqdm(file_names, desc=f\"加载{mode}数据\"):\n",
    "                try:\n",
    "                    vid = int(fname.split(\"_\")[0])\n",
    "                    img_path = os.path.join(image_folder, fname)\n",
    "                    if os.path.exists(img_path):\n",
    "                        self.image_paths.append(img_path)\n",
    "                        self.vehicle_ids.append(vid)\n",
    "                        self.camera_ids.append(camera_dict.get(fname, \"0\"))\n",
    "                except Exception as e:\n",
    "                    print(f\"处理文件{fname}出错: {str(e)}\")\n",
    "\n",
    "            # map raw vehicle IDs to contiguous class indices [0, n_ids)\n",
    "            self.unique_ids = sorted(set(self.vehicle_ids))\n",
    "            self.id_to_class = {v: k for k, v in enumerate(self.unique_ids)}\n",
    "            self.class_ids = [self.id_to_class[vid] for vid in self.vehicle_ids]\n",
    "\n",
    "            print(f\"数据集初始化完成 | 模式: {mode} | 样本数: {len(self)} | 耗时: {time.time()-start_time:.2f}s\")\n",
    "\n",
    "        except Exception as e:\n",
    "            # chain the original exception so the root cause stays visible\n",
    "            raise RuntimeError(f\"数据集初始化失败: {str(e)}\") from e\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.image_paths)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        # decode lazily so only sampled images are loaded into memory\n",
    "        img = Image.open(self.image_paths[idx]).convert(\"RGB\")\n",
    "        if self.transform:\n",
    "            img = self.transform(img)\n",
    "        return {\n",
    "            \"image\": img,\n",
    "            \"vehicle_id\": self.vehicle_ids[idx],\n",
    "            \"class_id\": self.class_ids[idx],\n",
    "            \"camera_id\": self.camera_ids[idx]\n",
    "        }\n",
    "\n",
    "\n",
    "class VehicleTransformer(nn.Module):\n",
    "    \"\"\"ViT-B/16 backbone with horizontal part branches for vehicle re-ID.\n",
    "\n",
    "    Outputs a global CLS feature, `local_parts` part features, a fused\n",
    "    feature (plus its BNNeck-normalised version) and classification logits.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, \n",
    "                 num_classes=776,\n",
    "                 img_size=(256, 128),\n",
    "                 patch_size=16,\n",
    "                 embed_dim=768,\n",
    "                 depth=12,\n",
    "                 num_heads=12,\n",
    "                 mlp_ratio=4.,\n",
    "                 local_parts=8,\n",
    "                 pretrained=True):\n",
    "        super().__init__()\n",
    "\n",
    "        # ----------------- sanity checks on the patch grid -----------------\n",
    "        assert img_size[0] % patch_size == 0, \"高度必须能被分块尺寸整除\"\n",
    "        assert img_size[1] % patch_size == 0, \"宽度必须能被分块尺寸整除\"\n",
    "        h_patches = img_size[0] // patch_size\n",
    "        w_patches = img_size[1] // patch_size\n",
    "        assert h_patches % local_parts == 0, \"local_parts必须能整除高度方向分块数\"\n",
    "\n",
    "        # ----------------- basic geometry -----------------\n",
    "        self.embed_dim = embed_dim\n",
    "        self.img_size = img_size\n",
    "        self.patch_size = patch_size\n",
    "        self.local_parts = local_parts  # number of horizontal parts\n",
    "        self.part_height = h_patches // local_parts  # part height in patches\n",
    "        self.part_width = w_patches  # part width in patches\n",
    "\n",
    "        # One classifier per part.  (The original defined this ModuleList\n",
    "        # twice; the first copy was silently replaced and only wasted\n",
    "        # allocation — a single definition is kept here.)\n",
    "        self.part_classifiers = nn.ModuleList([\n",
    "            nn.Linear(embed_dim, num_classes) for _ in range(local_parts)\n",
    "        ])\n",
    "\n",
    "        # ----------------- backbone -----------------\n",
    "        self.vit = vit_b_16(pretrained=pretrained)\n",
    "        # re-project for the custom patch grid (non-square input)\n",
    "        self.vit.conv_proj = nn.Conv2d(\n",
    "            3, embed_dim, \n",
    "            kernel_size=patch_size, \n",
    "            stride=patch_size, \n",
    "            bias=False\n",
    "        )\n",
    "\n",
    "        # ----------------- positional embedding adaptation -----------------\n",
    "        num_patches = h_patches * w_patches\n",
    "        self.pos_embed = nn.Parameter(\n",
    "            torch.zeros(1, num_patches + 1, embed_dim)\n",
    "        )\n",
    "        if pretrained:\n",
    "            # interpolate the pretrained patch position codes to the new grid\n",
    "            orig_pos_embed = self.vit.encoder.pos_embedding\n",
    "            orig_num_patches = orig_pos_embed.shape[1] - 1\n",
    "\n",
    "            # linear interpolation over the sequence dimension\n",
    "            new_pos_embed = F.interpolate(\n",
    "                orig_pos_embed[:, 1:].permute(0, 2, 1),\n",
    "                size=num_patches,\n",
    "                mode='linear'\n",
    "            ).permute(0, 2, 1)\n",
    "\n",
    "            # keep the CLS position code unchanged\n",
    "            self.pos_embed.data = torch.cat([\n",
    "                orig_pos_embed[:, :1],\n",
    "                new_pos_embed\n",
    "            ], dim=1)\n",
    "        else:\n",
    "            nn.init.trunc_normal_(self.pos_embed, std=0.02)\n",
    "\n",
    "        # ----------------- local feature branches -----------------\n",
    "        self.local_attention = nn.ModuleList([\n",
    "            LocalFeatureModule(\n",
    "                embed_dim=embed_dim,\n",
    "                num_heads=num_heads//2,\n",
    "                mlp_ratio=mlp_ratio,\n",
    "                part_height=self.part_height,\n",
    "                part_width=self.part_width\n",
    "            ) for _ in range(local_parts)\n",
    "        ])\n",
    "\n",
    "        # ----------------- fusion of global + local -----------------\n",
    "        self.fusion = FeatureFusion(\n",
    "            embed_dim=embed_dim,\n",
    "            num_parts=local_parts+1  # global + local parts\n",
    "        )\n",
    "\n",
    "        # classification head over the BN-normalised fused feature\n",
    "        self.head = nn.Sequential(\n",
    "            nn.LayerNorm(embed_dim * (local_parts + 1) * 2),\n",
    "            nn.Linear(embed_dim * (local_parts + 1) * 2, num_classes)\n",
    "        )\n",
    "\n",
    "        # BNNeck (metric-learning trick) applied before the head\n",
    "        self.bn_neck = nn.BatchNorm1d(embed_dim * (local_parts+1) * 2)\n",
    "\n",
    "    def forward_features(self, x):\n",
    "        \"\"\"Return (global feature [B, D], list of part features [B, D]).\"\"\"\n",
    "        B = x.shape[0]\n",
    "        x = self.vit.conv_proj(x)\n",
    "        x = x.flatten(2).transpose(1, 2)\n",
    "        cls_token = self.vit.class_token.expand(B, -1, -1)\n",
    "        x = torch.cat((cls_token, x), dim=1)\n",
    "        x = x + self.pos_embed  # adapted positional encoding\n",
    "        x = self.vit.encoder.dropout(x)\n",
    "\n",
    "        # run the encoder, collecting the CLS token of every layer\n",
    "        global_features = []\n",
    "        for blk in self.vit.encoder.layers:\n",
    "            x = blk(x)\n",
    "            global_features.append(x[:, 0])\n",
    "\n",
    "        # global feature = mean of the per-layer CLS tokens\n",
    "        global_feature = torch.stack(global_features, dim=1).mean(dim=1)\n",
    "\n",
    "        # split patch tokens into horizontal parts and refine each branch\n",
    "        local_features = []\n",
    "        x_patches = x[:, 1:].reshape(B, -1, self.part_height, self.part_width, self.embed_dim)\n",
    "        for i in range(self.local_parts):\n",
    "            part_feature = x_patches[:, i].reshape(B, -1, self.embed_dim)\n",
    "            part_feature = self.local_attention[i](part_feature)\n",
    "            local_features.append(part_feature)\n",
    "\n",
    "        return global_feature, local_features\n",
    "\n",
    "    def forward(self, x):\n",
    "        global_feat, local_feats = self.forward_features(x)\n",
    "\n",
    "        # Fuse once.  (The original called self.fusion and self.head twice,\n",
    "        # discarding the first logits — the values were identical, so this\n",
    "        # only removes wasted compute.)\n",
    "        fused_feature = self.fusion(global_feat, local_feats)\n",
    "        fused_bn = self.bn_neck(fused_feature)\n",
    "        logits = self.head(fused_bn)\n",
    "\n",
    "        # per-part classification logits\n",
    "        part_logits = [cls(feat) for cls, feat in zip(self.part_classifiers, local_feats)]\n",
    "\n",
    "        return {\n",
    "            'global': global_feat,\n",
    "            'local': local_feats,\n",
    "            'fused': fused_feature,\n",
    "            'logits': logits,\n",
    "            'part_logits': part_logits,\n",
    "            'bn_feature': fused_bn\n",
    "        }\n",
    "\n",
    "\n",
    "class LocalFeatureModule(nn.Module):\n",
    "    \"\"\"Local feature enhancement module for one horizontal part.\n",
    "\n",
    "    Applies learnable position codes, self-attention, an MLP block and\n",
    "    SE-style channel attention, then mean-pools the tokens to one vector.\n",
    "    \"\"\"\n",
    "    def __init__(self, embed_dim, num_heads, mlp_ratio, \n",
    "                 part_height,  # part height in patches\n",
    "                 part_width):  # part width in patches\n",
    "        super().__init__()\n",
    "        # batch_first=True so (B, N, D) inputs are interpreted correctly.\n",
    "        # Without it MultiheadAttention treats dim 0 as the sequence axis\n",
    "        # and attention would mix samples across the batch (FeatureFusion\n",
    "        # already uses batch_first for exactly this reason).\n",
    "        self.attention = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)\n",
    "        self.norm1 = nn.LayerNorm(embed_dim)\n",
    "        self.mlp = nn.Sequential(\n",
    "            nn.Linear(embed_dim, int(embed_dim * mlp_ratio)),\n",
    "            nn.GELU(),\n",
    "            nn.Linear(int(embed_dim * mlp_ratio), embed_dim)\n",
    "        )\n",
    "        self.norm2 = nn.LayerNorm(embed_dim)\n",
    "\n",
    "        # SE-style channel attention (squeeze to D/4, excite back to D)\n",
    "        self.channel_att = nn.Sequential(\n",
    "            nn.Linear(embed_dim, embed_dim//4),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(embed_dim//4, embed_dim),\n",
    "            nn.Sigmoid()\n",
    "        )\n",
    "\n",
    "        # positional encoding sized to this part's patch grid\n",
    "        self.pos_embed = nn.Parameter(\n",
    "            torch.randn(1, part_height * part_width, embed_dim)\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"x: (B, N, D) part tokens -> (B, D) pooled part feature.\"\"\"\n",
    "        B, N, D = x.shape\n",
    "        pos_embed = self.pos_embed[:, :N]  # adapt to the actual token count\n",
    "        x = x + pos_embed\n",
    "        attn_out, _ = self.attention(x, x, x)\n",
    "        x = self.norm1(x + attn_out)\n",
    "        x = self.norm2(x + self.mlp(x))\n",
    "\n",
    "        # channel re-weighting from the token-mean descriptor\n",
    "        channel_weights = self.channel_att(x.mean(dim=1, keepdim=True))\n",
    "        x = x * channel_weights\n",
    "\n",
    "        # mean-pool the part's tokens into a single vector\n",
    "        return x.mean(dim=1)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "class FeatureFusion(nn.Module):\n",
    "    \"\"\"Fuses the global feature with the local part features.\n",
    "\n",
    "    Stacks all features, applies learnable softmax part-weights, lets the\n",
    "    parts interact through self-attention, then concatenates the weighted\n",
    "    and interacted views into one [B, P*D*2] vector and layer-normalises it.\n",
    "    \"\"\"\n",
    "    def __init__(self, embed_dim, num_parts):\n",
    "        super().__init__()\n",
    "        self.embed_dim = embed_dim\n",
    "        self.num_parts = num_parts\n",
    "\n",
    "        # batch_first keeps everything in (Batch, Seq, Embed) layout\n",
    "        self.attention = nn.MultiheadAttention(\n",
    "            embed_dim, \n",
    "            num_heads=4,\n",
    "            batch_first=True\n",
    "        )\n",
    "\n",
    "        # normalises the doubled (weighted + interacted) concatenation\n",
    "        self.norm = nn.LayerNorm(embed_dim * num_parts * 2)\n",
    "\n",
    "        # learnable per-part fusion weights (softmax-normalised in forward)\n",
    "        self.weights = nn.Parameter(torch.ones(num_parts))\n",
    "\n",
    "    def forward(self, global_feat, local_feats):\n",
    "        # stack global + locals into [B, P, D]\n",
    "        stacked = torch.stack([global_feat] + local_feats, dim=1)\n",
    "\n",
    "        # adaptive weighting, broadcast over batch and embed dims\n",
    "        part_weights = torch.softmax(self.weights, dim=0)\n",
    "        weighted_feats = stacked * part_weights.view(1, -1, 1)\n",
    "\n",
    "        # the attention call below requires a 3D tensor\n",
    "        assert weighted_feats.dim() == 3, f\"期望3D输入，实际维度{weighted_feats.dim()}\"\n",
    "\n",
    "        # part-to-part interaction via self-attention [B, P, D]\n",
    "        interacted, _ = self.attention(\n",
    "            weighted_feats, weighted_feats, weighted_feats\n",
    "        )\n",
    "\n",
    "        # flatten both views and concatenate -> [B, P*D*2]\n",
    "        batch = weighted_feats.size(0)\n",
    "        fused = torch.cat(\n",
    "            [weighted_feats.reshape(batch, -1), interacted.reshape(batch, -1)],\n",
    "            dim=1\n",
    "        )\n",
    "\n",
    "        # guard against silent dimension drift\n",
    "        expected_dim = self.embed_dim * self.num_parts * 2\n",
    "        assert fused.shape[1] == expected_dim, \\\n",
    "            f\"特征维度错误！期望 {expected_dim}, 实际 {fused.shape[1]}\"\n",
    "\n",
    "        return self.norm(fused)\n",
    "\n",
    "\n",
    "def evaluate(model, gallery_loader, query_loader):\n",
    "    \"\"\"Compute mAP (%) and the CMC curve for a re-ID model.\n",
    "\n",
    "    Returns:\n",
    "        (map_score, cmc): mAP in percent, and a tensor of CMC values (%)\n",
    "        for ranks 1..k where k = min(50, gallery size).\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    device = next(model.parameters()).device\n",
    "\n",
    "    def extract_features(loader):\n",
    "        # L2-normalised BNNeck features + vehicle ids for a whole loader\n",
    "        features, ids = [], []\n",
    "        for batch in tqdm(loader, desc=\"特征提取\"):\n",
    "            with torch.no_grad():\n",
    "                outputs = model(batch[\"image\"].to(device))\n",
    "                features.append(F.normalize(outputs[\"bn_feature\"], dim=1))\n",
    "                ids.append(batch[\"vehicle_id\"])\n",
    "        return torch.cat(features), torch.cat(ids)\n",
    "\n",
    "    gallery_feats, gallery_ids = extract_features(gallery_loader)\n",
    "    query_feats, query_ids = extract_features(query_loader)\n",
    "\n",
    "    # cosine similarity (features are already L2-normalised)\n",
    "    sim_matrix = torch.mm(query_feats, gallery_feats.T)\n",
    "\n",
    "    # mAP — skip queries with no gallery positives, where AP is\n",
    "    # undefined and average_precision_score would yield NaN\n",
    "    aps = []\n",
    "    for i in range(len(query_ids)):\n",
    "        y_true = (gallery_ids == query_ids[i]).cpu().numpy()\n",
    "        if not y_true.any():\n",
    "            continue\n",
    "        y_score = sim_matrix[i].cpu().numpy()\n",
    "        aps.append(average_precision_score(y_true, y_score))\n",
    "    map_score = np.mean(aps) if aps else 0.0\n",
    "\n",
    "    # CMC — cap k at the gallery size (topk(50) would crash on small\n",
    "    # galleries) and clamp the cumulative matches so a query with several\n",
    "    # gallery matches counts at most once per rank (raw cumsum pushed the\n",
    "    # curve above 100%)\n",
    "    k = min(50, sim_matrix.size(1))\n",
    "    _, indices = sim_matrix.topk(k, dim=1)\n",
    "    matches = (gallery_ids[indices] == query_ids.unsqueeze(1)).float()\n",
    "    cmc = matches.cumsum(dim=1).clamp(max=1).mean(dim=0) * 100\n",
    "\n",
    "    return map_score * 100, cmc\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # ========== Resolve project root ==========\n",
    "    # __file__ is undefined in notebooks (this cell's recorded NameError);\n",
    "    # fall back to the current working directory.\n",
    "    try:\n",
    "        current_dir = os.path.dirname(os.path.abspath(__file__))\n",
    "    except NameError:\n",
    "        current_dir = os.getcwd()\n",
    "    project_root = os.path.dirname(current_dir)  # one level up\n",
    "\n",
    "    # run configuration\n",
    "    config = {\n",
    "        \"model_path\": os.path.join(project_root, \"best_model.pth\"),\n",
    "        \"batch_size\": 128,\n",
    "        \"num_workers\": 8,\n",
    "        \"input_size\": (256, 128),\n",
    "        \"device\": \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n",
    "    }\n",
    "\n",
    "    # ========== preprocessing (ImageNet statistics) ==========\n",
    "    transform = transforms.Compose([\n",
    "        transforms.Resize(config[\"input_size\"]),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize(mean=[0.485, 0.456, 0.406], \n",
    "                            std=[0.229, 0.224, 0.225])\n",
    "    ])\n",
    "\n",
    "    # ========== data ==========\n",
    "    try:\n",
    "        gallery_set = Veri776Dataset(mode=\"test\", transform=transform)\n",
    "        query_set = Veri776Dataset(mode=\"query\", transform=transform)\n",
    "\n",
    "        gallery_loader = DataLoader(\n",
    "            gallery_set,\n",
    "            batch_size=config[\"batch_size\"],\n",
    "            shuffle=False,\n",
    "            num_workers=config[\"num_workers\"],\n",
    "            pin_memory=True\n",
    "        )\n",
    "\n",
    "        query_loader = DataLoader(\n",
    "            query_set,\n",
    "            batch_size=config[\"batch_size\"],\n",
    "            shuffle=False,\n",
    "            num_workers=config[\"num_workers\"],\n",
    "            pin_memory=True\n",
    "        )\n",
    "    except Exception as e:\n",
    "        raise RuntimeError(f\"数据加载失败: {str(e)}\") from e\n",
    "\n",
    "    # ========== model ==========\n",
    "    model_path = config[\"model_path\"]\n",
    "    if not os.path.exists(model_path):\n",
    "        raise FileNotFoundError(f\"模型文件不存在: {model_path}\")\n",
    "\n",
    "    try:\n",
    "        print(f\"\\n=== 模型加载调试 ===\")\n",
    "        print(f\"加载路径: {os.path.abspath(model_path)}\")\n",
    "        print(f\"文件大小: {os.path.getsize(model_path)/1024/1024:.2f} MB\")\n",
    "\n",
    "        # weights_only avoids arbitrary code execution from pickled payloads\n",
    "        checkpoint = torch.load(\n",
    "            model_path,\n",
    "            map_location=config[\"device\"],\n",
    "            weights_only=True\n",
    "        )\n",
    "\n",
    "        # initialise the model and load weights\n",
    "        model = VehicleTransformer(num_classes=776).to(config[\"device\"])\n",
    "        # non-strict load tolerates partial parameter mismatches\n",
    "        missing, unexpected = model.load_state_dict(checkpoint, strict=False)\n",
    "        print(f\"缺失参数: {missing}\")\n",
    "        print(f\"意外参数: {unexpected}\")\n",
    "\n",
    "        model.eval()\n",
    "        print(f\"成功加载模型: {config['model_path']}\")\n",
    "\n",
    "    except Exception as e:\n",
    "        # Diagnostics + re-raise.  NOTE: in the original these lines were\n",
    "        # dedented out of the except clause, so the script ALWAYS raised\n",
    "        # \"模型加载失败\" (and `from e` NameError'd) even after a\n",
    "        # successful load — keeping them inside the handler fixes that.\n",
    "        print(f\"\\n!!! 加载失败分析 !!!\")\n",
    "        print(f\"错误类型: {type(e).__name__}\")\n",
    "        print(f\"错误详情: {str(e)}\")\n",
    "\n",
    "        # raw-read diagnostic: show the file header to spot corruption\n",
    "        try:\n",
    "            with open(model_path, \"rb\") as f:\n",
    "                data = f.read()\n",
    "                print(f\"文件头信息: {data[:100]}\")\n",
    "        except Exception as file_error:\n",
    "            print(f\"文件读取失败: {str(file_error)}\")\n",
    "\n",
    "        raise RuntimeError(\"模型加载失败，请检查文件完整性\") from e\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "7563a51b",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "<>:32: SyntaxWarning: invalid escape sequence '\\c'\n",
      "<>:32: SyntaxWarning: invalid escape sequence '\\c'\n",
      "C:\\Users\\admin\\AppData\\Local\\Temp\\ipykernel_27404\\159109784.py:32: SyntaxWarning: invalid escape sequence '\\c'\n",
      "  os.path.join(current_dir, \"E:\\codes\\project\\datas\\VeRi-776\")\n"
     ]
    }
   ],
   "source": [
    "# 新建train.py验证模型保存功能\n",
    "import torch\n",
    "import os\n",
    "import time\n",
    "from PIL import Image\n",
    "from tqdm import tqdm\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import transforms\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision.models import vit_b_16\n",
    "from einops import rearrange, repeat\n",
    "from collections import defaultdict\n",
    "from torch.utils.data import DataLoader  # 新增导入\n",
    "import numpy as np\n",
    "from pathlib import Path\n",
    "import matplotlib.pyplot as plt\n",
    "from collections import Counter\n",
    "from sklearn.metrics import average_precision_score  # 新增导入\n",
    "\n",
    "class Veri776Dataset(Dataset):\n",
    "    \"\"\"VeRi-776 dataset; loads the requested split from `root_dir`.\n",
    "\n",
    "    Args:\n",
    "        root_dir: path to the VeRi-776 dataset root.\n",
    "        mode: one of \"train\", \"test\", \"query\".\n",
    "        transform: optional torchvision transform applied per image.\n",
    "    \"\"\"\n",
    "    def __init__(self, root_dir, mode=\"train\", transform=None):\n",
    "        super().__init__()\n",
    "        start_time = time.time()\n",
    "\n",
    "        # Use the caller-supplied root.  (The original ignored `root_dir`\n",
    "        # for self.root_dir, hard-coded an absolute Windows path whose\n",
    "        # un-escaped backslashes raised SyntaxWarnings, and referenced\n",
    "        # __file__, which is undefined in notebooks.)\n",
    "        self.root_dir = os.path.abspath(root_dir)\n",
    "\n",
    "        # ========== validate the dataset root ==========\n",
    "        if not os.path.exists(self.root_dir):\n",
    "            raise FileNotFoundError(f\"数据集路径不存在: {self.root_dir}\")\n",
    "\n",
    "        self.mode = mode\n",
    "        self.transform = transform\n",
    "        # one entry per usable image, kept index-aligned\n",
    "        self.image_paths = []\n",
    "        self.vehicle_ids = []\n",
    "        self.camera_ids = []\n",
    "\n",
    "        # mode validation\n",
    "        valid_modes = [\"train\", \"test\", \"query\"]\n",
    "        if mode not in valid_modes:\n",
    "            raise ValueError(f\"Invalid mode {mode}, expected {valid_modes}\")\n",
    "\n",
    "        # split -> file mappings\n",
    "        name_file_map = {\n",
    "            \"train\": \"name_train.txt\",\n",
    "            \"test\": \"name_test.txt\", \n",
    "            \"query\": \"name_query.txt\"\n",
    "        }\n",
    "        image_dir_map = {\n",
    "            \"train\": \"image_train\",\n",
    "            \"test\": \"image_test\",\n",
    "            \"query\": \"image_query\"\n",
    "        }\n",
    "\n",
    "        # read the file-name list for this split\n",
    "        name_file = os.path.join(self.root_dir, name_file_map[mode])\n",
    "        if not os.path.exists(name_file):\n",
    "            raise FileNotFoundError(f\"{name_file} 不存在!\")\n",
    "        with open(name_file, \"r\") as f:\n",
    "            file_names = [line.strip() for line in f.readlines()]\n",
    "        print(f\"共加载 {len(file_names)} 个文件名，耗时 {time.time()-start_time:.2f}s\")\n",
    "\n",
    "        # camera-ID mapping: file name -> camera id (string)\n",
    "        camera_file = os.path.join(self.root_dir, \"camera_ID.txt\")\n",
    "        camera_id_dict = {}\n",
    "        if os.path.exists(camera_file):\n",
    "            with open(camera_file, \"r\") as f:\n",
    "                lines = f.readlines()\n",
    "                camera_id_dict = dict(line.strip().split()[:2] for line in lines if len(line.strip().split()) >= 2)\n",
    "\n",
    "        # build records with a progress bar; skip bad/missing files\n",
    "        image_dir = os.path.join(self.root_dir, image_dir_map[mode])\n",
    "        for file_name in tqdm(file_names, desc=f\"处理 {mode} 数据\"):\n",
    "            # parse the vehicle id from \"<vid>_<cam>_...\" file names;\n",
    "            # catch only the parse failure, not arbitrary errors\n",
    "            try:\n",
    "                vehicle_id = int(file_name.split(\"_\")[0])\n",
    "            except ValueError:\n",
    "                print(f\"无法解析文件名: {file_name}\")\n",
    "                continue\n",
    "\n",
    "            img_path = os.path.join(image_dir, file_name)\n",
    "            if not os.path.exists(img_path):\n",
    "                print(f\"文件缺失: {img_path}\")\n",
    "                continue\n",
    "\n",
    "            self.image_paths.append(img_path)\n",
    "            self.vehicle_ids.append(vehicle_id)\n",
    "            # default \"0\" (string) to match the real camera-id type\n",
    "            self.camera_ids.append(camera_id_dict.get(file_name, \"0\"))\n",
    "\n",
    "        # map raw vehicle IDs to contiguous class indices [0, n_ids)\n",
    "        self.unique_ids = sorted(set(self.vehicle_ids))\n",
    "        self.id_to_class = {v:k for k,v in enumerate(self.unique_ids)}\n",
    "        self.class_ids = [self.id_to_class[v] for v in self.vehicle_ids]\n",
    "        print(f\"数据集初始化完成，总耗时 {time.time()-start_time:.2f}s\")\n",
    "\n",
    "        print(f\"数据集路径: {self.root_dir}\")\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.image_paths)\n",
    "    \n",
    "    def __getitem__(self, idx):\n",
    "        # decode lazily so only sampled images are loaded into memory\n",
    "        img = Image.open(self.image_paths[idx]).convert(\"RGB\")\n",
    "        if self.transform:\n",
    "            img = self.transform(img)\n",
    "        return {\n",
    "            \"image\": img,\n",
    "            \"vehicle_id\": self.vehicle_ids[idx],\n",
    "            \"class_id\": self.class_ids[idx],\n",
    "            \"camera_id\": self.camera_ids[idx]\n",
    "        }\n",
    "\n",
    "\n",
    "class VehicleTransformer(nn.Module):\n",
    "    def __init__(self, \n",
    "                 num_classes=776,\n",
    "                 img_size=(256, 128),\n",
    "                 patch_size=16,\n",
    "                 embed_dim=768,\n",
    "                 depth=12,\n",
    "                 num_heads=12,\n",
    "                 mlp_ratio=4.,\n",
    "                 local_parts=8,\n",
    "                 pretrained=True):\n",
    "        super().__init__()\n",
    "\n",
    "        # 添加部件分类器\n",
    "        self.part_classifiers = nn.ModuleList([\n",
    "            nn.Linear(embed_dim, num_classes) for _ in range(local_parts)\n",
    "        ])\n",
    "        \n",
    "        # 参数校验\n",
    "        assert img_size[0] % patch_size == 0, \"高度必须能被分块尺寸整除\"\n",
    "        assert img_size[1] % patch_size == 0, \"宽度必须能被分块尺寸整除\"\n",
    "        h_patches = img_size[0] // patch_size\n",
    "        w_patches = img_size[1] // patch_size\n",
    "        assert h_patches % local_parts == 0, \"local_parts必须能整除高度方向分块数\"\n",
    "\n",
    "        # ----------------- 基础参数 -----------------\n",
    "        self.embed_dim = embed_dim #新增\n",
    "        self.img_size = img_size\n",
    "        self.patch_size = patch_size\n",
    "        self.local_parts = local_parts  # 水平分块数量\n",
    "        self.part_height = h_patches // local_parts  # 分块高度（patch数）\n",
    "        self.part_width = w_patches  # 分块宽度\n",
    "\n",
    "        # 部件分类器应在参数初始化之后定义\n",
    "        self.part_classifiers = nn.ModuleList([\n",
    "            nn.Linear(embed_dim, num_classes) for _ in range(local_parts)\n",
    "        ])\n",
    "        \n",
    "        # ----------------- 主干网络 -----------------\n",
    "        self.vit = vit_b_16(pretrained=pretrained)\n",
    "        self.vit.conv_proj = nn.Conv2d(\n",
    "            3, embed_dim, \n",
    "            kernel_size=patch_size, \n",
    "            stride=patch_size, \n",
    "            bias=False\n",
    "        )\n",
    "        \n",
    "         # ----------------- 位置编码适配 -----------------\n",
    "        num_patches = h_patches * w_patches\n",
    "        self.pos_embed = nn.Parameter(\n",
    "            torch.zeros(1, num_patches + 1, embed_dim)\n",
    "        )\n",
    "        if pretrained:\n",
    "            # 插值预训练位置编码\n",
    "            orig_pos_embed = self.vit.encoder.pos_embedding\n",
    "            orig_num_patches = orig_pos_embed.shape[1] - 1\n",
    "            \n",
    "            # 线性插值\n",
    "            new_pos_embed = F.interpolate(\n",
    "                orig_pos_embed[:, 1:].permute(0, 2, 1),\n",
    "                size=num_patches,\n",
    "                mode='linear'\n",
    "            ).permute(0, 2, 1)\n",
    "            \n",
    "            # 拼接cls token\n",
    "            self.pos_embed.data = torch.cat([\n",
    "                orig_pos_embed[:, :1],\n",
    "                new_pos_embed\n",
    "            ], dim=1)\n",
    "        else:\n",
    "            nn.init.trunc_normal_(self.pos_embed, std=0.02)\n",
    "\n",
    "\n",
    "        # ----------------- 局部特征模块 -----------------\n",
    "        self.local_attention = nn.ModuleList([\n",
    "            LocalFeatureModule(\n",
    "                embed_dim=embed_dim,\n",
    "                num_heads=num_heads//2,\n",
    "                mlp_ratio=mlp_ratio,\n",
    "                part_height=self.part_height,\n",
    "                part_width=self.part_width\n",
    "            ) for _ in range(local_parts)\n",
    "        ])\n",
    "             \n",
    "        \n",
    "        # ----------------- 特征融合模块 -----------------\n",
    "        self.fusion = FeatureFusion(\n",
    "            embed_dim=embed_dim,\n",
    "            num_parts=local_parts+1  # 全局+局部\n",
    "        )\n",
    "        \n",
    "       # 修正后的输出层定义\n",
    "        self.head = nn.Sequential(\n",
    "            nn.LayerNorm(embed_dim * (local_parts + 1) * 2),  # 恢复LayerNorm\n",
    "            nn.Linear(embed_dim * (local_parts + 1) * 2, num_classes)\n",
    "        )\n",
    "\n",
    "         # 调整BN层位置\n",
    "        self.bn_neck = nn.BatchNorm1d(embed_dim * (local_parts+1) * 2)\n",
    "        self.head = nn.Sequential(\n",
    "            nn.Linear(embed_dim * (local_parts + 1) * 2, num_classes)\n",
    "        )  #强化度量学习，特征归一化与BNNeck\n",
    "\n",
    "        \n",
    "    def forward_features(self, x):\n",
    "        \"\"\"Extract a global CLS-based feature and per-part local features.\n",
    "\n",
    "        x: image batch (B, 3, H, W).\n",
    "        Returns (global_feature, local_features): global_feature is\n",
    "        (B, embed_dim); local_features is a list of local_parts tensors,\n",
    "        each (B, embed_dim) after the LocalFeatureModule pooling.\n",
    "        \"\"\"\n",
    "        # ---- Global feature extraction ----\n",
    "        B = x.shape[0]\n",
    "        x = self.vit.conv_proj(x)\n",
    "        x = x.flatten(2).transpose(1, 2)  # (B, num_patches, embed_dim)\n",
    "        cls_token = self.vit.class_token.expand(B, -1, -1)\n",
    "        x = torch.cat((cls_token, x), dim=1)\n",
    "        x = x + self.pos_embed # adapted (interpolated) position embedding\n",
    "        x = self.vit.encoder.dropout(x)\n",
    "        \n",
    "        # Run through the Transformer encoder\n",
    "        global_features = []\n",
    "        for blk in self.vit.encoder.layers:\n",
    "            x = blk(x)\n",
    "            global_features.append(x[:, 0])  # collect the CLS token of every layer\n",
    "        \n",
    "        # Average CLS tokens across layers -> global descriptor\n",
    "        global_feature = torch.stack(global_features, dim=1).mean(dim=1)\n",
    "        \n",
    "        # ---- Local feature extraction ----\n",
    "        local_features = []\n",
    "        # Split patch tokens into horizontal stripes:\n",
    "        # (B, local_parts, part_height, part_width, embed_dim).\n",
    "        # NOTE(review): assumes the patch sequence is row-major over\n",
    "        # (h_patches, w_patches) — matches conv_proj + flatten(2) above.\n",
    "        x_patches = x[:, 1:].reshape(B, -1, self.part_height, self.part_width, self.embed_dim)\n",
    "        for i in range(self.local_parts):\n",
    "            part_feature = x_patches[:, i].reshape(B, -1, self.embed_dim)\n",
    "            part_feature = self.local_attention[i](part_feature)\n",
    "            local_features.append(part_feature)\n",
    "        \n",
    "        return global_feature, local_features\n",
    "    \n",
    "       \n",
    "    \n",
    "\n",
    "    \n",
    "    def forward(self, x):\n",
    "        \"\"\"Full forward pass: features -> fusion -> BNNeck -> logits.\n",
    "\n",
    "        Returns a dict with the global/local/fused features, the fused\n",
    "        classification logits, per-part logits, and the batch-normalized\n",
    "        (BNNeck) feature used for metric learning.\n",
    "        \"\"\"\n",
    "        global_feat, local_feats = self.forward_features(x)\n",
    "\n",
    "        # Fuse global + local features ONCE (previously self.fusion and\n",
    "        # self.head were each run twice and the first logits were discarded)\n",
    "        fused_feature = self.fusion(global_feat, local_feats)\n",
    "\n",
    "        # Part-level classification logits (one classifier per stripe)\n",
    "        part_logits = [cls(feat) for cls, feat in zip(self.part_classifiers, local_feats)]\n",
    "\n",
    "        # BNNeck: batch-normalize the fused feature before the classifier head\n",
    "        fused_bn = self.bn_neck(fused_feature)\n",
    "        logits = self.head(fused_bn)\n",
    "\n",
    "        return {\n",
    "            'global': global_feat,\n",
    "            'local': local_feats,\n",
    "            'fused': fused_feature,\n",
    "            'logits': logits,\n",
    "            'part_logits': part_logits,\n",
    "            'bn_feature': fused_bn\n",
    "        }\n",
    "\n",
    "\n",
    "class LocalFeatureModule(nn.Module):\n",
    "    \"\"\"局部特征增强模块\"\"\"\n",
    "    def __init__(self, embed_dim, num_heads, mlp_ratio, \n",
    "                 part_height,  # 接收分块高度参数\n",
    "                 part_width): # 接收分块宽度参数\n",
    "        super().__init__()\n",
    "        self.attention = nn.MultiheadAttention(embed_dim, num_heads)\n",
    "        self.norm1 = nn.LayerNorm(embed_dim)\n",
    "        self.mlp = nn.Sequential(\n",
    "            nn.Linear(embed_dim, int(embed_dim * mlp_ratio)),\n",
    "            nn.GELU(),\n",
    "            nn.Linear(int(embed_dim * mlp_ratio), embed_dim)\n",
    "        )\n",
    "        self.norm2 = nn.LayerNorm(embed_dim)\n",
    "\n",
    "        # 新增通道注意力\n",
    "        self.channel_att = nn.Sequential(\n",
    "            nn.Linear(embed_dim, embed_dim//4),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(embed_dim//4, embed_dim),\n",
    "            nn.Sigmoid()\n",
    "        )\n",
    "        \n",
    "       \n",
    "        \n",
    "         # 根据分块尺寸生成位置编码\n",
    "        self.pos_embed = nn.Parameter(\n",
    "            torch.randn(1, part_height * part_width, embed_dim)\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        B, N, D = x.shape\n",
    "        pos_embed = self.pos_embed[:, :N]  # 动态适配序列长度\n",
    "        x = x + pos_embed\n",
    "        attn_out, _ = self.attention(x, x, x)\n",
    "        x = self.norm1(x + attn_out)\n",
    "        x = self.norm2(x + self.mlp(x))\n",
    "\n",
    "        # 在自注意力后添加\n",
    "        channel_weights = self.channel_att(x.mean(dim=1, keepdim=True))\n",
    "        x = x * channel_weights\n",
    "\n",
    "\n",
    "\n",
    "        return x.mean(dim=1)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "class FeatureFusion(nn.Module):\n",
    "\n",
    "\n",
    "    \"\"\"多特征融合模块\"\"\"\n",
    "    def __init__(self, embed_dim, num_parts):\n",
    "        super().__init__()\n",
    "        self.embed_dim = embed_dim\n",
    "        self.num_parts = num_parts\n",
    "\n",
    "        # 关键修复：添加batch_first参数\n",
    "        self.attention = nn.MultiheadAttention(\n",
    "            embed_dim, \n",
    "            num_heads=4,\n",
    "            batch_first=True  # 允许(Batch, Seq, Embed)输入格式\n",
    "        )\n",
    "\n",
    "\n",
    "        # 修正归一化层维度\n",
    "        self.norm = nn.LayerNorm(embed_dim * num_parts * 2)  # 维度翻倍\n",
    "        \n",
    "        # 可学习的融合权重\n",
    "        self.weights = nn.Parameter(torch.ones(num_parts))\n",
    "        \n",
    "    def forward(self, global_feat, local_feats):\n",
    "        \n",
    "        # 拼接所有特征 [B, P, D]\n",
    "        all_feats = torch.stack(\n",
    "            [global_feat] + local_feats, \n",
    "            dim=1\n",
    "        )\n",
    "        \n",
    "        # 自适应权重融合\n",
    "        weights = torch.softmax(self.weights, dim=0)\n",
    "        weighted_feats = all_feats * weights.view(1, -1, 1)\n",
    "\n",
    "        # 维度验证\n",
    "        assert weighted_feats.dim() == 3, f\"期望3D输入，实际维度{weighted_feats.dim()}\"# 确保输入为3D张量\n",
    "        \n",
    "        # 注意力交互 [B, P, D]\n",
    "        interacted, _ = self.attention(\n",
    "            weighted_feats, weighted_feats, weighted_feats\n",
    "        )\n",
    "        \n",
    "        # 拼接特征 [B, P*D*2]\n",
    "        fused = torch.cat([\n",
    "            weighted_feats.reshape(weighted_feats.size(0), -1),\n",
    "            interacted.reshape(interacted.size(0), -1)\n",
    "        ], dim=1)\n",
    "        \n",
    "         # 添加维度验证\n",
    "        expected_dim = self.embed_dim * self.num_parts * 2\n",
    "        assert fused.shape[1] == expected_dim, \\\n",
    "            f\"特征维度错误！期望 {expected_dim}, 实际 {fused.shape[1]}\"\n",
    "\n",
    "        return self.norm(fused)\n",
    "\n",
    "\n",
    "\n",
    "# Core training skeleton\n",
    "def main():\n",
    "    \"\"\"Minimal training loop: dataset -> model -> Adam -> 100 epochs.\"\"\"\n",
    "    # Data\n",
    "    dataset = Veri776Dataset(mode=\"train\")\n",
    "    loader = DataLoader(dataset, batch_size=64, shuffle=True)\n",
    "\n",
    "    # Model\n",
    "    model = VehicleTransformer().cuda()\n",
    "\n",
    "    # Optimizer\n",
    "    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n",
    "\n",
    "    # Training loop\n",
    "    for epoch in range(100):\n",
    "        model.train()\n",
    "        for batch in loader:\n",
    "            images = batch[\"image\"].cuda()\n",
    "            targets = batch[\"class_id\"].cuda()\n",
    "\n",
    "            outputs = model(images)\n",
    "            loss = F.cross_entropy(outputs[\"logits\"], targets)\n",
    "\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "        # Save after every epoch (NOTE: overwrites the same file each time)\n",
    "        torch.save(model.state_dict(), \"best_model.pth\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "489a9432",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "e:\\codes\\project\\src\\datasets\\veri776_dataset.py:11: SyntaxWarning: invalid escape sequence '\\c'\n",
      "  def verify_dataset(root=\"E:\\codes\\project\\datas\\VeRi-776\"):\n",
      "e:\\codes\\project\\src\\datasets\\veri776_dataset.py:46: SyntaxWarning: invalid escape sequence '\\c'\n",
      "  os.path.join(current_dir, \"E:\\codes\\project\\datas\\VeRi-776\")\n",
      "e:\\codes\\project\\src\\datasets\\veri776_dataset.py:167: SyntaxWarning: invalid escape sequence '\\c'\n",
      "  root_dir=\"E:\\codes\\project\\datas\\VeRi-776\",\n",
      "e:\\codes\\project\\src\\datasets\\veri776_dataset.py:11: SyntaxWarning: invalid escape sequence '\\c'\n",
      "  def verify_dataset(root=\"E:\\codes\\project\\datas\\VeRi-776\"):\n",
      "e:\\codes\\project\\src\\datasets\\veri776_dataset.py:46: SyntaxWarning: invalid escape sequence '\\c'\n",
      "  os.path.join(current_dir, \"E:\\codes\\project\\datas\\VeRi-776\")\n",
      "e:\\codes\\project\\src\\datasets\\veri776_dataset.py:167: SyntaxWarning: invalid escape sequence '\\c'\n",
      "  root_dir=\"E:\\codes\\project\\datas\\VeRi-776\",\n"
     ]
    },
    {
     "ename": "NameError",
     "evalue": "name 'dataset' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mNameError\u001b[39m                                 Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 25\u001b[39m\n\u001b[32m     23\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01msklearn\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mmetrics\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m average_precision_score  \u001b[38;5;66;03m# 新增导入\u001b[39;00m\n\u001b[32m     24\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mtorch\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01moptim\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mlr_scheduler\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m CosineAnnealingLR\n\u001b[32m---> \u001b[39m\u001b[32m25\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01msrc\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mdatasets\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mveri776_dataset\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m Veri776Dataset\n\u001b[32m     26\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01msrc\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mmodels\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mvehicle_transformer\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m VehicleTransformer\n\u001b[32m     28\u001b[39m \u001b[38;5;66;03m# 在导入语句后添加\u001b[39;00m\n",
      "\u001b[36mFile \u001b[39m\u001b[32me:\\codes\\project\\src\\datasets\\veri776_dataset.py:188\u001b[39m\n\u001b[32m    186\u001b[39m \u001b[38;5;66;03m# 统计摄像头分布\u001b[39;00m\n\u001b[32m    187\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mcollections\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m Counter\n\u001b[32m--> \u001b[39m\u001b[32m188\u001b[39m camera_counts = Counter(dataset.camera_ids)\n\u001b[32m    189\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m摄像头分布:\u001b[39m\u001b[33m\"\u001b[39m, camera_counts.most_common())\n\u001b[32m    191\u001b[39m \u001b[38;5;66;03m# 验证ID是否连续\u001b[39;00m\n",
      "\u001b[31mNameError\u001b[39m: name 'dataset' is not defined"
     ]
    }
   ],
   "source": [
    "# 新建train.py验证模型保存功能\n",
    "import torch\n",
    "import sys\n",
    "import os\n",
    "from pathlib import Path\n",
    "import time\n",
    "from PIL import Image\n",
    "import argparse\n",
    "from tqdm import tqdm\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import transforms\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision.models import vit_b_16\n",
    "from einops import rearrange, repeat\n",
    "from collections import defaultdict\n",
    "from torch.utils.data import DataLoader  # 新增导入\n",
    "from torch.optim import AdamW\n",
    "import numpy as np\n",
    "from pathlib import Path\n",
    "import matplotlib.pyplot as plt\n",
    "from collections import Counter\n",
    "from sklearn.metrics import average_precision_score  # 新增导入\n",
    "from torch.optim.lr_scheduler import CosineAnnealingLR\n",
    "from src.datasets.veri776_dataset import Veri776Dataset\n",
    "from src.models.vehicle_transformer import VehicleTransformer\n",
    "\n",
    "# Added after the import block as a diagnostic re-import.\n",
    "# NOTE(review): Veri776Dataset is already imported unconditionally above,\n",
    "# so an ImportError would surface there first; this try/except only adds\n",
    "# the success/failure printout.\n",
    "try:\n",
    "    from src.datasets.veri776_dataset import Veri776Dataset\n",
    "    print(\"=== 成功导入数据集模块 ===\")\n",
    "except ImportError as e:\n",
    "    print(f\"=== 导入失败: {str(e)} ===\")\n",
    "    raise\n",
    "\n",
    "\n",
    "# Resolve the project root.\n",
    "# FIX: __file__ is undefined inside a notebook/REPL kernel (running this\n",
    "# cell previously raised NameError), so fall back to the working directory.\n",
    "if \"__file__\" in globals():\n",
    "    current_file = Path(__file__).resolve()\n",
    "    # go up one level (assumes train.py lives in scripts/)\n",
    "    project_root = current_file.parent.parent\n",
    "else:\n",
    "    project_root = Path(os.getcwd())\n",
    "# Make `src.*` imports resolvable from the project root.\n",
    "# NOTE(review): this runs AFTER the `from src...` imports above, so those\n",
    "# imports only succeed when the kernel already starts at the project root.\n",
    "sys.path.insert(0, str(project_root))\n",
    "# Print the paths for verification\n",
    "print(f\"\\n=== 当前工作目录: {os.getcwd()}\")\n",
    "print(f\"=== 项目根目录: {project_root}\")\n",
    "print(f\"=== 系统路径: {sys.path[:3]}\\n\")\n",
    "\n",
    "\n",
    "# 修改损失计算逻辑（结合全局和部件分类损失）\n",
    "def calculate_loss(outputs, targets):\n",
    "    main_loss = F.cross_entropy(outputs[\"logits\"], targets)\n",
    "    part_loss = torch.stack([F.cross_entropy(p, targets) \n",
    "                           for p in outputs[\"part_logits\"]]).mean()\n",
    "    return main_loss + 0.5 * part_loss\n",
    "\n",
    "\n",
    "\n",
    "# 添加验证循环（防止过拟合）\n",
    "def validate(model, val_loader, device):\n",
    "    model.eval()\n",
    "    total_correct = 0\n",
    "    with torch.no_grad():\n",
    "        for batch in val_loader:\n",
    "            images = batch[\"image\"].to(device)\n",
    "            targets = batch[\"class_id\"].to(device)\n",
    "            outputs = model(images)\n",
    "            preds = outputs[\"logits\"].argmax(dim=1)\n",
    "            total_correct += (preds == targets).sum().item()\n",
    "    return total_correct / len(val_loader.dataset)\n",
    "\n",
    "\n",
    "# 配置参数\n",
    "def get_args():\n",
    "    parser = argparse.ArgumentParser()\n",
    "    parser.add_argument(\"--batch_size\", type=int, default=64)\n",
    "    parser.add_argument(\"--lr\", type=float, default=3e-4)\n",
    "    parser.add_argument(\"--epochs\", type=int, default=100)\n",
    "    parser.add_argument(\"--save_dir\", type=str, default=\"checkpoints\")\n",
    "    return parser.parse_args()\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# Training entry point\n",
    "def train(args):\n",
    "    \"\"\"Trains VehicleTransformer on VeRi-776 and checkpoints the best model.\n",
    "\n",
    "    args: namespace from get_args() (batch_size, lr, epochs, save_dir).\n",
    "    \"\"\"\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "    # Training-time augmentation\n",
    "    train_transform = transforms.Compose([\n",
    "        transforms.Resize((256, 128)),\n",
    "        transforms.RandomHorizontalFlip(),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
    "    ])\n",
    "    # FIX: validation must be deterministic — same preprocessing but WITHOUT\n",
    "    # random augmentation (the original reused train_transform, so the\n",
    "    # reported accuracy varied with the random flips).\n",
    "    val_transform = transforms.Compose([\n",
    "        transforms.Resize((256, 128)),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
    "    ])\n",
    "\n",
    "    # Datasets\n",
    "    train_set = Veri776Dataset(mode=\"train\", transform=train_transform)\n",
    "    val_set = Veri776Dataset(mode=\"test\", transform=val_transform)\n",
    "\n",
    "    train_loader = DataLoader(train_set, batch_size=args.batch_size,\n",
    "                            shuffle=True, num_workers=4, pin_memory=True)\n",
    "    val_loader = DataLoader(val_set, batch_size=args.batch_size,\n",
    "                          num_workers=4, pin_memory=True)\n",
    "\n",
    "    # Model\n",
    "    model = VehicleTransformer(num_classes=776).to(device)\n",
    "\n",
    "    # Optimizer + cosine LR schedule over the full run\n",
    "    optimizer = AdamW(model.parameters(), lr=args.lr, weight_decay=1e-4)\n",
    "    scheduler = CosineAnnealingLR(optimizer, T_max=args.epochs)\n",
    "\n",
    "    best_acc = 0.0\n",
    "    for epoch in range(args.epochs):\n",
    "        model.train()\n",
    "        train_bar = tqdm(train_loader, desc=f\"Epoch {epoch+1}/{args.epochs}\")\n",
    "        for batch in train_bar:\n",
    "            images = batch[\"image\"].to(device)\n",
    "            targets = batch[\"class_id\"].to(device)\n",
    "\n",
    "            optimizer.zero_grad()\n",
    "            outputs = model(images)\n",
    "            loss = calculate_loss(outputs, targets)\n",
    "            loss.backward()\n",
    "            # Clip gradients to stabilize transformer training\n",
    "            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n",
    "            optimizer.step()\n",
    "\n",
    "            train_bar.set_postfix(loss=f\"{loss.item():.4f}\")\n",
    "\n",
    "        val_acc = validate(model, val_loader, device)\n",
    "        print(f\"Val Acc: {val_acc:.2%}\")\n",
    "\n",
    "        # Keep only the best-performing checkpoint\n",
    "        if val_acc > best_acc:\n",
    "            best_acc = val_acc\n",
    "            torch.save(model.state_dict(),\n",
    "                      os.path.join(args.save_dir, \"best_model.pth\"))\n",
    "\n",
    "        scheduler.step()\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Parse CLI args, make sure the checkpoint directory exists, then train.\n",
    "    args = get_args()\n",
    "    os.makedirs(args.save_dir, exist_ok=True)\n",
    "    train(args)\n",
    "\n",
    "  "
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch2.0",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
