{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f0307420",
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "import os\n",
    "from pathlib import Path\n",
    "\n",
    "\n",
    "# Resolve the project root so that project-local packages import cleanly.\n",
    "# BUG FIX: __file__ is not defined inside a Jupyter kernel, so the original\n",
    "# Path(__file__) call raised NameError there; fall back to the CWD instead.\n",
    "try:\n",
    "    # Script case: two levels up from this file (assumes a scripts/ layout).\n",
    "    project_root = Path(__file__).resolve().parent.parent\n",
    "except NameError:\n",
    "    # Notebook case: assume the kernel runs inside a project subdirectory.\n",
    "    project_root = Path.cwd().parent\n",
    "# Make the project root importable (e.g. datasets.veri776_dataset).\n",
    "sys.path.insert(0, str(project_root))\n",
    "\n",
    "# 打印验证路径\n",
    "#print(f\"\\n=== 当前工作目录: {os.getcwd()}\")\n",
    "#print(f\"=== 项目根目录: {project_root}\")\n",
    "#print(f\"=== 系统路径: {sys.path[:3]}\\n\")\n",
    "\n",
    "\n",
    "\n",
    "import torch\n",
    "import time\n",
    "from PIL import Image\n",
    "import argparse\n",
    "from tqdm import tqdm\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import transforms\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision.models import vit_b_16\n",
    "from einops import rearrange, repeat\n",
    "from collections import defaultdict\n",
    "from torch.utils.data import DataLoader  # 新增导入\n",
    "from torch.optim import AdamW\n",
    "import numpy as np\n",
    "from pathlib import Path\n",
    "import matplotlib.pyplot as plt\n",
    "from collections import Counter\n",
    "from sklearn.metrics import average_precision_score  # 新增导入\n",
    "from torch.optim.lr_scheduler import CosineAnnealingLR\n",
    "from datasets.veri776_dataset import Veri776Dataset\n",
    "\n",
    "\n",
    "# Defines the model's core hyper-parameters (patch size, embedding dim,\n",
    "# number of Transformer layers, ...) and the network structure.\n",
    "class VehicleTransformer(nn.Module):\n",
    "    \"\"\"Transformer backbone for vehicle re-identification (VeRi-776).\n",
    "\n",
    "    The image is split into patches and encoded with a Transformer; one\n",
    "    global (class-token) feature and `local_parts` horizontally-striped\n",
    "    local features are then fused for classification and retrieval.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "                 num_classes=776,\n",
    "                 img_size=(256, 256),\n",
    "                 patch_size=16,\n",
    "                 embed_dim=128,\n",
    "                 depth=4,\n",
    "                 num_heads=4,\n",
    "                 mlp_ratio=4,\n",
    "                 local_parts=4,\n",
    "                 pretrained=False):\n",
    "        super().__init__()\n",
    "\n",
    "        # ---- parameter validation (must run before anything else) ----\n",
    "        assert embed_dim % num_heads == 0, \\\n",
    "            f\"embed_dim必须能被num_heads整除 ({embed_dim}%{num_heads} != 0)\"\n",
    "        assert img_size[0] % patch_size == 0, \"高度必须能被分块尺寸整除\"\n",
    "        assert img_size[1] % patch_size == 0, \"宽度必须能被分块尺寸整除\"\n",
    "\n",
    "        # Patch grid geometry.\n",
    "        self.grid_size = (img_size[0] // patch_size,\n",
    "                          img_size[1] // patch_size)\n",
    "        self.num_patches = self.grid_size[0] * self.grid_size[1]\n",
    "\n",
    "        # local_parts must evenly split the rows of the patch grid.\n",
    "        assert self.grid_size[0] % local_parts == 0, \\\n",
    "            f\"local_parts必须能整除高度方向分块数 ({self.grid_size[0]}%{local_parts} != 0)\"\n",
    "\n",
    "        self.img_size = img_size\n",
    "        self.patch_size = patch_size\n",
    "        self.embed_dim = embed_dim\n",
    "        self.depth = depth\n",
    "        self.num_heads = num_heads\n",
    "        self.mlp_ratio = mlp_ratio\n",
    "        self.local_parts = local_parts\n",
    "        self.part_height = self.grid_size[0] // local_parts  # stripe height in patches\n",
    "        self.part_width = self.grid_size[1]                  # stripe spans the full width\n",
    "        self.grad_check = nn.Identity()  # no-op hook point for gradient debugging\n",
    "\n",
    "        print(f\"分块验证：{self.grid_size[0]}x{self.grid_size[1]}={self.num_patches}分块\")\n",
    "\n",
    "        # Learnable class token prepended to the patch sequence.\n",
    "        self.class_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n",
    "\n",
    "        # Patch embedding: a non-overlapping conv == per-patch linear projection.\n",
    "        self.patch_embed = nn.Conv2d(\n",
    "            3, embed_dim,\n",
    "            kernel_size=patch_size,\n",
    "            stride=patch_size\n",
    "        )\n",
    "\n",
    "        # Transformer encoder with a final LayerNorm.\n",
    "        encoder_layer = nn.TransformerEncoderLayer(\n",
    "            d_model=embed_dim,\n",
    "            nhead=num_heads,\n",
    "            dim_feedforward=int(embed_dim * mlp_ratio),\n",
    "            activation=\"gelu\",\n",
    "            batch_first=True\n",
    "        )\n",
    "        self.encoder = nn.TransformerEncoder(\n",
    "            encoder_layer,\n",
    "            num_layers=depth,\n",
    "            norm=nn.LayerNorm(embed_dim)\n",
    "        )\n",
    "\n",
    "        # NOTE(review): these per-layer MLPs are registered but never applied\n",
    "        # in forward(); kept so existing checkpoints still load. TODO: either\n",
    "        # wire them into the encoder or remove them.\n",
    "        self.skip_conn = nn.ModuleList([\n",
    "            nn.Sequential(\n",
    "                nn.Linear(embed_dim, embed_dim*2),\n",
    "                nn.GELU(),\n",
    "                nn.Linear(embed_dim*2, embed_dim)\n",
    "            ) for _ in range(depth)\n",
    "        ])\n",
    "\n",
    "        # BUG FIX: previously defined twice; the second ModuleList silently\n",
    "        # replaced the first. One auxiliary classifier per local part.\n",
    "        self.part_classifiers = nn.ModuleList([\n",
    "            nn.Linear(embed_dim, num_classes) for _ in range(local_parts)\n",
    "        ])\n",
    "\n",
    "        # Learnable position embedding for class token + all patches.\n",
    "        # (The old `torch.zeros(...) * 0.02` was a no-op; _init_weights does\n",
    "        # the real initialisation.)\n",
    "        self.pos_embed = nn.Parameter(torch.zeros(1, self.num_patches + 1, embed_dim))\n",
    "\n",
    "        if pretrained:\n",
    "            # The custom layout cannot reuse ViT pretrained position embeddings.\n",
    "            print(\"警告：自定义位置编码无法加载预训练权重\")\n",
    "\n",
    "        # Per-part local feature enhancement.\n",
    "        self.local_attention = nn.ModuleList([\n",
    "            LocalFeatureModule(\n",
    "                embed_dim=embed_dim,\n",
    "                num_heads=num_heads//2,\n",
    "                mlp_ratio=mlp_ratio,\n",
    "                part_height=self.part_height,\n",
    "                part_width=self.part_width\n",
    "            ) for _ in range(local_parts)\n",
    "        ])\n",
    "\n",
    "        # Fusion of the global feature with the local part features.\n",
    "        self.fusion = FeatureFusion(\n",
    "            embed_dim=embed_dim,\n",
    "            num_parts=local_parts+1  # global + locals\n",
    "        )\n",
    "\n",
    "        # BNNeck + linear classifier over the fused feature.\n",
    "        self.bn_neck = nn.BatchNorm1d(embed_dim * (local_parts+1) * 2)\n",
    "        self.head = nn.Sequential(\n",
    "            nn.Linear(embed_dim * (local_parts + 1) * 2, num_classes)\n",
    "        )\n",
    "\n",
    "        # LayerNorm applied to the global feature before L2 normalisation.\n",
    "        self.feat_norm = nn.LayerNorm(embed_dim)\n",
    "\n",
    "        # BUG FIX: _init_weights was defined but never invoked.\n",
    "        self._init_weights(pretrained)\n",
    "\n",
    "    def _init_weights(self, pretrained):\n",
    "        \"\"\"Initialise class token and position embedding (trunc normal, std 0.02).\"\"\"\n",
    "        if pretrained:\n",
    "            print(\"警告：自定义结构无法直接加载完整预训练权重！\")\n",
    "            # Partial weight loading (e.g. patch_embed) could be added here.\n",
    "        nn.init.trunc_normal_(self.class_token, std=0.02)\n",
    "        nn.init.trunc_normal_(self.pos_embed, std=0.02)\n",
    "\n",
    "    def forward_features(self, x):\n",
    "        \"\"\"Encode an image batch.\n",
    "\n",
    "        Args:\n",
    "            x: float tensor [B, 3, H, W] with (H, W) == img_size.\n",
    "\n",
    "        Returns:\n",
    "            (global_feature [B, embed_dim], list of local_parts tensors\n",
    "            [B, embed_dim]); all features are L2-normalised.\n",
    "        \"\"\"\n",
    "        B, C, H, W = x.shape\n",
    "        assert H == self.img_size[0], f\"输入高度{H}与模型设定{self.img_size[0]}不符\"\n",
    "        assert W == self.img_size[1], f\"输入宽度{W}与模型设定{self.img_size[1]}不符\"\n",
    "\n",
    "        h_patches, w_patches = self.grid_size\n",
    "        actual_patches = h_patches * w_patches\n",
    "\n",
    "        # Position-embedding size check (patches + class token).\n",
    "        expected_dim = actual_patches + 1\n",
    "        assert self.pos_embed.shape[1] == expected_dim, \\\n",
    "            f\"位置编码维度{self.pos_embed.shape[1]}与输入分块数{actual_patches}+1不匹配\"\n",
    "\n",
    "        # Patchify: [B, 3, H, W] -> [B, N, E].\n",
    "        x = self.patch_embed(x)   # [B, E, H_patch, W_patch]\n",
    "        x = x.flatten(2)          # [B, E, N]\n",
    "        x = x.transpose(1, 2)     # [B, N, E]\n",
    "\n",
    "        # Prepend the class token, add the position embedding.\n",
    "        cls_tokens = self.class_token.expand(B, -1, -1)\n",
    "        x = torch.cat((cls_tokens, x), dim=1)\n",
    "        x = x + self.pos_embed  # out-of-place add (safer for autograd than +=)\n",
    "\n",
    "        x = self.encoder(x)\n",
    "\n",
    "        # Global feature from the class token: LayerNorm then L2 normalise.\n",
    "        global_feature = x[:, 0]\n",
    "        global_feature = self.feat_norm(global_feature)\n",
    "        global_feature = F.normalize(global_feature, dim=-1)\n",
    "\n",
    "        # Local features: split the patch grid into horizontal stripes.\n",
    "        local_features = []\n",
    "        x_patches = x[:, 1:].reshape(B, self.local_parts, self.part_height, self.part_width, self.embed_dim)\n",
    "        for i in range(self.local_parts):\n",
    "            part_feature = x_patches[:, i].reshape(B, -1, self.embed_dim)\n",
    "            part_feature = self.local_attention[i](part_feature)\n",
    "            part_feature = F.normalize(part_feature, dim=-1)\n",
    "            local_features.append(part_feature)\n",
    "\n",
    "        return global_feature, local_features\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Full forward pass.\n",
    "\n",
    "        Returns a dict with global/local/fused features, classification\n",
    "        logits, per-part logits, and the BN-ed + L2-normalised feature\n",
    "        used for retrieval (BNNeck).\n",
    "        \"\"\"\n",
    "        x = self.grad_check(x)\n",
    "        global_feat, local_feats = self.forward_features(x)\n",
    "\n",
    "        # BNNeck: classify on the BN-ed feature, retrieve on its L2-normed copy.\n",
    "        fused = self.fusion(global_feat, local_feats)\n",
    "        fused_bn = self.bn_neck(fused)\n",
    "        fused_bn_normalized = F.normalize(fused_bn, dim=1)\n",
    "\n",
    "        logits = self.head(fused_bn)\n",
    "\n",
    "        # Per-part auxiliary classification logits.\n",
    "        part_logits = [cls(feat) for cls, feat in zip(self.part_classifiers, local_feats)]\n",
    "\n",
    "        return {\n",
    "            'global': global_feat,\n",
    "            'local': local_feats,\n",
    "            'fused': fused,\n",
    "            'logits': logits,\n",
    "            'part_logits': part_logits,\n",
    "            'bn_feature': fused_bn_normalized,\n",
    "        }\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "#对图像分块的局部区域进行自注意力和通道注意力处理，增强细粒度特征\n",
    "class LocalFeatureModule(nn.Module):   #功能：对每个水平区域的特征进行细粒度特征提取 创新点：在自注意力后引入通道注意力，增强重要通道的响应\n",
    "    \"\"\"局部特征增强模块\"\"\"\n",
    "    def __init__(self, embed_dim, num_heads, mlp_ratio, \n",
    "                 part_height,  # 接收分块高度参数\n",
    "                 part_width): # 接收分块宽度参数\n",
    "        super().__init__()\n",
    "        self.attention = nn.MultiheadAttention(embed_dim, num_heads)      # 多头注意力\n",
    "        self.norm1 = nn.LayerNorm(embed_dim)\n",
    "        self.mlp = nn.Sequential(\n",
    "            nn.Linear(embed_dim, int(embed_dim * mlp_ratio)),\n",
    "            nn.GELU(),\n",
    "            nn.Linear(int(embed_dim * mlp_ratio), embed_dim)\n",
    "        )\n",
    "        self.norm2 = nn.LayerNorm(embed_dim)\n",
    "\n",
    "        # 新增通道注意力\n",
    "        self.channel_att = nn.Sequential(\n",
    "            nn.Linear(embed_dim, embed_dim//4),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(embed_dim//4, embed_dim),\n",
    "            nn.Sigmoid()\n",
    "        )\n",
    "        \n",
    "       \n",
    "        \n",
    "         # 根据分块尺寸生成位置编码\n",
    "        self.pos_embed = nn.Parameter(\n",
    "            torch.randn(1, part_height * part_width, embed_dim)\n",
    "        )\n",
    "\n",
    "        \n",
    "\n",
    "    def forward(self, x):\n",
    "        B, N, D = x.shape\n",
    "        pos_embed = self.pos_embed[:, :N]  # 动态适配序列长度\n",
    "        x = x + pos_embed\n",
    "        attn_out, _ = self.attention(x, x, x)\n",
    "        x = self.norm1(x + attn_out)\n",
    "        x = self.norm2(x + self.mlp(x))\n",
    "\n",
    "        # 在自注意力后添加\n",
    "        channel_weights = self.channel_att(x.mean(dim=1, keepdim=True))\n",
    "        x = x * channel_weights\n",
    "\n",
    "\n",
    "\n",
    "        return x.mean(dim=1)\n",
    "\n",
    "\n",
    "\n",
    " \n",
    "#通过注意力机制动态融合全局和局部特征，生成最终判别特征\n",
    "class FeatureFusion(nn.Module):  #功能：使用多头注意力机制动态融合多层级特征       特点：自适应权重学习 + 特征交互增强\n",
    "\n",
    "\n",
    "    \"\"\"多特征融合模块\"\"\"\n",
    "    def __init__(self, embed_dim, num_parts):\n",
    "        super().__init__()\n",
    "        self.embed_dim = embed_dim\n",
    "        self.num_parts = num_parts\n",
    "\n",
    "        # 关键修复：添加batch_first参数\n",
    "        self.attention = nn.MultiheadAttention(\n",
    "            embed_dim, \n",
    "            num_heads=4,\n",
    "            batch_first=True  # 允许(Batch, Seq, Embed)输入格式\n",
    "        )\n",
    "\n",
    "\n",
    "        # 修正归一化层维度\n",
    "        self.norm = nn.LayerNorm(embed_dim * num_parts * 2)  # 维度翻倍\n",
    "        \n",
    "        # 可学习的融合权重\n",
    "        self.weights = nn.Parameter(torch.ones(num_parts))\n",
    "        \n",
    "    def forward(self, global_feat, local_feats):\n",
    "        \n",
    "        # 拼接所有特征 [B, P, D]\n",
    "        all_feats = torch.stack(\n",
    "            [global_feat] + local_feats, \n",
    "            dim=1\n",
    "        )\n",
    "        \n",
    "        # 自适应权重融合\n",
    "        weights = torch.softmax(self.weights, dim=0)\n",
    "        weighted_feats = all_feats * weights.view(1, -1, 1)\n",
    "\n",
    "        # 维度验证\n",
    "        assert weighted_feats.dim() == 3, f\"期望3D输入，实际维度{weighted_feats.dim()}\"# 确保输入为3D张量\n",
    "        \n",
    "        # 注意力交互 [B, P, D]\n",
    "        interacted, _ = self.attention(\n",
    "            weighted_feats, weighted_feats, weighted_feats\n",
    "        )\n",
    "        \n",
    "        # 拼接特征 [B, P*D*2]\n",
    "        fused = torch.cat([\n",
    "            weighted_feats.reshape(weighted_feats.size(0), -1),\n",
    "            interacted.reshape(interacted.size(0), -1)\n",
    "        ], dim=1)\n",
    "        \n",
    "         # 添加维度验证\n",
    "        expected_dim = self.embed_dim * self.num_parts * 2\n",
    "        assert fused.shape[1] == expected_dim, \\\n",
    "            f\"特征维度错误！期望 {expected_dim}, 实际 {fused.shape[1]}\"\n",
    "\n",
    "        return self.norm(fused)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def test_patch_embed():\n",
    "    \"\"\"Smoke-test forward_features on a small dummy batch and print the\n",
    "    resulting global/local feature shapes.\"\"\"\n",
    "    smoke_model = VehicleTransformer(\n",
    "        num_classes=776,\n",
    "        img_size=(256, 256),\n",
    "        patch_size=16,\n",
    "        local_parts=4,  # must match the model configuration\n",
    "        num_heads=4,\n",
    "        embed_dim=128\n",
    "    )\n",
    "    fake_batch = torch.randn(2, 3, 256, 256)\n",
    "\n",
    "    # Run feature extraction only (no classification head).\n",
    "    global_feat, local_feats = smoke_model.forward_features(fake_batch)\n",
    "\n",
    "    print(\"\\n=== 特征维度验证 ===\")\n",
    "    print(f\"全局特征维度: {global_feat.shape}\")\n",
    "    print(f\"局部特征数量: {len(local_feats)}\")         # should equal local_parts\n",
    "    print(f\"单个局部特征维度: {local_feats[0].shape}\")\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# Smoke test: build the model, run a dummy batch, and inspect shapes/norms.\n",
    "if __name__ == \"__main__\":\n",
    "\n",
    "    model = VehicleTransformer(\n",
    "        img_size=(256, 256),# must match the model's default input size\n",
    "        patch_size=16,\n",
    "        local_parts=4,\n",
    "        embed_dim=128,  # keep the default value\n",
    "        num_heads=4 ,\n",
    "        depth=4\n",
    "    )\n",
    "    # Forward-pass smoke test.\n",
    "    dummy_input = torch.randn(2, 3, 256, 256)  # input size matches img_size\n",
    "    outputs = model(dummy_input)\n",
    "\n",
    "    # Check the norms of the L2-normalised features.\n",
    "    global_feat = outputs['global']\n",
    "    local_feat = outputs['local'][0]\n",
    "    \n",
    "    print(\"全局特征范数:\", torch.norm(global_feat, dim=1))  # should be close to 1.0\n",
    "    print(\"局部特征范数:\", torch.norm(local_feat, dim=1))  # close to 1.0 when local normalisation is enabled\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "    test_patch_embed()\n",
    "    # Index into the Sequential head to reach its layers.\n",
    "    print(\"分类层结构:\", model.head)\n",
    "    print(\"分类层输入维度:\", model.head[0].in_features)  # head[0] is the Linear layer (old comment said LayerNorm)\n",
    "    print(\"分类输出尺寸:\", outputs['logits'].shape)\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "591b76b4",
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "import os\n",
    "from pathlib import Path\n",
    "\n",
    "\n",
    "# Resolve the project root so that project-local packages import cleanly.\n",
    "# BUG FIX: __file__ is not defined inside a Jupyter kernel, so the original\n",
    "# Path(__file__) call raised NameError there; fall back to the CWD instead.\n",
    "try:\n",
    "    # Script case: two levels up from this file (assumes a scripts/ layout).\n",
    "    project_root = Path(__file__).resolve().parent.parent\n",
    "except NameError:\n",
    "    # Notebook case: assume the kernel runs inside a project subdirectory.\n",
    "    project_root = Path.cwd().parent\n",
    "# Make the project root importable (e.g. datasets.veri776_dataset).\n",
    "sys.path.insert(0, str(project_root))\n",
    "\n",
    "# 打印验证路径\n",
    "#print(f\"\\n=== 当前工作目录: {os.getcwd()}\")\n",
    "#print(f\"=== 项目根目录: {project_root}\")\n",
    "#print(f\"=== 系统路径: {sys.path[:3]}\\n\")\n",
    "\n",
    "\n",
    "\n",
    "import torch\n",
    "import time\n",
    "from PIL import Image\n",
    "import argparse\n",
    "from tqdm import tqdm\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import transforms\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision.models import vit_b_16\n",
    "from einops import rearrange, repeat\n",
    "from collections import defaultdict\n",
    "from torch.utils.data import DataLoader  # 新增导入\n",
    "from torch.optim import AdamW\n",
    "import numpy as np\n",
    "from pathlib import Path\n",
    "import matplotlib.pyplot as plt\n",
    "from collections import Counter\n",
    "from sklearn.metrics import average_precision_score  # 新增导入\n",
    "from torch.optim.lr_scheduler import CosineAnnealingLR\n",
    "from datasets.veri776_dataset import Veri776Dataset\n",
    "\n",
    "# Core: multi-scale Transformer architecture for vehicle re-identification.\n",
    "class VehicleTransformer(nn.Module):\n",
    "    \"\"\"Multi-scale Transformer backbone for vehicle re-identification.\n",
    "\n",
    "    One patch-embedding + shallow/deep encoder pair per entry in\n",
    "    `patch_sizes`; each scale yields a global (class-token) feature plus\n",
    "    `local_parts` striped local features, all fused for a BNNeck-style\n",
    "    classification/retrieval head.\n",
    "    \"\"\"\n",
    "    def __init__(self,\n",
    "                 num_classes=776,\n",
    "                 img_size=(224, 224),\n",
    "                 patch_sizes=(16, 8),  # BUG FIX: tuple default (mutable list defaults are a Python pitfall)\n",
    "                 embed_dim=128,\n",
    "                 depth=4,\n",
    "                 num_heads=4,\n",
    "                 mlp_ratio=4,\n",
    "                 local_parts=7,\n",
    "                 pretrained=False,\n",
    "                 use_checkpoint=True,   # enable gradient checkpointing to cut memory\n",
    "                 apply_checkpoint=None  # callable(module, tensor) implementing the checkpoint\n",
    "                 ):\n",
    "        super().__init__()\n",
    "\n",
    "        # Parameter validation.\n",
    "        assert len(patch_sizes) > 1, \"多尺度Patch至少需要两种尺寸\"\n",
    "        self.patch_sizes = patch_sizes\n",
    "        self.img_size = img_size\n",
    "\n",
    "        # One patch-embedding conv per scale.\n",
    "        self.patch_embeds = nn.ModuleList([\n",
    "            nn.Conv2d(3, embed_dim, kernel_size=ps, stride=ps)\n",
    "            for ps in patch_sizes\n",
    "        ])\n",
    "\n",
    "        # Grid geometry and patch counts per scale.\n",
    "        self.grid_sizes = [(img_size[0]//ps, img_size[1]//ps) for ps in patch_sizes]\n",
    "        self.num_patches = [gs[0]*gs[1] for gs in self.grid_sizes]\n",
    "\n",
    "        # local_parts must evenly split the patch rows at every scale.\n",
    "        for i, gs in enumerate(self.grid_sizes):\n",
    "            assert gs[0] % local_parts == 0, \\\n",
    "            f\"local_parts必须能整除第{i}个尺度的高度分块数 ({gs[0]}%{local_parts} != 0)\"\n",
    "\n",
    "        # Cache hyper-parameters.\n",
    "        self.local_parts = local_parts\n",
    "        self.part_height = [gs[0]//local_parts for gs in self.grid_sizes]\n",
    "        self.part_width = [gs[1] for gs in self.grid_sizes]\n",
    "        self.embed_dim = embed_dim\n",
    "        self.depth = depth\n",
    "        self.num_heads = num_heads\n",
    "        self.mlp_ratio = mlp_ratio\n",
    "        self.apply_checkpoint = apply_checkpoint\n",
    "        self.grad_check = nn.Identity()  # no-op hook point for gradient debugging\n",
    "\n",
    "        # Report the per-scale patch layout.\n",
    "        # BUG FIX: the loop variable was named `np`, shadowing the numpy import.\n",
    "        for i, (ps, gs, n_patches) in enumerate(zip(patch_sizes, self.grid_sizes, self.num_patches)):\n",
    "            print(f\"尺度{i+1} - Patch大小: {ps}x{ps}, 网格: {gs[0]}x{gs[1]}, 分块数: {n_patches}\")\n",
    "\n",
    "        # Class token shared by all scales.\n",
    "        self.class_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n",
    "\n",
    "        # Per-scale position embeddings.\n",
    "        self.pos_embeds = nn.ParameterList()\n",
    "\n",
    "        # Per-scale hierarchical encoders.\n",
    "        self.shallow_encoders = nn.ModuleList()  # shallow stage (local detail)\n",
    "        self.deep_encoders = nn.ModuleList()     # deep stage (global semantics)\n",
    "        self.skip_conns = nn.ModuleList()        # cross-stage skip connections\n",
    "\n",
    "        for i in range(len(patch_sizes)):\n",
    "            # Independent position embedding per scale. (The old\n",
    "            # `torch.zeros(...) * 0.02` was a no-op and has been dropped;\n",
    "            # trunc_normal_ performs the real initialisation.)\n",
    "            pos_embed = nn.Parameter(\n",
    "                torch.zeros(1, self.num_patches[i] + 1, embed_dim)\n",
    "            )\n",
    "            nn.init.trunc_normal_(pos_embed, std=0.02)\n",
    "            self.pos_embeds.append(pos_embed)\n",
    "\n",
    "            # Shallow encoder (local feature extraction).\n",
    "            shallow_layer = nn.TransformerEncoderLayer(\n",
    "                d_model=embed_dim,\n",
    "                nhead=num_heads,\n",
    "                dim_feedforward=int(embed_dim * mlp_ratio),\n",
    "                batch_first=True\n",
    "            )\n",
    "            shallow_encoder = nn.TransformerEncoder(shallow_layer, num_layers=2)\n",
    "            self.shallow_encoders.append(shallow_encoder)\n",
    "\n",
    "            # Deep encoder (global semantic aggregation).\n",
    "            deep_layer = nn.TransformerEncoderLayer(\n",
    "                d_model=embed_dim,\n",
    "                nhead=num_heads,\n",
    "                dim_feedforward=int(embed_dim * mlp_ratio),\n",
    "                batch_first=True\n",
    "            )\n",
    "            deep_encoder = nn.TransformerEncoder(deep_layer, num_layers=2)\n",
    "            self.deep_encoders.append(deep_encoder)\n",
    "\n",
    "            # Cross-stage skip connection (shallow output -> deep output).\n",
    "            skip_conn = nn.Sequential(\n",
    "                nn.Linear(embed_dim, embed_dim*2),\n",
    "                nn.GELU(),\n",
    "                nn.Linear(embed_dim*2, embed_dim)\n",
    "            )\n",
    "            self.skip_conns.append(skip_conn)\n",
    "\n",
    "        # Local attention modules: one list of parts per scale.\n",
    "        self.local_attentions = nn.ModuleList([\n",
    "            nn.ModuleList([\n",
    "                LocalFeatureModule(\n",
    "                    embed_dim=embed_dim,\n",
    "                    num_heads=num_heads//2,\n",
    "                    mlp_ratio=mlp_ratio,\n",
    "                    part_height=self.part_height[i],\n",
    "                    part_width=self.part_width[i]\n",
    "                ) for _ in range(local_parts)\n",
    "            ]) for i in range(len(patch_sizes))\n",
    "        ])\n",
    "\n",
    "        # Per-scale, per-part auxiliary classifiers.\n",
    "        self.part_classifiers = nn.ModuleList([\n",
    "            nn.ModuleList([\n",
    "                nn.Linear(embed_dim, num_classes) for _ in range(local_parts)\n",
    "            ]) for i in range(len(patch_sizes))\n",
    "        ])\n",
    "\n",
    "        # Fusion: every scale contributes 1 global + local_parts features.\n",
    "        total_parts = len(patch_sizes) * (local_parts + 1)\n",
    "        self.fusion = FeatureFusion(\n",
    "            embed_dim=embed_dim,\n",
    "            num_scales=len(patch_sizes),\n",
    "            local_parts=local_parts\n",
    "        )\n",
    "\n",
    "        # BNNeck + classification head over the fused feature.\n",
    "        self.bn_neck = nn.BatchNorm1d(embed_dim * total_parts * 2)\n",
    "        self.head = nn.Sequential(\n",
    "            nn.Linear(embed_dim * total_parts * 2, num_classes)\n",
    "        )\n",
    "\n",
    "        # LayerNorm applied to global features before L2 normalisation.\n",
    "        self.feat_norm = nn.LayerNorm(embed_dim)\n",
    "\n",
    "        self.use_checkpoint = use_checkpoint\n",
    "        if use_checkpoint:\n",
    "            print(\"启用梯度检查点，减少显存占用\")\n",
    "\n",
    "    def forward_features(self, x):\n",
    "        \"\"\"Encode an image batch at every scale.\n",
    "\n",
    "        Args:\n",
    "            x: float tensor [B, 3, H, W] with (H, W) == img_size.\n",
    "\n",
    "        Returns:\n",
    "            (list of per-scale global features [B, embed_dim],\n",
    "             list of per-scale lists of local features [B, embed_dim]).\n",
    "        \"\"\"\n",
    "        B, C, H, W = x.shape\n",
    "        multi_scale_feats = []\n",
    "        multi_scale_local = []\n",
    "\n",
    "        for i, (pe, shallow_encoder, deep_encoder, skip_conn, local_attention) in enumerate(\n",
    "            zip(self.patch_embeds, self.shallow_encoders, self.deep_encoders,\n",
    "                self.skip_conns, self.local_attentions)\n",
    "        ):\n",
    "            # Input size validation.\n",
    "            assert H == self.img_size[0], f\"输入高度{H}与模型设定{self.img_size[0]}不符\"\n",
    "            assert W == self.img_size[1], f\"输入宽度{W}与模型设定{self.img_size[1]}不符\"\n",
    "\n",
    "            h_patches, w_patches = self.grid_sizes[i]\n",
    "            actual_patches = h_patches * w_patches\n",
    "\n",
    "            # Position-embedding size validation.\n",
    "            pos_embed = self.pos_embeds[i]\n",
    "            expected_dim = actual_patches + 1\n",
    "            assert pos_embed.shape[1] == expected_dim, \\\n",
    "                f\"尺度{i}位置编码维度{pos_embed.shape[1]}与分块数{actual_patches}+1不匹配\"\n",
    "\n",
    "            # 1. Patch embedding: [B, 3, H, W] -> [B, N+1, E] with class token.\n",
    "            feat = pe(x)  # [B, E, H_patch, W_patch]\n",
    "            feat = feat.flatten(2).transpose(1, 2)  # [B, N, E]\n",
    "            feat = torch.cat((self.class_token.expand(B, -1, -1), feat), dim=1)\n",
    "            feat = feat + pos_embed  # out-of-place add (safer for autograd than +=)\n",
    "\n",
    "            # 2. Shallow encoder (local detail).\n",
    "            shallow_feat = shallow_encoder(feat)\n",
    "\n",
    "            # 3. Deep encoder (global semantics), optionally checkpointed.\n",
    "            residual = shallow_feat\n",
    "            if self.use_checkpoint and self.apply_checkpoint:\n",
    "                deep_feat = self.apply_checkpoint(deep_encoder, shallow_feat)\n",
    "            else:\n",
    "                deep_feat = deep_encoder(shallow_feat)\n",
    "            deep_feat = residual + skip_conn(deep_feat)  # cross-stage skip connection\n",
    "\n",
    "            # 4. Global feature from the deep class token.\n",
    "            global_feature = deep_feat[:, 0]\n",
    "            global_feature = self.feat_norm(global_feature)\n",
    "            global_feature = F.normalize(global_feature, dim=-1)\n",
    "            multi_scale_feats.append(global_feature)\n",
    "\n",
    "            # 5. Local features from the shallow stage (keeps more detail).\n",
    "            local_features = []\n",
    "            shallow_patches = shallow_feat[:, 1:]  # drop the class token\n",
    "            shallow_patches = shallow_patches.reshape(B, self.local_parts,\n",
    "                                                    self.part_height[i], self.part_width[i],\n",
    "                                                    self.embed_dim)\n",
    "            for p in range(self.local_parts):\n",
    "                part_feature = shallow_patches[:, p].reshape(B, -1, self.embed_dim)\n",
    "                part_feature = local_attention[p](part_feature)  # local attention boost\n",
    "                part_feature = F.normalize(part_feature, dim=-1)\n",
    "                local_features.append(part_feature)\n",
    "            multi_scale_local.append(local_features)\n",
    "\n",
    "        return multi_scale_feats, multi_scale_local\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Full forward pass; returns a dict of features and logits.\"\"\"\n",
    "        x = self.grad_check(x)\n",
    "        global_feats, multi_scale_local = self.forward_features(x)\n",
    "\n",
    "        # BUG FIX: removed a dead `all_feats` list that was built here but\n",
    "        # never used; fusion consumes the raw global/local features directly.\n",
    "\n",
    "        # Fuse all scales, then BNNeck.\n",
    "        fused = self.fusion(global_feats=global_feats, local_feats=multi_scale_local)\n",
    "        fused_bn = self.bn_neck(fused)\n",
    "        fused_bn_normalized = F.normalize(fused_bn, dim=1)\n",
    "\n",
    "        # Classification head uses the BN-ed (non-normalised) feature.\n",
    "        logits = self.head(fused_bn)\n",
    "\n",
    "        # Per-scale, per-part auxiliary logits.\n",
    "        part_logits = []\n",
    "        for i, scale_local in enumerate(multi_scale_local):\n",
    "            for p, feat in enumerate(scale_local):\n",
    "                part_logits.append(self.part_classifiers[i][p](feat))\n",
    "\n",
    "        return {\n",
    "            'global': global_feats,\n",
    "            'local': multi_scale_local,\n",
    "            'fused': fused,\n",
    "            'logits': logits,\n",
    "            'part_logits': part_logits,\n",
    "            'bn_feature': fused_bn_normalized,\n",
    "        }\n",
    "\n",
    "\n",
    "\n",
    "# Local feature enhancement module\n",
    "class LocalFeatureModule(nn.Module):\n",
    "    \"\"\"Refine one part's patch tokens and pool them into a single descriptor.\n",
    "\n",
    "    Pipeline: learnable positional embedding -> multi-head self-attention\n",
    "    (post-norm residual) -> MLP (post-norm residual) -> sigmoid channel gate\n",
    "    computed from the token mean -> mean pooling over tokens.\n",
    "\n",
    "    Args:\n",
    "        embed_dim: token embedding dimension D.\n",
    "        num_heads: attention heads for nn.MultiheadAttention.\n",
    "        mlp_ratio: expansion factor for the MLP hidden layer.\n",
    "        part_height: token-grid height of the part region.\n",
    "        part_width: token-grid width of the part region.\n",
    "    \"\"\"\n",
    "    def __init__(self, embed_dim, num_heads, mlp_ratio, part_height, part_width):\n",
    "        super().__init__()\n",
    "        self.attention = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)\n",
    "        self.norm1 = nn.LayerNorm(embed_dim)\n",
    "        hidden_dim = int(embed_dim * mlp_ratio)\n",
    "        self.mlp = nn.Sequential(\n",
    "            nn.Linear(embed_dim, hidden_dim),\n",
    "            nn.GELU(),\n",
    "            nn.Linear(hidden_dim, embed_dim),\n",
    "        )\n",
    "        self.norm2 = nn.LayerNorm(embed_dim)\n",
    "        # Squeeze-and-excitation style sigmoid gate over the channel dimension.\n",
    "        self.channel_att = nn.Sequential(\n",
    "            nn.Linear(embed_dim, embed_dim // 4),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(embed_dim // 4, embed_dim),\n",
    "            nn.Sigmoid(),\n",
    "        )\n",
    "        # One positional vector per token of the part grid.\n",
    "        self.pos_embed = nn.Parameter(\n",
    "            torch.randn(1, part_height * part_width, embed_dim)\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"x: (B, N, D) part tokens -> (B, D) pooled part descriptor.\"\"\"\n",
    "        _, n_tokens, _ = x.shape\n",
    "        # Truncate the positional table so shorter token sequences still broadcast.\n",
    "        tokens = x + self.pos_embed[:, :n_tokens]\n",
    "        attended, _ = self.attention(tokens, tokens, tokens)\n",
    "        tokens = self.norm1(tokens + attended)\n",
    "        tokens = self.norm2(tokens + self.mlp(tokens))\n",
    "        gate = self.channel_att(tokens.mean(dim=1, keepdim=True))\n",
    "        return (tokens * gate).mean(dim=1)\n",
    "\n",
    "\n",
    "# Dynamic feature fusion module\n",
    "class FeatureFusion(nn.Module):\n",
    "    \"\"\"Enhanced dynamic feature fusion module.\n",
    "\n",
    "    Fuses per-scale global features with per-scale local part features using\n",
    "    channel attention, spatial attention, learned per-part scalar weights and\n",
    "    a self-attention interaction step.  The output concatenates the weighted\n",
    "    and the interacted features, giving size ``embed_dim * num_parts * 2``.\n",
    "\n",
    "    Args:\n",
    "        embed_dim: dimension D of every input feature vector.\n",
    "        num_scales: number of scales (one global feature per scale).\n",
    "        local_parts: number of local part features per scale.\n",
    "    \"\"\"\n",
    "    def __init__(self, embed_dim, num_scales, local_parts):\n",
    "        super().__init__()\n",
    "        self.embed_dim = embed_dim\n",
    "        self.num_scales = num_scales\n",
    "        self.local_parts = local_parts\n",
    "        # One global feature plus `local_parts` local features per scale.\n",
    "        self.num_parts = num_scales * (local_parts + 1)\n",
    "        # Fused output = weighted feats concatenated with interacted feats, hence *2.\n",
    "        self.total_dim = embed_dim * self.num_parts * 2\n",
    "        \n",
    "        # Channel attention: 1x1 convolutions produce a sigmoid gate per channel.\n",
    "        self.channel_att = nn.Sequential(\n",
    "            nn.Conv1d(embed_dim, embed_dim//4, kernel_size=1),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv1d(embed_dim//4, embed_dim, kernel_size=1),\n",
    "            nn.Sigmoid()\n",
    "        )\n",
    "        \n",
    "        # Spatial attention: one sigmoid gate per part position.\n",
    "        self.spatial_att = nn.Sequential(\n",
    "            nn.Conv2d(embed_dim, 1, kernel_size=1),\n",
    "            nn.Sigmoid()\n",
    "        )\n",
    "        \n",
    "        # Dynamic fusion weights: one small MLP per part producing a scalar score.\n",
    "        self.fusion_weights = nn.ModuleList([\n",
    "            nn.Sequential(\n",
    "                nn.Linear(embed_dim, embed_dim//4),\n",
    "                nn.ReLU(),\n",
    "                nn.Linear(embed_dim//4, 1)\n",
    "            ) for _ in range(self.num_parts)\n",
    "        ])\n",
    "        \n",
    "        # Cross-part interaction (head count capped at min(4, num_parts)).\n",
    "        self.attention = nn.MultiheadAttention(\n",
    "            embed_dim,\n",
    "            num_heads=min(4, self.num_parts),\n",
    "            batch_first=True\n",
    "        )\n",
    "        \n",
    "        self.norm = nn.LayerNorm(self.total_dim)\n",
    "    \n",
    "\n",
    "\n",
    "    def forward(self, global_feats, local_feats):\n",
    "        \"\"\"Fuse all scale/part features into one vector per sample.\n",
    "\n",
    "        Args:\n",
    "            global_feats: sequence of (B, D) tensors, one per scale.\n",
    "            local_feats: per-scale sequences of (B, D) part tensors.\n",
    "\n",
    "        Returns:\n",
    "            (B, total_dim) layer-normalized fused feature.\n",
    "        \"\"\"\n",
    "        B = global_feats[0].shape[0]\n",
    "        # Gather features scale by scale: global first, then its local parts.\n",
    "        all_feats = []\n",
    "        for i in range(self.num_scales):\n",
    "            all_feats.append(global_feats[i])\n",
    "            all_feats.extend(local_feats[i])\n",
    "        \n",
    "        # Validate the feature count against the configured layout.\n",
    "        assert len(all_feats) == self.num_parts, \\\n",
    "            f\"特征数不匹配！期望{self.num_parts}，实际{len(all_feats)}\"\n",
    "        \n",
    "        all_feats = torch.stack(all_feats, dim=1)  # [B, P, D]\n",
    "        P, D = all_feats.shape[1], all_feats.shape[2]  # unused below; kept for readability\n",
    "        \n",
    "        # Channel attention\n",
    "        channel_att = self.channel_att(all_feats.permute(0, 2, 1))  # [B, D, P]\n",
    "        channel_att = channel_att.permute(0, 2, 1)  # [B, P, D]\n",
    "        channel_weighted = all_feats * channel_att  # [B, P, D]\n",
    "        \n",
    "        # Spatial attention (parts treated as a P x 1 spatial map)\n",
    "        spatial_feat = channel_weighted.permute(0, 2, 1).unsqueeze(3)  # [B, D, P, 1]\n",
    "        spatial_att = self.spatial_att(spatial_feat).squeeze(3)  # [B, 1, P]\n",
    "        spatial_att = spatial_att.permute(0, 2, 1)  # [B, P, 1]\n",
    "        spatial_weighted = channel_weighted * spatial_att\n",
    "        \n",
    "        # Dynamic weight fusion: softmax over parts of the per-part scalar scores\n",
    "        weights = []\n",
    "        for i in range(self.num_parts):\n",
    "            weight = self.fusion_weights[i](spatial_weighted[:, i])  # [B, 1]\n",
    "            weights.append(weight)\n",
    "        weights = torch.stack(weights, dim=1)  # [B, P, 1]\n",
    "        weights = F.softmax(weights, dim=1)    \n",
    "        \n",
    "       \n",
    "\n",
    "        weighted_feats = spatial_weighted * weights  # [B, P, D]\n",
    "        \n",
    "        # Cross-part interaction via self-attention\n",
    "        interacted, _ = self.attention(weighted_feats, weighted_feats, weighted_feats)\n",
    "        \n",
    "        # Concatenate weighted and interacted features\n",
    "        fused = torch.cat([\n",
    "            weighted_feats.reshape(B, -1),\n",
    "            interacted.reshape(B, -1)\n",
    "        ], dim=1)  # [B, P*D*2]\n",
    "        \n",
    "        return self.norm(fused)\n",
    "    \n",
    "\n",
    "\n",
    "# Smoke-test / debugging script\n",
    "if __name__ == \"__main__\":\n",
    "    # NOTE(review): the VehicleTransformer.__init__ visible earlier in this\n",
    "    # notebook takes `patch_size` (singular); confirm the definition in scope\n",
    "    # here accepts `patch_sizes` and `local_parts`.\n",
    "    model = VehicleTransformer(\n",
    "        img_size=(224, 224),\n",
    "        patch_sizes=[16, 8],\n",
    "        local_parts=7,\n",
    "        embed_dim=128,\n",
    "        num_heads=4,\n",
    "        depth=4\n",
    "    )\n",
    "    dummy_input = torch.randn(2, 3, 224, 224)\n",
    "    outputs = model(dummy_input)\n",
    "\n",
    "    with torch.no_grad():\n",
    "        # Grab the first scale's patch embedding and encoder stages\n",
    "        pe = model.patch_embeds[0]\n",
    "        shallow_encoder = model.shallow_encoders[0]\n",
    "        deep_encoder = model.deep_encoders[0]\n",
    "    \n",
    "        # Replay the feature pipeline manually: embed -> flatten -> cls token -> pos\n",
    "        feat = pe(dummy_input)\n",
    "        feat = feat.flatten(2).transpose(1, 2)\n",
    "        feat = torch.cat((model.class_token.expand(2, -1, -1), feat), dim=1)\n",
    "        feat += model.pos_embeds[0]  # assumes pos_embeds[0] matches this token count\n",
    "    \n",
    "        shallow_feat = shallow_encoder(feat)\n",
    "        deep_feat = deep_encoder(shallow_feat)\n",
    "    \n",
    "        print(\"浅层编码器输出维度:\", shallow_feat.shape)  # expect [2, 197, 128]\n",
    "        print(\"深层编码器输出维度:\", deep_feat.shape)    # expect [2, 197, 128]\n",
    "    \n",
    "    # Check feature norms\n",
    "        global_feat = outputs['global'][0]  # global feature of the first scale\n",
    "        print(\"全局特征范数:\", torch.norm(global_feat, dim=1))\n",
    "    \n",
    "        local_feat = outputs['local'][0][0]  # first local feature of the first scale\n",
    "        print(\"局部特征范数:\", torch.norm(local_feat, dim=1))\n",
    "    \n",
    "    # Print classifier-head info\n",
    "        print(\"分类层输入维度:\", model.head[0].in_features)\n",
    "        print(\"分类输出尺寸:\", outputs['logits'].shape)\n",
    "\n",
    "    # Verify the fusion module's output dimension\n",
    "        fused = outputs['fused']\n",
    "        expected_dim = model.fusion.total_dim\n",
    "        print(f\"融合特征维度: {fused.shape} (期望: {expected_dim})\")\n",
    "        \n",
    "    \n",
    "    # Inspect the channel-attention weight distribution\n",
    "    with torch.no_grad():\n",
    "        global_feats, local_feats = model.forward_features(dummy_input)\n",
    "        all_feats = torch.stack(\n",
    "            [global_feats[i] for i in range(len(global_feats))] + [feat for feats in local_feats for feat in feats],\n",
    "            dim=1\n",
    "        )\n",
    "        fusion = model.fusion\n",
    "        channel_att = fusion.channel_att(all_feats.transpose(1, 2)).transpose(1, 2)\n",
    "        \n",
    "        print(\"通道注意力权重分布:\", channel_att.mean(dim=0).squeeze())  # mean channel weight per feature\n",
    "\n",
    "\n",
    "\n",
    "    # Full shape walkthrough of the fusion forward pass\n",
    "    with torch.no_grad():\n",
    "        global_feats, local_feats = model.forward_features(dummy_input)\n",
    "        fusion = model.fusion\n",
    "        \n",
    "        # Gather all features (global first per scale, then its locals)\n",
    "        all_feats = []\n",
    "        for i in range(fusion.num_scales):\n",
    "            all_feats.append(global_feats[i])\n",
    "            all_feats.extend(local_feats[i])\n",
    "        all_feats = torch.stack(all_feats, dim=1)  # [B, P, D]\n",
    "        print(f\"all_feats.shape: {all_feats.shape}\")  # expect [2, 16, 128]\n",
    "        \n",
    "        # Channel attention\n",
    "        channel_att = fusion.channel_att(all_feats.permute(0, 2, 1))  # [B, D, P]\n",
    "        channel_att = channel_att.permute(0, 2, 1)  # [B, P, D]\n",
    "        channel_weighted = all_feats * channel_att  # [B, P, D]\n",
    "        print(f\"channel_weighted.shape: {channel_weighted.shape}\")  # expect [2, 16, 128]\n",
    "        \n",
    "        # Spatial attention\n",
    "        spatial_feat = channel_weighted.permute(0, 2, 1).unsqueeze(3)  # [B, D, P, 1]\n",
    "        print(f\"spatial_feat.shape: {spatial_feat.shape}\")  # expect [2, 128, 16, 1]\n",
    "        spatial_att = fusion.spatial_att(spatial_feat)  # [B, 1, P, 1]\n",
    "        print(f\"spatial_att.shape: {spatial_att.shape}\")  # expect [2, 1, 16, 1]\n",
    "        spatial_att = spatial_att.squeeze(1)  # [B, P, 1]\n",
    "        print(f\"squeezed spatial_att.shape: {spatial_att.shape}\")  # expect [2, 16, 1]\n",
    "        spatial_weighted = channel_weighted * spatial_att  # [B, P, D]\n",
    "        print(f\"spatial_weighted.shape: {spatial_weighted.shape}\")  # expect [2, 16, 128]\n",
    "        \n",
    "      \n",
    "        # Dynamic weights\n",
    "        weights = []\n",
    "        for i in range(fusion.num_parts):\n",
    "            weight = fusion.fusion_weights[i](spatial_weighted[:, i])  # [B, 1]\n",
    "            weights.append(weight)\n",
    "        weights = torch.stack(weights, dim=1)  # [B, P, 1]\n",
    "        weights = F.softmax(weights, dim=1) \n",
    "        print(f\"weights.shape: {weights.shape}\") # expect [2, 16, 1]\n",
    "        \n",
    "        # Weighted features\n",
    "        weighted_feats = spatial_weighted * weights  # [B, P, D]\n",
    "        print(f\"weighted_feats.shape: {weighted_feats.shape}\")  # expect [2, 16, 128]\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch2.0",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
