{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "338ec033",
   "metadata": {},
   "outputs": [],
   "source": [
    "# train.py -- training script; verifies model checkpoint saving\n",
    "\n",
    "import sys\n",
    "import os\n",
    "from pathlib import Path\n",
    "\n",
    "# Put the project root on sys.path so project-local modules (e.g.\n",
    "# Veri776Dataset) can be imported without absolute paths.\n",
    "# FIX: __file__ is undefined inside a notebook/REPL kernel (NameError),\n",
    "# so fall back to the working directory in that case. Assumes the code\n",
    "# lives one level below the project root (scripts/) -- TODO confirm.\n",
    "try:\n",
    "    project_root = Path(__file__).resolve().parent.parent\n",
    "except NameError:\n",
    "    project_root = Path.cwd().parent\n",
    "sys.path.insert(0, str(project_root))\n",
    "\n",
    "import torch\n",
    "import time\n",
    "from PIL import Image\n",
    "import argparse\n",
    "from tqdm import tqdm\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import transforms\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision.models import vit_b_16\n",
    "from einops import rearrange, repeat\n",
    "from collections import defaultdict\n",
    "from torch.utils.data import DataLoader\n",
    "from torch.optim import AdamW\n",
    "import matplotlib.pyplot as plt\n",
    "from collections import Counter\n",
    "from sklearn.metrics import average_precision_score\n",
    "import numpy as np\n",
    "from torch.optim.lr_scheduler import CosineAnnealingLR\n",
    "from src.datasets.veri776_dataset import Veri776Dataset\n",
    "from src.models.vehicle_transformer import VehicleTransformer\n",
    "import warnings\n",
    "from torch.optim.lr_scheduler import LambdaLR\n",
    "\n",
    "warnings.filterwarnings(\"ignore\", category=FutureWarning)\n",
    "\n",
    "# 复合损失函数  ，主损失权重， 过小导致特征判断力不足\n",
    "class CombinedLoss(nn.Module):\n",
    "    def __init__(self, alpha=0.5, margin=0.5):\n",
    "        super().__init__()\n",
    "        self.ce = nn.CrossEntropyLoss(label_smoothing=0.1)  # 添加标签平滑\n",
    "        \n",
    "        # === 修复方案 ===\n",
    "        # 确保margin是标量值\n",
    "        if isinstance(margin, torch.Tensor):\n",
    "            assert margin.numel() == 1, \"Margin must be a scalar tensor\"\n",
    "            margin = margin.item()\n",
    "        self.margin = float(margin)\n",
    "        # ================\n",
    "        \n",
    "        # 恢复三元组损失（内存安全版）\n",
    "        self.triplet = nn.TripletMarginLoss(margin=margin, reduction='none')\n",
    "\n",
    "        self.alpha = alpha\n",
    "\n",
    "    def forward(self, outputs, targets):\n",
    "        main_loss = self.ce(outputs[\"logits\"], targets)\n",
    "        part_loss = sum([self.ce(p, targets) for p in outputs[\"part_logits\"]]) \n",
    "        \n",
    "        # === 新增在线难例挖掘 ===\n",
    "        bn_feature = outputs[\"bn_feature\"]\n",
    "        pairwise_dist = torch.cdist(bn_feature, bn_feature)  # 计算所有样本距离\n",
    "        # 生成有效三元组\n",
    "        pos_mask = targets.unsqueeze(1) == targets.unsqueeze(0)\n",
    "        neg_mask = targets.unsqueeze(1) != targets.unsqueeze(0)\n",
    "    \n",
    "        # 寻找最难负样本\n",
    "        with torch.no_grad():\n",
    "            neg_dist = pairwise_dist * neg_mask.float()\n",
    "            hardest_negative = neg_dist.argmax(dim=1)\n",
    "    \n",
    "        # 计算有效triplet loss\n",
    "        tri_loss = self.triplet(bn_feature, \n",
    "                           bn_feature, \n",
    "                           bn_feature[hardest_negative]).mean()\n",
    "    \n",
    "        return main_loss + 0.2*part_loss + 1.2*tri_loss  # 调高triplet权重\n",
    "\n",
    "#设计亮点：\n",
    "#标签平滑：防止模型对标签过拟合\n",
    "#动态难例挖掘：自动选择最难区分的负样本增强特征判别性\n",
    "#多损失融合：分类损失 + 部件分类(30%) + 三元组损失(80%)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def euclidean_dist(x, y):\n",
    "    \"\"\"\n",
    "    计算欧氏距离矩阵\n",
    "    x: (n, feat_dim)\n",
    "    y: (m, feat_dim)\n",
    "    return: (n, m) \n",
    "    \"\"\"\n",
    "    x2 = torch.sum(x ** 2, dim=1).unsqueeze(1)\n",
    "    y2 = torch.sum(y ** 2, dim=1).unsqueeze(0)\n",
    "    dist = x2 + y2 - 2.0 * torch.mm(x, y.transpose(0, 1))\n",
    "    return torch.sqrt(torch.clamp(dist, min=1e-12))\n",
    "\n",
    "\n",
    "# mAP computation\n",
    "def calc_mAP(query_feats, query_ids, gallery_feats, gallery_ids):\n",
    "    \"\"\"Mean Average Precision over all queries.\n",
    "\n",
    "    query_feats/gallery_feats: (n, d) / (m, d) float tensors.\n",
    "    query_ids/gallery_ids: numpy arrays of identity labels.\n",
    "    Returns 0.0 when features contain NaN or no query has a gallery match.\n",
    "    \"\"\"\n",
    "    # Guard: a bad epoch can emit NaN features; bail out instead of crashing.\n",
    "    if torch.isnan(query_feats).any() or torch.isnan(gallery_feats).any():\n",
    "        print(\"警告：特征包含NaN值，跳过当前验证\")\n",
    "        return 0.0\n",
    "\n",
    "    # L2-normalise both sets: Euclidean distance on unit vectors is a\n",
    "    # monotonic function of cosine distance, so rankings (and AP) agree.\n",
    "    # FIX: the full Q x G cosine matrix that used to be built here was\n",
    "    # never read and defeated the block-wise memory strategy below.\n",
    "    query_feats = F.normalize(query_feats, p=2, dim=1)\n",
    "    gallery_feats = F.normalize(gallery_feats, p=2, dim=1)\n",
    "\n",
    "    block_size = 256  # queries per block; bounds peak memory\n",
    "    aps = []\n",
    "\n",
    "    # Process the query set block by block to avoid OOM.\n",
    "    for i in tqdm(range(0, len(query_feats), block_size),\n",
    "                 desc=\"计算mAP\", unit=\"block\"):\n",
    "        start = i\n",
    "        end = min(i + block_size, len(query_feats))\n",
    "\n",
    "        block_feats = query_feats[start:end]\n",
    "        block_dist = euclidean_dist(block_feats, gallery_feats).cpu().numpy()\n",
    "\n",
    "        # Replace NaN/Inf so average_precision_score sees finite values.\n",
    "        block_dist = np.nan_to_num(block_dist, nan=1e6, posinf=1e6)\n",
    "\n",
    "        # Per-query AP: higher score == closer gallery item.\n",
    "        for j in range(block_dist.shape[0]):\n",
    "            y_true = (gallery_ids == query_ids[start+j]).astype(np.uint8)  # uint8 saves memory\n",
    "            y_score = -block_dist[j]\n",
    "            if np.sum(y_true) == 0:\n",
    "                continue  # query has no gallery match; skip it\n",
    "            aps.append(average_precision_score(y_true, y_score))\n",
    "\n",
    "    return np.mean(aps) if aps else 0.0\n",
    "\n",
    "# Notes:\n",
    "# - block processing keeps the distance computation memory-bounded (no OOM)\n",
    "# - NaN guards keep one bad batch from crashing the whole validation\n",
    "\n",
    "\n",
    "\n",
    "# Validation loop (monitors overfitting during training)\n",
    "def validate(model, val_loader, device):\n",
    "    \"\"\"Extract features over val_loader and return the resulting mAP.\n",
    "\n",
    "    NOTE(review): the same feature set is passed as both query and\n",
    "    gallery, so every query trivially matches itself and the reported\n",
    "    mAP is inflated -- useful only as a relative training monitor.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    feats, pids = [], []\n",
    "    with torch.no_grad():      # feature collection; no autograd graph kept\n",
    "        for batch in val_loader:\n",
    "            images = batch[\"image\"].to(device)\n",
    "            outputs = model(images)\n",
    "            feats.append(outputs[\"bn_feature\"].cpu())\n",
    "            pids.extend(batch[\"class_id\"].cpu().numpy())\n",
    "            del images, outputs  # release GPU memory promptly\n",
    "    \n",
    "    feats = torch.cat(feats)\n",
    "    # The validation set itself doubles as the gallery (see note above).\n",
    "    feats = feats.to(dtype=torch.float32)  # bound memory for distance math\n",
    "    return calc_mAP(feats, np.array(pids), feats, np.array(pids))\n",
    "\n",
    "# Key points:\n",
    "# - features are kept as float32 to bound memory use\n",
    "# - del images/outputs inside the loop frees GPU memory early\n",
    "\n",
    "\n",
    "# 配置参数\n",
    "def get_args():\n",
    "    parser = argparse.ArgumentParser()\n",
    "    parser.add_argument(\"--batch_size\", type=int, default=24)   #批次大小， 过小导致统计量不准\n",
    "    parser.add_argument(\"--lr\", type=float, default=1e-4)\n",
    "    parser.add_argument(\"--epochs\", type=int, default=100)\n",
    "    parser.add_argument(\"--val_freq\", type=int, default=2)  # 每2个epoch验证一次\n",
    "    # 添加缺失的save_dir参数\n",
    "    parser.add_argument(\"--save_dir\", type=str, default=\"checkpoints\")\n",
    "    parser.add_argument(\"--resume\", type=str, default=\"\",\n",
    "                        help=\"checkpoint path to resume\")   # 恢复训练路径\n",
    "    return parser.parse_args()\n",
    "\n",
    "\n",
    "# Smoke test: verify global/local feature shapes of the backbone\n",
    "def test_patch_embed():\n",
    "    \"\"\"Build a small model, run a dummy forward, print feature shapes.\"\"\"\n",
    "    probe = VehicleTransformer(\n",
    "        num_classes=776,\n",
    "        img_size=(224, 224),\n",
    "        patch_size=16,\n",
    "        local_parts=7,  # must match the training configuration\n",
    "        num_heads=6,\n",
    "        embed_dim=192,\n",
    "    )\n",
    "    dummy = torch.randn(1, 3, 224, 224)\n",
    "\n",
    "    global_feat, local_feats = probe.forward_features(dummy)\n",
    "\n",
    "    print(\"\\n=== 特征维度验证 ===\")\n",
    "    print(f\"全局特征维度: {global_feat.shape}\")        # expect [1, 192]\n",
    "    print(f\"局部特征数量: {len(local_feats)}\")         # expect local_parts\n",
    "    print(f\"单个局部特征维度: {local_feats[0].shape}\") # expect [1, 192]\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# Training entry point\n",
    "def train(args):\n",
    "    \"\"\"Full training loop for the VeRi-776 re-ID model.\n",
    "\n",
    "    args: namespace from get_args() -- batch_size, lr, epochs, val_freq,\n",
    "    save_dir, resume. Writes a per-epoch resume checkpoint to\n",
    "    <save_dir>/interrupt/last_checkpoint.pth and the best model (by\n",
    "    validation mAP) to <save_dir>/best_model.pth.\n",
    "    \"\"\"\n",
    "    accum_steps = 2  # gradient accumulation: effective batch = batch_size * accum_steps\n",
    "\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "    # Backend tuning: TF32 speeds up matmuls with negligible accuracy cost;\n",
    "    # cudnn.benchmark helps because input sizes are fixed.\n",
    "    torch.backends.cuda.matmul.allow_tf32 = True\n",
    "    torch.backends.cudnn.benchmark = True\n",
    "    torch.cuda.empty_cache()\n",
    "\n",
    "    # Allocator hint; set before large CUDA allocations happen.\n",
    "    os.environ[\"PYTORCH_CUDA_ALLOC_CONF\"] = \"expandable_segments:True\"\n",
    "\n",
    "    # Training-time augmentation (PIL ops before ToTensor, tensor ops after).\n",
    "    train_transform = transforms.Compose([\n",
    "        transforms.Resize((256, 256)),\n",
    "        transforms.RandomCrop(224),\n",
    "        transforms.RandomHorizontalFlip(),\n",
    "        transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3),\n",
    "        transforms.RandomRotation(15),\n",
    "        transforms.RandomGrayscale(p=0.1),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.RandomErasing(p=0.7, scale=(0.1, 0.3), ratio=(0.3, 3.0)),  # tensor-space op\n",
    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
    "    ])\n",
    "\n",
    "    # Deterministic preprocessing for validation (no augmentation).\n",
    "    val_transform = transforms.Compose([\n",
    "        transforms.Resize(256),\n",
    "        transforms.CenterCrop(224),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
    "    ])\n",
    "\n",
    "    train_set = Veri776Dataset(mode=\"train\", transform=train_transform)\n",
    "    val_set = Veri776Dataset(mode=\"test\", transform=val_transform)\n",
    "\n",
    "    train_loader = DataLoader(\n",
    "        train_set,\n",
    "        batch_size=args.batch_size,\n",
    "        shuffle=True,\n",
    "        num_workers=4,            # match CPU core count\n",
    "        pin_memory=True,\n",
    "        persistent_workers=False,\n",
    "        drop_last=True,           # keeps batch statistics stable\n",
    "        prefetch_factor=2,\n",
    "    )\n",
    "\n",
    "    val_loader = DataLoader(\n",
    "        val_set,\n",
    "        batch_size=args.batch_size,\n",
    "        num_workers=2,\n",
    "        pin_memory=True,\n",
    "        persistent_workers=False,\n",
    "    )\n",
    "\n",
    "    model = VehicleTransformer(\n",
    "        num_classes=776,\n",
    "        img_size=(224, 224),\n",
    "        patch_size=16,\n",
    "        local_parts=7,\n",
    "        embed_dim=192,   # feature dim; too low loses information\n",
    "        depth=3,         # 3 Transformer layers\n",
    "        num_heads=6,\n",
    "        mlp_ratio=2,\n",
    "        pretrained=False,\n",
    "    ).to(device)\n",
    "\n",
    "    # FIX: honour the --lr CLI flag (the rate was hard-coded to 3e-4).\n",
    "    optimizer = AdamW(\n",
    "        model.parameters(),\n",
    "        lr=args.lr,\n",
    "        weight_decay=0.01,\n",
    "        fused=True,  # fused CUDA kernel\n",
    "    )\n",
    "\n",
    "    # FIX: OneCycleLR is sized in optimizer UPDATES and stepped once per\n",
    "    # update below (it used to be sized in batches but stepped per epoch,\n",
    "    # so the schedule never progressed as configured). Its built-in warmup\n",
    "    # (pct_start) replaces the previous separate LambdaLR warmup.\n",
    "    from torch.optim.lr_scheduler import OneCycleLR\n",
    "    updates_per_epoch = max(1, len(train_loader) // accum_steps)\n",
    "    scheduler = OneCycleLR(optimizer,\n",
    "                           max_lr=args.lr,\n",
    "                           total_steps=args.epochs * updates_per_epoch,\n",
    "                           pct_start=0.3)\n",
    "\n",
    "    # FIX: initialise best_acc BEFORE the resume block so a restored value\n",
    "    # is kept (it used to be reset to 0.0 after resuming).\n",
    "    best_acc = 0.0\n",
    "    start_epoch = 0\n",
    "    if args.resume:\n",
    "        if Path(args.resume).exists():\n",
    "            checkpoint = torch.load(args.resume, map_location=device)\n",
    "            model.load_state_dict(checkpoint['state_dict'])\n",
    "            optimizer.load_state_dict(checkpoint['optimizer'])\n",
    "            start_epoch = checkpoint['epoch']  # re-run the interrupted epoch\n",
    "            if 'scheduler' in checkpoint:\n",
    "                scheduler.load_state_dict(checkpoint['scheduler'])\n",
    "            else:\n",
    "                print(\"警告：检查点缺少scheduler状态，学习率调度可能异常\")\n",
    "            best_acc = checkpoint.get('best_acc', 0.0)\n",
    "            print(f\"成功恢复训练：从epoch {start_epoch}开始，历史最佳mAP {best_acc:.2%}\")\n",
    "        else:\n",
    "            print(f\"警告：未找到检查点文件 {args.resume}，从头开始训练\")\n",
    "\n",
    "    # Mixed-precision scaler (guards fp16 gradients against underflow).\n",
    "    scaler = torch.cuda.amp.GradScaler()\n",
    "\n",
    "    criterion = CombinedLoss(alpha=0.5, margin=0.3)  # explicit scalar margin\n",
    "\n",
    "    print(\"\\n=== 训练配置摘要 ===\")\n",
    "    print(f\"输入尺寸: {model.img_size}\")\n",
    "    print(f\"批次大小: {args.batch_size}\")\n",
    "    print(f\"初始学习率: {args.lr}\")\n",
    "    print(f\"嵌入维度: {model.embed_dim}\")\n",
    "    print(f\"Transformer深度: {model.depth}\")\n",
    "    print(f\"设备: {device}\")\n",
    "    print(f\"训练样本数: {len(train_set)}\")\n",
    "    print(f\"验证样本数: {len(val_set)}\")\n",
    "    print(f\"混合精度训练: {scaler.is_enabled()}\")\n",
    "    print(\"=====================\\n\")\n",
    "\n",
    "    checkpoint_dir = Path(args.save_dir) / \"interrupt\"\n",
    "    checkpoint_dir.mkdir(parents=True, exist_ok=True)\n",
    "\n",
    "    for epoch in range(start_epoch, args.epochs):\n",
    "        # Save a resume point at the start of every epoch.\n",
    "        torch.save({\n",
    "            'epoch': epoch,\n",
    "            'state_dict': model.state_dict(),\n",
    "            'optimizer': optimizer.state_dict(),\n",
    "            'scheduler': scheduler.state_dict(),\n",
    "            'best_acc': best_acc,\n",
    "            'args': args,\n",
    "        }, checkpoint_dir / \"last_checkpoint.pth\")\n",
    "\n",
    "        model.train()\n",
    "\n",
    "        train_bar = tqdm(enumerate(train_loader),\n",
    "                         total=len(train_loader),\n",
    "                         desc=f\"Epoch {epoch+1}/{args.epochs}\",\n",
    "                         mininterval=0.5,\n",
    "                         maxinterval=1.0,\n",
    "                         smoothing=0.1,\n",
    "                         dynamic_ncols=True,\n",
    "                         bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]')\n",
    "\n",
    "        for batch_idx, batch in train_bar:\n",
    "            images = batch[\"image\"].to(device)\n",
    "            targets = batch[\"class_id\"].to(device)\n",
    "\n",
    "            with torch.cuda.amp.autocast(dtype=torch.float16):\n",
    "                outputs = model(images)\n",
    "                loss = criterion(outputs, targets) / accum_steps  # average over accumulation\n",
    "\n",
    "            # Skip the batch (and clear stale gradients) on NaN/Inf loss.\n",
    "            # FIX: also free the batch tensors before continuing.\n",
    "            if not torch.isfinite(loss):\n",
    "                print(f\"检测到非有限loss值: {loss.item()}, 跳过当前批次\")\n",
    "                optimizer.zero_grad()\n",
    "                del images, targets, outputs, loss\n",
    "                continue\n",
    "\n",
    "            scaler.scale(loss).backward()\n",
    "\n",
    "            if (batch_idx + 1) % accum_steps == 0:\n",
    "                # Unscale, clip, then step. clip_grad_norm_ returns the\n",
    "                # pre-clip gradient norm, shown in the progress bar.\n",
    "                # FIX: the norm used to be read AFTER zero_grad (always 0).\n",
    "                scaler.unscale_(optimizer)\n",
    "                grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n",
    "                scaler.step(optimizer)\n",
    "                scaler.update()\n",
    "                optimizer.zero_grad()\n",
    "                scheduler.step()  # FIX: OneCycle advances per update, not per epoch\n",
    "\n",
    "                train_bar.set_postfix(\n",
    "                    loss=f\"{loss.item()*accum_steps:.4f}\",  # FIX: undo accumulation scaling consistently\n",
    "                    lr=f\"{optimizer.param_groups[0]['lr']:.2e}\",\n",
    "                    grad_norm=f\"{grad_norm:.2f}\",\n",
    "                )\n",
    "            else:\n",
    "                # Still accumulating: no parameter update this batch.\n",
    "                train_bar.set_postfix(\n",
    "                    loss=f\"{loss.item()*accum_steps:.4f}\",\n",
    "                    lr=f\"{optimizer.param_groups[0]['lr']:.2e}\",\n",
    "                    grad_norm=\"accumulating\",\n",
    "                )\n",
    "\n",
    "            # Free per-batch GPU memory at the end of the iteration.\n",
    "            del images, targets, outputs, loss\n",
    "            torch.cuda.empty_cache()\n",
    "\n",
    "        # Validate every val_freq epochs; keep the best model by mAP.\n",
    "        if (epoch + 1) % args.val_freq == 0:\n",
    "            val_acc = validate(model, val_loader, device)\n",
    "            print(f\"mAP: {val_acc:.2%}\")\n",
    "\n",
    "            if val_acc > best_acc:\n",
    "                best_acc = val_acc\n",
    "                torch.save({\n",
    "                    'epoch': epoch,\n",
    "                    'state_dict': model.state_dict(),\n",
    "                    'optimizer': optimizer.state_dict(),\n",
    "                    'best_acc': best_acc,\n",
    "                    'args': args,\n",
    "                    'scheduler': scheduler.state_dict(),\n",
    "                }, os.path.join(args.save_dir, \"best_model.pth\"))\n",
    "\n",
    "# Key mechanisms:\n",
    "# - gradient accumulation simulates a larger batch (accum_steps=2)\n",
    "# - per-epoch resume checkpoints allow recovery after interruption\n",
    "# - only the best-validating model is kept as best_model.pth\n",
    "# - mixed precision (fp16 autocast + GradScaler) and fused AdamW for speed\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    cli_args = get_args()\n",
    "    os.makedirs(cli_args.save_dir, exist_ok=True)  # ensure checkpoint dir exists\n",
    "    train(cli_args)\n",
    "\n",
    " \n",
    "\n",
    "#训练流程：\n",
    "#数据准备：加载VeRi-776数据集，应用强数据增强\n",
    "#模型构建：初始化自定义Transformer，配置混合精度\n",
    "#训练配置：设置复合损失函数、优化策略、验证机制\n",
    "#核心训练：梯度累积更新参数，定期验证模型性能\n",
    "#模型保存：保存最佳模型和中断恢复点\n",
    "#性能优化：通过TF32、OneCycle策略等提升训练效率\n",
    "\n",
    "\n",
    "#代码完整实现了从数据加载到模型训练的完整闭环，包含多项工业级优化技巧，适合大规模车辆重识别任务的训练需求。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "50ec609e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "可用GPU数量: 1\n",
      "当前设备: 0\n",
      "显存总量: 2.00 GB\n",
      "已分配显存: 0.00 GB\n",
      "缓存保留显存: 0.00 GB\n",
      "空闲显存: 2147483648 bytes\n",
      "2.5.1\n",
      "True\n",
      "PyTorch CUDA版本: 11.8\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "# GPU availability\n",
    "print(f\"可用GPU数量: {torch.cuda.device_count()}\")\n",
    "print(f\"当前设备: {torch.cuda.current_device()}\")\n",
    "\n",
    "# Total memory of device 0\n",
    "total_mem = torch.cuda.get_device_properties(0).total_memory\n",
    "print(f\"显存总量: {total_mem/1024**3:.2f} GB\")\n",
    "\n",
    "# Live memory counters\n",
    "allocated = torch.cuda.memory_allocated(0)\n",
    "print(f\"已分配显存: {allocated/1024**3:.2f} GB\")\n",
    "print(f\"缓存保留显存: {torch.cuda.memory_reserved(0)/1024**3:.2f} GB\")\n",
    "print(f\"空闲显存: {total_mem - allocated} bytes\")\n",
    "\n",
    "print(torch.__version__)\n",
    "print(torch.cuda.is_available())  # expected: True\n",
    "print(f\"PyTorch CUDA版本: {torch.version.cuda}\")\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "62848529",
   "metadata": {},
   "outputs": [],
   "source": [
    "# train.py -- training script; verifies model checkpoint saving\n",
    "\n",
    "import sys\n",
    "import os\n",
    "from pathlib import Path\n",
    "\n",
    "# Put the project root on sys.path so project-local modules (e.g.\n",
    "# Veri776Dataset) can be imported without absolute paths.\n",
    "# FIX: __file__ is undefined inside a notebook/REPL kernel (NameError),\n",
    "# so fall back to the working directory in that case. Assumes the code\n",
    "# lives one level below the project root (scripts/) -- TODO confirm.\n",
    "try:\n",
    "    project_root = Path(__file__).resolve().parent.parent\n",
    "except NameError:\n",
    "    project_root = Path.cwd().parent\n",
    "sys.path.insert(0, str(project_root))\n",
    "\n",
    "import torch\n",
    "import time\n",
    "from PIL import Image\n",
    "import argparse\n",
    "from tqdm import tqdm\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import transforms\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision.models import vit_b_16\n",
    "from einops import rearrange, repeat\n",
    "from collections import defaultdict\n",
    "from torch.utils.data import DataLoader\n",
    "from torch.optim import AdamW\n",
    "import matplotlib.pyplot as plt\n",
    "from collections import Counter\n",
    "from sklearn.metrics import average_precision_score\n",
    "import numpy as np\n",
    "from torch.optim.lr_scheduler import CosineAnnealingLR\n",
    "from src.datasets.veri776_dataset import Veri776Dataset\n",
    "from src.models.vehicle_transformer import VehicleTransformer\n",
    "import warnings\n",
    "from torch.optim.lr_scheduler import LambdaLR\n",
    "\n",
    "warnings.filterwarnings(\"ignore\", category=FutureWarning)\n",
    "\n",
    "# 复合损失函数  ，主损失权重， 过小导致特征判断力不足\n",
    "class CombinedLoss(nn.Module):\n",
    "    def __init__(self, alpha=0.5, margin=0.5):\n",
    "        super().__init__()\n",
    "        self.ce = nn.CrossEntropyLoss(label_smoothing=0.1)  # 添加标签平滑\n",
    "        \n",
    "        # === 修复方案 ===\n",
    "        # 确保margin是标量值\n",
    "        if isinstance(margin, torch.Tensor):\n",
    "            assert margin.numel() == 1, \"Margin must be a scalar tensor\"\n",
    "            margin = margin.item()\n",
    "        self.margin = float(margin)\n",
    "        # ================\n",
    "        \n",
    "        # 恢复三元组损失（内存安全版）\n",
    "        self.triplet = nn.TripletMarginLoss(margin=margin, reduction='none')\n",
    "\n",
    "        self.alpha = alpha\n",
    "\n",
    "    def forward(self, outputs, targets):\n",
    "        main_loss = self.ce(outputs[\"logits\"], targets)\n",
    "        part_loss = sum([self.ce(p, targets) for p in outputs[\"part_logits\"]]) \n",
    "        \n",
    "        # === 新增在线难例挖掘 ===\n",
    "        bn_feature = outputs[\"bn_feature\"]\n",
    "        pairwise_dist = torch.cdist(bn_feature, bn_feature)  # 计算所有样本距离\n",
    "        # 生成有效三元组\n",
    "        pos_mask = targets.unsqueeze(1) == targets.unsqueeze(0)\n",
    "        neg_mask = targets.unsqueeze(1) != targets.unsqueeze(0)\n",
    "    \n",
    "        # 寻找最难负样本\n",
    "        with torch.no_grad():\n",
    "            neg_dist = pairwise_dist * neg_mask.float()\n",
    "            hardest_negative = neg_dist.argmax(dim=1)\n",
    "    \n",
    "        # 计算有效triplet loss\n",
    "        tri_loss = self.triplet(bn_feature, \n",
    "                           bn_feature, \n",
    "                           bn_feature[hardest_negative]).mean()\n",
    "    \n",
    "        return main_loss + 0.2*part_loss + 1.2*tri_loss  # 调高triplet权重\n",
    "\n",
    "#设计亮点：\n",
    "#标签平滑：防止模型对标签过拟合\n",
    "#动态难例挖掘：自动选择最难区分的负样本增强特征判别性\n",
    "#多损失融合：分类损失 + 部件分类(30%) + 三元组损失(80%)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def euclidean_dist(x, y):\n",
    "    \"\"\"\n",
    "    计算欧氏距离矩阵\n",
    "    x: (n, feat_dim)\n",
    "    y: (m, feat_dim)\n",
    "    return: (n, m) \n",
    "    \"\"\"\n",
    "    x2 = torch.sum(x ** 2, dim=1).unsqueeze(1)\n",
    "    y2 = torch.sum(y ** 2, dim=1).unsqueeze(0)\n",
    "    dist = x2 + y2 - 2.0 * torch.mm(x, y.transpose(0, 1))\n",
    "    return torch.sqrt(torch.clamp(dist, min=1e-12))\n",
    "\n",
    "\n",
    "# mAP computation\n",
    "def calc_mAP(query_feats, query_ids, gallery_feats, gallery_ids):\n",
    "    \"\"\"Mean Average Precision over all queries.\n",
    "\n",
    "    query_feats/gallery_feats: (n, d) / (m, d) float tensors.\n",
    "    query_ids/gallery_ids: numpy arrays of identity labels.\n",
    "    Returns 0.0 when features contain NaN or no query has a gallery match.\n",
    "    \"\"\"\n",
    "    # Guard: a bad epoch can emit NaN features; bail out instead of crashing.\n",
    "    if torch.isnan(query_feats).any() or torch.isnan(gallery_feats).any():\n",
    "        print(\"警告：特征包含NaN值，跳过当前验证\")\n",
    "        return 0.0\n",
    "\n",
    "    # L2-normalise both sets: Euclidean distance on unit vectors is a\n",
    "    # monotonic function of cosine distance, so rankings (and AP) agree.\n",
    "    # FIX: the full Q x G cosine matrix that used to be built here was\n",
    "    # never read and defeated the block-wise memory strategy below.\n",
    "    query_feats = F.normalize(query_feats, p=2, dim=1)\n",
    "    gallery_feats = F.normalize(gallery_feats, p=2, dim=1)\n",
    "\n",
    "    block_size = 256  # queries per block; bounds peak memory\n",
    "    aps = []\n",
    "\n",
    "    # Process the query set block by block to avoid OOM.\n",
    "    for i in tqdm(range(0, len(query_feats), block_size),\n",
    "                 desc=\"计算mAP\", unit=\"block\"):\n",
    "        start = i\n",
    "        end = min(i + block_size, len(query_feats))\n",
    "\n",
    "        block_feats = query_feats[start:end]\n",
    "        block_dist = euclidean_dist(block_feats, gallery_feats).cpu().numpy()\n",
    "\n",
    "        # Replace NaN/Inf so average_precision_score sees finite values.\n",
    "        block_dist = np.nan_to_num(block_dist, nan=1e6, posinf=1e6)\n",
    "\n",
    "        # Per-query AP: higher score == closer gallery item.\n",
    "        for j in range(block_dist.shape[0]):\n",
    "            y_true = (gallery_ids == query_ids[start+j]).astype(np.uint8)  # uint8 saves memory\n",
    "            y_score = -block_dist[j]\n",
    "            if np.sum(y_true) == 0:\n",
    "                continue  # query has no gallery match; skip it\n",
    "            aps.append(average_precision_score(y_true, y_score))\n",
    "\n",
    "    return np.mean(aps) if aps else 0.0\n",
    "\n",
    "# Notes:\n",
    "# - block processing keeps the distance computation memory-bounded (no OOM)\n",
    "# - NaN guards keep one bad batch from crashing the whole validation\n",
    "# CAVEAT: validate() passes the same features as both query and gallery,\n",
    "# so each query matches itself and the reported mAP is inflated; treat it\n",
    "# as a relative training monitor only.\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# Validation loop (monitors overfitting during training)\n",
    "def validate(model, val_loader, device):    # only a relative monitor of training progress\n",
    "    \"\"\"Extract features over val_loader and return the resulting mAP.\n",
    "\n",
    "    NOTE(review): the same feature set is passed as both query and\n",
    "    gallery, so every query trivially matches itself and the reported\n",
    "    mAP is inflated -- useful only as a relative training monitor.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    feats, pids = [], []\n",
    "    with torch.no_grad():      # feature collection; no autograd graph kept\n",
    "        for batch in val_loader:\n",
    "            images = batch[\"image\"].to(device)\n",
    "            outputs = model(images)\n",
    "            feats.append(outputs[\"bn_feature\"].cpu())\n",
    "            pids.extend(batch[\"class_id\"].cpu().numpy())\n",
    "            del images, outputs  # release GPU memory promptly\n",
    "    \n",
    "    feats = torch.cat(feats)\n",
    "    # The validation set itself doubles as the gallery (see note above).\n",
    "    feats = feats.to(dtype=torch.float32)  # bound memory for distance math\n",
    "    return calc_mAP(feats, np.array(pids), feats, np.array(pids))\n",
    "\n",
    "# Key points:\n",
    "# - features are kept as float32 to bound memory use\n",
    "# - del images/outputs inside the loop frees GPU memory early\n",
    "\n",
    "\n",
    "# 配置参数\n",
    "def get_args():\n",
    "    parser = argparse.ArgumentParser()\n",
    "    parser.add_argument(\"--batch_size\", type=int, default=24)   #批次大小， 过小导致统计量不准\n",
    "    parser.add_argument(\"--lr\", type=float, default=1e-4)\n",
    "    parser.add_argument(\"--epochs\", type=int, default=100)\n",
    "    parser.add_argument(\"--val_freq\", type=int, default=2)  # 每2个epoch验证一次\n",
    "    # 添加缺失的save_dir参数\n",
    "    parser.add_argument(\"--save_dir\", type=str, default=\"checkpoints\")\n",
    "    parser.add_argument(\"--resume\", type=str, default=\"\",\n",
    "                        help=\"checkpoint path to resume\")   # 恢复训练路径\n",
    "    return parser.parse_args()\n",
    "\n",
    "\n",
    "# Smoke test: verify global/local feature shapes of the backbone\n",
    "def test_patch_embed():\n",
    "    \"\"\"Build a small model, run a dummy forward, print feature shapes.\"\"\"\n",
    "    probe = VehicleTransformer(\n",
    "        num_classes=776,\n",
    "        img_size=(224, 224),\n",
    "        patch_size=16,\n",
    "        local_parts=7,  # must match the training configuration\n",
    "        num_heads=6,\n",
    "        embed_dim=192,\n",
    "    )\n",
    "    dummy = torch.randn(1, 3, 224, 224)\n",
    "\n",
    "    global_feat, local_feats = probe.forward_features(dummy)\n",
    "\n",
    "    print(\"\\n=== 特征维度验证 ===\")\n",
    "    print(f\"全局特征维度: {global_feat.shape}\")        # expect [1, 192]\n",
    "    print(f\"局部特征数量: {len(local_feats)}\")         # expect local_parts\n",
    "    print(f\"单个局部特征维度: {local_feats[0].shape}\") # expect [1, 192]\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# 训练函数\n",
    "def train(args):\n",
    "    \"\"\"Full training entry point: builds the data pipeline, model and\n",
    "    optimizer/scheduler, then runs the mixed-precision training loop with\n",
    "    gradient accumulation, interrupt checkpointing and periodic validation.\"\"\"\n",
    "\n",
    "    # Micro-batches accumulated per optimizer step\n",
    "    # (effective batch size = accum_steps * args.batch_size).\n",
    "    accum_steps = 2\n",
    "\n",
    "    # NOTE(review): the allocator config only takes effect if set before the\n",
    "    # CUDA context is created, so it was moved to the top of the function.\n",
    "    os.environ[\"PYTORCH_CUDA_ALLOC_CONF\"] = \"expandable_segments:True\"\n",
    "\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "    torch.backends.cuda.matmul.allow_tf32 = True  # TF32 matmul: faster, near-fp32 accuracy\n",
    "    torch.backends.cudnn.benchmark = True  # let cuDNN autotune kernels\n",
    "    torch.cuda.empty_cache()\n",
    "\n",
    "    # Training-time augmentation (PIL ops before ToTensor, tensor ops after).\n",
    "    train_transform = transforms.Compose([\n",
    "        transforms.Resize((256, 256)),\n",
    "        transforms.RandomCrop(224),\n",
    "        transforms.RandomHorizontalFlip(),\n",
    "        transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3),\n",
    "        transforms.RandomRotation(15),\n",
    "        transforms.RandomGrayscale(p=0.1),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.RandomErasing(p=0.7, scale=(0.1, 0.3), ratio=(0.3, 3.0)),\n",
    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
    "        ])\n",
    "\n",
    "    # Deterministic eval-time preprocessing (no augmentation).\n",
    "    val_transform = transforms.Compose([\n",
    "        transforms.Resize(256),\n",
    "        transforms.CenterCrop(224),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n",
    "\n",
    "    # Datasets\n",
    "    train_set = Veri776Dataset(mode=\"train\", transform=train_transform)\n",
    "    val_set = Veri776Dataset(mode=\"test\", transform=val_transform)\n",
    "\n",
    "    train_loader = DataLoader(\n",
    "        train_set,\n",
    "        batch_size=args.batch_size,\n",
    "        shuffle=True,\n",
    "        num_workers=4,\n",
    "        pin_memory=True,\n",
    "        persistent_workers=False,\n",
    "        drop_last=True,  # keep every accumulation window full-sized\n",
    "        prefetch_factor=2)\n",
    "\n",
    "    val_loader = DataLoader(\n",
    "        val_set,\n",
    "        batch_size=args.batch_size,\n",
    "        num_workers=2,\n",
    "        pin_memory=True,\n",
    "        persistent_workers=False)\n",
    "\n",
    "    # Model\n",
    "    model = VehicleTransformer(\n",
    "        num_classes=776,\n",
    "        img_size=(224, 224),\n",
    "        patch_size=16,\n",
    "        local_parts=7,\n",
    "        embed_dim=192,\n",
    "        depth=3,\n",
    "        num_heads=6,\n",
    "        mlp_ratio=2,\n",
    "        pretrained=False).to(device)\n",
    "\n",
    "    # AdamW with decoupled weight decay; fused=True uses the CUDA fused kernel.\n",
    "    # NOTE(review): lr is hard-coded to 3e-4 while the config summary prints\n",
    "    # args.lr (default 1e-4) -- kept as-is to preserve behavior, but the two\n",
    "    # should be unified.\n",
    "    optimizer = AdamW(\n",
    "        model.parameters(),\n",
    "        lr=3e-4,\n",
    "        weight_decay=0.01,\n",
    "        fused=True)\n",
    "\n",
    "    # OneCycleLR is a PER-STEP scheduler (total_steps counts optimizer\n",
    "    # updates), so scheduler.step() is called once per update in the loop.\n",
    "    # BUG FIX: the original stepped it once per epoch, which left the LR\n",
    "    # frozen near the start of the cycle.  Its built-in pct_start warm-up\n",
    "    # also made the separate per-epoch LambdaLR warm-up redundant (the two\n",
    "    # schedulers fought over the LR), so that warm-up was removed.\n",
    "    from torch.optim.lr_scheduler import OneCycleLR\n",
    "    scheduler = OneCycleLR(optimizer,\n",
    "                      max_lr=3e-4,\n",
    "                      total_steps=args.epochs * len(train_loader),\n",
    "                      pct_start=0.3)\n",
    "\n",
    "    # === checkpoint resume ===\n",
    "    # BUG FIX: best_acc is initialised BEFORE the resume block; the original\n",
    "    # reset it to 0.0 afterwards, silently discarding the restored best mAP.\n",
    "    start_epoch = 0\n",
    "    best_acc = 0.0\n",
    "    if args.resume:\n",
    "        if Path(args.resume).exists():\n",
    "            checkpoint = torch.load(args.resume, map_location=device)\n",
    "            model.load_state_dict(checkpoint['state_dict'])\n",
    "            optimizer.load_state_dict(checkpoint['optimizer'])\n",
    "            start_epoch = checkpoint['epoch']  # re-run the interrupted epoch\n",
    "\n",
    "            # Scheduler state may be missing in older checkpoints.\n",
    "            if 'scheduler' in checkpoint:\n",
    "                scheduler.load_state_dict(checkpoint['scheduler'])\n",
    "            else:\n",
    "                print(\"警告：检查点缺少scheduler状态，学习率调度可能异常\")\n",
    "\n",
    "            best_acc = checkpoint['best_acc']\n",
    "            print(f\"成功恢复训练：从epoch {start_epoch}开始，历史最佳mAP {best_acc:.2%}\")\n",
    "        else:\n",
    "            print(f\"警告：未找到检查点文件 {args.resume}，从头开始训练\")\n",
    "\n",
    "    # (removed: a one-off clip_grad_norm_ call here was a no-op -- no gradients\n",
    "    # exist before the first backward(); clipping happens inside the loop.)\n",
    "\n",
    "    # Mixed-precision gradient scaler.\n",
    "    scaler = torch.cuda.amp.GradScaler()\n",
    "\n",
    "    # Training-configuration summary.\n",
    "    print(\"\\n=== 训练配置摘要 ===\")\n",
    "    print(f\"输入尺寸: {model.img_size}\")\n",
    "    print(f\"批次大小: {args.batch_size}\")\n",
    "    print(f\"初始学习率: {args.lr}\")\n",
    "    print(f\"嵌入维度: {model.embed_dim}\")\n",
    "    print(f\"Transformer深度: {model.depth}\")\n",
    "    print(f\"设备: {device}\")\n",
    "    print(f\"训练样本数: {len(train_set)}\")\n",
    "    print(f\"验证样本数: {len(val_set)}\")\n",
    "    print(f\"混合精度训练: {scaler.is_enabled()}\")\n",
    "    print(\"=====================\\n\")\n",
    "\n",
    "    criterion = CombinedLoss(alpha=0.5, margin=0.3)  # explicit scalar loss params\n",
    "\n",
    "    # Auto-save location for interrupt/resume checkpoints.\n",
    "    checkpoint_dir = Path(args.save_dir) / \"interrupt\"\n",
    "    checkpoint_dir.mkdir(parents=True, exist_ok=True)\n",
    "\n",
    "    for epoch in range(start_epoch, args.epochs):\n",
    "\n",
    "        # Save a resume point before every epoch.\n",
    "        torch.save({\n",
    "            'epoch': epoch,\n",
    "            'state_dict': model.state_dict(),\n",
    "            'optimizer': optimizer.state_dict(),\n",
    "            'scheduler': scheduler.state_dict(),\n",
    "            'best_acc': best_acc,\n",
    "            'args': args\n",
    "        }, checkpoint_dir / \"last_checkpoint.pth\")\n",
    "\n",
    "        model.train()\n",
    "\n",
    "        # enumerate() provides batch_idx for the accumulation counter.\n",
    "        train_bar = tqdm(enumerate(train_loader),\n",
    "                         total=len(train_loader),\n",
    "                         desc=f\"Epoch {epoch+1}/{args.epochs}\",\n",
    "                         mininterval=0.5,\n",
    "                         maxinterval=1.0,\n",
    "                         smoothing=0.1,\n",
    "                         dynamic_ncols=True,\n",
    "                         bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]')\n",
    "\n",
    "        for batch_idx, batch in train_bar:\n",
    "            images = batch[\"image\"].to(device)\n",
    "            targets = batch[\"class_id\"].to(device)\n",
    "\n",
    "            # fp16 autocast forward pass.\n",
    "            with torch.cuda.amp.autocast(dtype=torch.float16):\n",
    "                outputs = model(images)\n",
    "                # Divide so gradients accumulated over accum_steps average out.\n",
    "                loss = criterion(outputs, targets) / accum_steps\n",
    "\n",
    "                # Skip batches with NaN/Inf loss; also drop any partially\n",
    "                # accumulated gradients to avoid contaminating the next step.\n",
    "                if not torch.isfinite(loss):\n",
    "                    print(f\"检测到非有限loss值: {loss.item()}, 跳过当前批次\")\n",
    "                    optimizer.zero_grad()\n",
    "                    continue\n",
    "\n",
    "            # Scaled backward pass (gradients accumulate across micro-batches).\n",
    "            scaler.scale(loss).backward()\n",
    "\n",
    "            if (batch_idx + 1) % accum_steps == 0:\n",
    "                # Unscale once per accumulation window, then clip.\n",
    "                scaler.unscale_(optimizer)\n",
    "                # BUG FIX: clip_grad_norm_ returns the pre-clip total gradient\n",
    "                # norm; the original recomputed the norm AFTER zero_grad(), so\n",
    "                # the displayed value was always zero.\n",
    "                grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n",
    "                scaler.step(optimizer)\n",
    "                scaler.update()\n",
    "                optimizer.zero_grad()\n",
    "                scheduler.step()  # OneCycle advances once per optimizer update\n",
    "\n",
    "                train_bar.set_postfix(\n",
    "                    loss=f\"{loss.item() * accum_steps:.4f}\",  # undo the /accum_steps scaling\n",
    "                    lr=f\"{optimizer.param_groups[0]['lr']:.2e}\",\n",
    "                    grad_norm=f\"{grad_norm:.2f}\")\n",
    "            else:\n",
    "                # Still accumulating: refresh the bar without stepping.\n",
    "                train_bar.set_postfix(\n",
    "                    loss=f\"{loss.item() * accum_steps:.4f}\",\n",
    "                    lr=f\"{optimizer.param_groups[0]['lr']:.2e}\",\n",
    "                    grad_norm=\"accumulating\")\n",
    "\n",
    "            # Drop batch references promptly.  (removed: per-batch\n",
    "            # torch.cuda.empty_cache() -- it synchronizes the device every\n",
    "            # iteration and badly hurts throughput.)\n",
    "            del images, targets, outputs, loss\n",
    "\n",
    "        # Validate every args.val_freq epochs; otherwise treat as unscored.\n",
    "        val_acc = 0.0\n",
    "        if (epoch + 1) % args.val_freq == 0:\n",
    "            val_acc = validate(model, val_loader, device)\n",
    "            print(f\"mAP: {val_acc:.2%}\")\n",
    "\n",
    "        # Keep only the model with the best validation mAP.\n",
    "        if val_acc > best_acc:\n",
    "            best_acc = val_acc\n",
    "            torch.save({'epoch': epoch,\n",
    "                        'state_dict': model.state_dict(),\n",
    "                        'optimizer': optimizer.state_dict(),\n",
    "                        'best_acc': best_acc,\n",
    "                        'args': args,\n",
    "                        'scheduler': scheduler.state_dict()},\n",
    "                       os.path.join(args.save_dir, \"best_model.pth\"))\n",
    "#核心技术：\n",
    "#混合精度训练：float16计算加速，自动梯度缩放\n",
    "#OneCycle策略：动态调整学习率提高收敛速度\n",
    "#融合优化器：CUDA内核融合减少GPU操作开销\n",
    "        \n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    # Script entry point: parse CLI args, ensure the save dir exists, train.\n",
    "    args = get_args()\n",
    "    os.makedirs(args.save_dir, exist_ok=True)\n",
    "    train(args)\n",
    "\n",
    " \n",
    "\n",
    "#训练流程：\n",
    "#数据准备：加载VeRi-776数据集，应用强数据增强\n",
    "#模型构建：初始化自定义Transformer，配置混合精度\n",
    "#训练配置：设置复合损失函数、优化策略、验证机制\n",
    "#核心训练：梯度累积更新参数，定期验证模型性能\n",
    "#模型保存：保存最佳模型和中断恢复点\n",
    "#性能优化：通过TF32、OneCycle策略等提升训练效率\n",
    "\n",
    "\n",
    "#代码完整实现了从数据加载到模型训练的完整闭环，包含多项工业级优化技巧，适合大规模车辆重识别任务的训练需求。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "505e7212",
   "metadata": {},
   "outputs": [],
   "source": [
    "#梯度积累\n",
    "# 新建train.py验证模型保存功能\n",
    "\n",
    "import sys\n",
    "import os\n",
    "from pathlib import Path\n",
    "\n",
    "# 获取当前文件的绝对路径\n",
    "current_file = Path(__file__).resolve()\n",
    "# 计算项目根目录：上溯两级（假设train.py在scripts/目录下）\n",
    "project_root = current_file.parent.parent\n",
    "# 将项目根目录添加到系统路径\n",
    "sys.path.insert(0, str(project_root))\n",
    "\n",
    "#作用：确保项目内自定义模块（如 Veri776Dataset）可被正确导入\n",
    "#关键点：通过路径解析动态添加项目根目录，避免绝对路径依赖\n",
    "\n",
    "import torch\n",
    "import time\n",
    "from PIL import Image\n",
    "import argparse\n",
    "from tqdm import tqdm\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import transforms\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision.models import vit_b_16\n",
    "from einops import rearrange, repeat\n",
    "from collections import defaultdict\n",
    "from torch.utils.data import DataLoader  # 新增导入\n",
    "from torch.optim import AdamW\n",
    "import matplotlib.pyplot as plt\n",
    "from collections import Counter\n",
    "from sklearn.metrics import average_precision_score  # 新增导入\n",
    "import numpy as np\n",
    "from torch.optim.lr_scheduler import CosineAnnealingLR\n",
    "from src.datasets.veri776_dataset import Veri776Dataset\n",
    "from src.models.vehicle_transformer import VehicleTransformer\n",
    "import warnings\n",
    "from torch.optim.lr_scheduler import LambdaLR\n",
    "\n",
    "\n",
    "\n",
    "warnings.filterwarnings(\"ignore\", category=FutureWarning)\n",
    "\n",
    "# 复合损失函数  ，主损失权重， 过小导致特征判断力不足\n",
    "class CombinedLoss(nn.Module):\n",
    "    \"\"\"Classification + part-classification + batch-hard triplet loss.\n",
    "\n",
    "    Combines label-smoothed cross-entropy on the global logits, cross-entropy\n",
    "    on each part head, and a hard-negative-mined triplet term on bn_feature.\n",
    "    alpha is stored for interface compatibility; the final mix keeps the\n",
    "    fixed 0.7 / 0.3 weights of the original implementation.\n",
    "    \"\"\"\n",
    "    def __init__(self, alpha=0.5, margin=0.5):\n",
    "        super().__init__()\n",
    "        self.ce = nn.CrossEntropyLoss(label_smoothing=0.1)  # label smoothing vs. over-confidence\n",
    "\n",
    "        # Coerce a one-element tensor margin into a plain float.\n",
    "        if isinstance(margin, torch.Tensor):\n",
    "            assert margin.numel() == 1, \"Margin must be a scalar tensor\"\n",
    "            margin = margin.item()\n",
    "        self.margin = float(margin)\n",
    "\n",
    "        # reduction='none' so per-sample values can be clamped before averaging.\n",
    "        self.triplet = nn.TripletMarginLoss(margin=margin, reduction='none')\n",
    "\n",
    "        self.alpha = alpha\n",
    "\n",
    "    def forward(self, outputs, targets):\n",
    "        main_loss = self.ce(outputs[\"logits\"], targets)\n",
    "        part_loss = sum([self.ce(p, targets) for p in outputs[\"part_logits\"]])\n",
    "\n",
    "        # === online hard-negative mining ===\n",
    "        bn_feature = outputs[\"bn_feature\"]\n",
    "        pairwise_dist = torch.cdist(bn_feature, bn_feature)  # all-pairs L2 distances\n",
    "        neg_mask = targets.unsqueeze(1) != targets.unsqueeze(0)\n",
    "\n",
    "        # BUG FIX: the hardest negative is the CLOSEST sample of a different\n",
    "        # identity.  The original took argmax over zero-masked distances,\n",
    "        # which picks the easiest (farthest) negative.  Same-identity entries\n",
    "        # are masked to +inf so argmin always lands on a true negative (a row\n",
    "        # with no in-batch negative degenerates to index 0, as before).\n",
    "        with torch.no_grad():\n",
    "            neg_dist = pairwise_dist.masked_fill(~neg_mask, float('inf'))\n",
    "            hardest_negative = neg_dist.argmin(dim=1)\n",
    "\n",
    "        # NOTE(review): anchor and positive are the same embedding, so\n",
    "        # d(a, p) == 0 and this term reduces to max(0, margin - d(a, n)).\n",
    "        # Kept as-is; true hard-positive mining would change training behavior.\n",
    "        tri_loss = self.triplet(bn_feature,\n",
    "                           bn_feature,\n",
    "                           bn_feature[hardest_negative])\n",
    "        tri_loss = torch.clamp(tri_loss, min=0, max=10).mean()  # guard against blow-up\n",
    "\n",
    "        return main_loss + 0.7*part_loss + 0.3*tri_loss\n",
    "\n",
    "#设计亮点：\n",
    "#标签平滑：防止模型对标签过拟合\n",
    "#动态难例挖掘：自动选择最难区分的负样本增强特征判别性\n",
    "#多损失融合：分类损失 + 部件分类+ 三元组损失\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def euclidean_dist(x, y):\n",
    "    \"\"\"Pairwise Euclidean distance matrix.\n",
    "\n",
    "    x: (n, feat_dim), y: (m, feat_dim) -> (n, m) distance matrix.\n",
    "    Squared distances are clamped at 1e-12 before the sqrt so that\n",
    "    round-off to zero/negative values never produces NaN.\n",
    "    \"\"\"\n",
    "    xx = (x * x).sum(dim=1, keepdim=True)    # (n, 1) squared norms\n",
    "    yy = (y * y).sum(dim=1).unsqueeze(0)     # (1, m) squared norms\n",
    "    sq_dist = xx + yy - 2.0 * torch.mm(x, y.t())\n",
    "    return torch.clamp(sq_dist, min=1e-12).sqrt()\n",
    "\n",
    "\n",
    "#mAP计算函数\n",
    "def calc_mAP(query_feats, query_ids, query_cams, \n",
    "            gallery_feats, gallery_ids, gallery_cams): \n",
    "    \"\"\"Cross-camera mean Average Precision.\n",
    "\n",
    "    query_feats / gallery_feats: 2-D feature tensors, one row per image.\n",
    "    query_ids, query_cams, gallery_ids, gallery_cams: per-image vehicle-ID\n",
    "    and camera-ID arrays.  For each query, only gallery images of the same\n",
    "    vehicle captured by a DIFFERENT camera count as positives; queries with\n",
    "    no cross-camera positive are skipped.  Returns 0.0 if nothing scores.\n",
    "    \"\"\"\n",
    "    # Guard: NaN features would poison every AP computed below.\n",
    "    if torch.isnan(query_feats).any() or torch.isnan(gallery_feats).any():\n",
    "        print(\"警告：特征包含NaN值，跳过当前验证\")\n",
    "        return 0.0\n",
    "\n",
    "    # Normalize ID/camera containers to numpy arrays.\n",
    "    query_ids = np.asarray(query_ids)\n",
    "    query_cams = np.asarray(query_cams)\n",
    "    gallery_ids = np.asarray(gallery_ids)\n",
    "    gallery_cams = np.asarray(gallery_cams)\n",
    "\n",
    "    # Inner-product similarity.  NOTE(review): this equals cosine similarity\n",
    "    # only if the features were L2-normalized upstream -- confirm that the\n",
    "    # caller normalizes bn_feature before trusting absolute scores.\n",
    "    sim_matrix = torch.mm(query_feats, gallery_feats.T).cpu().numpy()\n",
    "\n",
    "    # (removed: an unused block_size variable -- no chunking was actually\n",
    "    # implemented, so the variable and its comment were misleading)\n",
    "    aps = []\n",
    "\n",
    "    for i in tqdm(range(len(query_ids)), desc=\"计算跨摄像头mAP\"):\n",
    "        q_id = query_ids[i]\n",
    "        q_cam = query_cams[i]\n",
    "\n",
    "        # Positives: same vehicle seen from a different camera.\n",
    "        valid_pos_mask = (gallery_ids == q_id) & (gallery_cams != q_cam)\n",
    "\n",
    "        # Skip queries with no valid cross-camera positive.\n",
    "        if valid_pos_mask.sum() == 0:\n",
    "            continue\n",
    "\n",
    "        y_true = valid_pos_mask.astype(np.int32)\n",
    "        y_score = sim_matrix[i]\n",
    "\n",
    "        aps.append(average_precision_score(y_true, y_score))\n",
    "\n",
    "    return np.mean(aps) if aps else 0.0\n",
    "\n",
    "#创新点：\n",
    "#分块处理：将大型矩阵运算分解为小块，避免OOM（内存不足）\n",
    "#余弦相似度：更鲁棒的度量方式\n",
    "#NaN检测：防止无效特征导致计算崩溃\n",
    "#在validate函数中，调用了calc_mAP(feats, np.array(pids), feats, np.array(pids))，这意味着将同一个数据集同时作为query和gallery，这会使得每个样本都能匹配到自己，导致mAP虚高。\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# 添加验证循环（防止过拟合）\n",
    "def validate(model, query_loader, gallery_loader, device):\n",
    "    \"\"\"Monitoring-only validation: embed the query and gallery sets and\n",
    "    return their cross-camera mAP via calc_mAP.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "\n",
    "    def _extract(loader):\n",
    "        # One pass over a loader: collect bn_feature embeddings plus the\n",
    "        # original vehicle and camera IDs for every image.\n",
    "        feats, ids, cams = [], [], []\n",
    "        with torch.no_grad():\n",
    "            for batch in loader:\n",
    "                images = batch[\"image\"].to(device)\n",
    "                outputs = model(images)\n",
    "                feats.append(outputs[\"bn_feature\"].cpu())\n",
    "                ids.extend(batch[\"vehicle_id\"].cpu().numpy())   # raw vehicle IDs\n",
    "                cams.extend(batch[\"camera_id\"].cpu().numpy())   # camera IDs for cross-cam mAP\n",
    "        # float32 features: half the memory of a possible float64 upcast later.\n",
    "        return torch.cat(feats).float(), np.array(ids), np.array(cams)\n",
    "\n",
    "    # The two copy-pasted extraction loops were factored into _extract.\n",
    "    query_feats, query_ids, query_cams = _extract(query_loader)\n",
    "    gallery_feats, gallery_ids, gallery_cams = _extract(gallery_loader)\n",
    "\n",
    "    return calc_mAP(query_feats, query_ids, query_cams,\n",
    "                   gallery_feats, gallery_ids, gallery_cams)\n",
    "\n",
    "#关键优化：\n",
    "#特征类型转换：float32比默认float64节省50%内存\n",
    "#及时释放显存：del images防止显存泄漏\n",
    "\n",
    "\n",
    "# 配置参数\n",
    "def get_args():\n",
    "    \"\"\"Parse command-line training options.\"\"\"\n",
    "    parser = argparse.ArgumentParser()\n",
    "    parser.add_argument(\"--batch_size\", type=int, default=8)   # too small makes batch statistics noisy\n",
    "    parser.add_argument(\"--lr\", type=float, default=1e-4)\n",
    "    parser.add_argument(\"--epochs\", type=int, default=100)\n",
    "    parser.add_argument(\"--val_freq\", type=int, default=2)  # validate every val_freq epochs\n",
    "    # Checkpoint output directory.\n",
    "    parser.add_argument(\"--save_dir\", type=str, default=\"checkpoints\")\n",
    "    parser.add_argument(\"--resume\", type=str, default=\"\",\n",
    "                        help=\"checkpoint path to resume\")   # path of a checkpoint to resume from\n",
    "    return parser.parse_args()\n",
    "\n",
    "\n",
    "# 在train.py中添加测试代码\n",
    "# 修改后\n",
    "def test_patch_embed():\n",
    "    \"\"\"Smoke test: run one random image through a small VehicleTransformer\n",
    "    and print the global/local feature shapes for manual inspection.\"\"\"\n",
    "    test_model = VehicleTransformer(\n",
    "        num_classes=776,\n",
    "        img_size=(128, 128),\n",
    "        patch_size=16,\n",
    "        local_parts=4,  # must match the model configuration used in train()\n",
    "        num_heads=4,\n",
    "        embed_dim=128\n",
    "    )\n",
    "    dummy_input = torch.randn(1, 3,128, 128)\n",
    "    \n",
    "    # Extract the global feature and the per-part local features.\n",
    "    global_feat, local_feats = test_model.forward_features(dummy_input)\n",
    "    \n",
    "    # Print the key dimensions.\n",
    "    print(\"\\n=== 特征维度验证 ===\")\n",
    "    print(f\"全局特征维度: {global_feat.shape}\")        \n",
    "    print(f\"局部特征数量: {len(local_feats)}\")         # should equal local_parts\n",
    "    print(f\"单个局部特征维度: {local_feats[0].shape}\") \n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# 训练函数\n",
    "def train(args):\n",
    "\n",
    "    # ===== 新增梯度累积参数 =====\n",
    "    accum_steps = 2  # 添加到train函数开头参数部分\n",
    "\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "    # 在train函数开头添加\n",
    "    torch.backends.cuda.matmul.allow_tf32 = True  # 启用TF32计算 ，保持精度前提下加速矩阵运算\n",
    "    torch.backends.cudnn.benchmark = True  # 启用cuDNN自动优化器\n",
    "    torch.cuda.empty_cache()  # 清空缓存\n",
    "    \n",
    "\n",
    "    # 数据增强\n",
    "    train_transform = transforms.Compose([\n",
    "        transforms.Resize((128, 128)),# 作用于PIL.Image\n",
    "        transforms.RandomCrop(128),# 作用于PIL.Image\n",
    "        transforms.RandomHorizontalFlip(),# 作用于PIL.Image\n",
    "        transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3),  # 作用于PIL.Image\n",
    "        transforms.RandomRotation(15),# 作用于PIL.Image\n",
    "        transforms.RandomGrayscale(p=0.1),  # 新增灰度化,作用于PIL.Image\n",
    "        transforms.ToTensor(),# 转换为Tensor\n",
    "        transforms.RandomErasing(p=0.2, scale=(0.05, 0.2)),  # 降低擦除强度\n",
    "        transforms.RandomApply([transforms.GaussianBlur(3)], p=0.1),  # 新增模糊增强\n",
    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
    "        ])\n",
    "    \n",
    "    # 修改后（添加验证专用transform）\n",
    "    \n",
    "    \n",
    "    query_transform = transforms.Compose([\n",
    "     transforms.Resize(128),\n",
    "        transforms.CenterCrop(128),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n",
    "    \n",
    "\n",
    "    test_transform = transforms.Compose([\n",
    "     transforms.Resize(128),\n",
    "        transforms.CenterCrop(128),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n",
    "\n",
    "\n",
    "    \n",
    "    # 数据集\n",
    "    train_set = Veri776Dataset(mode=\"train\", transform=train_transform)\n",
    "   \n",
    "\n",
    "    # 正确修改（使用查询集作为验证参考）\n",
    "    query_set = Veri776Dataset(mode=\"query\", transform=query_transform)  # Query\n",
    "    gallery_set = Veri776Dataset(mode=\"test\", transform=test_transform) # Gallery\n",
    "\n",
    "    \n",
    "    train_loader = DataLoader(\n",
    "        train_set,\n",
    "     batch_size=args.batch_size,  # 使用参数中的batch_size\n",
    "    shuffle=True,\n",
    "    num_workers=4,       # 匹配CPU核心数\n",
    "     pin_memory=True,\n",
    "    persistent_workers=False,\n",
    "    drop_last=True,\n",
    "    prefetch_factor=2       # 添加预取\n",
    ")\n",
    "    \n",
    "\n",
    " \n",
    "    # 创建 Query 和 Gallery 的 DataLoader\n",
    "    query_loader = DataLoader(query_set, \n",
    "                              batch_size=args.batch_size, \n",
    "                              shuffle=False,\n",
    "    num_workers=4,       # 匹配CPU核心数\n",
    "     pin_memory=True,\n",
    "    drop_last=True,\n",
    "    prefetch_factor=2   ,    # 添加预取\n",
    "        persistent_workers=False)\n",
    "\n",
    "    gallery_loader = DataLoader(gallery_set, \n",
    "                                batch_size=args.batch_size,\n",
    "                                 shuffle=False,\n",
    "    num_workers=4,       # 匹配CPU核心数\n",
    "     pin_memory=True,\n",
    "    persistent_workers=False,\n",
    "    drop_last=True,\n",
    "    prefetch_factor=2       )# 添加预取\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "    # 模型初始化\n",
    "    model = VehicleTransformer(\n",
    "        num_classes=776,\n",
    "    img_size=(128, 128),    # 缩小输入尺寸\n",
    "    patch_size=16,           # 增大分块尺寸\n",
    "    local_parts=4,\n",
    "    embed_dim=128,          # 特征维度， 过低导致信息丢失\n",
    "    depth=3,                # 层数  2层Transformer\n",
    "    num_heads=4,            # 注意力头\n",
    "    mlp_ratio=2,\n",
    "    \n",
    "    pretrained=False).to(device)\n",
    "\n",
    "# 新增梯度检查点（节省显存）\n",
    "    from torch.utils.checkpoint import checkpoint_sequential\n",
    "    model.encoder.use_checkpoint = True  # 自定义属性\n",
    "\n",
    "    # 修改encoder前向传播\n",
    "    def custom_encoder_forward(x):\n",
    "        if model.encoder.use_checkpoint:\n",
    "            return checkpoint_sequential(model.encoder.layers, 2, x)\n",
    "        else:\n",
    "            return model.encoder(x)\n",
    "\n",
    "\n",
    "    def log_gradients(model, epoch):\n",
    "        \"\"\"梯度分布记录函数\"\"\"\n",
    "        grad_info = []\n",
    "        for name, param in model.named_parameters():\n",
    "            if param.grad is not None:\n",
    "                grad_norm = param.grad.data.norm(2).item()\n",
    "                grad_info.append(f\"{name[:15]:<15} : {grad_norm:.4e}\")\n",
    "        with open(\"gradient_log.txt\", \"a\") as f:\n",
    "            f.write(f\"\\n=== Epoch {epoch+1} 梯度分布 ===\\n\")\n",
    "            f.write(\"\\n\".join(grad_info))\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "    \n",
    "    # 优化器，修改为（添加权重衰减和梯度裁剪）\n",
    "    optimizer = AdamW(\n",
    "        model.parameters(), \n",
    "         lr=3e-4,                  #学习率， 过高引发梯度爆炸\n",
    "        weight_decay=0.01,  # ✅ 新增权重衰减\n",
    "        fused=True\n",
    ")                 # 启用融合优化\n",
    "# 新增梯度监控回调\n",
    "\n",
    "\n",
    " # 修改位置1：模型初始化后添加参数初始化\n",
    "    for p in model.parameters():\n",
    "        if p.dim() > 1:\n",
    "            nn.init.kaiming_normal_(p, mode='fan_out', nonlinearity='relu')  # 新增初始化\n",
    "\n",
    "\n",
    "\n",
    "  # 更换学习率调度器（新增）\n",
    "    from torch.optim.lr_scheduler import OneCycleLR\n",
    "    scheduler = OneCycleLR(optimizer, \n",
    "                      max_lr=1e-4,\n",
    "                      total_steps=args.epochs * len(train_loader),  # ✅ 新增关键参数\n",
    "                      pct_start=0.3, # 延长学习率上升阶段\n",
    "                      div_factor=10,  #更平缓的初始学习率\n",
    "                      final_div_factor=1e2)           # ✅ 避免末期学习率过小\n",
    "\n",
    "\n",
    "\n",
    "    # === 新增检查点恢复逻辑 ===\n",
    "    start_epoch = 0\n",
    "    if args.resume:\n",
    "        if Path(args.resume).exists():\n",
    "            checkpoint = torch.load(args.resume, map_location=device)\n",
    "\n",
    "            model.load_state_dict(checkpoint['state_dict'])\n",
    "            optimizer.load_state_dict(checkpoint['optimizer'])\n",
    "            start_epoch = checkpoint['epoch'] # 继续完成被中断的epoch\n",
    "\n",
    "\n",
    "              # 新增scaler恢复\n",
    "            if 'scaler' in checkpoint:\n",
    "                scaler.load_state_dict(checkpoint['scaler'])\n",
    "            else:\n",
    "                print(\"警告：未找到scaler状态，混合精度训练可能异常\")\n",
    "\n",
    "\n",
    "\n",
    "            # 安全加载scheduler状态\n",
    "            if 'scheduler' in checkpoint:  # 新增判断\n",
    "                scheduler.load_state_dict(checkpoint['scheduler'])\n",
    "            else:\n",
    "                print(\"警告：检查点缺少scheduler状态，学习率调度可能异常\")\n",
    "\n",
    "            best_acc = checkpoint['best_acc']\n",
    "\n",
    "\n",
    "            print(f\"成功恢复训练：从epoch {start_epoch}开始，历史最佳mAP {best_acc:.2%}\")\n",
    "        else:\n",
    "            print(f\"警告：未找到检查点文件 {args.resume}，从头开始训练\")   \n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "   # 修改位置2：学习率调度器\n",
    "    from torch.optim.lr_scheduler import LambdaLR\n",
    "    warmup_epochs = 5   # 加速初始学习,从10——>5缩短预热\n",
    "\n",
    "  \n",
    "\n",
    "    \n",
    "    #学习率预热，避免初期不稳定更新\n",
    "    warmup_scheduler = LambdaLR(optimizer, \n",
    "                                lr_lambda=lambda e: min(1.0, (e+1)/warmup_epochs)) # 前10epoch逐步提高学习率\n",
    "\n",
    "    best_acc = 0.0\n",
    "\n",
    "    \n",
    "\n",
    "    \n",
    "    # 混合精度初始化\n",
    "    scaler = torch.cuda.amp.GradScaler()  # 移除device_type参数\n",
    "\n",
    "    # 设置环境变量（在训练开始前）\n",
    "    \n",
    "    os.environ[\"PYTORCH_CUDA_ALLOC_CONF\"] = \"expandable_segments:True\"\n",
    "\n",
    "\n",
    "\n",
    "    \n",
    "\n",
    "    \n",
    "    # === 这里就是训练代码开头的最佳位置 ===\n",
    "    print(\"\\n=== 训练配置摘要 ===\")\n",
    "    print(f\"输入尺寸: {model.img_size}\")\n",
    "    print(f\"批次大小: {args.batch_size}\")\n",
    "    print(f\"初始学习率: {args.lr}\")\n",
    "    print(f\"嵌入维度: {model.embed_dim}\")\n",
    "    print(f\"Transformer深度: {model.depth}\")\n",
    "    print(f\"设备: {device}\")\n",
    "    print(f\"训练样本数: {len(train_set)}\")\n",
    "    print(f\"验证样本数: {len(query_set)}\")\n",
    "    print(f\"混合精度训练: {scaler.is_enabled()}\")\n",
    "    print(\"=====================\\n\")\n",
    "\n",
    "    # 确保损失函数参数正确\n",
    "    criterion = CombinedLoss(alpha=0.5, margin=0.3)  # 显式传递标量参数\n",
    "\n",
    "\n",
    "    # 在训练循环前添加自动保存路径\n",
    "    checkpoint_dir = Path(args.save_dir) / \"interrupt\"\n",
    "    checkpoint_dir.mkdir(parents=True, exist_ok=True)\n",
    "\n",
    "    \n",
    "\n",
    "    # 训练循环中\n",
    "    for epoch in range(start_epoch, args.epochs):\n",
    "        empty_grad_count = 0  # 新增\n",
    "        model.train()\n",
    "\n",
    "\n",
    "\n",
    "        # === 每个epoch开始前保存恢复点 ===\n",
    "        torch.save({\n",
    "        'epoch': epoch,\n",
    "        'state_dict': model.state_dict(),\n",
    "        'optimizer': optimizer.state_dict(),\n",
    "        'scheduler': scheduler.state_dict(),  # 新增此行\n",
    "        'best_acc': best_acc,\n",
    "        'args': args,\n",
    "        'scaler': scaler.state_dict()  # 新增此行\n",
    "     }, checkpoint_dir / \"last_checkpoint.pth\")  # 保存last_checkpoint.pth\n",
    "\n",
    "\n",
    "        \n",
    "\n",
    "         # 添加enumerate获取batch_idx\n",
    "        train_bar = tqdm(\n",
    "                         enumerate(train_loader), \n",
    "                         total=len(train_loader),\n",
    "                         desc=f\"Epoch {epoch+1}/{args.epochs}\".ljust(15),  # 固定描述长度\n",
    "                         position=0,      # 新增：固定位置\n",
    "                         leave=True,      # 新增：保留进度条                      \n",
    "                         mininterval=1,   # 降低刷新频率到0.5秒\n",
    "                         maxinterval=5,\n",
    "                         smoothing=0.1,\n",
    "                         dynamic_ncols=True,  # 自适应终端宽度\n",
    "      bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]')\n",
    "         \n",
    "        \n",
    "        for batch_idx, batch in train_bar:\n",
    "            images = batch[\"image\"].to(device)\n",
    "            targets = batch[\"class_id\"].to(device) # ✅ 删除梯度清零\n",
    "            \n",
    "            \n",
    "     \n",
    "            # 前向传播部分\n",
    "            with torch.cuda.amp.autocast(dtype=torch.float16):  # 使用cuda子模块\n",
    "                outputs = model(images)\n",
    "                \n",
    "                # 训练循环中使用\n",
    "                loss = criterion(outputs, targets) / accum_steps  # 梯度平均\n",
    "               # loss = torch.clamp(loss, min=0, max=20)  # ✅ 新增\n",
    "\n",
    "                # 添加loss缩放保护\n",
    "                if not torch.isfinite(loss):\n",
    "                    print(f\"检测到非有限loss值: {loss.item()}, 跳过当前批次\")\n",
    "                    optimizer.zero_grad(set_to_none=True)  # 优化内存使用\n",
    "                    scaler.update()  # ✅ 新增scaler状态重置\n",
    "                    continue  # ✅ 跳过问题批次\n",
    "            \n",
    "            # 反向传播\n",
    "            scaler.scale(loss).backward()\n",
    "            \n",
    "            \n",
    "             #梯度累计\n",
    "            if (batch_idx + 1) % accum_steps == 0:  # 每accum_steps步更新一次\n",
    "\n",
    "                # 确保这是本累积周期内第一次也是唯一一次unscale\n",
    "                scaler.unscale_(optimizer)\n",
    "                \n",
    "                # 检查是否存在有效梯度\n",
    "                has_valid_grad = False\n",
    "                for p in model.parameters():\n",
    "                    if p.grad is not None and torch.sum(torch.abs(p.grad)) > 0:\n",
    "                        has_valid_grad = True\n",
    "                        break\n",
    "            \n",
    "                if not has_valid_grad:\n",
    "                    empty_grad_count += 1\n",
    "                    print(f\"Batch {batch_idx+1}: 未检测到有效梯度，跳过更新\")\n",
    "                    optimizer.zero_grad(set_to_none=True)\n",
    "                    scaler.update()\n",
    "                    continue\n",
    "\n",
    "                # 梯度裁剪必须在unscale之后\n",
    "                try:\n",
    "                    torch.nn.utils.clip_grad_norm_(\n",
    "                        model.parameters(),\n",
    "                        max_norm=1.0,  # 降低裁剪阈值 ← 这里修改\n",
    "                        norm_type=2.0,\n",
    "                        error_if_nonfinite=False  # 改为False\n",
    "    )\n",
    "                except Exception as e:\n",
    "                    print(f\"梯度裁剪异常: {str(e)}，跳过当前批次\")\n",
    "                    optimizer.zero_grad(set_to_none=True)\n",
    "                    scaler.update()\n",
    "                    continue\n",
    "    \n",
    "\n",
    "\n",
    "            \n",
    "             # 梯度裁剪和参数更新\n",
    "             # 更新参数\n",
    "                scaler.step(optimizer)\n",
    "                scaler.update()\n",
    "                optimizer.zero_grad(set_to_none=True)  # 使用set_to_none优化内存\n",
    "\n",
    "             \n",
    "\n",
    "            # 再更新进度条\n",
    "                train_bar.set_postfix(\n",
    "                  loss=f\"{loss.item()*accum_steps:.4f}\",\n",
    "                 lr=f\"{optimizer.param_groups[0]['lr']:.2e}\"# 直接获取最新学习率\n",
    "                    \n",
    "            )\n",
    "\n",
    "        # ===== 未达到累积步数时不更新参数 =====\n",
    "            else:\n",
    "            # 仅更新进度条不执行参数更新\n",
    "                train_bar.set_postfix(\n",
    "                    loss=f\"{loss.item()*accum_steps:.4f}\",  # 显示实际loss值\n",
    "                    lr=f\"{optimizer.param_groups[0]['lr']:.2e}\",  # 直接获取优化器当前学习率\n",
    "                    grad_norm=\"accumulating\"   # 提示当前处于梯度累积状态\n",
    "            )\n",
    "\n",
    "\n",
    "            # 在训练循环末尾添加显存清理\n",
    "            del images, targets, outputs, loss\n",
    "            torch.cuda.empty_cache()\n",
    "\n",
    "        # 在epoch结束时添加统计信息\n",
    "        print(f\"\\nEpoch {epoch+1} 空梯度批次数: {empty_grad_count}\")\n",
    "\n",
    "        # 每2个epoch验证一次\n",
    "        val_acc=0.0\n",
    "        if (epoch+1) % args.val_freq == 0:\n",
    "        \n",
    "            val_acc = validate(model,query_loader, gallery_loader,  device)\n",
    "            print(f\"mAP: {val_acc:.2%}\")\n",
    "\n",
    "\n",
    "            # ↓↓↓ 新增梯度日志记录 ↓↓↓\n",
    "            log_gradients(model, epoch)  # 每个验证周期记录一次\n",
    "            # ↑↑↑ 新增结束 ↑↑↑\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "        else:\n",
    "            val_acc = 0.0\n",
    "        \n",
    "        if  val_acc > best_acc:\n",
    "            best_acc = val_acc\n",
    "            torch.save( { 'epoch': epoch,\n",
    "                        'state_dict': model.state_dict(),\n",
    "                        'optimizer': optimizer.state_dict(),\n",
    "                        'best_acc': best_acc,  # ✅ 正确键名\n",
    "                        'args': args,\n",
    "                         'scheduler': scheduler.state_dict() , # 新增调度器状态\n",
    "                         'scaler': scaler.state_dict()  # 新增此行\n",
    "                                     },\n",
    "                      os.path.join(args.save_dir, \"best_model.pth\"))\n",
    "        \n",
    "        if epoch < warmup_epochs:\n",
    "            warmup_scheduler.step()\n",
    "        else:\n",
    "            scheduler.step()\n",
    "\n",
    "        #关键机制：\n",
    "        #梯度累积：模拟更大batch_size（accum_steps=2）\n",
    "        #自动恢复：异常中断后可从中断点继续训练\n",
    "        #最优模型保存：仅保留验证集最佳模型\n",
    "\n",
    "    \n",
    "\n",
    "        \n",
    "#核心技术：\n",
    "#混合精度训练：float16计算加速，自动梯度缩放\n",
    "#OneCycle策略：动态调整学习率提高收敛速度\n",
    "#融合优化器：CUDA内核融合减少GPU操作开销\n",
    "        \n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    args = get_args()\n",
    "    os.makedirs(args.save_dir, exist_ok=True)\n",
    "    train(args)\n",
    "\n",
    " \n",
    "\n",
    "#训练流程：\n",
    "#数据准备：加载VeRi-776数据集，应用强数据增强\n",
    "#模型构建：初始化自定义Transformer，配置混合精度\n",
    "#训练配置：设置复合损失函数、优化策略、验证机制\n",
    "#核心训练：梯度累积更新参数，定期验证模型性能\n",
    "#模型保存：保存最佳模型和中断恢复点\n",
    "#性能优化：通过TF32、OneCycle策略等提升训练效率\n",
    "\n",
    "\n",
    "#代码完整实现了从数据加载到模型训练的完整闭环，包含多项工业级优化技巧，适合大规模车辆重识别任务的训练需求。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "7171c209",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "3.12.9 | packaged by Anaconda, Inc. | (main, Feb  6 2025, 18:49:16) [MSC v.1929 64 bit (AMD64)]\n"
     ]
    }
   ],
   "source": [
    "import sys\n",
    "print(sys.version)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "8aef98d8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU核心数: 8\n"
     ]
    }
   ],
   "source": [
    "import multiprocessing\n",
    "\n",
    "# 在训练函数或主函数中调用\n",
    "print(f\"CPU核心数: {multiprocessing.cpu_count()}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4740337d",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# 新建train.py验证模型保存功能\n",
    "\n",
    "import sys\n",
    "import os\n",
    "from pathlib import Path\n",
    "\n",
    "# 获取当前文件的绝对路径\n",
    "current_file = Path(__file__).resolve()\n",
    "# 计算项目根目录：上溯两级（假设train.py在scripts/目录下）\n",
    "project_root = current_file.parent.parent\n",
    "# 将项目根目录添加到系统路径\n",
    "sys.path.insert(0, str(project_root))\n",
    "\n",
    "#作用：确保项目内自定义模块（如 Veri776Dataset）可被正确导入\n",
    "#关键点：通过路径解析动态添加项目根目录，避免绝对路径依赖\n",
    "import pynvml\n",
    "import torch\n",
    "import time\n",
    "from PIL import Image\n",
    "import argparse\n",
    "from tqdm import tqdm\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import transforms\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision.models import vit_b_16\n",
    "from einops import rearrange, repeat\n",
    "from collections import defaultdict\n",
    "from torch.utils.data import DataLoader  # 新增导入\n",
    "from torch.optim import AdamW\n",
    "import matplotlib.pyplot as plt\n",
    "from collections import Counter\n",
    "from sklearn.metrics import average_precision_score  # 新增导入\n",
    "import numpy as np\n",
    "from torch.optim.lr_scheduler import CosineAnnealingLR\n",
    "from src.datasets.veri776_dataset import Veri776Dataset\n",
    "from src.models.vehicle_transformer import VehicleTransformer\n",
    "import warnings\n",
    "from torch.optim.lr_scheduler import LambdaLR\n",
    "import multiprocessing\n",
    "import math\n",
    "\n",
    "\n",
    "\n",
    "warnings.filterwarnings(\"ignore\", category=FutureWarning)\n",
    "\n",
    "\n",
    "# 复合损失函数  ，主损失权重， 过小导致特征判断力不足\n",
    "\n",
    "class CombinedLoss(nn.Module):\n",
    "    def __init__(self, alpha=0.6, initial_margin=0.1, \n",
    "                 final_margin=0.5, total_epochs=100, \n",
    "                 threshold=0.3, initial_threshold=0.0, \n",
    "                 final_threshold=0.5 ,\n",
    "                 early_stop_epoch=20 ):\n",
    "        super().__init__()\n",
    "        self.ce = nn.CrossEntropyLoss(label_smoothing=0.2)\n",
    "        self.initial_margin = initial_margin\n",
    "        self.final_margin = final_margin\n",
    "        self.total_epochs = total_epochs\n",
    "        self.current_epoch = 0\n",
    "        self.triplet = nn.TripletMarginLoss(margin=initial_margin, reduction='mean')\n",
    "        self.initial_threshold = initial_threshold  # 训练初期阈值设为0.0\n",
    "        self.final_threshold = final_threshold\n",
    "        self.current_threshold = initial_threshold\n",
    "        self.epoch_pos_cos_sim = []\n",
    "        self.early_stop_epoch = early_stop_epoch  # 记录早停epoch\n",
    "        self.alpha=alpha\n",
    "\n",
    "    def set_current_epoch(self, epoch):\n",
    "        new_margin = self.initial_margin + (self.final_margin - self.initial_margin) * (epoch / self.total_epochs)#随训练进行，margin 从 0.1 逐步增至 0.5，增加分类难度。\n",
    "        self.triplet.margin = new_margin\n",
    "        self.current_threshold = self.initial_threshold + (self.final_threshold - self.initial_threshold) * (epoch / self.total_epochs)\n",
    "        self.epoch_pos_cos_sim = []\n",
    "\n",
    "    def reset_epoch_cache(self):\n",
    "        self.epoch_pos_cos_sim = []\n",
    "\n",
    "    def forward(self, outputs, targets, batch):\n",
    "        main_loss = self.ce(outputs[\"logits\"], targets)\n",
    "        part_loss = sum([self.ce(p, targets) for p in outputs[\"part_logits\"]])\n",
    "        bn_feature = outputs[\"bn_feature\"]\n",
    "        device = targets.device  \n",
    "        camera_ids = batch['camera_id'].to(device)\n",
    "        idx = torch.arange(len(targets), device=device)\n",
    "\n",
    "        # 计算余弦相似度和掩码\n",
    "        pairwise_dist = 1 - F.cosine_similarity(bn_feature.unsqueeze(1), bn_feature.unsqueeze(0), dim=2)\n",
    "        cos_sim = F.cosine_similarity(bn_feature.unsqueeze(1), bn_feature.unsqueeze(0), dim=2)\n",
    "        pos_mask = (\n",
    "            (targets.unsqueeze(1) == targets.unsqueeze(0)) &\n",
    "            (camera_ids.unsqueeze(1) != camera_ids.unsqueeze(0)) &\n",
    "            (idx != idx.unsqueeze(1))\n",
    "        )\n",
    "        neg_mask = targets.unsqueeze(1) != targets.unsqueeze(0)\n",
    "\n",
    "        with torch.no_grad():\n",
    "            rows, cols = torch.where(pos_mask)\n",
    "            pos_cos_sim = cos_sim[pos_mask]\n",
    "            if pos_cos_sim.numel() > 0:\n",
    "                self.epoch_pos_cos_sim.append(pos_cos_sim.cpu().numpy())\n",
    "            \n",
    "            # 处理正样本分组\n",
    "            if rows.numel() == 0:\n",
    "                valid_mask = torch.zeros(len(targets), dtype=torch.bool, device=device)\n",
    "            else:\n",
    "                pos_cos_per_anchor = []\n",
    "                for i in range(len(targets)):\n",
    "                    anchor_pos = (rows == i)\n",
    "                    pos_values = cos_sim[i, cols[anchor_pos]]\n",
    "                    pos_cos_per_anchor.append(pos_values)\n",
    "                pos_cos_per_anchor = torch.nn.utils.rnn.pad_sequence(\n",
    "                    pos_cos_per_anchor, batch_first=True, padding_value=-1\n",
    "                )\n",
    "                # 压缩为一维有效锚点掩码\n",
    "                has_positive = (pos_cos_per_anchor != -1).any(dim=1)  # [batch_size]\n",
    "                above_threshold = (pos_cos_per_anchor > self.current_threshold).any(dim=1)  # [batch_size]\n",
    "                \n",
    "                # 新增：根据当前epoch动态调整阈值判断\n",
    "                if self.current_epoch < self.early_stop_epoch:\n",
    "                    valid_mask = has_positive  # 仅检查是否存在正样本\n",
    "                else:\n",
    "                    above_threshold = (pos_cos_per_anchor > self.current_threshold).any(dim=1)  # 正常阈值判断\n",
    "                    valid_mask = has_positive & above_threshold  # 组合条件\n",
    "                \n",
    "                \n",
    "\n",
    "            valid_ratio = valid_mask.float().mean()\n",
    "\n",
    "\n",
    "            \n",
    "\n",
    "            # 处理有效三元组\n",
    "            if valid_mask.sum() == 0:\n",
    "                tri_loss = torch.zeros(1, device=device)\n",
    "                tri_weight = 0.0\n",
    "            else:\n",
    "            # 获取有效样本的局部索引（关键修正）\n",
    "                valid_indices = torch.where(valid_mask)[0]  # [M] 有效样本在原批次中的索引\n",
    "                M = valid_indices.size(0)  # 有效样本数\n",
    "\n",
    "                 # ✅ 新增：仅在有效样本范围内计算距离（减少计算量）\n",
    "                bn_feature_valid = bn_feature[valid_indices]  # [M, D]\n",
    "                pairwise_dist_valid = 1 - F.cosine_similarity(\n",
    "                bn_feature_valid.unsqueeze(1),  # [M, 1, D]\n",
    "                bn_feature_valid.unsqueeze(0),  # [1, M, D]\n",
    "                dim=2  # 计算M×M的余弦距离矩阵\n",
    "            )  # [M, M]\n",
    "\n",
    "                # ✅ 新增：有效样本内的正负掩码（基于有效样本的元数据）\n",
    "                targets_valid = targets[valid_indices]  # [M]\n",
    "                camera_ids_valid = camera_ids[valid_indices]  # [M]\n",
    "                idx_valid = torch.arange(M, device=device)  # [M]（有效样本的局部索引）\n",
    "\n",
    "                pos_mask_valid = (\n",
    "                (targets_valid.unsqueeze(1) == targets_valid.unsqueeze(0)) &  # [M, M]\n",
    "                (camera_ids_valid.unsqueeze(1) != camera_ids_valid.unsqueeze(0)) &  # [M, M]\n",
    "                (idx_valid != idx_valid.unsqueeze(1))  # [M, M]（排除自对比）\n",
    "            )\n",
    "                neg_mask_valid = targets_valid.unsqueeze(1) != targets_valid.unsqueeze(0)  # [M, M]\n",
    "            \n",
    "\n",
    "\n",
    "            \n",
    "            # ✅ 新增：有效样本内的难例挖掘（直接基于局部距离矩阵）\n",
    "            # 最难负样本：在负样本中找距离最大（最相似）的\n",
    "                neg_dist_valid = pairwise_dist_valid.clone()  # [M, M]\n",
    "                neg_dist_valid[~neg_mask_valid] = -float('inf')  # 无效位置设为负无穷\n",
    "                hardest_negative_local = neg_dist_valid.argmax(dim=1)  # [M]（每个锚点的最难负样本局部索引）\n",
    "\n",
    "            # 最难正样本：在正样本中找距离最小（最不相似）的\n",
    "                pos_dist_valid = pairwise_dist_valid.clone()  # [M, M]\n",
    "                pos_dist_valid[~pos_mask_valid] = float('inf')  # 无效位置设为正无穷\n",
    "                hardest_positive_local = pos_dist_valid.argmin(dim=1)  # [M]（每个锚点的最难正样本局部索引）\n",
    "\n",
    "            # 防御性编程：确保索引在有效范围内（M-1为最大局部索引）\n",
    "                hardest_positive_local = hardest_positive_local.clamp(0, M-1)\n",
    "                hardest_negative_local = hardest_negative_local.clamp(0, M-1)\n",
    "\n",
    "            # 提取有效三元组特征（使用局部索引）\n",
    "                valid_anchor = bn_feature_valid  # [M, D]（有效样本的锚点特征）\n",
    "                valid_positive = bn_feature_valid[hardest_positive_local]  # [M, D]（最难正样本特征）\n",
    "                valid_negative = bn_feature_valid[hardest_negative_local]  # [M, D]（最难负样本特征）\n",
    "\n",
    "            \n",
    "            # 维度校验（关键）\n",
    "                assert valid_anchor.shape == valid_positive.shape == valid_negative.shape, \\\n",
    "                f\"三元组维度不匹配: {valid_anchor.shape}, {valid_positive.shape}, {valid_negative.shape}\"\n",
    "            \n",
    "                tri_loss = self.triplet(valid_anchor, valid_positive, valid_negative)\n",
    "                tri_weight = max(0.5, 0.5 * valid_ratio)#当有效三元组比例valid_ratio较低时，权重不低于 0.5，保证损失稳定性。\n",
    "    \n",
    "\n",
    "\n",
    "        total_loss = self.alpha * main_loss + 0.05 * part_loss + tri_weight * tri_loss\n",
    "        return {\n",
    "            \"total\": total_loss,\n",
    "            \"main\": main_loss,\n",
    "            \"part\": part_loss,\n",
    "            \"triplet\": tri_loss,\n",
    "            \"tri_weight\": tri_weight,\n",
    "            \"valid_ratio\": valid_ratio,\n",
    "        }\n",
    "#设计亮点：\n",
    "#标签平滑：防止模型对标签过拟合\n",
    "#动态难例挖掘：自动选择最难区分的负样本增强特征判别性\n",
    "#多损失融合：分类损失 + 部件分类+ 三元组损失\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def euclidean_dist(x, y):\n",
    "    \"\"\"\n",
    "    计算欧氏距离矩阵\n",
    "    x: (n, feat_dim)\n",
    "    y: (m, feat_dim)\n",
    "    return: (n, m) \n",
    "    \"\"\"\n",
    "    x2 = torch.sum(x ** 2, dim=1).unsqueeze(1)\n",
    "    y2 = torch.sum(y ** 2, dim=1).unsqueeze(0)\n",
    "    dist = x2 + y2 - 2.0 * torch.mm(x, y.transpose(0, 1))\n",
    "    return torch.sqrt(torch.clamp(dist, min=1e-12))\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "#mAP计算函数\n",
    "# 修改calc_mAP函数，使用更高效的计算方式\n",
    "\n",
    "def calc_mAP(query_feats, query_ids, query_cams, gallery_feats, gallery_ids, gallery_cams):\n",
    "    \"\"\"内存优化版mAP计算\"\"\"\n",
    "    device = query_feats.device  # 保持设备一致性\n",
    "    \n",
    "    # 转换为numpy数组（在CPU处理）\n",
    "    query_ids = np.array(query_ids)\n",
    "    query_cams = np.array(query_cams)\n",
    "    gallery_ids = np.array(gallery_ids)\n",
    "    gallery_cams = np.array(gallery_cams)\n",
    "    \n",
    "    aps = []\n",
    "    \n",
    "    # 分块处理查询集\n",
    "    block_size = 128  # 可根据内存调整\n",
    "    for i in tqdm(range(0, len(query_feats), block_size), \n",
    "                 desc=\"计算mAP\", leave=False):\n",
    "        # 当前块数据\n",
    "        start = i\n",
    "        end = min(i+block_size, len(query_feats))\n",
    "        \n",
    "        # 分块计算相似度\n",
    "        with torch.no_grad():\n",
    "            q_block = query_feats[start:end].to(device, non_blocking=True)  # 保持在GPU\n",
    "            g_block = gallery_feats.to(device, non_blocking=True)  # 保持在GPU\n",
    "            sim_block = torch.mm(q_block, g_block.T)\n",
    "            # 立即释放GPU内存\n",
    "            del g_block\n",
    "            torch.cuda.empty_cache()\n",
    "        \n",
    "        # 仅传输sim_block到CPU\n",
    "        sim_block = sim_block.cpu().numpy()\n",
    "        \n",
    "        # 逐样本计算AP\n",
    "        for j in range(sim_block.shape[0]):\n",
    "            orig_idx = start + j\n",
    "            sim = sim_block[j]\n",
    "            \n",
    "            # 生成有效性mask\n",
    "            same_id_mask = (gallery_ids == query_ids[orig_idx])\n",
    "            diff_cam_mask = (gallery_cams != query_cams[orig_idx])\n",
    "            valid_pos_mask = same_id_mask & diff_cam_mask\n",
    "            \n",
    "            # 计算单个AP\n",
    "            y_true = valid_pos_mask.astype(np.int32)\n",
    "            y_score = sim\n",
    "            \n",
    "            if y_true.sum() == 0:\n",
    "                aps.append(0.0)  # 无正样本时AP设为0，而非跳过\n",
    "                continue\n",
    "            \n",
    "             # 按相似度降序排序\n",
    "            order = np.argsort(y_score)[::-1]\n",
    "            y_true_sorted = y_true[order]\n",
    "            y_score_sorted = y_score[order]\n",
    "            \n",
    "             # 计算累积精度和召回率\n",
    "            tp = np.cumsum(y_true_sorted)\n",
    "            precision = tp / np.arange(1, len(tp) + 1)\n",
    "            recall = tp / y_true.sum()\n",
    "            \n",
    "            # 使用np.trapezoid计算AP\n",
    "            if np.sum(y_true) > 0:\n",
    "                ap = np.trapezoid(precision, recall)\n",
    "                aps.append(ap)\n",
    "\n",
    "        # 及时释放显存\n",
    "        del q_block, sim_block\n",
    "        torch.cuda.empty_cache()\n",
    "    \n",
    "    return np.mean(aps) if aps else 0.0\n",
    "\n",
    "\n",
    "#创新点：\n",
    "#分块处理：将大型矩阵运算分解为小块，避免OOM（内存不足）\n",
    "#余弦相似度：更鲁棒的度量方式\n",
    "#NaN检测：防止无效特征导致计算崩溃\n",
    "#在validate函数中，调用了calc_mAP(feats, np.array(pids), feats, np.array(pids))，这意味着将同一个数据集同时作为query和gallery，这会使得每个样本都能匹配到自己，导致mAP虚高。\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# 添加验证循环（防止过拟合）\n",
    "def validate(model,  query_loader, gallery_loader,device):    #train.py中的验证只是用来监控训练过程\n",
    "    model.eval()\n",
    "     # === 提取Query特征和元数据 ===\n",
    "    query_ids, query_cams = [], []\n",
    "    query_feats = []  # GPU上的特征分块存储\n",
    "\n",
    "    # === 新增：收集查询集和测试集的vehicle_id ===\n",
    "    all_query_ids = []\n",
    "    all_gallery_ids = []\n",
    "    \n",
    "\n",
    "  \n",
    "\n",
    "    with torch.no_grad():      # 特征收集与内存优化\n",
    "        for batch in query_loader:\n",
    "            images = batch[\"image\"].to(device, non_blocking=True)\n",
    "            outputs = model(images)\n",
    "\n",
    "\n",
    "           # 直接使用模型输出的归一化特征（已在模型内部归一化）\n",
    "            query_feats.append(outputs[\"bn_feature\"].float())  # 修改行\n",
    "            query_ids.extend(batch[\"vehicle_id\"].tolist())\n",
    "            query_cams.extend(batch[\"camera_id\"].tolist())\n",
    "            all_query_ids.extend(batch[\"vehicle_id\"].tolist())  # 收集所有查询ID\n",
    "           \n",
    "    query_feats = torch.cat(query_feats, dim=0)         # ✅ 合并为单一Tensor\n",
    "\n",
    "    \n",
    "\n",
    "    # 添加以下代码\n",
    "    valid_mask = torch.any(query_feats != 0, dim=1)  # 过滤全零向量\n",
    "    valid_feats = query_feats[valid_mask]\n",
    "    if valid_feats.numel() == 0:\n",
    "        norm = 0.0\n",
    "    else:\n",
    "        norm = torch.norm(valid_feats, dim=1).mean().item()\n",
    "    print(f\"查询特征平均范数: {norm:.4f}\")  # 应接近1.0\n",
    "    \n",
    "\n",
    "\n",
    "\n",
    "    # === 提取Gallery特征 ===\n",
    "    gallery_ids, gallery_cams = [], []\n",
    "    gallery_feats = []\n",
    "    with torch.no_grad():\n",
    "        for batch in gallery_loader:\n",
    "            images = batch[\"image\"].to(device, non_blocking=True)\n",
    "            outputs = model(images)\n",
    "            gallery_feats.append(outputs[\"bn_feature\"].float())  # 修改行\n",
    "            gallery_ids.extend(batch[\"vehicle_id\"].tolist())\n",
    "            gallery_cams.extend(batch[\"camera_id\"].tolist())\n",
    "            all_gallery_ids.extend(batch[\"vehicle_id\"].tolist())  # 收集所有测试ID\n",
    "\n",
    "            \n",
    "\n",
    "\n",
    "\n",
    "\n",
    "    # === 新增：检查ID交集 ===\n",
    "    common_ids = np.intersect1d(all_query_ids, all_gallery_ids)\n",
    "   # print(f\"查询集与测试集共有车辆ID数: {len(common_ids)} / {len(set(all_query_ids))}\")\n",
    "    assert len(common_ids) > 0, \"查询集与测试集无交集车辆ID，数据集划分错误！\"\n",
    "\n",
    "\n",
    "\n",
    "    # === 检查相机ID交集（新增代码③） ===\n",
    "    common_cams = set(query_cams) & set(gallery_cams)  # 计算相机ID交集\n",
    "    if not common_cams:\n",
    "        raise ValueError(\"查询集与测试集无跨相机样本，无法计算mAP\")\n",
    "\n",
    "\n",
    "\n",
    "    # 保持特征在GPU内存中，分块处理   \n",
    "    gallery_feats = torch.cat(gallery_feats, dim=0)     # ✅ 合并为单一Tensor\n",
    "    \n",
    "     # 计算mAP\n",
    "    return calc_mAP(query_feats.cpu(), query_ids, query_cams, gallery_feats.cpu(), gallery_ids, gallery_cams)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "#关键优化：\n",
    "#特征类型转换：float32比默认float64节省50%内存\n",
    "#及时释放显存：del images防止显存泄漏\n",
    "\n",
    "\n",
    "# 配置参数\n",
    "def get_args():\n",
    "    parser = argparse.ArgumentParser()\n",
    "    parser.add_argument(\"--batch_size\", type=int, default=24)   #批次大小， 过小导致统计量不准\n",
    "    parser.add_argument(\"--lr\", type=float, default=5e-4)\n",
    "    parser.add_argument(\"--epochs\", type=int, default=100)\n",
    "    parser.add_argument(\"--val_freq\", type=int, default=2)  # 每2个epoch验证一次\n",
    "    # 添加缺失的save_dir参数\n",
    "    parser.add_argument(\"--save_dir\", type=str, default=\"checkpoints\") # 恢复训练路径\n",
    "    parser.add_argument(\"--resume\", type=str, default=\"\",\n",
    "                        help=\"checkpoint path to resume\") \n",
    "    parser.add_argument(\"--pct_start\", type=float, default=0.3, help=\"学习率上升阶段占比\") \n",
    "    \n",
    "    return parser.parse_args()\n",
    "\n",
    "\n",
    "# 在train.py中添加测试代码\n",
    "# 修改后\n",
    "def test_patch_embed():\n",
    "\n",
    "    test_model = VehicleTransformer(\n",
    "        num_classes=776,\n",
    "        img_size=(256, 256),\n",
    "        patch_size=16,\n",
    "        local_parts=4,  # 需要与模型配置一致\n",
    "        num_heads=4,  # 保持head_dim=32\n",
    "        embed_dim=128\n",
    "    )\n",
    "    dummy_input = torch.randn(1, 3,256, 256)\n",
    "    \n",
    "    # 获取全局特征和局部特征\n",
    "    global_feat, local_feats = test_model.forward_features(dummy_input)\n",
    "    \n",
    "    # 打印关键维度\n",
    "    print(\"\\n=== 特征维度验证 ===\")\n",
    "    print(f\"全局特征维度: {global_feat.shape}\")        \n",
    "    print(f\"局部特征数量: {len(local_feats)}\")         # 应等于 local_parts参数值\n",
    "    print(f\"单个局部特征维度: {local_feats[0].shape}\") \n",
    "\n",
    "\n",
    "# 定义全局collate函数,使数据结构对齐，避免类型错误\n",
    "def custom_collate(batch):\n",
    "    return {\n",
    "        'image': torch.stack([item['image'] for item in batch]),\n",
    "        'class_id': torch.tensor([item['class_id'] for item in batch]),\n",
    "        'vehicle_id': torch.tensor([item['vehicle_id'] for item in batch]),\n",
    "        'camera_id': torch.tensor([item['camera_id'] for item in batch])\n",
    "    }\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# 训练函数\n",
    "def train(args):\n",
    "    torch.set_num_threads(multiprocessing.cpu_count())  # 自动使用全部CPU核心\n",
    "    # ===== 新增梯度累积参数 =====\n",
    "    accum_steps = 4  # 添加到train函数开头参数部分\n",
    "\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "    \n",
    "\n",
    "    # 在train函数开头添加\n",
    "    torch.backends.cuda.matmul.allow_tf32 = True  # 启用TF32计算 ，保持精度前提下加速矩阵运算\n",
    "    torch.backends.cudnn.benchmark = True  # 启用cuDNN自动优化器\n",
    "    torch.cuda.empty_cache()  # 清空缓存\n",
    "\n",
    "    # 基础transform（共享部分）\n",
    "    base_transform = transforms.Compose([\n",
    "    transforms.Resize((288, 288)),\n",
    "    transforms.CenterCrop(256), \n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
    "  ])\n",
    "    \n",
    "    train_set_base = Veri776Dataset(mode=\"train\", transform=base_transform)\n",
    "    total_train_samples = len(train_set_base)  # 训练集总样本数\n",
    "    del train_set_base  # 释放内存\n",
    "    actual_updates_per_epoch = math.ceil(total_train_samples / (args.batch_size * accum_steps))  # 替换为 total_train_samples\n",
    "    total_steps = args.epochs * actual_updates_per_epoch\n",
    "    \n",
    "\n",
    "    # 数据增强\n",
    "    query_transform = base_transform\n",
    "    test_transform = base_transform\n",
    "\n",
    "    \n",
    "   \n",
    "\n",
    "    # 正确修改（使用查询集作为验证参考）\n",
    "    query_set = Veri776Dataset(mode=\"query\", transform=query_transform)  # Query\n",
    "    gallery_set = Veri776Dataset(mode=\"test\", transform=test_transform) # Gallery\n",
    "\n",
    "    \n",
    " \n",
    "    # 创建 Query 和 Gallery 的 DataLoader\n",
    "    query_loader = DataLoader(query_set, \n",
    "                              batch_size=args.batch_size ,  \n",
    "                              shuffle=False,\n",
    "    num_workers=4,\n",
    "     pin_memory=True,\n",
    "    drop_last=False,\n",
    "        collate_fn=custom_collate,  # 使用全局函数\n",
    "        \n",
    "    prefetch_factor=2,  # 每个worker预取2个批次\n",
    "    \n",
    "        )\n",
    "\n",
    "\n",
    "    gallery_loader = DataLoader(gallery_set, \n",
    "                                batch_size=args.batch_size,\n",
    "                                 shuffle=False,\n",
    "    num_workers=4,\n",
    "     pin_memory=True,\n",
    "    drop_last=False,\n",
    "   collate_fn=custom_collate,  # 使用全局函数     \n",
    "    prefetch_factor=2,  # 每个worker预取2个批次\n",
    "        \n",
    "   )# 添加预取\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "    # 模型初始化\n",
    "    model = VehicleTransformer(\n",
    "        num_classes=776,\n",
    "    img_size=(256, 256),    # 输入尺寸\n",
    "    patch_size=16,           # 增大分块尺寸\n",
    "    local_parts=4,\n",
    "    embed_dim=128,          # 特征维度， 过低导致信息丢失\n",
    "    depth=4,                # 层数  \n",
    "    num_heads=4,            # 注意力头\n",
    "    mlp_ratio=4,\n",
    "    \n",
    "    pretrained=False).to(device)\n",
    "    model = model.to(memory_format=torch.channels_last)  # 新增此行\n",
    "\n",
    "# 新增梯度检查点（节省显存）\n",
    "    from torch.utils.checkpoint import checkpoint_sequential\n",
    "    model.encoder.use_checkpoint = True  # 自定义属性\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "    \n",
    "\n",
    "    # 修改encoder前向传播\n",
    "    def custom_encoder_forward(x):\n",
    "        if model.encoder.use_checkpoint:\n",
    "            return checkpoint_sequential(model.encoder.layers, 4, x)\n",
    "        else:\n",
    "            return model.encoder(x)\n",
    "\n",
    "\n",
    "    def log_gradients(model, epoch):\n",
    "        \"\"\"梯度分布记录函数\"\"\"\n",
    "        grad_info = []\n",
    "        for name, param in model.named_parameters():\n",
    "            if param.grad is not None:\n",
    "                grad_norm = param.grad.data.norm(2).item()\n",
    "                grad_info.append(f\"{name[:15]:<15} : {grad_norm:.4e}\")\n",
    "        with open(\"gradient_log.txt\", \"a\") as f:\n",
    "            f.write(f\"\\n=== Epoch {epoch+1} 梯度分布 ===\\n\")\n",
    "            f.write(\"\\n\".join(grad_info))\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "    \n",
    "    # 优化器，修改为（添加权重衰减和梯度裁剪）\n",
    "    optimizer = AdamW(\n",
    "        model.parameters(), \n",
    "         lr=args.lr,                  #学习率， 过高引发梯度爆炸\n",
    "        weight_decay=0.001,  # ✅ 新增权重衰减\n",
    "        fused=True\n",
    ")                 # 启用融合优化\n",
    "\n",
    "\n",
    "# === 关键插入位置：计算修正后的total_steps ===\n",
    "    # 计算实际总步数（考虑梯度累积）\n",
    "    train_set = Veri776Dataset(mode=\"train\", transform=base_transform)  # 临时创建一次数据集以获取总样本数\n",
    "    total_samples = len(train_set)\n",
    "\n",
    "    actual_updates_per_epoch = math.ceil(total_samples / (args.batch_size * accum_steps))  # 考虑梯度累积\n",
    "    total_steps = args.epochs * actual_updates_per_epoch  # 修正后的总步数\n",
    "    del train_set  # 释放临时数据集\n",
    "\n",
    "   \n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "  # 更换学习率调度器（新增）\n",
    "    from torch.optim.lr_scheduler import OneCycleLR\n",
    "  \n",
    "\n",
    "\n",
    "    warmup_steps = int(total_steps * args.pct_start)  # 新增参数pct_start到args中\n",
    "\n",
    "    scheduler_onecycle = OneCycleLR(optimizer, \n",
    "                      max_lr=args.lr,\n",
    "                      total_steps=total_steps,  # 使用修正后的总步数\n",
    "                      pct_start=args.pct_start, # 学习率上升阶段\n",
    "                      div_factor=10,  #初始学习率=max_lr/div_factor\n",
    "                      final_div_factor=1e4,  # 最终学习率 = max_lr / final_div_factor\n",
    "                      anneal_strategy='cos')  # 使用余弦退火策略          \n",
    "    \n",
    "    \n",
    "\n",
    "    # 混合精度初始化\n",
    "    scaler = torch.cuda.amp.GradScaler()  # 移除device_type参数\n",
    "\n",
    "     # 损失函数\n",
    "    criterion = CombinedLoss(alpha=0.6,\n",
    "        initial_margin=0.1,    \n",
    "        final_margin=0.5,      \n",
    "        total_epochs=args.epochs , # 总epoch数（与训练配置一致）\n",
    "        early_stop_epoch=20,  # 前20个epoch不设阈值\n",
    "        )  # 显式传递标量参数\n",
    "\n",
    "    # === 新增检查点恢复逻辑 ===\n",
    "    start_epoch = 0\n",
    "    best_acc = 0.0\n",
    "    # 在训练循环前添加自动保存路径\n",
    "    checkpoint_dir = Path(args.save_dir) / \"interrupt\"\n",
    "    checkpoint_dir.mkdir(parents=True, exist_ok=True)\n",
    "\n",
    "\n",
    "   \n",
    "    if args.resume and Path(args.resume).exists():\n",
    "        checkpoint = torch.load(args.resume, map_location=device)\n",
    "        model.load_state_dict(checkpoint['state_dict'])\n",
    "        optimizer.load_state_dict(checkpoint['optimizer'])\n",
    "        start_epoch = checkpoint['epoch']\n",
    "        best_acc = checkpoint.get('best_acc', 0.0)  # 安全获取最佳acc\n",
    "        scaler.load_state_dict(checkpoint.get('scaler', scaler.state_dict()))  # 安全加载scaler\n",
    "        \n",
    "         # 安全加载调度器状态\n",
    "        if 'scheduler' in checkpoint:\n",
    "            scheduler_state = checkpoint['scheduler']\n",
    "            if 'onecycle' in scheduler_state:\n",
    "                scheduler_onecycle.load_state_dict(scheduler_state['onecycle'])\n",
    "                print(\"成功加载OneCycleLR调度器状态\")\n",
    "            else:\n",
    "                print(\"未检测到有效调度器状态，使用初始配置\")\n",
    "        \n",
    "        print(f\"恢复训练：从epoch {start_epoch}开始，历史最佳mAP {best_acc:.2%}\")\n",
    "\n",
    "    \n",
    "\n",
    "    # 设置环境变量（在训练开始前）\n",
    "    \n",
    "    os.environ[\"PYTORCH_CUDA_ALLOC_CONF\"] = \"expandable_segments:True\"\n",
    "\n",
    "    \n",
    "    # === 这里就是训练代码开头的最佳位置 ===\n",
    "    print(\"\\n=== 训练配置摘要 ===\")\n",
    "    print(f\"输入尺寸: {model.img_size}\")\n",
    "    print(f\"批次大小: {args.batch_size}\")\n",
    "    print(f\"初始学习率: {args.lr}\")\n",
    "    print(f\"嵌入维度: {model.embed_dim}\")\n",
    "    print(f\"Transformer深度: {model.depth}\")\n",
    "    print(f\"设备: {device}\")\n",
    "    print(f\"训练样本数: {total_train_samples}\")  # 使用循环外的 total_train_samples\n",
    "    print(f\"验证样本数: {len(query_set)}\")\n",
    "    print(f\"混合精度训练: {scaler.is_enabled()}\")\n",
    "    print(\"=====================\\n\")\n",
    "    print(f\"Total training steps: {total_steps}, pct_start: {args.pct_start:.2f}\")  # 新增行\n",
    "   \n",
    "\n",
    "\n",
    "    \n",
    "\n",
    "    \n",
    "\n",
    "    # 训练循环中\n",
    "    for epoch in range(start_epoch, args.epochs):\n",
    "        empty_grad_count = 0  \n",
    "        model.train()\n",
    " \n",
    "        # 在每个epoch开始时更新当前epoch，动态调整margin\n",
    "        epoch_valid_ratio = []  # 存储每个批次的有效比例\n",
    "        epoch_grad_norms = []    # 新增：存储每个梯度更新的范数\n",
    "        criterion.set_current_epoch(epoch)  # 传入当前epoch（从0开始）\n",
    "\n",
    "        if epoch < 20:\n",
    "            train_transform = transforms.Compose([\n",
    "        transforms.Resize((288, 288)),\n",
    "        transforms.RandomCrop((256, 256)),\n",
    "        \n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
    "\n",
    "    ])\n",
    "            \n",
    "\n",
    "        elif epoch < 40:\n",
    "        # 阶段2：初级增强\n",
    "            train_transform = transforms.Compose([\n",
    "            transforms.Resize((288, 288)),\n",
    "            transforms.RandomCrop((256, 256)),\n",
    "            transforms.RandomHorizontalFlip(p=0.2),       # 低概率翻转\n",
    "            transforms.ColorJitter(brightness=0.1, contrast=0.1),  # 轻微颜色扰动\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
    "        ])\n",
    "            \n",
    "        elif epoch < 60:  # 阶段3：中等强度增强\n",
    "            train_transform = transforms.Compose([\n",
    "            transforms.Resize((288, 288)),\n",
    "            transforms.RandomCrop((256, 256)),\n",
    "            transforms.RandomHorizontalFlip(p=0.4),  # 翻转概率提升\n",
    "            transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),  # 颜色扰动（低强度）\n",
    "            transforms.RandomGrayscale(p=0.05),  # 灰度化（低概率）\n",
    "            transforms.RandomApply([transforms.GaussianBlur(kernel_size=3, sigma=(0.1, 1.0))], p=0.2),  # 高斯模糊（低概率）\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
    "        ])\n",
    "\n",
    "        else:\n",
    "             train_transform = transforms.Compose([\n",
    "            transforms.Resize((288, 288)),\n",
    "            transforms.RandomCrop((256, 256)),\n",
    "            transforms.RandomHorizontalFlip(p=0.6),  # 高概率翻转\n",
    "            transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3),  # 颜色扰动（高强度）\n",
    "            transforms.RandomGrayscale(p=0.1),  # 灰度化（中等概率）\n",
    "            transforms.RandomApply([transforms.GaussianBlur(kernel_size=3, sigma=(0.1, 2.0))], p=0.3),  # 高斯模糊（中等概率）\n",
    "            transforms.RandomRotation(10),  # 小角度旋转\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
    "            transforms.RandomErasing(p=0.3, scale=(0.02, 0.2), ratio=(0.3, 2.0))  # 擦除（降低scale上限）\n",
    "        ])\n",
    "            \n",
    "                 # 数据集\n",
    "        train_set = Veri776Dataset(mode=\"train\", transform=train_transform)\n",
    "\n",
    "        \n",
    "   \n",
    "        train_loader = DataLoader(\n",
    "         train_set,\n",
    "    batch_size=args.batch_size,\n",
    "    shuffle=True,\n",
    "    num_workers=4,  # 设置为0时禁用多进程\n",
    "    pin_memory=True,\n",
    "    drop_last=True,\n",
    "    collate_fn=custom_collate  ,\n",
    "    prefetch_factor=2,  # 每个worker预取2个批次\n",
    "    \n",
    ")\n",
    "        current_total_steps = (epoch + 1) * actual_updates_per_epoch  # 无需重新计算样本数\n",
    "\n",
    "        scheduler_state = {}\n",
    "        \n",
    "        scheduler_state['onecycle'] = scheduler_onecycle.state_dict()\n",
    "\n",
    "        \n",
    "\n",
    "        # === 每个epoch开始前保存恢复点 ===\n",
    "        torch.save({\n",
    "        'epoch': epoch,\n",
    "        'state_dict': model.state_dict(),\n",
    "        'optimizer': optimizer.state_dict(),\n",
    "        'total_steps': total_steps,  # 可选：保存总步数用于恢复\n",
    "        'best_acc': best_acc,\n",
    "        'args': args,\n",
    "        'scaler': scaler.state_dict() , # 新增此行\n",
    "        'scheduler': scheduler_state  # 空字典或有效状态字典\n",
    "                \n",
    "            \n",
    "     }, checkpoint_dir / \"last_checkpoint.pth\")  # 保存last_checkpoint.pth\n",
    "\n",
    "\n",
    "        \n",
    "\n",
    "         # 添加enumerate获取batch_idx\n",
    "        train_bar = tqdm(\n",
    "                         enumerate(train_loader), \n",
    "                         total=len(train_loader),\n",
    "                         desc=f\"Epoch {epoch+1}/{args.epochs}\".ljust(15),  # 固定描述长度\n",
    "                         postfix={},  # 清空postfix或保留其他自定义信息\n",
    "                         position=0,      # 新增：固定位置\n",
    "                         leave=True,      # 新增：保留进度条                      \n",
    "                         mininterval=1,   # 降低刷新频率到0.5秒\n",
    "                         maxinterval=5,\n",
    "                         smoothing=0.1,\n",
    "                         dynamic_ncols=True,\n",
    "            bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'\n",
    "      )\n",
    "         \n",
    "        \n",
    "        for batch_idx, batch in train_bar:\n",
    "            images = batch[\"image\"].to(device)\n",
    "            targets = batch[\"class_id\"].to(device) # ✅ 删除梯度清零\n",
    "            optimizer.zero_grad(set_to_none=True)\n",
    "            \n",
    "            \n",
    "     \n",
    "            # 前向传播部分\n",
    "            with torch.cuda.amp.autocast(dtype=torch.float16):  # 使用cuda子模块\n",
    "                outputs = model(images)\n",
    "                \n",
    "                # 训练循环中使用\n",
    "                loss_dict = criterion(outputs, targets, batch)  # 正确传递 batch 参数\n",
    "                \n",
    "\n",
    "\n",
    "                total_loss = loss_dict[\"total\"]  # 用于反向传播的总损失\n",
    "\n",
    "                # 收集有效比例\n",
    "                epoch_valid_ratio.append(loss_dict[\"valid_ratio\"].item())\n",
    "\n",
    "                # 添加loss缩放保护\n",
    "                if not torch.isfinite(total_loss):\n",
    "                  #  print(f\"检测到非有限loss值: {total_loss.item()}, 跳过当前批次\")\n",
    "                    \n",
    "                    continue  # ✅ 跳过问题批次\n",
    "            \n",
    "             # 梯度累积：总损失需按累积步数缩放\n",
    "            scaled_loss = total_loss / accum_steps\n",
    "            scaler.scale(scaled_loss).backward()\n",
    "            \n",
    "            # 只有当累积步数达到设定值时才更新参数\n",
    "            if (batch_idx + 1) % accum_steps == 0:\n",
    "             #梯度裁剪\n",
    "                scaler.unscale_(optimizer)\n",
    "                \n",
    "                # 计算裁剪前的梯度范数\n",
    "\n",
    "                 # 新增：鲁棒性梯度范数计算（避免NaN）\n",
    "                grad_norm = 0.0\n",
    "                has_valid_grad = False\n",
    "                for param in model.parameters():\n",
    "                    if param.grad is not None and param.grad.numel() > 0:\n",
    "            # 累加L2范数的平方\n",
    "                        grad_norm += torch.sum(param.grad.pow(2)).item()\n",
    "                        has_valid_grad = True\n",
    "    \n",
    "                if has_valid_grad:\n",
    "                    grad_norm = math.sqrt(grad_norm)  # 计算整体L2范数\n",
    "        # 梯度裁剪（确保不超过max_norm=1.0）\n",
    "                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n",
    "                    clipped_grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n",
    "                else:\n",
    "                    grad_norm = 0.0  # 无有效梯度时设为0，避免NaN\n",
    "                    print(\"警告：当前批次所有参数梯度为0，跳过更新\")\n",
    "    \n",
    "                epoch_grad_norms.append(grad_norm)  # 记录有效梯度范数\n",
    "\n",
    "\n",
    "                \n",
    "\n",
    "\n",
    "                # 检查梯度是否为空\n",
    "                all_zero_grad = True\n",
    "                for param in model.parameters():\n",
    "                    if param.grad is not None and torch.sum(param.grad.abs()) > 0:\n",
    "                       all_zero_grad = False\n",
    "                       break\n",
    "                if all_zero_grad:\n",
    "                 empty_grad_count += 1\n",
    "\n",
    "\n",
    "\n",
    "                scaler.step(optimizer)\n",
    "                scaler.update()\n",
    "                  # 重置梯度\n",
    "                optimizer.zero_grad(set_to_none=True)\n",
    "             \n",
    "                scheduler_onecycle.step()  # 前10%的epoch使用OneCycleLR\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "            # **关键修改点**：合并GPU指标和损失指标\n",
    "            postfix_dict = {\n",
    "            \"total\": f\"{total_loss.item():.4f}\",\n",
    "            \"main\": f\"{loss_dict['main'].item():.4f}\",\n",
    "            \"part\": f\"{loss_dict['part'].item():.4f}\",\n",
    "            \"triplet\": f\"{loss_dict['triplet'].item():.4f}\",\n",
    "            \"lr\": f\"{optimizer.param_groups[0]['lr']:.6f}\",\n",
    "             \n",
    "        }\n",
    "        \n",
    "        \n",
    "        \n",
    "        # 更新进度条\n",
    "            train_bar.set_postfix(postfix_dict)\n",
    "\n",
    "\n",
    "         # epoch 结束后打印 Triplet 有效比例（平均值或最后一次值）\n",
    "        if epoch_valid_ratio:\n",
    "            avg_valid_ratio = sum(epoch_valid_ratio) / len(epoch_valid_ratio)\n",
    "            cos_info = \"无正样本对\"\n",
    "            if len(criterion.epoch_pos_cos_sim) > 0:\n",
    "                all_pos_cos_sim = np.concatenate(criterion.epoch_pos_cos_sim)\n",
    "                min_cos = all_pos_cos_sim.min()\n",
    "                max_cos = all_pos_cos_sim.max()\n",
    "                mean_cos = all_pos_cos_sim.mean()\n",
    "                cos_info = f\"正样本余弦相似度：min={min_cos:.2f}, max={max_cos:.2f}, mean={mean_cos:.2f}\"\n",
    "            else:\n",
    "                cos_info = \"无正样本对\"\n",
    "        # === 合并打印 ===\n",
    "\n",
    "            # 新增：计算梯度范数均值\n",
    "            grad_norm_mean = sum(epoch_grad_norms)/len(epoch_grad_norms) if epoch_grad_norms else 0.0\n",
    "\n",
    "            print(f\"Epoch {epoch+1} Triplet有效三元组平均比例: {avg_valid_ratio:.2%} | {cos_info} | 梯度范数均值: {clipped_grad_norm:.4f}\")\n",
    "        else:\n",
    "            print(f\"Epoch {epoch+1} 无有效三元组\") \n",
    "\n",
    "             # 清空缓存（关键！否则下个epoch数据会累积）\n",
    "        criterion.reset_epoch_cache()\n",
    "\n",
    "        # 在epoch结束时添加统计信息\n",
    "        current_total_steps = (epoch + 1) * actual_updates_per_epoch  # 计算总步数\n",
    "        print(f\"Epoch {epoch+1} 空梯度批次数: {empty_grad_count}, 当前已完成的总步数：{current_total_steps}  \")\n",
    "\n",
    "\n",
    "        \n",
    "        \n",
    "\n",
    "     \n",
    "\n",
    "        \n",
    "\n",
    "\n",
    "        # 每2个epoch验证一次\n",
    "        val_acc=0.0\n",
    "        if (epoch+1) % args.val_freq == 0:\n",
    "        \n",
    "            val_acc = validate(model,query_loader, gallery_loader,  device)\n",
    "            print(f\"Epoch {epoch+1} mAP: {val_acc:.2%}\")\n",
    "\n",
    "\n",
    "            # ↓↓↓ 新增梯度日志记录 ↓↓↓\n",
    "            log_gradients(model, epoch)  # 每个验证周期记录一次\n",
    "            # ↑↑↑ 新增结束 ↑↑↑\n",
    "\n",
    "\n",
    "\n",
    "        if  val_acc > best_acc:\n",
    "            best_acc = val_acc\n",
    "\n",
    "            torch.save( { 'epoch': epoch,\n",
    "                        'state_dict': model.state_dict(),\n",
    "                        'optimizer': optimizer.state_dict(),\n",
    "                        'best_acc': best_acc,  # ✅ 正确键名\n",
    "                        'args': args,\n",
    "                         'scaler': scaler.state_dict()  ,# 新增此行\n",
    "                         'scheduler': {\n",
    "                        'onecycle': scheduler_onecycle.state_dict() \n",
    "                    }\n",
    "                                     },\n",
    "                      os.path.join(args.save_dir, \"best_model.pth\"))\n",
    "            print(f\"保存最佳模型: mAP {best_acc:.2%}\")\n",
    "        \n",
    "        \n",
    "        #关键机制：\n",
    "        #梯度累积：模拟更大batch_size\n",
    "        #自动恢复：异常中断后可从中断点继续训练\n",
    "        #最优模型保存：仅保留验证集最佳模型\n",
    "\n",
    "    \n",
    "\n",
    "        \n",
    "#核心技术：\n",
    "#混合精度训练：float16计算加速，自动梯度缩放\n",
    "#OneCycle策略：动态调整学习率提高收敛速度\n",
    "#融合优化器：CUDA内核融合减少GPU操作开销\n",
    "        \n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    import torch.multiprocessing as mp\n",
    "    mp.freeze_support()  # required on Windows; must run before any other mp call\n",
    "    \n",
    "    # Force the 'spawn' start method (Windows has no fork; spawn also avoids\n",
    "    # inheriting CUDA state into DataLoader worker processes).\n",
    "    if sys.platform.startswith('win'):\n",
    "        mp.set_start_method('spawn', force=True)\n",
    "        print(\"Windows平台: 使用spawn启动方法\")\n",
    "    \n",
    "    args = get_args()\n",
    "    os.makedirs(args.save_dir, exist_ok=True)  # ensure the checkpoint dir exists\n",
    "    train(args)\n",
    "\n",
    " \n",
    "\n",
    "#训练流程：\n",
    "#数据准备：加载VeRi-776数据集，应用强数据增强\n",
    "#模型构建：初始化自定义Transformer，配置混合精度\n",
    "#训练配置：设置复合损失函数、优化策略、验证机制\n",
    "#核心训练：梯度累积更新参数，定期验证模型性能\n",
    "#模型保存：保存最佳模型和中断恢复点\n",
    "#性能优化：通过TF32、OneCycle策略等提升训练效率\n",
    "\n",
    "\n",
    "#代码完整实现了从数据加载到模型训练的完整闭环，包含多项工业级优化技巧，适合大规模车辆重识别任务的训练需求。\n",
    "\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bb7833f7",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# 新建train.py验证模型保存功能\n",
    "\n",
    "import sys\n",
    "import os\n",
    "from pathlib import Path\n",
    "\n",
    "# Resolve the project root and put it on sys.path so `src.*` imports work.\n",
    "# BUGFIX: __file__ is undefined inside a notebook kernel, so the original\n",
    "# Path(__file__) line raised NameError when this cell ran in Jupyter.\n",
    "try:\n",
    "    current_file = Path(__file__).resolve()\n",
    "    # Project root is two levels up (train.py is assumed to live in scripts/).\n",
    "    project_root = current_file.parent.parent\n",
    "except NameError:  # running interactively / in a notebook\n",
    "    project_root = Path.cwd().parent\n",
    "sys.path.insert(0, str(project_root))\n",
    "\n",
    "#作用：确保项目内自定义模块（如 Veri776Dataset）可被正确导入\n",
    "#关键点：通过路径解析动态添加项目根目录，避免绝对路径依赖\n",
    "import pynvml\n",
    "import torch\n",
    "import time\n",
    "from PIL import Image\n",
    "import argparse\n",
    "from tqdm import tqdm\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import transforms\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision.models import vit_b_16\n",
    "from einops import rearrange, repeat\n",
    "from collections import defaultdict\n",
    "from torch.utils.data import DataLoader  # 新增导入\n",
    "from torch.optim import AdamW\n",
    "import matplotlib.pyplot as plt\n",
    "from collections import Counter\n",
    "from sklearn.metrics import average_precision_score  # 新增导入\n",
    "import numpy as np\n",
    "from torch.optim.lr_scheduler import CosineAnnealingLR\n",
    "from src.datasets.veri776_dataset import Veri776Dataset\n",
    "from src.models.vehicle_transformer import VehicleTransformer\n",
    "import warnings\n",
    "from torch.optim.lr_scheduler import LambdaLR\n",
    "import multiprocessing\n",
    "import math\n",
    "\n",
    "\n",
    "\n",
    "warnings.filterwarnings(\"ignore\", category=FutureWarning)\n",
    "\n",
    "\n",
    "# 复合损失函数  ，主损失权重， 过小导致特征判断力不足\n",
    "\n",
    "class CombinedLoss(nn.Module):\n",
    "    def __init__(self, alpha=0.6, initial_margin=0.1, \n",
    "                 final_margin=0.5, total_epochs=100, \n",
    "                 threshold=0.3, initial_threshold=0.0, \n",
    "                 final_threshold=0.5 ,\n",
    "                 early_stop_epoch=20,\n",
    "                 weight_adaptive=True # 新增自适应权重参数\n",
    "                  \n",
    "                   ):\n",
    "        super().__init__()\n",
    "        self.ce = nn.CrossEntropyLoss(label_smoothing=0.2)\n",
    "        self.initial_margin = initial_margin\n",
    "        self.final_margin = final_margin\n",
    "        self.total_epochs = total_epochs\n",
    "        self.current_epoch = 0\n",
    "        self.triplet = nn.TripletMarginLoss(margin=initial_margin, reduction='none')  # 改为none计算每个三元组损失\n",
    "        self.initial_threshold = initial_threshold  # 训练初期阈值设为0.0\n",
    "        self.final_threshold = final_threshold\n",
    "        self.current_threshold = initial_threshold\n",
    "        self.epoch_pos_cos_sim = []\n",
    "        self.early_stop_epoch = early_stop_epoch  # 记录早停epoch\n",
    "        self.alpha=alpha\n",
    "        self.weight_adaptive = weight_adaptive\n",
    "\n",
    "        # 自适应权重网络（仅在weight_adaptive=True时启用）\n",
    "        if weight_adaptive:\n",
    "            self.weight_net = nn.Sequential(\n",
    "                nn.Linear(2, 16),\n",
    "                nn.ReLU(),\n",
    "                nn.Linear(16, 1),\n",
    "                nn.Sigmoid()\n",
    "            ).to(torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"))\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "    def set_current_epoch(self, epoch):\n",
    "        new_margin = self.initial_margin + (self.final_margin - self.initial_margin) * (epoch / self.total_epochs)#随训练进行，margin 从 0.1 逐步增至 0.5，增加分类难度。\n",
    "        self.triplet.margin = new_margin\n",
    "        self.current_threshold = self.initial_threshold + (self.final_threshold - self.initial_threshold) * (epoch / self.total_epochs)\n",
    "        self.epoch_pos_cos_sim = []\n",
    "\n",
    "    def reset_epoch_cache(self):\n",
    "        self.epoch_pos_cos_sim = []\n",
    "\n",
    "    def forward(self, outputs, targets, batch):\n",
    "        main_loss = self.ce(outputs[\"logits\"], targets)\n",
    "        part_loss = sum([self.ce(p, targets) for p in outputs[\"part_logits\"]])\n",
    "        bn_feature = outputs[\"bn_feature\"]\n",
    "        device = targets.device  \n",
    "        camera_ids = batch['camera_id'].to(device)\n",
    "        idx = torch.arange(len(targets), device=device)\n",
    "\n",
    "        # 计算余弦相似度和掩码\n",
    "        pairwise_dist = 1 - F.cosine_similarity(bn_feature.unsqueeze(1), bn_feature.unsqueeze(0), dim=2)\n",
    "        cos_sim = F.cosine_similarity(bn_feature.unsqueeze(1), bn_feature.unsqueeze(0), dim=2)\n",
    "        pos_mask = (\n",
    "            (targets.unsqueeze(1) == targets.unsqueeze(0)) &\n",
    "            (camera_ids.unsqueeze(1) != camera_ids.unsqueeze(0)) &\n",
    "            (idx != idx.unsqueeze(1))\n",
    "        )\n",
    "        neg_mask = targets.unsqueeze(1) != targets.unsqueeze(0)\n",
    "\n",
    "        with torch.no_grad():\n",
    "            rows, cols = torch.where(pos_mask)\n",
    "            pos_cos_sim = cos_sim[pos_mask]\n",
    "            if pos_cos_sim.numel() > 0:\n",
    "                self.epoch_pos_cos_sim.append(pos_cos_sim.cpu().numpy())\n",
    "            \n",
    "            # 处理正样本分组\n",
    "            if rows.numel() == 0:\n",
    "                valid_mask = torch.zeros(len(targets), dtype=torch.bool, device=device)\n",
    "            else:\n",
    "                pos_cos_per_anchor = []\n",
    "                for i in range(len(targets)):\n",
    "                    anchor_pos = (rows == i)\n",
    "                    pos_values = cos_sim[i, cols[anchor_pos]]\n",
    "                    pos_cos_per_anchor.append(pos_values)\n",
    "                pos_cos_per_anchor = torch.nn.utils.rnn.pad_sequence(\n",
    "                    pos_cos_per_anchor, batch_first=True, padding_value=-1\n",
    "                )\n",
    "                # 压缩为一维有效锚点掩码\n",
    "                has_positive = (pos_cos_per_anchor != -1).any(dim=1)  # [batch_size]\n",
    "                above_threshold = (pos_cos_per_anchor > self.current_threshold).any(dim=1)  # [batch_size]\n",
    "                \n",
    "                # 根据当前epoch动态调整阈值判断\n",
    "                if self.current_epoch < self.early_stop_epoch:\n",
    "                    valid_mask = has_positive  # 仅检查是否存在正样本\n",
    "                else:\n",
    "                    valid_mask = has_positive & above_threshold  # 组合条件\n",
    "                \n",
    "                \n",
    "\n",
    "            valid_ratio = valid_mask.float().mean()\n",
    "\n",
    "\n",
    "            \n",
    "\n",
    "            # 处理有效三元组\n",
    "            if valid_mask.sum() == 0:\n",
    "                tri_loss = torch.zeros(1, device=device)\n",
    "                tri_weight = torch.tensor(0.0, device=device)\n",
    "            else:\n",
    "            # 获取有效样本的局部索引（关键修正）\n",
    "                valid_indices = torch.where(valid_mask)[0]  # [M] 有效样本在原批次中的索引\n",
    "                M = valid_indices.size(0)  # 有效样本数\n",
    "\n",
    "                 # ✅ 新增：仅在有效样本范围内计算距离（减少计算量）\n",
    "                bn_feature_valid = bn_feature[valid_indices]  # [M, D]\n",
    "                pairwise_dist_valid = 1 - F.cosine_similarity(\n",
    "                bn_feature_valid.unsqueeze(1),  # [M, 1, D]\n",
    "                bn_feature_valid.unsqueeze(0),  # [1, M, D]\n",
    "                dim=2  # 计算M×M的余弦距离矩阵\n",
    "            )  # [M, M]\n",
    "\n",
    "                # ✅ 新增：有效样本内的正负掩码（基于有效样本的元数据）\n",
    "                targets_valid = targets[valid_indices]  # [M]\n",
    "                camera_ids_valid = camera_ids[valid_indices]  # [M]\n",
    "                idx_valid = torch.arange(M, device=device)  # [M]（有效样本的局部索引）\n",
    "\n",
    "                pos_mask_valid = (\n",
    "                (targets_valid.unsqueeze(1) == targets_valid.unsqueeze(0)) &  # [M, M]\n",
    "                (camera_ids_valid.unsqueeze(1) != camera_ids_valid.unsqueeze(0)) &  # [M, M]\n",
    "                (idx_valid != idx_valid.unsqueeze(1))  # [M, M]（排除自对比）\n",
    "            )\n",
    "                neg_mask_valid = targets_valid.unsqueeze(1) != targets_valid.unsqueeze(0)  # [M, M]\n",
    "            \n",
    "\n",
    "\n",
    "            \n",
    "            # 有效样本内的难例挖掘（直接基于局部距离矩阵）\n",
    "            # 最难负样本：在负样本中找距离最大（最相似）的\n",
    "                neg_dist_valid = pairwise_dist_valid.clone()  # [M, M]\n",
    "                neg_dist_valid[~neg_mask_valid] = -float('inf')  # 无效位置设为负无穷\n",
    "                hardest_negative_local = neg_dist_valid.argmax(dim=1)  # [M]（每个锚点的最难负样本局部索引）\n",
    "\n",
    "            # 最难正样本：在正样本中找距离最小（最不相似）的\n",
    "                pos_dist_valid = pairwise_dist_valid.clone()  # [M, M]\n",
    "                pos_dist_valid[~pos_mask_valid] = float('inf')  # 无效位置设为正无穷\n",
    "                hardest_positive_local = pos_dist_valid.argmin(dim=1)  # [M]（每个锚点的最难正样本局部索引）\n",
    "\n",
    "            # 防御性编程：确保索引在有效范围内（M-1为最大局部索引）\n",
    "                hardest_positive_local = hardest_positive_local.clamp(0, M-1)\n",
    "                hardest_negative_local = hardest_negative_local.clamp(0, M-1)\n",
    "\n",
    "            # 提取有效三元组特征（使用局部索引）\n",
    "                valid_anchor = bn_feature_valid  # [M, D]（有效样本的锚点特征）\n",
    "                valid_positive = bn_feature_valid[hardest_positive_local]  # [M, D]（最难正样本特征）\n",
    "                valid_negative = bn_feature_valid[hardest_negative_local]  # [M, D]（最难负样本特征）\n",
    "\n",
    "            \n",
    "            # 维度校验（关键）\n",
    "                assert valid_anchor.shape == valid_positive.shape == valid_negative.shape, \\\n",
    "                f\"三元组维度不匹配: {valid_anchor.shape}, {valid_positive.shape}, {valid_negative.shape}\"\n",
    "            \n",
    "                 # 计算每个三元组的距离\n",
    "                valid_positive_dist = 1 - F.cosine_similarity(valid_anchor, valid_positive, dim=1)\n",
    "                valid_negative_dist = 1 - F.cosine_similarity(valid_anchor, valid_negative, dim=1)\n",
    "\n",
    "            # 计算每个三元组的损失\n",
    "                tri_loss_per = self.triplet(valid_anchor, valid_positive, valid_negative)\n",
    "            \n",
    "            # 自适应权重计算\n",
    "                if self.weight_adaptive and M > 0:\n",
    "                # 基于三元组难度计算权重\n",
    "                    difficulty = (valid_negative_dist - valid_positive_dist) / self.triplet.margin\n",
    "                    difficulty = difficulty.clamp(min=0, max=1)\n",
    "                    cos_sim_pos = F.cosine_similarity(valid_anchor, valid_positive, dim=1)\n",
    "                    cos_sim_neg = F.cosine_similarity(valid_anchor, valid_negative, dim=1)\n",
    "                \n",
    "                # 特征对输入网络计算权重\n",
    "                    weight_input = torch.stack([difficulty, cos_sim_pos - cos_sim_neg], dim=1)\n",
    "                    weights = self.weight_net(weight_input).squeeze()  # [M]\n",
    "                    tri_loss = (tri_loss_per * weights).mean()\n",
    "                else:\n",
    "                    tri_loss = tri_loss_per.mean()\n",
    "                \n",
    "                \n",
    "                \n",
    "                # 关键修改：使用PyTorch函数保持tri_weight为张量\n",
    "                tri_weight = torch.clamp(0.5 * valid_ratio, min=0.5)\n",
    "\n",
    "\n",
    "        total_loss = self.alpha * main_loss + 0.01 * part_loss + tri_weight * tri_loss\n",
    "        return {\n",
    "            \"total\": total_loss,\n",
    "            \"main\": main_loss,\n",
    "            \"part\": part_loss,\n",
    "            \"triplet\": tri_loss,\n",
    "            \"tri_weight\": tri_weight,\n",
    "            \"valid_ratio\": valid_ratio,\n",
    "        }\n",
    "#设计亮点：\n",
    "#标签平滑：防止模型对标签过拟合\n",
    "#动态难例挖掘：自动选择最难区分的负样本增强特征判别性\n",
    "#多损失融合：分类损失 + 部件分类+ 三元组损失\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def euclidean_dist(x, y):\n",
    "    \"\"\"\n",
    "    计算欧氏距离矩阵\n",
    "    x: (n, feat_dim)\n",
    "    y: (m, feat_dim)\n",
    "    return: (n, m) \n",
    "    \"\"\"\n",
    "    x2 = torch.sum(x ** 2, dim=1).unsqueeze(1)\n",
    "    y2 = torch.sum(y ** 2, dim=1).unsqueeze(0)\n",
    "    dist = x2 + y2 - 2.0 * torch.mm(x, y.transpose(0, 1))\n",
    "    return torch.sqrt(torch.clamp(dist, min=1e-12))\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "#mAP计算函数\n",
    "# 修改calc_mAP函数，使用更高效的计算方式\n",
    "\n",
    "def calc_mAP(query_feats, query_ids, query_cams, gallery_feats, gallery_ids, gallery_cams):\n",
    "    \"\"\"Memory-friendly mAP for re-ID evaluation.\n",
    "\n",
    "    Queries are processed in blocks so the similarity matrix never exceeds\n",
    "    block_size x len(gallery). A gallery image counts as a positive only if\n",
    "    it shares the query's vehicle id AND comes from a different camera.\n",
    "\n",
    "    Args:\n",
    "        query_feats / gallery_feats: [Nq, D] / [Ng, D] feature tensors.\n",
    "        query_ids / gallery_ids: per-image vehicle ids (sequence-like).\n",
    "        query_cams / gallery_cams: per-image camera ids (sequence-like).\n",
    "    Returns:\n",
    "        float mean AP over all queries (queries with no valid positive score 0).\n",
    "    \"\"\"\n",
    "    # BUGFIX: np.trapezoid only exists in NumPy >= 2.0; fall back to np.trapz.\n",
    "    _trapz = getattr(np, \"trapezoid\", np.trapz)\n",
    "\n",
    "    query_ids = np.asarray(query_ids)\n",
    "    query_cams = np.asarray(query_cams)\n",
    "    gallery_ids = np.asarray(gallery_ids)\n",
    "    gallery_cams = np.asarray(gallery_cams)\n",
    "\n",
    "    aps = []\n",
    "    block_size = 32  # tune to available memory\n",
    "    gallery_feats = gallery_feats.float()  # ensure float32 for the matmul\n",
    "    for start in tqdm(range(0, len(query_feats), block_size),\n",
    "                      desc=\"计算mAP\", leave=False):\n",
    "        end = min(start + block_size, len(query_feats))\n",
    "        q_block = query_feats[start:end].float()\n",
    "\n",
    "        # BUGFIX: move to CPU before .numpy() -- it raises on CUDA tensors.\n",
    "        sim_block = torch.mm(q_block, gallery_feats.T).cpu().numpy()\n",
    "\n",
    "        for j in range(sim_block.shape[0]):\n",
    "            orig_idx = start + j\n",
    "            sim = sim_block[j]\n",
    "\n",
    "            # Valid positives: same vehicle id, different camera.\n",
    "            valid_pos_mask = (gallery_ids == query_ids[orig_idx]) & \\\n",
    "                             (gallery_cams != query_cams[orig_idx])\n",
    "            y_true = valid_pos_mask.astype(np.int32)\n",
    "            n_pos = y_true.sum()\n",
    "            if n_pos == 0:\n",
    "                aps.append(0.0)  # no cross-camera positive: AP defined as 0\n",
    "                continue\n",
    "\n",
    "            # Rank the gallery by descending similarity.\n",
    "            order = np.argsort(sim)[::-1]\n",
    "            y_true_sorted = y_true[order]\n",
    "\n",
    "            # Cumulative precision / recall along the ranking.\n",
    "            tp = np.cumsum(y_true_sorted)\n",
    "            precision = tp / np.arange(1, len(tp) + 1)\n",
    "            recall = tp / n_pos\n",
    "            aps.append(_trapz(precision, recall))\n",
    "\n",
    "        # Release block memory promptly (no-op on CPU-only runs).\n",
    "        del q_block, sim_block\n",
    "        torch.cuda.empty_cache()\n",
    "\n",
    "    return float(np.mean(aps)) if aps else 0.0\n",
    "\n",
    "\n",
    "#创新点：\n",
    "#分块处理：将大型矩阵运算分解为小块，避免OOM（内存不足）\n",
    "#余弦相似度：更鲁棒的度量方式\n",
    "#NaN检测：防止无效特征导致计算崩溃\n",
    "#在validate函数中，调用了calc_mAP(feats, np.array(pids), feats, np.array(pids))，这意味着将同一个数据集同时作为query和gallery，这会使得每个样本都能匹配到自己，导致mAP虚高。\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# 添加验证循环（防止过拟合）\n",
    "def validate(model,  query_loader, gallery_loader,device):    #train.py中的验证只是用来监控训练过程\n",
    "    model.eval()\n",
    "     # === 提取Query特征和元数据 ===\n",
    "    query_ids, query_cams = [], []\n",
    "    query_feats_list = []\n",
    "    gallery_ids, gallery_cams = [], []\n",
    "    gallery_feats_list = []\n",
    "\n",
    "    # === 新增：收集查询集和测试集的vehicle_id ===\n",
    "    all_query_ids = []\n",
    "    all_gallery_ids = []\n",
    "    \n",
    "\n",
    "  # 分块收集特征并直接存储在CPU（关键）\n",
    "\n",
    "    with torch.no_grad():      # 特征收集与内存优化\n",
    "        for batch in query_loader:\n",
    "            images = batch[\"image\"].to(device, non_blocking=True)\n",
    "            outputs = model(images)\n",
    "\n",
    "\n",
    "            query_feats_list.append(outputs[\"bn_feature\"].cpu())\n",
    "            query_ids.extend(batch[\"vehicle_id\"].tolist())\n",
    "            query_cams.extend(batch[\"camera_id\"].tolist())\n",
    "    \n",
    "\n",
    "            all_query_ids.extend(batch[\"vehicle_id\"].tolist())  # 收集所有查询ID\n",
    "           \n",
    "    \n",
    "\n",
    "\n",
    "    # === 提取Gallery特征 ===\n",
    "    gallery_ids, gallery_cams = [], []\n",
    "    gallery_feats = []\n",
    "    with torch.no_grad():\n",
    "        for batch in gallery_loader:\n",
    "            images = batch[\"image\"].to(device, non_blocking=True)\n",
    "            outputs = model(images)\n",
    "            gallery_feats_list.append(outputs[\"bn_feature\"].cpu())\n",
    "            gallery_ids.extend(batch[\"vehicle_id\"].tolist())\n",
    "            gallery_cams.extend(batch[\"camera_id\"].tolist())\n",
    "\n",
    "            all_gallery_ids.extend(batch[\"vehicle_id\"].tolist())  # 收集所有测试ID\n",
    "\n",
    "            \n",
    "   # 在CPU合并特征\n",
    "    query_feats = torch.cat(query_feats_list, dim=0)\n",
    "    gallery_feats = torch.cat(gallery_feats_list, dim=0)\n",
    "\n",
    "\n",
    "      \n",
    "    valid_mask = torch.any(query_feats != 0, dim=1)  # 过滤全零向量\n",
    "    valid_feats = query_feats[valid_mask]\n",
    "    if valid_feats.numel() == 0:\n",
    "        norm = 0.0\n",
    "    else:\n",
    "        norm = torch.norm(valid_feats, dim=1).mean().item()\n",
    "    print(f\"查询特征平均范数: {norm:.4f}\")  # 应接近1.0\n",
    "    \n",
    "\n",
    "\n",
    "\n",
    "\n",
    "    # === 新增：检查ID交集 ===\n",
    "    common_ids = np.intersect1d(all_query_ids, all_gallery_ids)\n",
    "   # print(f\"查询集与测试集共有车辆ID数: {len(common_ids)} / {len(set(all_query_ids))}\")\n",
    "    assert len(common_ids) > 0, \"查询集与测试集无交集车辆ID，数据集划分错误！\"\n",
    "\n",
    "\n",
    "\n",
    "    # === 检查相机ID交集（新增代码③） ===\n",
    "    common_cams = set(query_cams) & set(gallery_cams)  # 计算相机ID交集\n",
    "    if not common_cams:\n",
    "        raise ValueError(\"查询集与测试集无跨相机样本，无法计算mAP\")\n",
    "\n",
    "\n",
    "\n",
    "    \n",
    "    \n",
    "     # 计算mAP\n",
    "    return calc_mAP(query_feats, query_ids, query_cams, gallery_feats, gallery_ids, gallery_cams)\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "#关键优化：\n",
    "#特征类型转换：float32比默认float64节省50%内存\n",
    "#及时释放显存：del images防止显存泄漏\n",
    "\n",
    "\n",
    "# 配置参数\n",
    "def get_args():\n",
    "    parser = argparse.ArgumentParser()\n",
    "    parser.add_argument(\"--batch_size\", type=int, default=16)   #批次大小， 过小导致统计量不准\n",
    "    parser.add_argument(\"--lr\", type=float, default=5e-4)\n",
    "    parser.add_argument(\"--epochs\", type=int, default=100)\n",
    "    parser.add_argument(\"--val_freq\", type=int, default=2)  # 每2个epoch验证一次\n",
    "    # 添加缺失的save_dir参数\n",
    "    parser.add_argument(\"--save_dir\", type=str, default=\"checkpoints\") # 恢复训练路径\n",
    "    parser.add_argument(\"--resume\", type=str, default=\"\",\n",
    "                        help=\"checkpoint path to resume\") \n",
    "    parser.add_argument(\"--pct_start\", type=float, default=0.3, help=\"学习率上升阶段占比\") \n",
    "    \n",
    "    return parser.parse_args()\n",
    "\n",
    "\n",
    "# 在train.py中添加测试代码\n",
    "# 修改后\n",
    "def test_patch_embed():\n",
    "\n",
    "    test_model = VehicleTransformer(\n",
    "        num_classes=776,\n",
    "        img_size=(224, 224),\n",
    "        patch_sizes=[16, 8],  # 修正参数名并使用列表\n",
    "        local_parts=7,  # 需要与模型配置一致\n",
    "        num_heads=4,  # 保持head_dim=32\n",
    "        embed_dim=128\n",
    "    )\n",
    "    dummy_input = torch.randn(1, 3,224, 224)\n",
    "    \n",
    "    # 获取全局特征和局部特征\n",
    "    global_feat, local_feats = test_model.forward_features(dummy_input)\n",
    "    \n",
    "    # 打印关键维度\n",
    "    print(\"\\n=== 特征维度验证 ===\")\n",
    "    print(f\"全局特征维度: {global_feat.shape}\")        \n",
    "    print(f\"局部特征数量: {len(local_feats)}\")         # 应等于 local_parts参数值\n",
    "    print(f\"单个局部特征维度: {local_feats[0].shape}\") \n",
    "\n",
    "\n",
    "# 定义全局collate函数,使数据结构对齐，避免类型错误\n",
    "def custom_collate(batch):\n",
    "    return {\n",
    "        'image': torch.stack([item['image'] for item in batch]),\n",
    "        'class_id': torch.tensor([item['class_id'] for item in batch]),\n",
    "        'vehicle_id': torch.tensor([item['vehicle_id'] for item in batch]),\n",
    "        'camera_id': torch.tensor([item['camera_id'] for item in batch])\n",
    "    }\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# 训练函数\n",
    "def train(args):\n",
    "    torch.set_num_threads(multiprocessing.cpu_count())  # 自动使用全部CPU核心\n",
    "    # ===== 新增梯度累积参数 =====\n",
    "    accum_steps = 2  # 添加到train函数开头参数部分\n",
    "\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "    \n",
    "\n",
    "    # 在train函数开头添加\n",
    "    torch.backends.cuda.matmul.allow_tf32 = True  # 启用TF32计算 ，保持精度前提下加速矩阵运算\n",
    "    torch.backends.cudnn.benchmark = True  # 启用cuDNN自动优化器\n",
    "    torch.cuda.empty_cache()  # 清空缓存\n",
    "\n",
    "\n",
    "\n",
    "    # 基础transform（共享部分）\n",
    "    base_transform = transforms.Compose([\n",
    "    transforms.Resize((256, 256)),\n",
    "    transforms.CenterCrop(224), \n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
    "  ])\n",
    "    \n",
    "    train_set_base = Veri776Dataset(mode=\"train\", transform=base_transform)\n",
    "    total_train_samples = len(train_set_base)  # 训练集总样本数\n",
    "    del train_set_base  # 释放内存\n",
    "    actual_updates_per_epoch = math.ceil(total_train_samples / (args.batch_size * accum_steps))  # 替换为 total_train_samples\n",
    "    total_steps = args.epochs * actual_updates_per_epoch\n",
    "    \n",
    "\n",
    "    # 数据增强\n",
    "    query_transform = base_transform\n",
    "    test_transform = base_transform\n",
    "\n",
    "    \n",
    "   \n",
    "\n",
    "    # 正确修改（使用查询集作为验证参考）\n",
    "    query_set = Veri776Dataset(mode=\"query\", transform=query_transform)  # Query\n",
    "    gallery_set = Veri776Dataset(mode=\"test\", transform=test_transform) # Gallery\n",
    "\n",
    "    \n",
    " \n",
    "    # 创建 Query 和 Gallery 的 DataLoader\n",
    "    query_loader = DataLoader(query_set, \n",
    "                              batch_size=args.batch_size ,  \n",
    "                              shuffle=False,\n",
    "    num_workers=4,\n",
    "     pin_memory=True,\n",
    "    drop_last=False,\n",
    "        collate_fn=custom_collate,  # 使用全局函数\n",
    "        \n",
    "    prefetch_factor=2,  # 每个worker预取2个批次\n",
    "    \n",
    "        )\n",
    "\n",
    "\n",
    "    gallery_loader = DataLoader(gallery_set, \n",
    "                                batch_size=args.batch_size,\n",
    "                                 shuffle=False,\n",
    "    num_workers=4,\n",
    "     pin_memory=True,\n",
    "    drop_last=False,\n",
    "   collate_fn=custom_collate,  # 使用全局函数     \n",
    "    prefetch_factor=2,  # 每个worker预取2个批次\n",
    "        \n",
    "   )# 添加预取\n",
    "\n",
    "\n",
    "    # 调整混合精度训练\n",
    "    scaler = torch.cuda.amp.GradScaler(enabled=True)  # 显式启用\n",
    "\n",
    "\n",
    "# 新增梯度检查点（节省显存）\n",
    "    from torch.utils.checkpoint import checkpoint_sequential\n",
    "    \n",
    "    \n",
    "\n",
    "    def apply_checkpoint(module, inp):\n",
    "    # 确保输入张量启用梯度\n",
    "        if not inp.requires_grad:\n",
    "            inp.requires_grad_(True)  # 临时启用梯度\n",
    "    \n",
    "    # 显式指定use_reentrant参数，并仅对需要梯度的模块应用检查点\n",
    "        if isinstance(module, nn.TransformerEncoder) and inp.requires_grad:\n",
    "        # 分解TransformerEncoder为层列表\n",
    "            encoder_layers = list(module.layers)\n",
    "            return checkpoint_sequential(encoder_layers, 2, inp, use_reentrant=False)\n",
    "        return module(inp)\n",
    "\n",
    "    # 模型初始化\n",
    "    model = VehicleTransformer(\n",
    "        num_classes=776,\n",
    "    img_size=(224, 224),    # 输入尺寸\n",
    "    patch_sizes=[16, 8],  # 修正参数名并使用列表\n",
    "    local_parts=7,\n",
    "    embed_dim=128,          # 特征维度， 过低导致信息丢失\n",
    "    depth=4,                # 层数  \n",
    "    num_heads=4,            # 注意力头\n",
    "    mlp_ratio=4,\n",
    "    \n",
    "    pretrained=False,\n",
    "    use_checkpoint=True , # 启用梯度检查点\n",
    "    apply_checkpoint=apply_checkpoint  # 传入apply_checkpoint函数\n",
    "    \n",
    "    ).to(device)\n",
    "\n",
    "    model = model.to(memory_format=torch.channels_last)  # 新增此行\n",
    "\n",
    "\n",
    "    # 定义新的前向传播函数以应用检查点（直接修改模型的forward方法）\n",
    "    def forward_with_checkpoint(self, x):\n",
    "        x = self.grad_check(x)\n",
    "    # 特征提取部分应用检查点\n",
    "        global_feats, multi_scale_local = self.forward_features(x)\n",
    "    \n",
    "    # 特征融合和分类部分（不应用检查点）\n",
    "        all_feats = []\n",
    "        for i in range(len(global_feats)):\n",
    "            all_feats.append(global_feats[i])\n",
    "            all_feats.extend(multi_scale_local[i])\n",
    "    \n",
    "        fused = self.fusion(global_feats=global_feats, local_feats=multi_scale_local)\n",
    "        fused_bn = self.bn_neck(fused)\n",
    "        fused_bn_normalized = F.normalize(fused_bn, dim=1)\n",
    "        logits = self.head(fused_bn)\n",
    "    \n",
    "        part_logits = []\n",
    "        for i, scale_local in enumerate(multi_scale_local):\n",
    "            for p, feat in enumerate(scale_local):\n",
    "                part_logits.append(self.part_classifiers[i][p](feat))\n",
    "    \n",
    "        return {\n",
    "        'global': global_feats,\n",
    "        'local': multi_scale_local,\n",
    "        'fused': fused,\n",
    "        'logits': logits,\n",
    "        'part_logits': part_logits,\n",
    "        'bn_feature': fused_bn_normalized,\n",
    "    }\n",
    "    \n",
    "\n",
    "    model.forward = forward_with_checkpoint.__get__(model, type(model))\n",
    "\n",
    "\n",
    "\n",
    "    def log_gradients(model, epoch):\n",
    "        \"\"\"梯度分布记录函数\"\"\"\n",
    "        grad_info = []\n",
    "        for name, param in model.named_parameters():\n",
    "            if param.grad is not None:\n",
    "                grad_norm = param.grad.data.norm(2).item()\n",
    "                grad_info.append(f\"{name[:15]:<15} : {grad_norm:.4e}\")\n",
    "        with open(\"gradient_log.txt\", \"a\") as f:\n",
    "            f.write(f\"\\n=== Epoch {epoch+1} 梯度分布 ===\\n\")\n",
    "            f.write(\"\\n\".join(grad_info))\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "    \n",
    "    # 优化器，修改为（添加权重衰减和梯度裁剪）\n",
    "    optimizer = AdamW(\n",
    "        model.parameters(), \n",
    "         lr=args.lr,                  #学习率， 过高引发梯度爆炸\n",
    "        weight_decay=0.001,  # ✅ 新增权重衰减\n",
    "        fused=True\n",
    ")                 # 启用融合优化\n",
    "\n",
    "\n",
    "# === 关键插入位置：计算修正后的total_steps ===\n",
    "    # 计算实际总步数（考虑梯度累积）\n",
    "    train_set = Veri776Dataset(mode=\"train\", transform=base_transform)  # 临时创建一次数据集以获取总样本数\n",
    "    total_samples = len(train_set)\n",
    "\n",
    "    actual_updates_per_epoch = math.ceil(total_samples / (args.batch_size * accum_steps))  # 考虑梯度累积\n",
    "    total_steps = args.epochs * actual_updates_per_epoch  # 修正后的总步数\n",
    "    del train_set  # 释放临时数据集\n",
    "\n",
    "   \n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "  # 更换学习率调度器（新增）\n",
    "    from torch.optim.lr_scheduler import OneCycleLR\n",
    "  \n",
    "\n",
    "    scheduler_onecycle = OneCycleLR(optimizer, \n",
    "                      max_lr=args.lr,\n",
    "                      total_steps=total_steps,  # 使用修正后的总步数\n",
    "                      pct_start=args.pct_start, # 学习率上升阶段\n",
    "                      div_factor=10,  #初始学习率=max_lr/div_factor\n",
    "                      final_div_factor=1e4,  # 最终学习率 = max_lr / final_div_factor\n",
    "                      anneal_strategy='cos')  # 使用余弦退火策略          \n",
    "    \n",
    "    \n",
    "\n",
    "\n",
    "     # 损失函数\n",
    "    criterion = CombinedLoss(alpha=0.6,\n",
    "        initial_margin=0.1,    \n",
    "        final_margin=0.5,      \n",
    "        total_epochs=args.epochs , # 总epoch数（与训练配置一致）\n",
    "        early_stop_epoch=20,  # 前20个epoch不设阈值\n",
    "         weight_adaptive=True  # 启用自适应权重\n",
    "        )  # 显式传递标量参数\n",
    "\n",
    "    # === 新增检查点恢复逻辑 ===\n",
    "    start_epoch = 0\n",
    "    best_acc = 0.0\n",
    "    # 在训练循环前添加自动保存路径\n",
    "    checkpoint_dir = Path(args.save_dir) / \"interrupt\"\n",
    "    checkpoint_dir.mkdir(parents=True, exist_ok=True)\n",
    "\n",
    "\n",
    "   \n",
    "    if args.resume and Path(args.resume).exists():\n",
    "        checkpoint = torch.load(args.resume, map_location=device)\n",
    "        model.load_state_dict(checkpoint['state_dict'])\n",
    "        optimizer.load_state_dict(checkpoint['optimizer'])\n",
    "\n",
    "        # 新增：加载损失函数状态\n",
    "        if 'criterion' in checkpoint:\n",
    "            criterion.load_state_dict(checkpoint['criterion'])\n",
    "            print(\"成功加载损失函数状态\")\n",
    "\n",
    "\n",
    "\n",
    "        start_epoch = checkpoint['epoch']\n",
    "        best_acc = checkpoint.get('best_acc', 0.0)  # 安全获取最佳acc\n",
    "        scaler.load_state_dict(checkpoint.get('scaler', scaler.state_dict()))  # 安全加载scaler\n",
    "        \n",
    "         # 安全加载调度器状态\n",
    "        if 'scheduler' in checkpoint:\n",
    "            scheduler_state = checkpoint['scheduler']\n",
    "            if 'onecycle' in scheduler_state:\n",
    "                scheduler_onecycle.load_state_dict(scheduler_state['onecycle'])\n",
    "                print(\"成功加载OneCycleLR调度器状态\")\n",
    "            else:\n",
    "                print(\"未检测到有效调度器状态，使用初始配置\")\n",
    "        \n",
    "        print(f\"恢复训练：从epoch {start_epoch}开始，历史最佳mAP {best_acc:.2%}\")\n",
    "\n",
    "    \n",
    "\n",
    "    # 设置环境变量（在训练开始前）\n",
    "    \n",
    "    os.environ[\"PYTORCH_CUDA_ALLOC_CONF\"] = \"expandable_segments:True\"\n",
    "\n",
    "    \n",
    "    # === 这里就是训练代码开头的最佳位置 ===\n",
    "    print(\"\\n=== 训练配置摘要 ===\")\n",
    "    print(f\"输入尺寸: {model.img_size}\")\n",
    "    print(f\"批次大小: {args.batch_size}\")\n",
    "    print(f\"初始学习率: {args.lr}\")\n",
    "    print(f\"嵌入维度: {model.embed_dim}\")\n",
    "    print(f\"Transformer深度: {model.depth}\")\n",
    "    print(f\"设备: {device}\")\n",
    "    print(f\"训练样本数: {total_train_samples}\")  # 使用循环外的 total_train_samples\n",
    "    print(f\"验证样本数: {len(query_set)}\")\n",
    "    print(f\"混合精度训练: {scaler.is_enabled()}\")\n",
    "    print(\"=====================\\n\")\n",
    "    print(f\"Total training steps: {total_steps}, pct_start: {args.pct_start:.2f}\")  # 新增行\n",
    "   \n",
    "\n",
    "\n",
    "    \n",
    "\n",
    "    \n",
    "\n",
    "    # 训练循环中\n",
    "    for epoch in range(start_epoch, args.epochs):\n",
    "        empty_grad_count = 0  \n",
    "        model.train()\n",
    " \n",
    "        # 在每个epoch开始时更新当前epoch，动态调整margin\n",
    "        epoch_valid_ratio = []  # 存储每个批次的有效比例\n",
    "        epoch_grad_norms = []    # 新增：存储每个梯度更新的范数\n",
    "        criterion.set_current_epoch(epoch)  # 传入当前epoch（从0开始）\n",
    "\n",
    "        if epoch < 20:\n",
    "            train_transform = transforms.Compose([\n",
    "        transforms.Resize((256, 256)),\n",
    "        transforms.RandomCrop((224, 224)),\n",
    "        \n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
    "\n",
    "    ])\n",
    "            \n",
    "\n",
    "        elif epoch < 40:\n",
    "        # 阶段2：初级增强\n",
    "            train_transform = transforms.Compose([\n",
    "            transforms.Resize((256, 256)),\n",
    "            transforms.RandomCrop((224, 224)),\n",
    "            transforms.RandomHorizontalFlip(p=0.2),       # 低概率翻转\n",
    "            transforms.ColorJitter(brightness=0.1, contrast=0.1),  # 轻微颜色扰动\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
    "        ])\n",
    "            \n",
    "        elif epoch < 60:  # 阶段3：中等强度增强\n",
    "            train_transform = transforms.Compose([\n",
    "            transforms.Resize((256, 256)),\n",
    "            transforms.RandomCrop((224, 224)),\n",
    "            transforms.RandomHorizontalFlip(p=0.4),  # 翻转概率提升\n",
    "            transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),  # 颜色扰动（低强度）\n",
    "            transforms.RandomGrayscale(p=0.05),  # 灰度化（低概率）\n",
    "            transforms.RandomApply([transforms.GaussianBlur(kernel_size=3, sigma=(0.1, 1.0))], p=0.2),  # 高斯模糊（低概率）\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n",
    "        ])\n",
    "\n",
    "        else:\n",
    "             train_transform = transforms.Compose([\n",
    "            transforms.Resize((256, 256)),\n",
    "            transforms.RandomCrop((224, 224)),\n",
    "            transforms.RandomHorizontalFlip(p=0.6),  # 高概率翻转\n",
    "            transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3),  # 颜色扰动（高强度）\n",
    "            transforms.RandomGrayscale(p=0.1),  # 灰度化（中等概率）\n",
    "            transforms.RandomApply([transforms.GaussianBlur(kernel_size=3, sigma=(0.1, 2.0))], p=0.3),  # 高斯模糊（中等概率）\n",
    "            transforms.RandomRotation(10),  # 小角度旋转\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n",
    "            transforms.RandomErasing(p=0.3, scale=(0.02, 0.2), ratio=(0.3, 2.0))  # 擦除（降低scale上限）\n",
    "        ])\n",
    "            \n",
    "                 # 数据集\n",
    "        train_set = Veri776Dataset(mode=\"train\", transform=train_transform)\n",
    "\n",
    "        \n",
    "   \n",
    "        train_loader = DataLoader(\n",
    "         train_set,\n",
    "    batch_size=args.batch_size,\n",
    "    shuffle=True,\n",
    "    num_workers=4,  # 设置为0时禁用多进程\n",
    "    pin_memory=True,\n",
    "    drop_last=True,\n",
    "    collate_fn=custom_collate  ,\n",
    "    prefetch_factor=2,  # 每个worker预取2个批次\n",
    "    \n",
    ")\n",
    "        current_total_steps = (epoch + 1) * actual_updates_per_epoch  # 无需重新计算样本数\n",
    "\n",
    "        scheduler_state = {}\n",
    "        \n",
    "        scheduler_state['onecycle'] = scheduler_onecycle.state_dict()\n",
    "\n",
    "        \n",
    "\n",
    "        # === 每个epoch开始前保存恢复点 ===\n",
    "        torch.save({\n",
    "        'epoch': epoch,\n",
    "        'state_dict': model.state_dict(),\n",
    "        'optimizer': optimizer.state_dict(),\n",
    "        'total_steps': total_steps,  # 可选：保存总步数用于恢复\n",
    "        'best_acc': best_acc,\n",
    "        'args': args,\n",
    "        'scaler': scaler.state_dict() , # 新增此行\n",
    "        'scheduler': scheduler_state  # 空字典或有效状态字典\n",
    "                \n",
    "            \n",
    "     }, checkpoint_dir / \"last_checkpoint.pth\")  # 保存last_checkpoint.pth\n",
    "\n",
    "\n",
    "        # 每个epoch开始前释放缓存\n",
    "        torch.cuda.empty_cache()\n",
    "\n",
    "         # 添加enumerate获取batch_idx\n",
    "        train_bar = tqdm(\n",
    "                         enumerate(train_loader), \n",
    "                         total=len(train_loader),\n",
    "                         desc=f\"Epoch {epoch+1}/{args.epochs}\".ljust(15),  # 固定描述长度\n",
    "                         postfix={},  # 清空postfix或保留其他自定义信息\n",
    "                         position=0,      # 新增：固定位置\n",
    "                         leave=True,      # 新增：保留进度条                      \n",
    "                         mininterval=1,   # 降低刷新频率到0.5秒\n",
    "                         maxinterval=5,\n",
    "                         smoothing=0.1,\n",
    "                         dynamic_ncols=True,\n",
    "            bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'\n",
    "      )\n",
    "         \n",
    "        \n",
    "        for batch_idx, batch in train_bar:\n",
    "            images = batch[\"image\"].to(device)\n",
    "            targets = batch[\"class_id\"].to(device) # ✅ 删除梯度清零\n",
    "            optimizer.zero_grad(set_to_none=True)\n",
    "            \n",
    "            \n",
    "     \n",
    "            # 前向传播部分\n",
    "            with torch.cuda.amp.autocast(dtype=torch.float16, enabled=True):  # 显式启用混合精度\n",
    "                outputs = model(images)\n",
    "                \n",
    "                # 训练循环中使用\n",
    "                loss_dict = criterion(outputs, targets, batch)  # 正确传递 batch 参数\n",
    "                \n",
    "\n",
    "\n",
    "                total_loss = loss_dict[\"total\"]  # 用于反向传播的总损失\n",
    "\n",
    "                # 收集有效比例\n",
    "                epoch_valid_ratio.append(loss_dict[\"valid_ratio\"].item())\n",
    "\n",
    "                # 添加loss缩放保护\n",
    "                if not torch.isfinite(total_loss):\n",
    "                  #  print(f\"检测到非有限loss值: {total_loss.item()}, 跳过当前批次\")\n",
    "                    \n",
    "                    continue  # ✅ 跳过问题批次\n",
    "            \n",
    "             # 梯度累积：总损失需按累积步数缩放\n",
    "            scaled_loss = total_loss / accum_steps\n",
    "            scaler.scale(scaled_loss).backward()\n",
    "            \n",
    "            # 只有当累积步数达到设定值时才更新参数\n",
    "            if (batch_idx + 1) % accum_steps == 0:\n",
    "             #梯度裁剪\n",
    "                scaler.unscale_(optimizer)\n",
    "                \n",
    "                # 计算裁剪前的梯度范数\n",
    "\n",
    "                 # 新增：鲁棒性梯度范数计算（避免NaN）\n",
    "                grad_norm = 0.0\n",
    "                has_valid_grad = False\n",
    "                for param in model.parameters():\n",
    "                    if param.grad is not None and param.grad.numel() > 0:\n",
    "            # 累加L2范数的平方\n",
    "                        grad_norm += torch.sum(param.grad.pow(2)).item()\n",
    "                        has_valid_grad = True\n",
    "    \n",
    "                if has_valid_grad:\n",
    "                    grad_norm = math.sqrt(grad_norm)  # 计算整体L2范数\n",
    "        # 梯度裁剪（确保不超过max_norm=1.0）\n",
    "                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n",
    "                    clipped_grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n",
    "                else:\n",
    "                    grad_norm = 0.0  # 无有效梯度时设为0，避免NaN\n",
    "                    print(\"警告：当前批次所有参数梯度为0，跳过更新\")\n",
    "    \n",
    "                epoch_grad_norms.append(grad_norm)  # 记录有效梯度范数\n",
    "\n",
    "\n",
    "                \n",
    "\n",
    "\n",
    "                # 检查梯度是否为空\n",
    "                all_zero_grad = True\n",
    "                for param in model.parameters():\n",
    "                    if param.grad is not None and torch.sum(param.grad.abs()) > 0:\n",
    "                       all_zero_grad = False\n",
    "                       break\n",
    "                if all_zero_grad:\n",
    "                 empty_grad_count += 1\n",
    "\n",
    "\n",
    "\n",
    "                scaler.step(optimizer)\n",
    "                scaler.update()\n",
    "                  # 重置梯度\n",
    "                optimizer.zero_grad(set_to_none=True)\n",
    "             \n",
    "                scheduler_onecycle.step()  # 前10%的epoch使用OneCycleLR\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "            # **关键修改点**：合并GPU指标和损失指标\n",
    "            postfix_dict = {\n",
    "            \"total\": f\"{total_loss.item():.4f}\",\n",
    "            \"main\": f\"{loss_dict['main'].item():.4f}\",\n",
    "            \"part\": f\"{loss_dict['part'].item():.4f}\",\n",
    "            \"triplet\": f\"{loss_dict['triplet'].item():.4f}\",\n",
    "            \"tri_weight\": f\"{loss_dict['tri_weight'].item():.2f}\",\n",
    "            \"lr\": f\"{optimizer.param_groups[0]['lr']:.6f}\",\n",
    "             \n",
    "        }\n",
    "        \n",
    "        \n",
    "        \n",
    "        # 更新进度条\n",
    "            train_bar.set_postfix(postfix_dict)\n",
    "\n",
    "\n",
    "         # epoch 结束后打印 Triplet 有效比例（平均值或最后一次值）\n",
    "        if epoch_valid_ratio:\n",
    "            avg_valid_ratio = sum(epoch_valid_ratio) / len(epoch_valid_ratio)\n",
    "            cos_info = \"无正样本对\"\n",
    "            if len(criterion.epoch_pos_cos_sim) > 0:\n",
    "                all_pos_cos_sim = np.concatenate(criterion.epoch_pos_cos_sim)\n",
    "                min_cos = all_pos_cos_sim.min()\n",
    "                max_cos = all_pos_cos_sim.max()\n",
    "                mean_cos = all_pos_cos_sim.mean()\n",
    "                cos_info = f\"正样本余弦相似度：min={min_cos:.2f}, max={max_cos:.2f}, mean={mean_cos:.2f}\"\n",
    "            else:\n",
    "                cos_info = \"无正样本对\"\n",
    "        # === 合并打印 ===\n",
    "\n",
    "            # 新增：计算梯度范数均值\n",
    "            grad_norm_mean = sum(epoch_grad_norms)/len(epoch_grad_norms) if epoch_grad_norms else 0.0\n",
    "\n",
    "            print(f\"Epoch {epoch+1} Triplet有效三元组平均比例: {avg_valid_ratio:.2%} | {cos_info} | 梯度范数均值: {clipped_grad_norm:.4f}\")\n",
    "        else:\n",
    "            print(f\"Epoch {epoch+1} 无有效三元组\") \n",
    "\n",
    "             # 清空缓存（关键！否则下个epoch数据会累积）\n",
    "        criterion.reset_epoch_cache()\n",
    "\n",
    "        # 在epoch结束时添加统计信息\n",
    "        current_total_steps = (epoch + 1) * actual_updates_per_epoch  # 计算总步数\n",
    "        print(f\"Epoch {epoch+1} 空梯度批次数: {empty_grad_count}, 当前已完成的总步数：{current_total_steps}  \")\n",
    "\n",
    "\n",
    "        \n",
    "        \n",
    "\n",
    "     \n",
    "\n",
    "        \n",
    "\n",
    "\n",
    "        # 每2个epoch验证一次\n",
    "        val_acc=0.0\n",
    "        if (epoch+1) % args.val_freq == 0:\n",
    "        \n",
    "            val_acc = validate(model,query_loader, gallery_loader,  device)\n",
    "            print(f\"Epoch {epoch+1} mAP: {val_acc:.2%}\")\n",
    "\n",
    "\n",
    "            # ↓↓↓ 新增梯度日志记录 ↓↓↓\n",
    "            log_gradients(model, epoch)  # 每个验证周期记录一次\n",
    "            # ↑↑↑ 新增结束 ↑↑↑\n",
    "\n",
    "\n",
    "\n",
    "        if  val_acc > best_acc:\n",
    "            best_acc = val_acc\n",
    "\n",
    "            torch.save( { 'epoch': epoch,\n",
    "                        'state_dict': model.state_dict(),\n",
    "                        'optimizer': optimizer.state_dict(),\n",
    "                        'criterion': criterion.state_dict(),  # 新增：保存损失函数状态\n",
    "                        'best_acc': best_acc,  # ✅ 正确键名\n",
    "                        'args': args,\n",
    "                         'scaler': scaler.state_dict()  ,# 新增此行\n",
    "                         'scheduler': {\n",
    "                        'onecycle': scheduler_onecycle.state_dict() \n",
    "                    }\n",
    "                                     },\n",
    "                      os.path.join(args.save_dir, \"best_model.pth\"))\n",
    "            print(f\"保存最佳模型: mAP {best_acc:.2%}\")\n",
    "        \n",
    "        \n",
    "        #关键机制：\n",
    "        #梯度累积：模拟更大batch_size\n",
    "        #自动恢复：异常中断后可从中断点继续训练\n",
    "        #最优模型保存：仅保留验证集最佳模型\n",
    "\n",
    "    \n",
    "\n",
    "        \n",
    "#核心技术：\n",
    "#混合精度训练：float16计算加速，自动梯度缩放\n",
    "#OneCycle策略：动态调整学习率提高收敛速度\n",
    "#融合优化器：CUDA内核融合减少GPU操作开销\n",
    "        \n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    import torch.multiprocessing as mp\n",
    "    mp.freeze_support()  # 必须在Windows下调用，且位于最前\n",
    "    \n",
    "    # 强制设置spawn模式（避免默认使用fork）\n",
    "    if sys.platform.startswith('win'):\n",
    "        mp.set_start_method('spawn', force=True)\n",
    "        print(\"Windows平台: 使用spawn启动方法\")\n",
    "    \n",
    "    args = get_args()\n",
    "    os.makedirs(args.save_dir, exist_ok=True)\n",
    "    train(args)\n",
    "\n",
    " \n",
    "\n",
    "#训练流程：\n",
    "#数据准备：加载VeRi-776数据集，应用强数据增强\n",
    "#模型构建：初始化自定义Transformer，配置混合精度\n",
    "#训练配置：设置复合损失函数、优化策略、验证机制\n",
    "#核心训练：梯度累积更新参数，定期验证模型性能\n",
    "#模型保存：保存最佳模型和中断恢复点\n",
    "#性能优化：通过TF32、OneCycle策略等提升训练效率\n",
    "\n",
    "\n",
    "#代码完整实现了从数据加载到模型训练的完整闭环，包含多项工业级优化技巧，适合大规模车辆重识别任务的训练需求。\n",
    "\n",
    "\n",
    "\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch2.0",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
