
# 新建train.py验证模型保存功能

import sys
import os
from pathlib import Path

# 获取当前文件的绝对路径
current_file = Path(__file__).resolve()
# 计算项目根目录：上溯两级（假设train.py在scripts/目录下）
project_root = current_file.parent.parent
# 将项目根目录添加到系统路径
sys.path.insert(0, str(project_root))

#作用：确保项目内自定义模块（如 Veri776Dataset）可被正确导入
#关键点：通过路径解析动态添加项目根目录，避免绝对路径依赖
import pynvml
import torch
import time
from PIL import Image
import argparse
from tqdm import tqdm
from torch.utils.data import Dataset
from torchvision import transforms
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import vit_b_16
from einops import rearrange, repeat
from collections import defaultdict
from torch.utils.data import DataLoader  # 新增导入
from torch.optim import AdamW
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.metrics import average_precision_score  # 新增导入
import numpy as np
from torch.optim.lr_scheduler import CosineAnnealingLR
from src.datasets.veri776_dataset import Veri776Dataset
from src.models.vehicle_transformer import VehicleTransformer
import warnings
from torch.optim.lr_scheduler import LambdaLR
import multiprocessing
import math



warnings.filterwarnings("ignore", category=FutureWarning)


# 复合损失函数  ，主损失权重， 过小导致特征判断力不足

class CombinedLoss(nn.Module):
    """Composite re-ID loss for VeRi-776 training.

    total = alpha * CE(global logits) + 0.01 * CE(part logits)
            + tri_weight * batch-hard triplet loss

    Triplet anchors are mined only among samples that have at least one
    same-identity / different-camera positive in the batch; the triplet
    margin and the positive-similarity threshold are annealed linearly
    over training via ``set_current_epoch``.
    """

    def __init__(self, alpha=0.6, initial_margin=0.1,
                 final_margin=0.5, total_epochs=100,
                 threshold=0.3, initial_threshold=0.0,
                 final_threshold=0.5,
                 early_stop_epoch=20,
                 weight_adaptive=True  # enable learned per-triplet weights
                 ):
        """
        Args:
            alpha: weight of the main (global) cross-entropy term.
            initial_margin / final_margin: triplet margin annealed over
                ``total_epochs`` (0.1 -> 0.5 raises difficulty gradually).
            total_epochs: schedule length for margin/threshold annealing.
            threshold: unused; kept for backward-compatible signature.
            initial_threshold / final_threshold: positive-pair cosine
                similarity threshold annealed alongside the margin.
            early_stop_epoch: before this epoch the threshold is NOT
                applied (any cross-camera positive makes an anchor valid).
            weight_adaptive: if True, a small MLP produces a per-triplet
                weight from (difficulty, cos_pos - cos_neg).
        """
        super().__init__()
        self.ce = nn.CrossEntropyLoss(label_smoothing=0.2)
        self.initial_margin = initial_margin
        self.final_margin = final_margin
        self.total_epochs = total_epochs
        self.current_epoch = 0
        # reduction='none' so per-triplet losses can be reweighted below.
        self.triplet = nn.TripletMarginLoss(margin=initial_margin, reduction='none')
        self.initial_threshold = initial_threshold
        self.final_threshold = final_threshold
        self.current_threshold = initial_threshold
        self.epoch_pos_cos_sim = []  # per-epoch cache of positive-pair cosines
        self.early_stop_epoch = early_stop_epoch
        self.alpha = alpha
        self.weight_adaptive = weight_adaptive

        # Adaptive weight network (only built when weight_adaptive=True).
        # NOTE(review): moving to CUDA at construction time assumes the
        # features will live on the default CUDA device — confirm callers.
        if weight_adaptive:
            self.weight_net = nn.Sequential(
                nn.Linear(2, 16),
                nn.ReLU(),
                nn.Linear(16, 1),
                nn.Sigmoid()
            ).to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))

    def set_current_epoch(self, epoch):
        """Anneal margin/threshold for `epoch` and reset the cosine cache."""
        # BUGFIX: record the epoch. Previously self.current_epoch was never
        # updated, so forward() always saw epoch 0 and the early_stop_epoch
        # threshold logic never activated.
        self.current_epoch = epoch
        frac = epoch / self.total_epochs
        # Margin grows 0.1 -> 0.5 over training, increasing difficulty.
        self.triplet.margin = self.initial_margin + (self.final_margin - self.initial_margin) * frac
        self.current_threshold = self.initial_threshold + (self.final_threshold - self.initial_threshold) * frac
        self.epoch_pos_cos_sim = []

    def reset_epoch_cache(self):
        """Drop the cached positive-pair cosine similarities."""
        self.epoch_pos_cos_sim = []

    def forward(self, outputs, targets, batch):
        """Compute the combined loss.

        Args:
            outputs: dict with 'logits' [B, C], 'part_logits' (list of
                [B, C]) and 'bn_feature' [B, D] (assumed L2-normalized —
                TODO confirm against the model's forward).
            targets: [B] class-id tensor.
            batch: dict providing 'camera_id' [B].

        Returns:
            dict with 'total', 'main', 'part', 'triplet', 'tri_weight'
            and 'valid_ratio'.
        """
        main_loss = self.ce(outputs["logits"], targets)
        part_loss = sum(self.ce(p, targets) for p in outputs["part_logits"])
        bn_feature = outputs["bn_feature"]
        device = targets.device
        camera_ids = batch['camera_id'].to(device)
        idx = torch.arange(len(targets), device=device)

        # ---- mining (no gradients needed) -------------------------------
        with torch.no_grad():
            cos_sim = F.cosine_similarity(bn_feature.unsqueeze(1), bn_feature.unsqueeze(0), dim=2)
            # Positives: same identity, different camera, not the sample itself.
            pos_mask = (
                (targets.unsqueeze(1) == targets.unsqueeze(0)) &
                (camera_ids.unsqueeze(1) != camera_ids.unsqueeze(0)) &
                (idx != idx.unsqueeze(1))
            )
            rows, cols = torch.where(pos_mask)
            pos_cos_sim = cos_sim[pos_mask]
            if pos_cos_sim.numel() > 0:
                self.epoch_pos_cos_sim.append(pos_cos_sim.cpu().numpy())

            if rows.numel() == 0:
                # No cross-camera positive pairs at all in this batch.
                valid_mask = torch.zeros(len(targets), dtype=torch.bool, device=device)
            else:
                # Per-anchor list of positive cosines, padded with -1.
                per_anchor = [cos_sim[i, cols[rows == i]] for i in range(len(targets))]
                padded = torch.nn.utils.rnn.pad_sequence(
                    per_anchor, batch_first=True, padding_value=-1
                )
                has_positive = (padded != -1).any(dim=1)               # [B]
                above_threshold = (padded > self.current_threshold).any(dim=1)  # [B]
                if self.current_epoch < self.early_stop_epoch:
                    # Warm-up: any positive counts, no similarity threshold.
                    valid_mask = has_positive
                else:
                    valid_mask = has_positive & above_threshold

            valid_ratio = valid_mask.float().mean()

        # ---- triplet loss (WITH gradients) ------------------------------
        # BUGFIX: this section used to run inside torch.no_grad(), so the
        # triplet term contributed no gradient and the weight net was never
        # trained. Only the index mining above stays gradient-free.
        if valid_mask.sum() == 0:
            tri_loss = torch.zeros(1, device=device)
            tri_weight = torch.tensor(0.0, device=device)
        else:
            valid_indices = torch.where(valid_mask)[0]   # [M] batch indices
            M = valid_indices.size(0)
            feat_valid = bn_feature[valid_indices]       # [M, D]
            # Cosine distance restricted to valid samples (cheaper than B x B).
            dist_valid = 1 - F.cosine_similarity(
                feat_valid.unsqueeze(1), feat_valid.unsqueeze(0), dim=2
            )                                            # [M, M]

            targets_valid = targets[valid_indices]
            camera_ids_valid = camera_ids[valid_indices]
            idx_valid = torch.arange(M, device=device)
            pos_mask_valid = (
                (targets_valid.unsqueeze(1) == targets_valid.unsqueeze(0)) &
                (camera_ids_valid.unsqueeze(1) != camera_ids_valid.unsqueeze(0)) &
                (idx_valid != idx_valid.unsqueeze(1))
            )
            neg_mask_valid = targets_valid.unsqueeze(1) != targets_valid.unsqueeze(0)

            # Hard-example indices do not need gradients.
            with torch.no_grad():
                neg_dist = dist_valid.clone()
                neg_dist[~neg_mask_valid] = -float('inf')
                hardest_negative_local = neg_dist.argmax(dim=1).clamp(0, M - 1)
                pos_dist = dist_valid.clone()
                pos_dist[~pos_mask_valid] = float('inf')
                hardest_positive_local = pos_dist.argmin(dim=1).clamp(0, M - 1)

            valid_anchor = feat_valid
            valid_positive = feat_valid[hardest_positive_local]
            valid_negative = feat_valid[hardest_negative_local]
            assert valid_anchor.shape == valid_positive.shape == valid_negative.shape, \
                f"三元组维度不匹配: {valid_anchor.shape}, {valid_positive.shape}, {valid_negative.shape}"

            tri_loss_per = self.triplet(valid_anchor, valid_positive, valid_negative)  # [M]

            if self.weight_adaptive and M > 0:
                # Per-triplet difficulty in [0, 1], scaled by current margin.
                valid_positive_dist = 1 - F.cosine_similarity(valid_anchor, valid_positive, dim=1)
                valid_negative_dist = 1 - F.cosine_similarity(valid_anchor, valid_negative, dim=1)
                difficulty = ((valid_negative_dist - valid_positive_dist) / self.triplet.margin).clamp(min=0, max=1)
                cos_sim_pos = F.cosine_similarity(valid_anchor, valid_positive, dim=1)
                cos_sim_neg = F.cosine_similarity(valid_anchor, valid_negative, dim=1)
                weight_input = torch.stack([difficulty, cos_sim_pos - cos_sim_neg], dim=1)
                weights = self.weight_net(weight_input).squeeze()  # [M]
                tri_loss = (tri_loss_per * weights).mean()
            else:
                tri_loss = tri_loss_per.mean()

            # NOTE(review): clamp(0.5 * valid_ratio, min=0.5) is constant 0.5
            # for any valid_ratio <= 1 — confirm whether max=0.5 was intended.
            tri_weight = torch.clamp(0.5 * valid_ratio, min=0.5)

        total_loss = self.alpha * main_loss + 0.01 * part_loss + tri_weight * tri_loss

        return {
            "total": total_loss,
            "main": main_loss,
            "part": part_loss,
            "triplet": tri_loss,
            "tri_weight": tri_weight,
            "valid_ratio": valid_ratio,
        }
#设计亮点：
#标签平滑：防止模型对标签过拟合
#动态难例挖掘：自动选择最难区分的负样本增强特征判别性
#多损失融合：分类损失 + 部件分类+ 三元组损失




def euclidean_dist(x, y):
    """Pairwise Euclidean distance matrix.

    Args:
        x: tensor of shape (n, feat_dim).
        y: tensor of shape (m, feat_dim).

    Returns:
        Tensor of shape (n, m); entry (i, j) is ||x_i - y_j||_2, clamped
        away from zero for numerical stability before the square root.
    """
    xx = x.pow(2).sum(dim=1, keepdim=True)      # (n, 1)
    yy = y.pow(2).sum(dim=1).unsqueeze(0)       # (1, m)
    squared = xx + yy - 2.0 * x.matmul(y.t())   # ||x||^2 + ||y||^2 - 2 x.y
    return squared.clamp(min=1e-12).sqrt()




#mAP计算函数,修改calc_mAP函数，使用更高效的计算方式

def calc_mAP(query_feats, query_ids, query_cams, gallery_feats, gallery_ids, gallery_cams):
    """Memory-efficient mAP over query/gallery features.

    Queries are processed in blocks of 32 so the similarity matrix never
    exceeds (32 x |gallery|). A gallery sample counts as a positive only
    when it shares the query's vehicle id AND comes from a different
    camera. Queries without any cross-camera positive score AP = 0.

    Args:
        query_feats / gallery_feats: CPU tensors [N, D] / [G, D] of
            (dot-product-comparable) features.
        query_ids, query_cams, gallery_ids, gallery_cams: sequences of
            per-sample vehicle / camera ids.

    Returns:
        float mean average precision (0.0 when there are no queries).
    """
    # BUGFIX(compat): np.trapz was renamed np.trapezoid in NumPy 2.0;
    # support both so the function runs on either major version.
    _trapezoid = getattr(np, "trapezoid", np.trapz)

    # Metadata as numpy arrays for vectorized mask construction.
    query_ids = np.array(query_ids)
    query_cams = np.array(query_cams)
    gallery_ids = np.array(gallery_ids)
    gallery_cams = np.array(gallery_cams)

    aps = []
    block_size = 32  # tune to available memory
    gallery_feats = gallery_feats.float()  # ensure float32
    for start in tqdm(range(0, len(query_feats), block_size),
                      desc="计算mAP", leave=False):
        end = min(start + block_size, len(query_feats))
        q_block = query_feats[start:end].float()

        # Block similarity matrix, moved to numpy for per-query AP math.
        sim_block = torch.mm(q_block, gallery_feats.T).numpy()

        for j in range(sim_block.shape[0]):
            orig_idx = start + j
            sim = sim_block[j]

            # Positives: same id, different camera.
            valid_pos_mask = (gallery_ids == query_ids[orig_idx]) & \
                             (gallery_cams != query_cams[orig_idx])
            y_true = valid_pos_mask.astype(np.int32)

            if y_true.sum() == 0:
                aps.append(0.0)  # no positives: AP is 0, not skipped
                continue

            # Rank gallery by descending similarity.
            order = np.argsort(sim)[::-1]
            y_true_sorted = y_true[order]

            # Cumulative precision/recall along the ranking.
            tp = np.cumsum(y_true_sorted)
            precision = tp / np.arange(1, len(tp) + 1)
            recall = tp / y_true.sum()

            # AP as the area under the precision-recall curve.
            aps.append(_trapezoid(precision, recall))

        # Release block memory promptly.
        del q_block, sim_block
        torch.cuda.empty_cache()

    return np.mean(aps) if aps else 0.0

 #分块处理：将大规模矩阵运算分解为 32 样本块，避免显存溢出
 #余弦相似度：使用更适合特征匹配的距离度量
 #有效样本过滤：仅处理跨摄像头的正样本对





# 添加验证循环（防止过拟合）
def validate(model, query_loader, gallery_loader, device):
    """Extract query/gallery features and return mAP (training monitor only).

    Args:
        model: network whose forward returns a dict with 'bn_feature'.
        query_loader / gallery_loader: DataLoaders yielding batches with
            'image', 'vehicle_id' and 'camera_id'.
        device: device for the forward passes; features are stored on CPU.

    Returns:
        float mAP from calc_mAP.

    Raises:
        AssertionError: if query and gallery share no vehicle ids.
        ValueError: if query and gallery share no camera ids (no
            cross-camera evaluation possible).
    """
    model.eval()
    query_ids, query_cams, query_feats_list = [], [], []
    gallery_ids, gallery_cams, gallery_feats_list = [], [], []

    # Collect features block-by-block, storing them on CPU to cap VRAM use.
    # (Previous version kept duplicate id lists and dead accumulators.)
    with torch.no_grad():
        for batch in query_loader:
            images = batch["image"].to(device, non_blocking=True)
            outputs = model(images)
            query_feats_list.append(outputs["bn_feature"].cpu())
            query_ids.extend(batch["vehicle_id"].tolist())
            query_cams.extend(batch["camera_id"].tolist())

        for batch in gallery_loader:
            images = batch["image"].to(device, non_blocking=True)
            outputs = model(images)
            gallery_feats_list.append(outputs["bn_feature"].cpu())
            gallery_ids.extend(batch["vehicle_id"].tolist())
            gallery_cams.extend(batch["camera_id"].tolist())

    query_feats = torch.cat(query_feats_list, dim=0)
    gallery_feats = torch.cat(gallery_feats_list, dim=0)

    # Sanity check: mean L2 norm of non-zero query features (~1.0 if the
    # model L2-normalizes its bn_feature output).
    valid_mask = torch.any(query_feats != 0, dim=1)
    valid_feats = query_feats[valid_mask]
    if valid_feats.numel() == 0:
        norm = 0.0
    else:
        norm = torch.norm(valid_feats, dim=1).mean().item()
    print(f"查询特征平均范数: {norm:.4f}")

    # Query and gallery must share vehicle ids, otherwise the split is broken.
    common_ids = np.intersect1d(query_ids, gallery_ids)
    assert len(common_ids) > 0, "查询集与测试集无交集车辆ID，数据集划分错误！"

    # Cross-camera evaluation requires shared camera ids.
    common_cams = set(query_cams) & set(gallery_cams)
    if not common_cams:
        raise ValueError("查询集与测试集无跨相机样本，无法计算mAP")

    return calc_mAP(query_feats, query_ids, query_cams, gallery_feats, gallery_ids, gallery_cams)


#关键优化：
#特征类型转换：float32比默认float64节省50%内存
#及时释放显存：del images防止显存泄漏


# 配置参数
def get_args():
    """Parse and return command-line training options."""
    parser = argparse.ArgumentParser()
    # Batch size: too small makes batch statistics unreliable.
    parser.add_argument("--batch_size", type=int, default=16)
    parser.add_argument("--lr", type=float, default=5e-4)
    parser.add_argument("--epochs", type=int, default=100)
    # Run validation every N epochs.
    parser.add_argument("--val_freq", type=int, default=2)
    # Directory for checkpoints / resume files.
    parser.add_argument("--save_dir", type=str, default="checkpoints")
    parser.add_argument("--resume", type=str, default="",
                        help="checkpoint path to resume")
    parser.add_argument("--pct_start", type=float, default=0.3, help="学习率上升阶段占比")
    return parser.parse_args()


# 在train.py中添加测试代码
# 修改后
def test_patch_embed():
    """Smoke-check VehicleTransformer.forward_features output shapes."""
    model = VehicleTransformer(
        num_classes=776,
        img_size=(224, 224),
        patch_sizes=[16, 8],   # multi-scale patch sizes
        local_parts=7,         # must match the model configuration
        num_heads=4,           # keeps head_dim = 32
        embed_dim=128,
    )
    sample = torch.randn(1, 3, 224, 224)

    # Run only the feature extractor: global + per-part local features.
    global_feat, local_feats = model.forward_features(sample)

    print("\n=== 特征维度验证 ===")
    print(f"全局特征维度: {global_feat.shape}")
    print(f"局部特征数量: {len(local_feats)}")         # should equal local_parts
    print(f"单个局部特征维度: {local_feats[0].shape}")


# 定义全局collate函数,使数据结构对齐，避免类型错误
def custom_collate(batch):
    """Collate a list of sample dicts into a single batched dict.

    Keeps the data layout explicit so DataLoader workers never hit
    type-mismatch errors from the default collate.
    """
    images = [sample['image'] for sample in batch]
    class_ids = [sample['class_id'] for sample in batch]
    vehicle_ids = [sample['vehicle_id'] for sample in batch]
    camera_ids = [sample['camera_id'] for sample in batch]
    return {
        'image': torch.stack(images),
        'class_id': torch.tensor(class_ids),
        'vehicle_id': torch.tensor(vehicle_ids),
        'camera_id': torch.tensor(camera_ids),
    }





# 训练函数
def train(args):
    """Full training loop for VehicleTransformer on VeRi-776.

    Covers: staged data augmentation (ramping up at epochs 20/40/60),
    mixed-precision training with gradient accumulation and clipping,
    OneCycleLR scheduling, per-epoch interrupt checkpoints, resume from
    checkpoint, periodic mAP validation and best-model saving.

    Args:
        args: namespace from get_args() (batch_size, lr, epochs, val_freq,
            save_dir, resume, pct_start).
    """
    torch.set_num_threads(multiprocessing.cpu_count())  # use all CPU cores
    accum_steps = 2  # gradient accumulation (simulates a 2x larger batch)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    torch.backends.cuda.matmul.allow_tf32 = True  # TF32 matmul speed-up
    torch.backends.cudnn.benchmark = True         # let cuDNN pick fastest kernels
    torch.cuda.empty_cache()

    # Deterministic transform shared by query/gallery evaluation sets.
    base_transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    # Count training samples once to derive the scheduler step budget.
    # (FIX: this was previously computed twice with two throwaway datasets.)
    train_set_base = Veri776Dataset(mode="train", transform=base_transform)
    total_train_samples = len(train_set_base)
    del train_set_base  # free memory
    actual_updates_per_epoch = math.ceil(total_train_samples / (args.batch_size * accum_steps))
    total_steps = args.epochs * actual_updates_per_epoch

    query_transform = base_transform
    test_transform = base_transform

    # Query set doubles as the validation reference.
    query_set = Veri776Dataset(mode="query", transform=query_transform)
    gallery_set = Veri776Dataset(mode="test", transform=test_transform)

    query_loader = DataLoader(
        query_set,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
        drop_last=False,
        collate_fn=custom_collate,
        prefetch_factor=2,  # each worker prefetches 2 batches
    )
    gallery_loader = DataLoader(
        gallery_set,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
        drop_last=False,
        collate_fn=custom_collate,
        prefetch_factor=2,
    )

    # Mixed-precision scaler (explicitly enabled).
    scaler = torch.cuda.amp.GradScaler(enabled=True)

    # Gradient checkpointing helper (saves activation memory).
    from torch.utils.checkpoint import checkpoint_sequential

    def apply_checkpoint(module, inp):
        """Run `module`, checkpointing TransformerEncoder layer stacks."""
        if not inp.requires_grad:
            inp.requires_grad_(True)  # checkpointing needs a grad-enabled input
        if isinstance(module, nn.TransformerEncoder) and inp.requires_grad:
            encoder_layers = list(module.layers)
            return checkpoint_sequential(encoder_layers, 2, inp, use_reentrant=False)
        return module(inp)

    model = VehicleTransformer(
        num_classes=776,
        img_size=(224, 224),
        patch_sizes=[16, 8],
        local_parts=7,
        embed_dim=128,   # feature dim; too low loses information
        depth=4,
        num_heads=4,
        mlp_ratio=4,
        pretrained=False,
        use_checkpoint=True,               # enable gradient checkpointing
        apply_checkpoint=apply_checkpoint  # injected checkpoint wrapper
    ).to(device)
    model = model.to(memory_format=torch.channels_last)

    def forward_with_checkpoint(self, x):
        """Replacement forward that routes input through self.grad_check."""
        x = self.grad_check(x)
        global_feats, multi_scale_local = self.forward_features(x)

        # Fusion + classification heads run without checkpointing.
        all_feats = []
        for i in range(len(global_feats)):
            all_feats.append(global_feats[i])
            all_feats.extend(multi_scale_local[i])

        fused = self.fusion(global_feats=global_feats, local_feats=multi_scale_local)
        fused_bn = self.bn_neck(fused)
        fused_bn_normalized = F.normalize(fused_bn, dim=1)
        logits = self.head(fused_bn)

        part_logits = []
        for i, scale_local in enumerate(multi_scale_local):
            for p, feat in enumerate(scale_local):
                part_logits.append(self.part_classifiers[i][p](feat))

        return {
            'global': global_feats,
            'local': multi_scale_local,
            'fused': fused,
            'logits': logits,
            'part_logits': part_logits,
            'bn_feature': fused_bn_normalized,
        }

    model.forward = forward_with_checkpoint.__get__(model, type(model))

    def log_gradients(model, epoch):
        """Append per-parameter gradient L2 norms to gradient_log.txt."""
        grad_info = []
        for name, param in model.named_parameters():
            if param.grad is not None:
                grad_norm = param.grad.data.norm(2).item()
                grad_info.append(f"{name[:15]:<15} : {grad_norm:.4e}")
        with open("gradient_log.txt", "a") as f:
            f.write(f"\n=== Epoch {epoch+1} 梯度分布 ===\n")
            f.write("\n".join(grad_info))

    def _train_transform_for(epoch):
        """Staged augmentation: strength ramps up at epochs 20/40/60."""
        if epoch < 20:
            # Stage 1: geometric only.
            return transforms.Compose([
                transforms.Resize((256, 256)),
                transforms.RandomCrop((224, 224)),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ])
        elif epoch < 40:
            # Stage 2: light flip + color jitter.
            return transforms.Compose([
                transforms.Resize((256, 256)),
                transforms.RandomCrop((224, 224)),
                transforms.RandomHorizontalFlip(p=0.2),
                transforms.ColorJitter(brightness=0.1, contrast=0.1),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            ])
        elif epoch < 60:
            # Stage 3: medium-strength augmentation.
            return transforms.Compose([
                transforms.Resize((256, 256)),
                transforms.RandomCrop((224, 224)),
                transforms.RandomHorizontalFlip(p=0.4),
                transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
                transforms.RandomGrayscale(p=0.05),
                transforms.RandomApply([transforms.GaussianBlur(kernel_size=3, sigma=(0.1, 1.0))], p=0.2),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
        else:
            # Stage 4: strong augmentation incl. rotation and erasing.
            return transforms.Compose([
                transforms.Resize((256, 256)),
                transforms.RandomCrop((224, 224)),
                transforms.RandomHorizontalFlip(p=0.6),
                transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3),
                transforms.RandomGrayscale(p=0.1),
                transforms.RandomApply([transforms.GaussianBlur(kernel_size=3, sigma=(0.1, 2.0))], p=0.3),
                transforms.RandomRotation(10),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
                transforms.RandomErasing(p=0.3, scale=(0.02, 0.2), ratio=(0.3, 2.0))
            ])

    # Optimizer with weight decay; fused kernel for speed.
    optimizer = AdamW(
        model.parameters(),
        lr=args.lr,          # too high risks gradient explosion
        weight_decay=0.001,
        fused=True
    )

    from torch.optim.lr_scheduler import OneCycleLR
    scheduler_onecycle = OneCycleLR(
        optimizer,
        max_lr=args.lr,
        total_steps=total_steps,       # accumulation-corrected step budget
        pct_start=args.pct_start,      # warm-up fraction
        div_factor=10,                 # initial lr = max_lr / div_factor
        final_div_factor=1e4,          # final lr = max_lr / final_div_factor
        anneal_strategy='cos')

    criterion = CombinedLoss(
        alpha=0.6,
        initial_margin=0.1,
        final_margin=0.5,
        total_epochs=args.epochs,  # must match the training schedule
        early_stop_epoch=20,       # no similarity threshold for first 20 epochs
        weight_adaptive=True
    )

    # === Checkpoint-resume logic ===
    start_epoch = 0
    best_acc = 0.0
    checkpoint_dir = Path(args.save_dir) / "interrupt"
    checkpoint_dir.mkdir(parents=True, exist_ok=True)

    if args.resume and Path(args.resume).exists():
        checkpoint = torch.load(args.resume, map_location=device)
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])

        if 'criterion' in checkpoint:
            criterion.load_state_dict(checkpoint['criterion'])
            print("成功加载损失函数状态")

        start_epoch = checkpoint['epoch']
        best_acc = checkpoint.get('best_acc', 0.0)
        scaler.load_state_dict(checkpoint.get('scaler', scaler.state_dict()))

        if 'scheduler' in checkpoint:
            scheduler_state = checkpoint['scheduler']
            if 'onecycle' in scheduler_state:
                scheduler_onecycle.load_state_dict(scheduler_state['onecycle'])
                print("成功加载OneCycleLR调度器状态")
            else:
                print("未检测到有效调度器状态，使用初始配置")

        print(f"恢复训练：从epoch {start_epoch}开始，历史最佳mAP {best_acc:.2%}")

    # Reduce fragmentation-induced OOMs on the CUDA caching allocator.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

    print("\n=== 训练配置摘要 ===")
    print(f"输入尺寸: {model.img_size}")
    print(f"批次大小: {args.batch_size}")
    print(f"初始学习率: {args.lr}")
    print(f"嵌入维度: {model.embed_dim}")
    print(f"Transformer深度: {model.depth}")
    print(f"设备: {device}")
    print(f"训练样本数: {total_train_samples}")
    print(f"验证样本数: {len(query_set)}")
    print(f"混合精度训练: {scaler.is_enabled()}")
    print("=====================\n")
    print(f"Total training steps: {total_steps}, pct_start: {args.pct_start:.2f}")

    for epoch in range(start_epoch, args.epochs):
        empty_grad_count = 0
        model.train()

        epoch_valid_ratio = []   # per-batch valid-triplet ratios
        epoch_grad_norms = []    # per-update pre-clip gradient norms
        criterion.set_current_epoch(epoch)  # anneal margin/threshold

        # Rebuild the training set each epoch with stage-appropriate augmentation.
        train_set = Veri776Dataset(mode="train", transform=_train_transform_for(epoch))
        train_loader = DataLoader(
            train_set,
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=4,
            pin_memory=True,
            drop_last=True,
            collate_fn=custom_collate,
            prefetch_factor=2,
        )

        scheduler_state = {'onecycle': scheduler_onecycle.state_dict()}

        # Save an interrupt-recovery checkpoint at the start of every epoch.
        torch.save({
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'total_steps': total_steps,
            'best_acc': best_acc,
            'args': args,
            'scaler': scaler.state_dict(),
            'scheduler': scheduler_state
        }, checkpoint_dir / "last_checkpoint.pth")

        torch.cuda.empty_cache()

        train_bar = tqdm(
            enumerate(train_loader),
            total=len(train_loader),
            desc=f"Epoch {epoch+1}/{args.epochs}".ljust(15),
            postfix={},
            position=0,
            leave=True,
            mininterval=1,
            maxinterval=5,
            smoothing=0.1,
            dynamic_ncols=True,
            bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
        )

        for batch_idx, batch in train_bar:
            images = batch["image"].to(device)
            targets = batch["class_id"].to(device)
            # BUGFIX: no per-batch optimizer.zero_grad() here — zeroing every
            # iteration wiped the accumulated gradients and silently disabled
            # the accum_steps accumulation. Gradients are now cleared only
            # after each optimizer step below.

            with torch.cuda.amp.autocast(dtype=torch.float16, enabled=True):
                outputs = model(images)
                loss_dict = criterion(outputs, targets, batch)
                total_loss = loss_dict["total"]
                epoch_valid_ratio.append(loss_dict["valid_ratio"].item())

                # Skip batches that produced a non-finite loss.
                if not torch.isfinite(total_loss):
                    continue

            # Scale by accumulation steps so the effective gradient matches
            # a single large batch.
            scaled_loss = total_loss / accum_steps
            scaler.scale(scaled_loss).backward()

            # Step the optimizer only every accum_steps batches.
            if (batch_idx + 1) % accum_steps == 0:
                scaler.unscale_(optimizer)

                # Robust gradient-norm computation (avoids NaN on empty grads).
                grad_norm_sq = 0.0
                has_valid_grad = False
                for param in model.parameters():
                    if param.grad is not None and param.grad.numel() > 0:
                        grad_norm_sq += torch.sum(param.grad.pow(2)).item()
                        has_valid_grad = True

                if has_valid_grad:
                    grad_norm = math.sqrt(grad_norm_sq)
                    # BUGFIX: clip exactly once (this was called twice; the
                    # second call measured the already-clipped gradients).
                    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                else:
                    grad_norm = 0.0
                    print("警告：当前批次所有参数梯度为0，跳过更新")

                epoch_grad_norms.append(grad_norm)

                # Track batches whose gradients are entirely zero.
                all_zero_grad = True
                for param in model.parameters():
                    if param.grad is not None and torch.sum(param.grad.abs()) > 0:
                        all_zero_grad = False
                        break
                if all_zero_grad:
                    empty_grad_count += 1

                scaler.step(optimizer)
                scaler.update()
                optimizer.zero_grad(set_to_none=True)  # clear for next accumulation window
                scheduler_onecycle.step()

            # Progress-bar metrics for this batch.
            postfix_dict = {
                "total": f"{total_loss.item():.4f}",
                "main": f"{loss_dict['main'].item():.4f}",
                "part": f"{loss_dict['part'].item():.4f}",
                "triplet": f"{loss_dict['triplet'].item():.4f}",
                "tri_weight": f"{loss_dict['tri_weight'].item():.2f}",
                "lr": f"{optimizer.param_groups[0]['lr']:.6f}",
            }
            train_bar.set_postfix(postfix_dict)

        # End-of-epoch summary: valid-triplet ratio + positive-pair cosines.
        if epoch_valid_ratio:
            avg_valid_ratio = sum(epoch_valid_ratio) / len(epoch_valid_ratio)
            if len(criterion.epoch_pos_cos_sim) > 0:
                all_pos_cos_sim = np.concatenate(criterion.epoch_pos_cos_sim)
                min_cos = all_pos_cos_sim.min()
                max_cos = all_pos_cos_sim.max()
                mean_cos = all_pos_cos_sim.mean()
                cos_info = f"正样本余弦相似度：min={min_cos:.2f}, max={max_cos:.2f}, mean={mean_cos:.2f}"
            else:
                cos_info = "无正样本对"

            grad_norm_mean = sum(epoch_grad_norms)/len(epoch_grad_norms) if epoch_grad_norms else 0.0
            # BUGFIX: report the epoch-mean gradient norm. This previously
            # read the loop-local `clipped_grad_norm`, which raised NameError
            # whenever no optimizer step (or only zero-grad steps) occurred.
            print(f"Epoch {epoch+1} Triplet有效三元组平均比例: {avg_valid_ratio:.2%} | {cos_info} | 梯度范数均值: {grad_norm_mean:.4f}")
        else:
            print(f"Epoch {epoch+1} 无有效三元组")

        # Clear the cosine cache so next epoch's stats don't accumulate.
        criterion.reset_epoch_cache()

        current_total_steps = (epoch + 1) * actual_updates_per_epoch
        print(f"Epoch {epoch+1} 空梯度批次数: {empty_grad_count}, 当前已完成的总步数：{current_total_steps}  ")

        # Periodic validation (monitoring only).
        val_acc = 0.0
        if (epoch + 1) % args.val_freq == 0:
            val_acc = validate(model, query_loader, gallery_loader, device)
            print(f"Epoch {epoch+1} mAP: {val_acc:.2%}")
            log_gradients(model, epoch)  # record gradient distribution

        # Keep only the best-mAP model.
        if val_acc > best_acc:
            best_acc = val_acc
            torch.save({
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'criterion': criterion.state_dict(),
                'best_acc': best_acc,
                'args': args,
                'scaler': scaler.state_dict(),
                'scheduler': {'onecycle': scheduler_onecycle.state_dict()}
            }, os.path.join(args.save_dir, "best_model.pth"))
            print(f"保存最佳模型: mAP {best_acc:.2%}")

    
#功能包括：数据加载与增强，混合精度训练，梯度累积与裁剪，学习率动态调整（OneCycle 策略），自动检查点保存与恢复，定期验证与最佳模型保存
    
#训练流程:
#1. 数据处理流程
#数据集加载：从 VeRi-776 数据集加载训练 / 查询 / 测试数据
#数据增强：根据训练阶段动态调整增强策略
#批处理：使用DataLoader实现多进程加载和预处理

#2. 模型训练循环
#前向传播：通过VehicleTransformer提取多尺度特征
#损失计算：组合分类损失、部件损失和三元组损失
#反向传播：使用梯度累积和混合精度训练
#参数更新：带权重衰减的 AdamW 优化器
#学习率调整：OneCycleLR 动态调整学习率

#3. 验证与评估
#特征提取：从查询集和测试集提取特征
#mAP 计算：分块计算平均精度均值
#模型保存：根据验证结果保存最佳模型


if __name__ == "__main__":
    import torch.multiprocessing as mp
    # Must run before any other multiprocessing call; required on Windows
    # for frozen executables (no-op elsewhere).
    mp.freeze_support()
    
    # Force the 'spawn' start method on Windows (avoids fork-based defaults
    # that break CUDA/DataLoader workers).
    if sys.platform.startswith('win'):
        mp.set_start_method('spawn', force=True)
        print("Windows平台: 使用spawn启动方法")
    
    args = get_args()
    os.makedirs(args.save_dir, exist_ok=True)
    train(args)

 

#训练流程：
#数据准备：加载VeRi-776数据集，应用强数据增强
#模型构建：初始化自定义Transformer，配置混合精度
#训练配置：设置复合损失函数、优化策略、验证机制
#核心训练：梯度累积更新参数，定期验证模型性能
#模型保存：保存最佳模型和中断恢复点
#性能优化：通过TF32、OneCycle策略等提升训练效率


#在运行 train.py 进行模型训练时，veri776_dataset.py 和 vehicle_transformer.py 分别承担数据加载和模型定义的核心功能




