import sys
import os
from pathlib import Path


# 获取当前文件的绝对路径
current_file = Path(__file__).resolve()
# 计算项目根目录：上溯两级（假设train.py在scripts/目录下）
project_root = current_file.parent.parent
# 将项目根目录添加到系统路径
sys.path.insert(0, str(project_root))

# 打印验证路径
#print(f"\n=== 当前工作目录: {os.getcwd()}")
#print(f"=== 项目根目录: {project_root}")
#print(f"=== 系统路径: {sys.path[:3]}\n")



import torch
import time
from PIL import Image
import argparse
from tqdm import tqdm
from torch.utils.data import Dataset
from torchvision import transforms
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import vit_b_16
from einops import rearrange, repeat
from collections import defaultdict
from torch.utils.data import DataLoader  # 新增导入
from torch.optim import AdamW
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.metrics import average_precision_score  # 新增导入
from torch.optim.lr_scheduler import CosineAnnealingLR
from datasets.veri776_dataset import Veri776Dataset

# Core component: Transformer-based vehicle re-identification architecture.
class VehicleTransformer(nn.Module):
    """Multi-scale Transformer for vehicle re-identification.

    The image is patch-embedded at several granularities (``patch_sizes``).
    Each scale runs through a shallow encoder (local detail) and a deep
    encoder (global semantics) joined by an MLP skip connection; the deep
    class token gives a global feature, and ``local_parts`` horizontal
    stripes of the shallow tokens give part features. All features are
    fused by :class:`FeatureFusion` and classified through a BN-neck head.

    Args:
        num_classes: number of identity classes for the classifiers.
        img_size: expected (height, width) of input images.
        patch_sizes: patch edge length per scale (at least two scales).
        embed_dim: token embedding dimension shared by all scales.
        depth: NOTE(review): stored but unused — encoder depth is fixed at
            2 shallow + 2 deep layers per scale; confirm intent.
        num_heads: attention heads in the per-scale encoders.
        mlp_ratio: feed-forward expansion ratio in encoder layers.
        local_parts: horizontal part stripes per scale; must divide the
            patch-grid height of every scale.
        pretrained: NOTE(review): accepted but unused; kept for interface
            compatibility.
        use_checkpoint: enable gradient checkpointing of the deep encoders
            (only effective when ``apply_checkpoint`` is supplied).
        apply_checkpoint: optional callable ``(module, tensor) -> tensor``
            implementing the checkpointed forward call.
    """

    def __init__(self,
                 num_classes=776,
                 img_size=(224, 224),
                 patch_sizes=(16, 8),  # fix: tuple default, not a mutable list
                 embed_dim=128,
                 depth=4,
                 num_heads=4,
                 mlp_ratio=4,
                 local_parts=7,
                 pretrained=False,
                 use_checkpoint=True,
                 apply_checkpoint=None
                 ):
        super().__init__()

        # The multi-scale design requires at least two patch sizes.
        assert len(patch_sizes) > 1, "多尺度Patch至少需要两种尺寸"
        self.patch_sizes = patch_sizes
        self.img_size = img_size

        # One convolutional patch embedding per scale.
        self.patch_embeds = nn.ModuleList([
            nn.Conv2d(3, embed_dim, kernel_size=ps, stride=ps)
            for ps in patch_sizes
        ])

        # Patch-grid geometry and patch counts per scale.
        self.grid_sizes = [(img_size[0] // ps, img_size[1] // ps) for ps in patch_sizes]
        self.num_patches = [gs[0] * gs[1] for gs in self.grid_sizes]

        # local_parts must evenly split the grid height of every scale.
        for i, gs in enumerate(self.grid_sizes):
            assert gs[0] % local_parts == 0, \
            f"local_parts必须能整除第{i}个尺度的高度分块数 ({gs[0]}%{local_parts} != 0)"

        # Store configuration.
        self.local_parts = local_parts
        self.part_height = [gs[0] // local_parts for gs in self.grid_sizes]
        self.part_width = [gs[1] for gs in self.grid_sizes]
        self.embed_dim = embed_dim
        self.depth = depth  # NOTE(review): unused, see class docstring
        self.num_heads = num_heads
        self.mlp_ratio = mlp_ratio
        # Optional user-supplied gradient-checkpoint wrapper.
        self.apply_checkpoint = apply_checkpoint
        self.grad_check = nn.Identity()

        # Log the patch layout per scale.
        # (fix: loop variable renamed from `np`, which shadowed the numpy alias)
        for i, (ps, gs, n_patches) in enumerate(zip(patch_sizes, self.grid_sizes, self.num_patches)):
            print(f"尺度{i+1} - Patch大小: {ps}x{ps}, 网格: {gs[0]}x{gs[1]}, 分块数: {n_patches}")

        # Class token shared across scales.
        self.class_token = nn.Parameter(torch.zeros(1, 1, embed_dim))

        # Per-scale learned position embeddings.
        self.pos_embeds = nn.ParameterList()

        # Per-scale two-stage encoders with a cross-stage skip connection.
        self.shallow_encoders = nn.ModuleList()  # first stage: local detail
        self.deep_encoders = nn.ModuleList()     # second stage: global semantics
        self.skip_conns = nn.ModuleList()        # shallow -> deep skip MLPs

        for i in range(len(patch_sizes)):
            # Position embedding (patches + class token), independent per scale.
            # Fix: the original allocated `torch.zeros(...) * 0.02` — the
            # multiplication was a no-op; trunc_normal_ performs the real init.
            pos_embed = nn.Parameter(
                torch.zeros(1, self.num_patches[i] + 1, embed_dim)
            )
            nn.init.trunc_normal_(pos_embed, std=0.02)
            self.pos_embeds.append(pos_embed)

            # Shallow encoder (local feature extraction, 2 layers).
            shallow_layer = nn.TransformerEncoderLayer(
                d_model=embed_dim,
                nhead=num_heads,
                dim_feedforward=int(embed_dim * mlp_ratio),
                batch_first=True
            )
            shallow_encoder = nn.TransformerEncoder(shallow_layer, num_layers=2)
            self.shallow_encoders.append(shallow_encoder)

            # Deep encoder (global semantic aggregation, 2 layers).
            deep_layer = nn.TransformerEncoderLayer(
                d_model=embed_dim,
                nhead=num_heads,
                dim_feedforward=int(embed_dim * mlp_ratio),
                batch_first=True
            )
            deep_encoder = nn.TransformerEncoder(deep_layer, num_layers=2)
            self.deep_encoders.append(deep_encoder)

            # Cross-stage skip MLP (applied to the deep output, added to the
            # shallow output as a residual in forward_features).
            skip_conn = nn.Sequential(
                nn.Linear(embed_dim, embed_dim * 2),
                nn.GELU(),
                nn.Linear(embed_dim * 2, embed_dim)
            )
            self.skip_conns.append(skip_conn)

        # Part-level attention modules: one per part, per scale.
        self.local_attentions = nn.ModuleList([
            nn.ModuleList([
                LocalFeatureModule(
                    embed_dim=embed_dim,
                    num_heads=num_heads // 2,
                    mlp_ratio=mlp_ratio,
                    part_height=self.part_height[i],
                    part_width=self.part_width[i]
                ) for _ in range(local_parts)
            ]) for i in range(len(patch_sizes))
        ])

        # Auxiliary per-part identity classifiers.
        self.part_classifiers = nn.ModuleList([
            nn.ModuleList([
                nn.Linear(embed_dim, num_classes) for _ in range(local_parts)
            ]) for _ in range(len(patch_sizes))
        ])

        # Dynamic fusion of (1 global + local_parts) features per scale.
        total_parts = len(patch_sizes) * (local_parts + 1)
        self.fusion = FeatureFusion(
            embed_dim=embed_dim,
            num_scales=len(patch_sizes),
            local_parts=local_parts
        )

        # BN-neck and identity classification head over the fused feature
        # (fusion output dim is embed_dim * total_parts * 2; see FeatureFusion).
        self.bn_neck = nn.BatchNorm1d(embed_dim * total_parts * 2)
        self.head = nn.Sequential(
            nn.Linear(embed_dim * total_parts * 2, num_classes)
        )

        # LayerNorm applied to each global class-token feature.
        self.feat_norm = nn.LayerNorm(embed_dim)

        self.use_checkpoint = use_checkpoint
        if use_checkpoint:
            print("启用梯度检查点，减少显存占用")

    def forward_features(self, x):
        """Extract per-scale global and part-level features.

        Args:
            x: image batch ``[B, 3, H, W]`` matching ``img_size``.

        Returns:
            Tuple ``(global_feats, local_feats)``: a list of L2-normalised
            ``[B, embed_dim]`` global features (one per scale), and a list of
            per-scale lists of L2-normalised ``[B, embed_dim]`` part features.
        """
        B, C, H, W = x.shape
        multi_scale_feats = []
        multi_scale_local = []

        for i, (pe, shallow_encoder, deep_encoder, skip_conn, local_attention) in enumerate(
            zip(self.patch_embeds, self.shallow_encoders, self.deep_encoders,
                self.skip_conns, self.local_attentions)
        ):
            # Input geometry must match the configured image size.
            assert H == self.img_size[0], f"输入高度{H}与模型设定{self.img_size[0]}不符"
            assert W == self.img_size[1], f"输入宽度{W}与模型设定{self.img_size[1]}不符"

            h_patches, w_patches = self.grid_sizes[i]
            actual_patches = h_patches * w_patches

            # Sanity-check the position-embedding length (patches + class token).
            pos_embed = self.pos_embeds[i]
            expected_dim = actual_patches + 1
            assert pos_embed.shape[1] == expected_dim, \
                f"尺度{i}位置编码维度{pos_embed.shape[1]}与分块数{actual_patches}+1不匹配"

            # 1. Patch embedding: [B, 3, H, W] -> [B, N_patches, E],
            #    then prepend the class token and add position encoding.
            feat = pe(x)  # [B, E, H_patch, W_patch]
            feat = feat.flatten(2).transpose(1, 2)  # [B, N_patches, E]
            feat = torch.cat((self.class_token.expand(B, -1, -1), feat), dim=1)
            # Fix: out-of-place add (was `feat += pos_embed`) so an autograd
            # intermediate is not mutated in place.
            feat = feat + pos_embed

            # 2. Shallow encoder: local feature extraction.
            shallow_feat = shallow_encoder(feat)

            # 3. Deep encoder: global semantic aggregation, with a skip
            #    connection from the shallow output.
            residual = shallow_feat

            # Optional gradient checkpointing for the deep stage.
            if self.use_checkpoint and self.apply_checkpoint:
                deep_feat = self.apply_checkpoint(deep_encoder, shallow_feat)
            else:
                deep_feat = deep_encoder(shallow_feat)

            deep_feat = residual + skip_conn(deep_feat)  # cross-stage skip

            # 4. Global feature from the deep class token.
            global_feature = deep_feat[:, 0]  # class-token position
            global_feature = self.feat_norm(global_feature)
            global_feature = F.normalize(global_feature, dim=-1)
            multi_scale_feats.append(global_feature)

            # 5. Part features from the shallow tokens (richer local detail).
            local_features = []
            shallow_patches = shallow_feat[:, 1:]  # drop the class token
            # Reshape to [B, local_parts, part_height, part_width, E].
            shallow_patches = shallow_patches.reshape(B, self.local_parts,
                                                    self.part_height[i], self.part_width[i],
                                                    self.embed_dim)
            for p in range(self.local_parts):
                part_feature = shallow_patches[:, p].reshape(B, -1, self.embed_dim)
                part_feature = local_attention[p](part_feature)  # part attention
                part_feature = F.normalize(part_feature, dim=-1)
                local_features.append(part_feature)
            multi_scale_local.append(local_features)

        return multi_scale_feats, multi_scale_local

    def forward(self, x):
        """Full forward pass.

        Args:
            x: image batch ``[B, 3, H, W]`` matching ``img_size``.

        Returns:
            dict with keys:
                'global': per-scale global features, each ``[B, embed_dim]``.
                'local': per-scale lists of part features, each ``[B, embed_dim]``.
                'fused': fused feature ``[B, embed_dim * total_parts * 2]``.
                'logits': identity logits ``[B, num_classes]``.
                'part_logits': auxiliary per-part identity logits.
                'bn_feature': L2-normalised BN-neck feature (for retrieval).
        """
        x = self.grad_check(x)
        global_feats, multi_scale_local = self.forward_features(x)

        # (Fix: removed a dead `all_feats` accumulation that was never used.)

        # Fuse all global + local features into a single vector.
        fused = self.fusion(global_feats=global_feats, local_feats=multi_scale_local)
        fused_bn = self.bn_neck(fused)
        fused_bn_normalized = F.normalize(fused_bn, dim=1)

        # Identity classification on the (un-normalised) BN-neck feature.
        logits = self.head(fused_bn)

        # Auxiliary per-part classification results.
        part_logits = []
        for i, scale_local in enumerate(multi_scale_local):
            for p, feat in enumerate(scale_local):
                part_logits.append(self.part_classifiers[i][p](feat))

        return {
            'global': global_feats,
            'local': multi_scale_local,
            'fused': fused,
            'logits': logits,
            'part_logits': part_logits,
            'bn_feature': fused_bn_normalized,
        }



# Local-feature enhancement module for one part stripe.
class LocalFeatureModule(nn.Module):
    """Refine the tokens of a single part stripe and pool them to one vector.

    Pipeline: learned position embedding -> multi-head self-attention with
    residual + LayerNorm -> MLP with residual + LayerNorm -> squeeze-style
    channel gating -> mean pooling over the token axis.
    """

    def __init__(self, embed_dim, num_heads, mlp_ratio, part_height, part_width):
        super().__init__()
        hidden = int(embed_dim * mlp_ratio)
        reduced = embed_dim // 4
        self.attention = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)
        self.norm1 = nn.LayerNorm(embed_dim)
        self.mlp = nn.Sequential(
            nn.Linear(embed_dim, hidden),
            nn.GELU(),
            nn.Linear(hidden, embed_dim),
        )
        self.norm2 = nn.LayerNorm(embed_dim)
        # Channel gate: pooled token -> sigmoid weights per channel.
        self.channel_att = nn.Sequential(
            nn.Linear(embed_dim, reduced),
            nn.ReLU(),
            nn.Linear(reduced, embed_dim),
            nn.Sigmoid(),
        )
        # Position table sized for the full stripe (part_height * part_width).
        self.pos_embed = nn.Parameter(
            torch.randn(1, part_height * part_width, embed_dim)
        )

    def forward(self, x):
        """Map part tokens ``[B, N, D]`` to a pooled descriptor ``[B, D]``."""
        n_tokens = x.shape[1]
        # Slice the position table in case fewer tokens arrive than planned.
        tokens = x + self.pos_embed[:, :n_tokens]
        attended, _ = self.attention(tokens, tokens, tokens)
        tokens = self.norm1(tokens + attended)
        tokens = self.norm2(tokens + self.mlp(tokens))
        gate = self.channel_att(tokens.mean(dim=1, keepdim=True))
        return (tokens * gate).mean(dim=1)


# Dynamic multi-scale feature fusion module.
class FeatureFusion(nn.Module):
    """Fuse per-scale global and part features into one vector.

    Pipeline: channel attention -> per-part spatial attention -> learned
    softmax fusion weights -> self-attention interaction across parts ->
    concatenation of weighted and interacted features, LayerNorm'ed.

    Output dimension is ``embed_dim * num_scales * (local_parts + 1) * 2``.
    """

    def __init__(self, embed_dim, num_scales, local_parts):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_scales = num_scales
        self.local_parts = local_parts
        self.num_parts = num_scales * (local_parts + 1)
        self.total_dim = embed_dim * self.num_parts * 2

        squeeze = embed_dim // 4

        # Channel attention: 1x1 convolutions over the channel axis.
        self.channel_att = nn.Sequential(
            nn.Conv1d(embed_dim, squeeze, kernel_size=1),
            nn.ReLU(),
            nn.Conv1d(squeeze, embed_dim, kernel_size=1),
            nn.Sigmoid(),
        )

        # Spatial attention: one sigmoid importance score per part.
        self.spatial_att = nn.Sequential(
            nn.Conv2d(embed_dim, 1, kernel_size=1),
            nn.Sigmoid(),
        )

        # Per-part scalar scoring MLPs producing dynamic fusion weights.
        self.fusion_weights = nn.ModuleList(
            nn.Sequential(
                nn.Linear(embed_dim, squeeze),
                nn.ReLU(),
                nn.Linear(squeeze, 1),
            )
            for _ in range(self.num_parts)
        )

        # Cross-part interaction via self-attention.
        self.attention = nn.MultiheadAttention(
            embed_dim,
            num_heads=min(4, self.num_parts),
            batch_first=True,
        )

        self.norm = nn.LayerNorm(self.total_dim)

    def forward(self, global_feats, local_feats):
        """Fuse features.

        Args:
            global_feats: list of ``[B, D]`` tensors, one per scale.
            local_feats: list (per scale) of lists of ``[B, D]`` tensors.

        Returns:
            Fused feature ``[B, total_dim]``.
        """
        batch = global_feats[0].shape[0]

        # Gather one global + local_parts features per scale.
        parts = []
        for scale in range(self.num_scales):
            parts.append(global_feats[scale])
            parts.extend(local_feats[scale])

        # Part-count sanity check.
        assert len(parts) == self.num_parts, \
            f"特征数不匹配！期望{self.num_parts}，实际{len(parts)}"

        stacked = torch.stack(parts, dim=1)  # [B, P, D]

        # Channel attention (runs on [B, D, P], result back to [B, P, D]).
        ch_gate = self.channel_att(stacked.permute(0, 2, 1)).permute(0, 2, 1)
        ch_weighted = stacked * ch_gate  # [B, P, D]

        # Spatial attention over parts via a dummy trailing spatial dim.
        sp_gate = self.spatial_att(ch_weighted.permute(0, 2, 1).unsqueeze(3))
        sp_gate = sp_gate.squeeze(3).permute(0, 2, 1)  # [B, P, 1]
        sp_weighted = ch_weighted * sp_gate

        # Dynamic per-part weights, softmax-normalised across parts.
        scores = torch.stack(
            [self.fusion_weights[p](sp_weighted[:, p]) for p in range(self.num_parts)],
            dim=1,
        )  # [B, P, 1]
        mix = F.softmax(scores, dim=1)
        weighted = sp_weighted * mix  # [B, P, D]

        # Feature interaction across parts.
        interacted, _ = self.attention(weighted, weighted, weighted)

        # Concatenate weighted + interacted streams -> [B, P*D*2].
        fused = torch.cat(
            [weighted.reshape(batch, -1), interacted.reshape(batch, -1)],
            dim=1,
        )
        return self.norm(fused)
    


# Smoke-test / dimension-verification script.
if __name__ == "__main__":
    model = VehicleTransformer(
        img_size=(224, 224),
        patch_sizes=[16, 8],
        local_parts=7,
        embed_dim=128,
        num_heads=4,
        depth=4
    )
    dummy_input = torch.randn(2, 3, 224, 224)  # batch size 2
    outputs = model(dummy_input)

    with torch.no_grad():
        # Grab the first scale's embedding and encoders.
        pe = model.patch_embeds[0]
        shallow_encoder = model.shallow_encoders[0]
        deep_encoder = model.deep_encoders[0]
    
        # Replay the feature-processing pipeline manually.
        feat = pe(dummy_input)
        feat = feat.flatten(2).transpose(1, 2)
        feat = torch.cat((model.class_token.expand(2, -1, -1), feat), dim=1)
        feat += model.pos_embeds[0]  # pos_embeds are already initialised here
    
        shallow_feat = shallow_encoder(feat)
        deep_feat = deep_encoder(shallow_feat)
    
        print("浅层编码器输出维度:", shallow_feat.shape)  # expect [2, 197, 128]
        print("深层编码器输出维度:", deep_feat.shape)    # expect [2, 197, 128]
    
    # Check feature norms.
        global_feat = outputs['global'][0]  # first scale's global feature
        print("全局特征范数:", torch.norm(global_feat, dim=1))
    
        local_feat = outputs['local'][0][0]  # first scale, first part feature
        print("局部特征范数:", torch.norm(local_feat, dim=1))
    
    # Print classification-head info.
        print("分类层输入维度:", model.head[0].in_features)
        print("分类输出尺寸:", outputs['logits'].shape)

    # Verify the fusion module's output dimension.
        fused = outputs['fused']
        expected_dim = model.fusion.total_dim
        print(f"融合特征维度: {fused.shape} (期望: {expected_dim})")
        
    
    # Verify the attention weight distribution.
    with torch.no_grad():
        global_feats, local_feats = model.forward_features(dummy_input)
        all_feats = torch.stack(
            [global_feats[i] for i in range(len(global_feats))] + [feat for feats in local_feats for feat in feats],
            dim=1
        )
        fusion = model.fusion
        channel_att = fusion.channel_att(all_feats.transpose(1, 2)).transpose(1, 2)
        
        print("通道注意力权重分布:", channel_att.mean(dim=0).squeeze())  # mean channel weight per feature



    # Full dimension verification.
    with torch.no_grad():
        global_feats, local_feats = model.forward_features(dummy_input)
        fusion = model.fusion
        
        # Concatenate all features.
        all_feats = []
        for i in range(fusion.num_scales):
            all_feats.append(global_feats[i])
            all_feats.extend(local_feats[i])
        all_feats = torch.stack(all_feats, dim=1)  # [B, P, D]
        print(f"all_feats.shape: {all_feats.shape}")  # expect [2, 16, 128]: batch 2, 16 parts total (2*(7+1)), 128-dim features
        
        # Channel attention.
        channel_att = fusion.channel_att(all_feats.permute(0, 2, 1))  # [B, D, P]
        channel_att = channel_att.permute(0, 2, 1)  # [B, P, D]
        channel_weighted = all_feats * channel_att  # [B, P, D]
        print(f"channel_weighted.shape: {channel_weighted.shape}")  # expect [2, 16, 128]: per-channel weights for every part
        
        # Spatial attention.
        spatial_feat = channel_weighted.permute(0, 2, 1).unsqueeze(3)  # [B, D, P, 1]
        print(f"spatial_feat.shape: {spatial_feat.shape}")  # expect [2, 128, 16, 1]; trailing 1 is a dummy spatial dim for the 1x1 conv
        spatial_att = fusion.spatial_att(spatial_feat)  # [B, 1, P, 1]
        print(f"spatial_att.shape: {spatial_att.shape}")  # expect [2, 1, 16, 1]: first 1 is the conv's output channel, last 1 the spatial dim
        spatial_att = spatial_att.squeeze(1)  # [B, P, 1]
        print(f"squeezed spatial_att.shape: {spatial_att.shape}")  # expect [2, 16, 1]: dim=1 squeezed out so the broadcast multiply works
        spatial_weighted = channel_weighted * spatial_att  # [B, P, D]
        print(f"spatial_weighted.shape: {spatial_weighted.shape}")  # expect [2, 16, 128]: same shape as channel_weighted after spatial weighting
        
      
        # Dynamic weights.
        weights = []
        for i in range(fusion.num_parts):
            weight = fusion.fusion_weights[i](spatial_weighted[:, i])  # [B, 1]
            weights.append(weight)
        weights = torch.stack(weights, dim=1)  # [B, P, 1]
        weights = F.softmax(weights, dim=1) 
        print(f"weights.shape: {weights.shape}") # expect [2, 16, 1]; one dynamic weight per part
        
        # Weighted features.
        weighted_feats = spatial_weighted * weights  # [B, P, D]
        print(f"weighted_feats.shape: {weighted_feats.shape}")  # expect [2, 16, 128]: dynamic weights applied, shape preserved

#特征融合模块整体流程总结
#输入特征：2 个尺度的全局特征（2 个）+ 每个尺度 7 个局部特征（2×7=14 个），共 16 个特征部件。
#通道注意力：为每个部件的每个通道生成权重，增强关键语义信息。
#空间注意力：为每个部件生成空间重要性权重，聚焦关键区域。
#动态权重：通过全连接网络和 softmax 生成部件级权重，自适应融合不同特征。
#输出：加权后的特征保持原始形状，用于后续拼接和分类。

#数值意义说明
#张量中的数值是经过归一化或加权后的特征值，具体含义：
#global_feat norm: tensor([1.0000, 1.0000])：全局特征经过 F.normalize 后范数为 1，确保相似度计算合理。
#channel_att.mean(dim=0).squeeze()：通道注意力权重的均值，反映各部件的平均重要性（接近 0.5 表示均匀分布）。
#weights：通过 softmax 生成的动态权重，和为 1，数值越大表示对应部件越重要。

#通过这些张量形状和数值，可验证模型的特征融合逻辑是否正确，以及注意力机制是否按预期工作。


