import os

import torch
import torch.nn as nn
import torch.optim as optim
from timm.data.random_erasing import RandomErasing
from torch.cuda import amp
from torch.utils.data import DataLoader, Sampler
from torchvision import transforms
import random
from collections import defaultdict

from tqdm import tqdm

from dataset.make_dataloader import make_dataloader
from dataset.market1501 import Market1501, ImageDataset  # directly reuses the user-provided Market1501 dataset-parsing code
import torchvision.transforms as T
from PIL import Image
import torch.nn.functional as F
from utils.meter import AverageMeter
from utils.metrics import R1_mAP_eval
from vit import TransReID, trunc_normal_

# Configuration parameters
BATCH_SIZE = 32  # samples per batch
N_IDENTITY = 4  # number of distinct person IDs per batch
M_IMAGES = BATCH_SIZE // N_IDENTITY  # images per person ID within a batch
EMBED_DIM = 768  # feature (embedding) dimension
EPOCHS = 300  # number of training epochs
LR = 0.0003  # learning rate
MARGIN = 0.3  # margin used by the triplet loss
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # pick GPU when available, else CPU
DATA_PATH = "./data"  # dataset root path
IMG_HEIGHT=224  # input image height fed to the ViT
IMG_WIDTH=224  # input image width fed to the ViT
OUTPUT_DIR="./MYoutput"  # directory for periodic state_dict snapshots
SAVE_PATH = "./checkpoints"  # directory for full checkpoints (model + optimizer)
os.makedirs(SAVE_PATH, exist_ok=True)  # create the checkpoint directory if missing

def save_model(epoch, model, optimizer, loss, path=SAVE_PATH):
    """Save a training checkpoint: model weights, optimizer state and loss.

    Args:
        epoch: Current epoch number, embedded in the checkpoint file name.
        model: Model whose ``state_dict`` is saved.
        optimizer: Optimizer whose ``state_dict`` is saved.
        loss: Loss value stored alongside the states.
        path: Target directory (defaults to ``SAVE_PATH``).
    """
    save_dict = {
        "epoch": epoch,
        "model_state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
        "loss": loss,
    }
    # Build the file path once (and portably) so the saved location and the
    # log message can never disagree; the original concatenated "/" by hand
    # in two separate f-strings.
    file_path = os.path.join(path, f"vit_epoch_{epoch}.pth")
    torch.save(save_dict, file_path)
    print(f" Model saved at epoch {epoch}: {file_path}")
# Custom Transformer encoder layer implementing multi-head QKV self-attention
# (pre-norm style, as used in ViT).
class TransformerEncoderLayer(nn.Module):
    def __init__(self, embed_dim, num_heads, forward_expansion=4, dropout=0.3):
        super(TransformerEncoderLayer, self).__init__()
        self.num_heads = num_heads
        self.embed_dim = embed_dim

        self.head_dim = embed_dim // num_heads

        assert self.head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"

        # Single linear projection producing Q, K and V in one pass.
        self.qkv_proj = nn.Linear(embed_dim, embed_dim * 3)
        self.fc_out = nn.Linear(embed_dim, embed_dim)
        self.scale = (self.head_dim) ** 0.5  # attention scaling factor sqrt(d_k)

        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)

        self.feed_forward = nn.Sequential(
            nn.Linear(embed_dim, embed_dim * forward_expansion),
            nn.ReLU(),
            nn.Linear(embed_dim * forward_expansion, embed_dim)
        )

        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # --- Pre-norm multi-head self-attention with residual connection ---
        # BUG FIX: the original forward had no residual (skip) connections at
        # all, and applied dropout to the attention output twice (once before
        # and once inside norm2). A standard Transformer encoder computes
        # x + sublayer(norm(x)) for both sub-layers; without the residuals the
        # stack cannot train stably and is incompatible with pretrained ViT
        # weights loaded elsewhere in this file.
        residual = x
        h = self.norm1(x)
        batch_size, seq_len, embed_dim = h.shape
        # (B, S, 3*E) -> (3, B, num_heads, S, head_dim), then split into Q, K, V.
        qkv = self.qkv_proj(h).reshape(batch_size, seq_len, 3, self.num_heads, self.head_dim)
        q, k, v = qkv.permute(2, 0, 3, 1, 4)
        # Scaled dot-product attention: softmax(QK^T / sqrt(d_k)) V.
        # Each row of attn_probs sums to 1 (one query's weights over all keys).
        attn_scores = (q @ k.transpose(-2, -1)) / self.scale
        attn_probs = torch.softmax(attn_scores, dim=-1)
        attn_probs = self.dropout(attn_probs)
        attn_output = (attn_probs @ v).transpose(1, 2).reshape(batch_size, seq_len, embed_dim)
        attn_output = self.fc_out(attn_output)
        x = residual + self.dropout(attn_output)

        # --- Pre-norm feed-forward (MLP) with residual connection ---
        x = x + self.dropout(self.feed_forward(self.norm2(x)))
        return x
# Custom ViT backbone
class VisionTransformer(nn.Module):
    def __init__(self, img_height=IMG_HEIGHT, img_width=IMG_WIDTH,patch_size=16, in_channels=3, embed_dim=EMBED_DIM, num_heads=8, num_layers=12,num_classes=751):
        super(VisionTransformer, self).__init__()
        # Number of non-overlapping patches the image is split into.
        self.num_patches = (img_height  // patch_size) * (img_width // patch_size)

        # Patch embedding: a conv whose kernel_size equals the patch size and
        # whose stride equals the patch size, so each application covers exactly
        # one non-overlapping patch and projects it to an embed_dim vector.
        self.patch_embed = nn.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)

        # Classification token (CLS token), prepended to the patch sequence.
        self.cls_token = nn.Parameter(torch.randn(1, 1, embed_dim))

        # Learnable position embedding for the CLS token plus all patches,
        # initialized with Xavier-uniform (rather than plain randn).
        self.pos_embed = nn.Parameter(torch.empty(1, self.num_patches + 1, embed_dim))
        nn.init.xavier_uniform_(self.pos_embed)

        # Hand-written Transformer encoder layers.
        self.encoder_layers = nn.ModuleList([
            TransformerEncoderLayer(embed_dim, num_heads) for _ in range(num_layers)
        ])
        self.layerNorm = nn.LayerNorm(embed_dim)
        # Classification head: BatchNorm followed by a linear classifier.
        self.mlp_head = nn.Sequential(
            nn.BatchNorm1d(embed_dim),
            nn.Linear(embed_dim, num_classes),
        )
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Per-module weight initialization, applied recursively via self.apply().
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
        elif isinstance(m, nn.Conv2d):
            # Kaiming normal init for conv layers, suited to ReLU activations.
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        batch_size = x.shape[0]
        # With the default 224x224 input and patch_size=16, patch_embed(x) has
        # shape [batch_size, embed_dim, 14, 14]; flatten(2) gives
        # [batch_size, embed_dim, 196] and transpose(1, 2) yields
        # [batch_size, 196, embed_dim] — one token per patch.
        x = self.patch_embed(x).flatten(2).transpose(1, 2)  # split into patches and flatten to token vectors
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)  # one extra CLS token per sample (num_patches + 1 tokens total)
        x = torch.cat((cls_tokens, x), dim=1)  # prepend the CLS token to the patch tokens
        # Add position embeddings; broadcasting expands pos_embed's leading
        # dimension ([1, num_patches+1, embed_dim]) to the batch size.
        x = x + self.pos_embed

        for layer in self.encoder_layers:
            x = layer(x)  # pass through each Transformer encoder layer in order
        x = self.layerNorm(x)
        x = x[:, 0]  # take the CLS token as the final feature (same as x[:, 0, :])
        if self.training:
            logits = self.mlp_head(x)  # classification logits from the head
            # NOTE: training mode returns (features, logits) IN THAT ORDER;
            # callers must unpack accordingly.
            return x, logits
        else:
            return x


# Initialize the model
model = VisionTransformer(num_classes=751).to(DEVICE)
# BUG FIX: map_location was hard-coded to "cuda", which crashes torch.load on
# CPU-only machines; map the checkpoint onto the selected DEVICE instead.
checkpoint = torch.load("pretrain/jx_vit_base_p16_224-80ecf9dd.pth", map_location=DEVICE)
# strict=False: the hand-written module names do not all match the pretrained
# checkpoint, so only the tensors whose names/shapes match are loaded.
model.load_state_dict(checkpoint, strict=False)
# Loss functions: ID (cross-entropy) loss and triplet loss.
criterion_id = nn.CrossEntropyLoss()
criterion_triplet = nn.TripletMarginLoss(margin=MARGIN)  # reuse the shared MARGIN constant instead of a duplicated literal

# Optimizer
optimizer = optim.Adam(model.parameters(), lr=LR)



# Build the DataLoaders plus dataset statistics (query size, #classes, #cams, #views).
train_loader, val_loader, query_num, classes_num, cam_num, view_num = make_dataloader()

# Training loop
def train():
    """Run the full training loop.

    Per epoch: AMP forward/backward with ID (cross-entropy) loss plus a
    batch-hard triplet loss, then periodic checkpointing and Re-ID evaluation
    (mAP / CMC) every 30 epochs.
    """
    loss_meter = AverageMeter()
    acc_meter = AverageMeter()
    evaluator = R1_mAP_eval(query_num, max_rank=50, feat_norm=True)
    scaler = amp.GradScaler()
    for epoch in range(EPOCHS):
        model.train()
        # BUG FIX: loss_meter was never reset, so the reported per-epoch
        # average silently mixed in losses from all previous epochs.
        loss_meter.reset()
        acc_meter.reset()
        evaluator.reset()

        for (imgs, pids, camids, viewids) in tqdm(train_loader, desc=f"Epoch {epoch + 1}/{EPOCHS} Training"):
            # Move data to device
            imgs = imgs.to(DEVICE)
            pids = pids.to(DEVICE)
            with amp.autocast(enabled=True):
                # BUG FIX: in training mode the model returns
                # (features, logits) in that order; the original unpacked
                # `logits, feats = model(imgs)`, feeding 768-dim features to
                # the classification loss (which did not crash only because
                # all pids < 768).
                feats, logits = model(imgs)
                loss_ce = criterion_id(logits, pids)
                loss_triplet = _batch_hard_triplet_loss(feats, pids)
                # Total loss is the combination of cross-entropy and triplet losses.
                loss = loss_ce + loss_triplet
            # Backpropagation
            acc = (logits.max(1)[1] == pids).float().mean()
            acc_meter.update(acc, 1)
            optimizer.zero_grad()
            scaler.scale(loss).backward()
            scaler.step(optimizer)  # update weights; the scaler handles gradient un-scaling
            scaler.update()
            loss_meter.update(loss.item(), imgs.shape[0])
        print(f"Epoch [{epoch + 1}/{EPOCHS}] - Average Loss: {loss_meter.avg:.4f} - Average Acc: {acc_meter.avg:.4f}")
        # Save a checkpoint every 30 epochs.
        # BUG FIX: `epoch+1 % 30 == 0` parsed as `epoch + (1 % 30)` due to
        # operator precedence, so the original condition was truthy every
        # epoch and a checkpoint was written each time.
        if (epoch + 1) % 30 == 0:
            torch.save(model.state_dict(), os.path.join(OUTPUT_DIR, 'transformer_{}.pth'.format(epoch)))
        # Evaluate every 30 epochs.
        if (epoch + 1) % 30 == 0:
            model.eval()
            print("测试中。。。")
            for n_iter, (img, vid, camid, _, _, _) in enumerate(val_loader):
                with torch.no_grad():
                    img = img.to(DEVICE)
                    feat = model(img)
                    evaluator.update((feat, vid, camid))
            cmc, mAP, _, _, _, _, _ = evaluator.compute()
            print("Validation Results - Epoch: {}".format(epoch))
            print("mAP: {:.1%}".format(mAP))
            for r in [1, 5, 10]:
                print("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
            torch.cuda.empty_cache()


def _batch_hard_triplet_loss(feats, pids):
    """Batch-hard triplet loss with in-batch mining.

    For each anchor, picks the hardest (farthest) positive and hardest
    (closest) negative in the batch and applies a hinge with MARGIN.

    Args:
        feats: [batch, dim] feature embeddings (gradients flow through them).
        pids: [batch] person-ID labels.

    Returns:
        Tensor: average triplet loss over valid anchors, or 0 when the batch
        contains no valid (anchor, positive, negative) triplet.
    """
    # BUG FIX: the distance matrix was originally computed entirely under
    # torch.no_grad(), so the triplet loss carried no gradient and never
    # trained the features. Only the *mining* (index selection) may be
    # gradient-free; the distances used in the loss must stay differentiable.
    dist_mat = torch.cdist(feats, feats, p=2)  # Euclidean distance matrix, [batch, batch]
    loss_triplet = torch.zeros(1, device=feats.device)
    num_triplets = 0
    for i in range(feats.size(0)):
        pid_anchor = pids[i].item()
        distances = dist_mat[i]
        # Positives share the anchor's person ID; negatives do not.
        pos_indices = (pids == pid_anchor).nonzero(as_tuple=False).flatten()
        neg_indices = (pids != pid_anchor).nonzero(as_tuple=False).flatten()
        if len(pos_indices) > 1 and len(neg_indices) > 0:
            pos_indices = pos_indices[pos_indices != i]  # drop the anchor itself
            with torch.no_grad():
                # Hard positive: farthest same-ID sample; hard negative:
                # closest different-ID sample (index selection only).
                hard_pos_index = pos_indices[distances[pos_indices].argmax().item()].item()
                hard_neg_index = neg_indices[distances[neg_indices].argmin().item()].item()
            d_pos = distances[hard_pos_index]
            d_neg = distances[hard_neg_index]
            # Hinge: max(0, d(anchor, pos) - d(anchor, neg) + MARGIN).
            loss_triplet += F.relu(d_pos - d_neg + MARGIN)
            num_triplets += 1
    if num_triplets > 0:
        return loss_triplet / num_triplets
    return torch.tensor(0.0, device=feats.device)

if __name__ == "__main__":
    train()  # run training only when executed as a script
