import os

import torch
import torch.nn as nn
import torch.optim as optim
from torch.cuda import amp
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader, Sampler
from torchvision import transforms
import random
from collections import defaultdict

from tqdm import tqdm

from dataset.make_dataloader import make_dataloader
from dataset.market1501 import Market1501  # 直接调用用户提供的 Market1501 数据解析代码
from PIL import Image

from vit import TransReID

# ---- Training configuration ----
BATCH_SIZE = 64  # samples per batch
N_IDENTITY = 4  # distinct person IDs per batch (PK sampling)
M_IMAGES = BATCH_SIZE // N_IDENTITY  # images per person ID within a batch
EMBED_DIM = 768  # transformer embedding dimension
EPOCHS = 500  # number of training epochs
LR = 0.0003  # learning rate
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
DATA_PATH = "./data"  # dataset root directory
IMG_HEIGHT=224  # input image height in pixels
IMG_WIDTH=224  # input image width in pixels

SAVE_PATH = "./checkpoints"  # checkpoint output directory
os.makedirs(SAVE_PATH, exist_ok=True)  # create the directory if it does not exist

def save_model(epoch, model, optimizer, loss, path=SAVE_PATH):
    """Save a training checkpoint: model weights, optimizer state and loss.

    Args:
        epoch: 1-based epoch number, embedded in the checkpoint filename.
        model: module whose ``state_dict`` is saved.
        optimizer: optimizer whose ``state_dict`` is saved (enables resuming).
        loss: loss value stored for bookkeeping.
        path: directory the checkpoint file is written into.
    """
    checkpoint = {
        "epoch": epoch,
        "model_state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
        "loss": loss,
    }
    # os.path.join handles trailing slashes / separators robustly,
    # unlike manual f-string path concatenation.
    save_file = os.path.join(path, f"vit_epoch_{epoch}.pth")
    torch.save(checkpoint, save_file)
    print(f"Model saved at epoch {epoch}: {save_file}")
# Custom pre-norm Transformer encoder layer implementing QKV self-attention.
class TransformerEncoderLayer(nn.Module):
    """One transformer encoder block: multi-head self-attention + MLP.

    Fixes over the previous version:
      * residual (skip) connections were missing around both sub-layers,
        which makes a deep (12-layer) stack effectively untrainable;
      * ``self.fc_out`` (the attention output projection) was created but
        never applied in ``forward``.
    """

    def __init__(self, embed_dim, num_heads, forward_expansion=4, dropout=0.3):
        super(TransformerEncoderLayer, self).__init__()
        self.num_heads = num_heads
        self.embed_dim = embed_dim

        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"

        # Single linear layer projecting the input to Q, K and V at once.
        self.qkv_proj = nn.Linear(embed_dim, embed_dim * 3)
        self.fc_out = nn.Linear(embed_dim, embed_dim)
        self.scale = (self.head_dim) ** 0.5  # attention scaling factor sqrt(d_k)

        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)

        self.feed_forward = nn.Sequential(
            nn.Linear(embed_dim, embed_dim * forward_expansion),
            nn.ReLU(),
            nn.Linear(embed_dim * forward_expansion, embed_dim)
        )

        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # --- self-attention sub-layer (pre-norm + residual) ---
        residual = x
        h = self.norm1(x)
        batch_size, seq_len, embed_dim = h.shape
        # Project to Q, K, V and split heads:
        # [batch, seq, 3*dim] -> [3, batch, heads, seq, head_dim]
        qkv = self.qkv_proj(h).reshape(batch_size, seq_len, 3, self.num_heads, self.head_dim)
        q, k, v = qkv.permute(2, 0, 3, 1, 4)

        # Scaled dot-product attention: softmax(QK^T / sqrt(d_k)) V.
        # Each row of attn_probs sums to 1 (one query's weights over all keys).
        attn_scores = (q @ k.transpose(-2, -1)) / self.scale
        attn_probs = torch.softmax(attn_scores, dim=-1)
        attn_output = (attn_probs @ v).transpose(1, 2).reshape(batch_size, seq_len, embed_dim)
        # Output projection (previously defined but never used).
        attn_output = self.fc_out(attn_output)
        x = residual + self.dropout(attn_output)

        # --- feed-forward sub-layer (pre-norm + residual) ---
        x = x + self.dropout(self.feed_forward(self.norm2(x)))
        return x


# Custom ViT backbone: patchify with a strided conv, prepend a CLS token,
# add learned positional embeddings, then run a stack of encoder layers.
class VisionTransformer(nn.Module):
    def __init__(self, img_height=IMG_HEIGHT, img_width=IMG_WIDTH,patch_size=16, in_channels=3, embed_dim=EMBED_DIM, num_heads=8, num_layers=12,num_classes=751):
        super(VisionTransformer, self).__init__()
        self.num_patches = (img_height // patch_size) * (img_width // patch_size)

        # Patch embedding: a conv whose kernel and stride both equal the patch
        # size slices the image into non-overlapping patches and linearly
        # projects each one to embed_dim in a single operation.
        self.patch_embed = nn.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)

        # Learnable classification (CLS) token, shared across the batch.
        self.cls_token = nn.Parameter(torch.randn(1, 1, embed_dim))

        # Learnable positional encoding for num_patches + 1 tokens,
        # Xavier-initialized.
        self.pos_embed = nn.Parameter(torch.empty(1, self.num_patches + 1, embed_dim))
        nn.init.xavier_uniform_(self.pos_embed)

        # Hand-rolled transformer encoder stack.
        self.encoder_layers = nn.ModuleList([
            TransformerEncoderLayer(embed_dim, num_heads) for _ in range(num_layers)
        ])

        # Classification head on top of the CLS feature.
        self.mlp_head = nn.Sequential(
            nn.LayerNorm(embed_dim),
            nn.Linear(embed_dim, num_classes),
        )

    def forward(self, x):
        n = x.size(0)
        # Patchify: conv output [n, embed_dim, H/ps, W/ps] is flattened and
        # transposed into a token sequence [n, num_patches, embed_dim].
        patches = self.patch_embed(x).flatten(2).transpose(1, 2)
        # Prepend one CLS token per sample -> [n, num_patches + 1, embed_dim].
        tokens = torch.cat((self.cls_token.expand(n, -1, -1), patches), dim=1)
        # Positional encoding has shape [1, num_patches + 1, embed_dim] and
        # broadcasts over the batch dimension.
        tokens = tokens + self.pos_embed

        for encoder in self.encoder_layers:
            tokens = encoder(tokens)

        # The CLS token serves as the global image representation.
        cls_feat = tokens[:, 0]
        return cls_feat, self.mlp_head(cls_feat)





# Custom triplet (PK) sampler: guarantees each batch has several images per ID.
class TripletSampler(Sampler):
    """PK sampler: every batch contains ``n_identity`` distinct person IDs with
    ``batch_size // n_identity`` images each, so each batch has both positives
    (same ID, different images) and negatives (different IDs).
    """

    def __init__(self, dataset, batch_size, n_identity):
        """
        Args:
            dataset: iterable of ``(img_path, pid, camid)`` tuples (Market1501 style).
            batch_size: number of samples per batch.
            n_identity: number of distinct person IDs per batch.
        """
        self.dataset = dataset
        self.batch_size = batch_size
        self.n_identity = n_identity
        self.m_images = batch_size // n_identity  # images per ID in a batch
        self.data_by_id = defaultdict(list)
        for idx, (img_path, pid, camid) in enumerate(dataset):
            self.data_by_id[pid].append(idx)  # group sample indices by person ID
        self.pids = list(self.data_by_id.keys())

    def __iter__(self):
        random.shuffle(self.pids)  # new random ID order each epoch
        batch = []
        final_idxs = []
        for pid in self.pids:
            pool = self.data_by_id[pid]
            # Sample without replacement, then pad with replacement so every
            # ID contributes exactly m_images indices even when it has fewer images.
            samples = random.sample(pool, min(self.m_images, len(pool)))
            while len(samples) < self.m_images:
                samples.append(random.choice(pool))
            batch.extend(samples)
            if len(batch) >= self.batch_size:
                final_idxs.extend(batch[:self.batch_size])  # emit one full batch
                batch = []
        return iter(final_idxs)

    def __len__(self):
        """Total number of indices yielded per epoch.

        Must match ``__iter__`` exactly so the DataLoader knows when an epoch
        ends: only FULL batches are emitted, and each group of n_identity IDs
        fills exactly one batch, so trailing IDs that cannot complete a batch
        are dropped. (The previous ``len(pids) * m_images`` over-counted
        whenever ``len(pids)`` was not a multiple of ``n_identity``.)
        """
        return (len(self.pids) // self.n_identity) * self.batch_size


# Build the data pipeline. The dataset object is still required by the
# DataLoader for index-based access; a batch sampler only produces indices.
train_loader, val_loader, query_num, classes_num, cam_num, view_num=make_dataloader()
# Model
model = VisionTransformer(num_classes=classes_num).to(DEVICE)

# ID (classification) loss
criterion_id = nn.CrossEntropyLoss()

# Optimizer: use the LR constant instead of duplicating the literal 0.0003,
# so changing LR actually changes the optimizer's learning rate.
optimizer = torch.optim.SGD(model.parameters(), lr=LR, momentum=0.9)
scheduler = CosineAnnealingLR(optimizer, T_max=50, eta_min=0)
# Mixed-precision gradient scaler: forward/backward run in FP16 while weight
# updates stay in FP32. FP16's narrow range can make gradients underflow or
# overflow, so the scaler multiplies the loss before backward and unscales
# the gradients before the optimizer step to keep them representable.
scaler = amp.GradScaler()
def anp(features, n_images=8, n_identity=None, m_images=None):
    """Mine (anchor, hard-positive, hard-negative) triplets from a PK batch.

    ``features`` is assumed to be grouped by identity: the k-th identity
    occupies rows ``[k * m_images, (k + 1) * m_images)``.

    Args:
        features: [batch, dim] feature tensor, grouped by ID as above.
        n_images: anchors sampled (without replacement) per identity;
            must not exceed ``m_images``.
        n_identity: number of identities in the batch; defaults to the
            module-level ``N_IDENTITY`` for backward compatibility.
        m_images: images per identity; defaults to the module-level ``M_IMAGES``.

    Returns:
        Tuple ``(anchors, positives, negatives)`` of three
        [n_identity * n_images, dim] tensors.
    """
    if n_identity is None:
        n_identity = N_IDENTITY
    if m_images is None:
        m_images = M_IMAGES

    dist_mat = torch.cdist(features, features, p=2)  # [batch, batch] pairwise L2
    anchors, positives, negatives = [], [], []
    for i in range(n_identity):
        id_start = i * m_images  # first row of this identity's block
        id_end = id_start + m_images  # one past the last row
        # Distinct anchor indices for this identity.
        anchor_candidates = random.sample(range(id_start, id_end), n_images)
        for anchor_idx in anchor_candidates:
            # Hard positive: the same-ID sample farthest from the anchor.
            positive_dists = dist_mat[anchor_idx, id_start:id_end]
            hard_positive_idx = id_start + torch.argmax(positive_dists).item()

            # Hard negative: the nearest sample with a different ID
            # (mask out the anchor's own ID block with +inf).
            negative_dists = dist_mat[anchor_idx].clone()
            negative_dists[id_start:id_end] = float("inf")
            hard_negative_idx = torch.argmin(negative_dists).item()

            anchors.append(features[anchor_idx])
            positives.append(features[hard_positive_idx])
            negatives.append(features[hard_negative_idx])
    # torch.stack accepts a list of equally-shaped tensors.
    return torch.stack(anchors), torch.stack(positives), torch.stack(negatives)

def evaluate(model, query_loader, gallery_loader):
    """Compute Rank-1 accuracy of the model on a query/gallery split.

    Extracts features for every query and gallery image, builds the
    query-gallery Euclidean distance matrix, and counts how often the nearest
    gallery image shares the query's person ID.

    Args:
        model: network returning ``(features, logits)`` for an image batch.
        query_loader: DataLoader yielding ``(imgs, labels, _)`` query batches.
        gallery_loader: DataLoader yielding ``(imgs, labels, _)`` gallery batches.

    Returns:
        Rank-1 accuracy as a float in [0, 1].
    """
    model.eval()

    def _extract(loader):
        # Run the model over a loader and collect features/labels on CPU.
        feats, labels = [], []
        with torch.no_grad():
            for imgs, lbls, _ in loader:
                imgs = imgs.to(DEVICE)
                f, _ = model(imgs)
                feats.append(f.cpu())
                labels.append(lbls)
        return torch.cat(feats, dim=0), torch.cat(labels, dim=0)

    query_features, query_labels = _extract(query_loader)
    gallery_features, gallery_labels = _extract(gallery_loader)

    # Euclidean distance between every query and every gallery image.
    dist_matrix = torch.cdist(query_features, gallery_features)

    # Rank-1: the nearest gallery sample must share the query's ID.
    # A single argmin over the gallery axis replaces the per-row argsort
    # (O(Q*G) instead of O(Q*G log G)) and gives the same nearest index.
    nearest = dist_matrix.argmin(dim=1)
    correct = (gallery_labels[nearest] == query_labels).sum().item()

    rank1_acc = correct / len(query_features)
    print(f"Evaluation Rank-1 Accuracy: {rank1_acc * 100:.2f}%")
    return rank1_acc
def triplet_loss(feats, pids):
    """Batch-hard triplet loss with margin 0.3.

    For every anchor with at least one positive (same ID, another image) and
    one negative (different ID) in the batch, mine the hardest positive
    (largest distance) and hardest negative (smallest distance) and
    accumulate ``max(0, d(a, p) - d(a, n) + margin)``, averaged over anchors.

    BUG FIX: the previous version computed the distance matrix entirely under
    ``torch.no_grad()`` and read the loss straight out of it, so the triplet
    term contributed NO gradient to training. Mining still happens without
    gradients, but the two selected distances are recomputed differentiably.

    Args:
        feats: [batch, dim] feature tensor (requires grad during training).
        pids: [batch] person-ID tensor aligned with ``feats``.

    Returns:
        Scalar tensor: mean triplet loss, or zero if no valid triplet exists.
    """
    margin = 0.3
    # Hard-example mining only — gradients are not needed here.
    with torch.no_grad():
        dist_mat = torch.cdist(feats, feats, p=2)  # [batch, batch] pairwise L2

    losses = []
    for i in range(feats.size(0)):
        same = pids == pids[i]
        pos_indices = same.nonzero(as_tuple=False).flatten()
        neg_indices = (~same).nonzero(as_tuple=False).flatten()
        pos_indices = pos_indices[pos_indices != i]  # exclude the anchor itself
        if len(pos_indices) == 0 or len(neg_indices) == 0:
            continue  # no valid triplet for this anchor
        distances = dist_mat[i]
        hard_pos = pos_indices[distances[pos_indices].argmax()]  # farthest positive
        hard_neg = neg_indices[distances[neg_indices].argmin()]  # closest negative
        # Recompute the two distances WITH gradient so the loss trains feats.
        d_pos = torch.norm(feats[i] - feats[hard_pos], p=2)
        d_neg = torch.norm(feats[i] - feats[hard_neg], p=2)
        losses.append(torch.clamp(d_pos - d_neg + margin, min=0.0))

    if losses:
        return torch.stack(losses).mean()
    return torch.zeros(1, device=feats.device)

def train():
    """Full training loop: ID (cross-entropy) loss + triplet loss under AMP.

    Uses the module-level model, loaders, optimizer, scheduler and scaler.
    Saves a checkpoint every 10 epochs via ``save_model``.
    """
    model.train()
    print("开始训练。。。")
    for epoch in range(EPOCHS):
        total_loss = 0.0
        for imgs, pids, camid, _ in tqdm(train_loader, desc=f"Epoch {epoch+1}/{EPOCHS} Training"):
            imgs, pids = imgs.to(DEVICE), pids.to(DEVICE)
            # Forward pass under autocast: FP16 compute, FP32 master weights.
            with amp.autocast(enabled=True):
                features, logits = model(imgs)
                loss_id = criterion_id(logits, pids)  # ID classification loss
                loss_trip = triplet_loss(features, pids)  # batch-hard triplet loss
                loss = loss_id + loss_trip

            optimizer.zero_grad()
            scaler.scale(loss).backward()  # scaled backward to avoid FP16 underflow
            scaler.step(optimizer)  # unscales gradients, then updates weights
            scaler.update()
            total_loss += loss.item()
            acc = (logits.max(1)[1] == pids).float().mean()  # last-batch train accuracy

        # Step the cosine schedule once per EPOCH. Stepping per batch (as
        # before) sweeps the entire T_max=50 cosine within the first 50
        # iterations instead of 50 epochs.
        scheduler.step()
        avg_loss = total_loss / (len(train_loader))
        print(f"Epoch {epoch + 1}/{EPOCHS}, Loss: {avg_loss:.4f}，acc：{acc:4f}")
        # Save a checkpoint every 10 epochs.
        if (epoch + 1) % 10 == 0:
            save_model(epoch + 1, model, optimizer, avg_loss)
            model.train()

if __name__ == "__main__":
    train()  # run training
