import math
import os
import random
import time
from collections import defaultdict
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch import optim

from torch.cuda import amp
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader, Sampler
from torchvision import transforms
from timm.data.random_erasing import RandomErasing
from tqdm import tqdm
import torchvision.transforms as T


from dataset.make_dataloader import make_dataloader
# Import the Market1501 dataset class from the provided file
from dataset.market1501 import Market1501, ImageDataset

from utils.meter import AverageMeter
from utils.metrics import R1_mAP_eval
from vit import TransReID

# ---------------------
# Hyperparameters and settings
# ---------------------
IMG_HEIGHT, IMG_WIDTH = 224, 224   # image size
EMBED_DIM = 768                  # embedding dimension for transformer
NUM_HEADS = 8                    # number of attention heads in the transformer
NUM_LAYERS = 12                  # number of transformer encoder layers
BATCH_SIZE = 32                  # training batch size
LEARNING_RATE = 0.0003           # initial learning rate for optimizer
MARGIN = 0.3                     # margin for triplet loss
EPOCHS = 50                      # total training epochs
EVAL_FREQ = 1                   # evaluate every 3 epochs
DATA_PATH = "./data"  # 数据集根路径
OUTPUT_DIR="./output"
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # use GPU if available


# Build the Market-1501 dataloaders plus the dataset statistics
# (identity/camera/view counts) used to size the model heads below.
train_loader, val_loader, query_num, classes_num, cam_num, view_num = make_dataloader()

criterion_ce = nn.CrossEntropyLoss()  # cross-entropy loss for identity classification

# TransReID backbone. NOTE(review): built with num_heads=12 and literal
# embed_dim/depth; the NUM_HEADS/EMBED_DIM/NUM_LAYERS constants above are unused here.
model = TransReID(
    img_size=[224, 224], patch_size=16, stride_size=16, in_chans=3,
    num_classes=classes_num, embed_dim=768, depth=12, num_heads=12,
    mlp_ratio=4., qkv_bias=False, qk_scale=None,
    drop_rate=0., attn_drop_rate=0.,
    camera=cam_num, view=view_num,
    drop_path_rate=0.1, norm_layer=nn.LayerNorm,
    local_feature=False, sie_xishu=1.0,
).to(DEVICE)

# Resume from a saved state_dict. BUGFIX: map_location was hard-coded to
# "cuda", which raises on CPU-only machines — map onto DEVICE instead.
checkpoint = torch.load("output/transformer_49.pth", map_location=DEVICE)
model.load_state_dict(checkpoint, strict=False)  # checkpoint is a raw state_dict, not a wrapper dict

optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=0.9)
# One cosine annealing cycle spanning the whole run (T_max counts scheduler steps).
scheduler = CosineAnnealingLR(optimizer, T_max=EPOCHS, eta_min=0)

loss_meter = AverageMeter()   # running average of total loss
acc_meter = AverageMeter()    # running average of batch classification accuracy
evaluator = R1_mAP_eval(query_num, max_rank=50, feat_norm=True)
scaler = amp.GradScaler()     # gradient scaler for mixed-precision training
def _batch_hard_triplet_loss(feats, pids, margin=MARGIN):
    """Batch-hard triplet loss over one mini-batch.

    For each anchor, mine the farthest positive (same pid) and the nearest
    negative (different pid), then average max(0, d_pos - d_neg + margin)
    over all anchors that have at least one positive and one negative.

    BUGFIX: the original wrapped torch.cdist in torch.no_grad() and then fed
    those distances into the loss, so the triplet term carried NO gradient.
    Here the distance matrix is differentiable; only the hard-example
    *selection* uses a detached copy (argmax/argmin need no gradient).

    Returns a scalar float32 tensor; 0.0 if the batch has no valid triplet.
    """
    dist_mat = torch.cdist(feats, feats, p=2)  # [B, B] Euclidean distances, with grad
    mining = dist_mat.detach()                 # gradient-free view for hard mining
    loss = torch.zeros((), device=feats.device)
    num_triplets = 0
    for anchor in range(feats.size(0)):
        same_id = pids == pids[anchor]
        pos_idx = same_id.nonzero(as_tuple=False).flatten()
        pos_idx = pos_idx[pos_idx != anchor]   # a sample is not its own positive
        neg_idx = (~same_id).nonzero(as_tuple=False).flatten()
        if len(pos_idx) == 0 or len(neg_idx) == 0:
            continue  # anchor has no usable triplet in this batch
        hard_pos = pos_idx[mining[anchor, pos_idx].argmax()]
        hard_neg = neg_idx[mining[anchor, neg_idx].argmin()]
        loss = loss + F.relu(dist_mat[anchor, hard_pos] - dist_mat[anchor, hard_neg] + margin)
        num_triplets += 1
    return loss / num_triplets if num_triplets > 0 else loss


for epoch in range(EPOCHS):
    model.train()
    loss_meter.reset()  # BUGFIX: was never reset, so "Average Loss" mixed all epochs
    acc_meter.reset()
    evaluator.reset()

    for imgs, pids, camids, viewids in tqdm(train_loader, desc=f"Epoch {epoch+1}/{EPOCHS} Training"):
        imgs = imgs.to(DEVICE)
        pids = pids.to(DEVICE)
        with amp.autocast(enabled=True):
            # Forward pass: classification logits + feature embeddings.
            logits, feats = model(imgs, pids, camids, viewids)
            loss_ce = criterion_ce(logits, pids)
            loss_triplet = _batch_hard_triplet_loss(feats, pids, MARGIN)
            loss = loss_ce + loss_triplet

        acc = (logits.max(1)[1] == pids).float().mean()
        acc_meter.update(acc.item(), 1)  # .item(): store a plain float, not a tensor

        # Backpropagation with AMP gradient scaling.
        optimizer.zero_grad()
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
        loss_meter.update(loss.item(), imgs.shape[0])

    # BUGFIX: CosineAnnealingLR(T_max=EPOCHS-scale) is an epoch-level schedule;
    # stepping it per batch (as before) decayed the LR to ~eta_min within the
    # first epoch. Step once per epoch instead.
    scheduler.step()

    print(f"Epoch [{epoch+1}/{EPOCHS}] - Average Loss: {loss_meter.avg:.4f} - Average Acc: {acc_meter.avg:.4f}")

    # Save a checkpoint every 10 epochs.
    if (epoch + 1) % 10 == 0:
        torch.save(model.state_dict(), os.path.join(OUTPUT_DIR, 'transformer_{}.pth'.format(epoch + 1)))

    # Evaluate every 5 epochs.
    if (epoch + 1) % 5 == 0:
        model.eval()
        for n_iter, (img, vid, camid, camids, target_view, _) in enumerate(val_loader):
            with torch.no_grad():
                img = img.to(DEVICE)
                camids = camids.to(DEVICE)
                target_view = target_view.to(DEVICE)
                feat = model(img, vid, cam_label=camids, view_label=target_view)
                evaluator.update((feat, vid, camid))
        cmc, mAP, _, _, _, _, _ = evaluator.compute()
        # BUGFIX: log the same 1-based epoch number as the training line above
        # (was the 0-based `epoch`).
        print("Validation Results - Epoch: {}".format(epoch + 1))
        print("mAP: {:.1%}".format(mAP))
        for r in [1, 5, 10]:
            print("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
        torch.cuda.empty_cache()