import torch
from torch import nn

from dataset.make_dataloader import make_duke_dataloader
from utils.meter import AverageMeter
from utils.metrics import R1_mAP_eval
from vit import TransReID
# ---------------------
# Hyperparameters and settings
# ---------------------
IMG_HEIGHT, IMG_WIDTH = 224, 224   # image size
EMBED_DIM = 768                  # embedding dimension for transformer
NUM_HEADS = 8                    # number of attention heads — NOTE(review): the model below is built with num_heads=12; confirm which is correct
NUM_LAYERS = 12                  # number of transformer encoder layers
BATCH_SIZE = 32                  # training batch size (unused in this eval-only script)
LEARNING_RATE = 0.0003           # initial learning rate for optimizer (unused in this eval-only script)
MARGIN = 0.3                     # margin for triplet loss (unused in this eval-only script)
EPOCHS = 50                      # total training epochs (unused in this eval-only script)
EVAL_FREQ = 1                   # evaluate every EVAL_FREQ epochs
DATA_PATH = "./data"  # dataset root path
OUTPUT_DIR="./output"  # directory holding checkpoints / outputs
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # use GPU if available

# Build the dataloaders.
# NOTE(review): the original comment said "Market-1501" but the loader is
# make_duke_dataloader (DukeMTMC-reID) — confirm which dataset is intended.
train_loader, val_loader, query_num, classes_num, cam_num, view_num = make_duke_dataloader()

# Construct the TransReID model on the evaluation device.
# NOTE(review): num_heads=12 here disagrees with the NUM_HEADS=8 constant
# above — kept at 12 so the architecture matches the trained checkpoint.
model = TransReID(
    img_size=[IMG_HEIGHT, IMG_WIDTH],
    patch_size=16,
    stride_size=16,
    in_chans=3,
    num_classes=classes_num,
    embed_dim=EMBED_DIM,
    depth=NUM_LAYERS,
    num_heads=12,
    mlp_ratio=4.,
    qkv_bias=False,
    qk_scale=None,
    drop_rate=0.,
    attn_drop_rate=0.,
    camera=cam_num,
    view=view_num,
    drop_path_rate=0.1,
    norm_layer=nn.LayerNorm,
    local_feature=False,
    sie_xishu=1.0,
).to(DEVICE)

# BUG FIX: map_location was hard-coded to "cuda", which crashes on CPU-only
# machines even though DEVICE already falls back to CPU. Map to DEVICE instead.
checkpoint = torch.load("output/transformer_40.pth", map_location=DEVICE)
# The file stores a raw state_dict (not a {"model_state_dict": ...} wrapper),
# so load it directly; strict=False tolerates missing/extra keys.
model.load_state_dict(checkpoint, strict=False)

# This script only evaluates: the optimizer, cross-entropy criterion, and
# loss/accuracy meters the original created here were never used and have
# been removed as dead code.

evaluator = R1_mAP_eval(query_num, max_rank=50, feat_norm=True)
model.eval()
# no_grad hoisted around the whole loop — gradients are never needed here.
with torch.no_grad():
    for n_iter, (img, vid, camid, camids, target_view, _) in enumerate(val_loader):
        img = img.to(DEVICE)
        camids = camids.to(DEVICE)
        target_view = target_view.to(DEVICE)
        # Forward pass returns the feature embedding used for retrieval.
        feat = model(img, vid, cam_label=camids, view_label=target_view)
        evaluator.update((feat, vid, camid))

# Aggregate retrieval metrics: CMC curve and mean average precision.
cmc, mAP, _, _, _, _, _ = evaluator.compute()
print("Validation Results - ")
print("mAP: {:.1%}".format(mAP))
for r in [1, 5, 10]:
    print("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))
torch.cuda.empty_cache()