import torch
import torch.optim as optim
from data import get_data_loaders
from model import FasterViT, TextFeatureExtractor, ContrastiveLoss, CrossModalModel
import torch.nn.functional as F
from tqdm import tqdm
import numpy as np
import argparse


def cosine_similarity(x, y):
    """Pairwise cosine similarity between the rows of two matrices.

    Args:
        x: (n, d) array.
        y: (m, d) array.

    Returns:
        (n, m) array where entry (i, j) is the cosine similarity between
        x[i] and y[j]; a small epsilon guards against division by zero.
    """
    row_norms_x = np.linalg.norm(x, axis=1)
    row_norms_y = np.linalg.norm(y, axis=1)
    # Outer product of the row norms gives the (n, m) denominator grid.
    return (x @ y.T) / (np.outer(row_norms_x, row_norms_y) + 1e-8)


def i2t(images, captions, npts=None):
    """Image-to-text retrieval metrics.

    Assumes each image has exactly 5 captions stored contiguously:
    captions[5*i : 5*i + 5] belong to images[i].

    Args:
        images: (n, d) array of image embeddings.
        captions: (5*n, d) array of caption embeddings.
        npts: number of images to evaluate; defaults to all of them.
            (Bug fix: this argument was previously ignored and always
            overwritten with images.shape[0].)

    Returns:
        (r1, r5, r10, medr, meanr): Recall@{1,5,10} in percent, plus the
        median and mean rank (both 1-based).
    """
    if npts is None:
        npts = images.shape[0]
    ranks = np.zeros(npts)

    # Similarity matrix: one row per image, one column per caption.
    sims = cosine_similarity(images, captions)

    for index in range(npts):
        inds = np.argsort(sims[index])[::-1]
        # Best (lowest) retrieval rank among this image's 5 ground-truth captions.
        ranks[index] = min(
            np.where(inds == i)[0][0]
            for i in range(5 * index, 5 * index + 5)
        )

    # Recall@K: fraction of images whose best caption ranks in the top K.
    r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
    r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
    medr = np.floor(np.median(ranks)) + 1
    meanr = ranks.mean() + 1
    return r1, r5, r10, medr, meanr

def t2i(images, captions):
    """Text-to-image retrieval metrics.

    Assumes each image has exactly 5 captions stored contiguously, so the
    ground-truth image for caption j is images[j // 5].

    Args:
        images: (n, d) array of image embeddings.
        captions: (5*n, d) array of caption embeddings.

    Returns:
        (r1, r5, r10, medr, meanr): Recall@{1,5,10} in percent, plus the
        median and mean rank (both 1-based).
    """
    n_images = images.shape[0]
    n_captions = 5 * n_images
    ranks = np.zeros(n_captions)

    # Transposed similarity matrix: one row per caption, one column per image.
    sims = cosine_similarity(images, captions).T

    for cap_idx in range(n_captions):
        order = np.argsort(sims[cap_idx])[::-1]
        # Position of the caption's ground-truth image in the ranking.
        ranks[cap_idx] = np.where(order == cap_idx // 5)[0][0]

    # Recall@K: fraction of captions whose image ranks in the top K.
    total = len(ranks)
    r1 = 100.0 * len(np.where(ranks < 1)[0]) / total
    r5 = 100.0 * len(np.where(ranks < 5)[0]) / total
    r10 = 100.0 * len(np.where(ranks < 10)[0]) / total
    medr = np.floor(np.median(ranks)) + 1
    meanr = ranks.mean() + 1
    return r1, r5, r10, medr, meanr


# 1. Training / validation routines
def train_epoch(model, loader, optimizer, device, opt, scheduler):
    """Run one training epoch and return the mean batch loss.

    Args:
        model: cross-modal model mapping (images, txt_emb) -> (img_feats, txt_feats).
        loader: yields (images, txt_emb, cap_len, _) batches.
        optimizer: optimizer over model.parameters().
        device: torch device the batch tensors are moved to.
        opt: parsed CLI options, forwarded to ContrastiveLoss.
        scheduler: LR scheduler, stepped once per epoch.

    Returns:
        Average loss over all batches in the epoch.
    """
    model.train()
    total_loss = 0.0
    # NOTE(review): margin is hard-coded to 0.5 here, ignoring opt.margin
    # (CLI default 0.2) — confirm which value is intended.
    criterion = ContrastiveLoss(opt, margin=0.5)
    for images, txt_emb, cap_len, _ in tqdm(loader):
        images = images.to(device)
        txt_emb = txt_emb.to(device)

        optimizer.zero_grad()
        img_feats, txt_feats = model(images, txt_emb)
        loss = criterion(img_feats, txt_feats, cap_len, device)

        loss.backward()
        # If gradient clipping is re-enabled, it must run HERE — after
        # backward() (so gradients exist) and before step(). The previous
        # commented-out call sat before backward(), where it would have
        # been a silent no-op:
        # torch.nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip)
        optimizer.step()

        total_loss += loss.item()
    scheduler.step()
    return total_loss / len(loader)


# 2. Validation code (currently disabled)
# def validate(model, val_loader, device):
#     model.eval()
#     img_embs = []
#     txt_embs = []
#     img_names = []

#     # Extract features for the whole validation set
#     with torch.no_grad():
#         for images, txt_emb, names in tqdm(val_loader):
#             images = images.to(device)
#             txt_emb = txt_emb.to(device)
            
#             # Forward pass
#             img_feats, txt_feats = model(images, txt_emb)
            
#             # Mean pooling over the sequence dimension
#             img_feats = img_feats.mean(dim=1).cpu().numpy()
#             txt_feats = txt_feats.mean(dim=1).cpu().numpy()
            
#             img_embs.append(img_feats)
#             txt_embs.append(txt_feats)
#             img_names.extend(names)

#     # Concatenate all features
#     img_embs = np.concatenate(img_embs)
#     txt_embs = np.concatenate(txt_embs)
    
#     # Compute the similarity matrix
#     sims = cosine_similarity(img_embs, txt_embs)
    
#     # Compute retrieval metrics
#     (r1, r5, r10, medr, meanr) = i2t(img_embs, txt_embs)
#     (r1i, r5i, r10i, medri, meanri) = t2i(img_embs, txt_embs)
    
#     print("Image to text: R@1: {:.1f} R@5: {:.1f} R@10: {:.1f}".format(r1, r5, r10))
#     print("Text to image: R@1: {:.1f} R@5: {:.1f} R@10: {:.1f}".format(r1i, r5i, r10i))
    
#     return (r1 + r5 + r10 + r1i + r5i + r10i)


# 3. Main entry point
def main():
    """Parse CLI options, build the cross-modal model, and run training.

    Bug fix: --batch_size, --learning_rate and --num_epochs were parsed but
    ignored (the code hard-coded 32, 2e-4 and 10). They are now wired
    through; the argparse defaults are set to the previous hard-coded
    values, so a default invocation behaves exactly as before while the
    flags become functional.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', default='./data/',
                        help='path to datasets')
    parser.add_argument('--data_name', default='precomp',
                        help='{coco,f30k}_precomp')
    parser.add_argument('--vocab_path', default='./vocab/',
                        help='Path to saved vocabulary json files.')
    parser.add_argument('--margin', default=0.2, type=float,
                        help='Rank loss margin.')
    # Default 10 matches the previously hard-coded epoch count.
    parser.add_argument('--num_epochs', default=10, type=int,
                        help='Number of training epochs.')
    # Default 32 matches the previously hard-coded loader batch size.
    parser.add_argument('--batch_size', default=32, type=int,
                        help='Size of a training mini-batch.')
    parser.add_argument('--word_dim', default=300, type=int,
                        help='Dimensionality of the word embedding.')
    parser.add_argument('--embed_size', default=1024, type=int,
                        help='Dimensionality of the joint embedding.')
    parser.add_argument('--grad_clip', default=2., type=float,
                        help='Gradient clipping threshold.')
    parser.add_argument('--num_layers', default=1, type=int,
                        help='Number of GRU layers.')
    parser.add_argument('--learning_rate', default=.0002, type=float,
                        help='Initial learning rate.')
    parser.add_argument('--lr_update', default=15, type=int,
                        help='Number of epochs to update the learning rate.')
    parser.add_argument('--workers', default=10, type=int,
                        help='Number of data loader workers.')
    parser.add_argument('--log_step', default=10, type=int,
                        help='Number of steps to print and record the log.')
    parser.add_argument('--val_step', default=500, type=int,
                        help='Number of steps to run validation.')
    parser.add_argument('--logger_name', default='./runs/runX/log',
                        help='Path to save Tensorboard log.')
    parser.add_argument('--model_name', default='./runs/runX/checkpoint',
                        help='Path to save the model.')
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('--max_violation', action='store_true',
                        help='Use max instead of sum in the rank loss.')
    parser.add_argument('--img_dim', default=2048, type=int,
                        help='Dimensionality of the image embedding.')
    parser.add_argument('--no_imgnorm', action='store_true',
                        help='Do not normalize the image embeddings.')
    parser.add_argument('--no_txtnorm', action='store_true',
                        help='Do not normalize the text embeddings.')
    parser.add_argument('--raw_feature_norm', default="clipped_l2norm",
                        help='clipped_l2norm|l2norm|clipped_l1norm|l1norm|no_norm|softmax')
    parser.add_argument('--agg_func', default="LogSumExp",
                        help='LogSumExp|Mean|Max|Sum')
    parser.add_argument('--cross_attn', default="t2i",
                        help='t2i|i2t')
    parser.add_argument('--precomp_enc_type', default="basic",
                        help='basic|weight_norm')
    parser.add_argument('--bi_gru', action='store_true',
                        help='Use bidirectional GRU.')
    parser.add_argument('--lambda_lse', default=6., type=float,
                        help='LogSumExp temp.')
    parser.add_argument('--lambda_softmax', default=9., type=float,
                        help='Attention softmax temperature.')
    opt = parser.parse_args()

    # Image encoder (FasterViT backbone; classification head disabled).
    faster_vit = FasterViT(
        dim=96, in_dim=64, depths=[2, 2, 6, 2],
        window_size=[8, 8, 8, 8], ct_size=1, mlp_ratio=4,
        num_heads=[3, 6, 12, 24], resolution=224, num_classes=0
    )
    # Text encoder over precomputed 768-d token embeddings.
    txt_encoder = TextFeatureExtractor(
        input_dim=768, hidden_dim=512, num_tencoder=4,
        num_heads=8, max_seq_len=64, dropout=0.1
    )
    model = CrossModalModel(faster_vit, txt_encoder, 512, 512)

    # Data loaders now honor --batch_size (was hard-coded to 32).
    train_loader, val_loader, _ = get_data_loaders(batch_size=opt.batch_size)

    # Optimizer / scheduler; lr now honors --learning_rate (default unchanged).
    optimizer = optim.AdamW(model.parameters(), lr=opt.learning_rate, weight_decay=0.05)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model.to(device)

    # Best validation score seen so far; named to match the disabled
    # validation snippet below so it works if re-enabled.
    best_rsum = 0.0
    best_model_path = "best_model.pth"

    # Training loop now honors --num_epochs (was hard-coded to 10).
    for epoch in range(opt.num_epochs):
        train_loss = train_epoch(model, train_loader, optimizer, device, opt, scheduler)
        print('train_loss:', train_loss)
        # val_rsum = validate(model, val_loader, device)

        # # Save the best checkpoint
        # if val_rsum > best_rsum:
        #     best_rsum = val_rsum
        #     torch.save(model.state_dict(), best_model_path)


# Script entry point: run training when executed directly.
if __name__ == "__main__":
    main()