import torch 
from dataset import get_loader
import torch.nn.functional as F
import os 
import ml_collections
from blip import BLIP

# Device selection: prefer GPU when available.
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

# Closed vocabulary: the 10 CIFAR-10 class names (indices 0-9 double as class
# labels), the caption-template words, and the special tokens.
vocabulary = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck', "this", "is", "a", "picture", "of", "[PAD]", "[CLS]", "[END]", "[ENC]"]
vocab_size = len(vocabulary)
# word -> index lookup table (dict comprehension instead of an index loop).
vocab_d = {word: i for i, word in enumerate(vocabulary)}

def tokenize(text: list, max_len: int):
    """Convert whitespace-separated sentences into fixed-length index tensors.

    Args:
        text: list of sentences; every whitespace-split word must be a key
            of ``vocab_d`` (otherwise a KeyError is raised).
        max_len: padded/truncated sequence length of the output.

    Returns:
        A tuple ``(tokenized_text, self_attn_mask)`` on ``DEVICE``:
        ``tokenized_text`` -- (bs, max_len) word indices, padded with [PAD];
        ``self_attn_mask`` -- (bs, 1) count of valid tokens per sentence.
    """
    bs = len(text)
    pad_index = vocab_d["[PAD]"]  # filler index for unused positions
    self_attn_mask = torch.full([bs, 1], max_len)
    tokenized_text = torch.full([bs, max_len], pad_index)
    for i, sentence in enumerate(text):
        words = sentence.split()
        # Bug fix: truncate sentences longer than max_len instead of
        # indexing past the tensor bounds and recording an invalid length.
        n_valid = min(len(words), max_len)
        self_attn_mask[i][0] = n_valid
        for j in range(n_valid):
            tokenized_text[i][j] = vocab_d[words[j]]
    return tokenized_text.to(DEVICE), self_attn_mask.to(DEVICE)

def get_text_config():
    """Hyper-parameters for the text encoder, as an ml_collections ConfigDict."""
    cfg = ml_collections.ConfigDict()
    cfg.hidden_size = 256             # must equal the image-encoder hidden size
    cfg.mlp_dim = 1024                # conventionally 4x the transformer hidden size
    cfg.num_heads = 8
    cfg.all_head_size = 512
    cfg.num_block_layer = 3
    cfg.attention_dropout_rate = 0.05
    cfg.mlp_dropout_rate = 0.05
    cfg.vocabulary = vocabulary
    cfg.vocab_size = len(cfg.vocabulary)
    cfg.vocab_d = vocab_d
    cfg.alpha = 0.4                   # weight for the momentum-distillation loss
    cfg.device = DEVICE
    cfg.momentum = 0.995
    return cfg

def get_cifar_config():
    """Dataset/loader settings for CIFAR-10."""
    cfg = ml_collections.ConfigDict()
    cfg.dataset = "cifar10"
    cfg.class_num = 10                # CIFAR-10 has exactly 10 classes
    cfg.output_dir = "./output"
    cfg.img_size = 32                 # native CIFAR image resolution
    cfg.train_batch_size = 16
    cfg.eval_batch_size = 32
    cfg.device = DEVICE
    return cfg

def get_train_config():
    """Optimization/schedule settings for the training loop."""
    cfg = ml_collections.ConfigDict()
    cfg.epoch = 10
    cfg.lr = 1e-5
    cfg.weight_decay = 1e-4
    cfg.num_steps = 10
    # NOTE(review): "consine" looks like a typo for "cosine" -- left unchanged
    # because consumers of this value are outside this file; confirm before fixing.
    cfg.decay_type = "consine"
    cfg.warmup_steps = 5
    cfg.device = DEVICE
    return cfg

def evaluation(model, eval_loader, tokenized_all_class, all_class_self_attn_mask):
    """Zero-shot retrieval accuracy in both directions over the eval set.

    Args:
        model: BLIP-style model exposing ``text_enc``/``text_proj`` and
            ``img_enc``/``img_proj``.
        eval_loader: iterable of (images, labels) batches.
        tokenized_all_class: (class_num, max_len) tokenized class captions.
        all_class_self_attn_mask: (class_num, 1) valid-token counts.

    Returns:
        ``(acc_i2t, acc_t2i)`` -- image->text and text->image accuracy.
    """
    # Fix: run inference under no_grad -- the original tracked gradients
    # through the whole eval pass for no reason.
    with torch.no_grad():
        text_emb = model.text_enc(tokenized_all_class, self_attn_mask=all_class_self_attn_mask)  # (class_num, N, hidden)
        text_feat = F.normalize(model.text_proj(text_emb[:, 0, :]), dim=-1)  # (class_num, hidden)

        acc_i2t_num, acc_t2i_num = 0, 0
        total_i2t_num, total_t2i_num = 0, 0

        for x, y in eval_loader:
            x, y = x.to(DEVICE), y.to(DEVICE)
            total_i2t_num += len(y)
            img_emb = model.img_enc(x).unsqueeze(1)
            image_feat = F.normalize(model.img_proj(img_emb[:, 0, :]), dim=-1)  # (bs, hidden)

            # image -> text: predict the class whose caption is most similar.
            sim_i2t = (image_feat @ text_feat.t()).softmax(dim=1)  # (bs_img, class_num)
            predict_y = sim_i2t.max(dim=1).indices
            acc_i2t_num += (predict_y == y).sum().item()

            # text -> image: for each class caption pick the most similar image
            # in the batch; correct when that image really carries the label.
            # (Assumes each eval batch contains images from every class.)
            sim_t2i = (text_feat @ image_feat.t()).softmax(dim=1)  # (class_num, bs_img)
            predict_x = sim_t2i.max(dim=1).indices
            total_t2i_num += len(predict_x)
            class_ids = torch.arange(len(predict_x), device=y.device)
            acc_t2i_num += (y[predict_x] == class_ids).sum().item()

    acc_i2t = acc_i2t_num / total_i2t_num
    acc_t2i = acc_t2i_num / total_t2i_num
    return acc_i2t, acc_t2i

        

if __name__ == "__main__":
    tokenize_max_len = 10
    cifar_config = get_cifar_config()
    text_config = get_text_config()
    train_config = get_train_config()
    train_loader, test_loader = get_loader(cifar_config)
    model = BLIP(text_config).to(device=DEVICE)
    optimizer = torch.optim.SGD(model.parameters(), lr=train_config.lr,
                                weight_decay=train_config.weight_decay)

    # Resume from a checkpoint when one exists.  Bug fix: the original saved a
    # bare state_dict but tried to read checkpoint['optimizer_state'] on load,
    # so that lookup always raised KeyError and a bare `except: pass` silently
    # discarded the optimizer state.  We now save/load a dict holding both
    # (see torch.save at the bottom) and keep the exception handling narrow.
    if os.path.exists('model.pth'):
        checkpoint = torch.load('model.pth')
        if isinstance(checkpoint, dict) and 'model_state' in checkpoint:
            model.load_state_dict(checkpoint['model_state'])
            optimizer.load_state_dict(checkpoint['optimizer_state'])
        else:
            # Backward compatibility: old checkpoints were a bare state_dict.
            model.load_state_dict(checkpoint)

    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5000, eta_min=1e-6)

    # Pre-tokenize one caption per class, reused for every evaluation pass.
    all_classes_sentence = []
    for i in range(cifar_config.class_num):
        label_str = vocabulary[i]
        sentence = "[CLS] this is a picture of {} [END]".format(label_str)
        all_classes_sentence.append(sentence)
    tokenized_all_class, all_class_self_attn_mask = tokenize(all_classes_sentence, max_len=tokenize_max_len)

    # Sanity check of the input pipeline.
    x, y = next(iter(train_loader))
    print(x.shape)

    for e in range(train_config.epoch):
        model.train()
        for i, (x, y) in enumerate(train_loader):
            x, y = x.to(DEVICE), y.to(DEVICE)
            # Build a caption from each label in the batch.
            batch_sentences = []
            for label in y:
                label_word = vocabulary[int(label)]
                batch_sentences.append("[CLS] this is a picture of {} [END]".format(label_word))
            tokenized_text, self_attn_mask = tokenize(batch_sentences, max_len=tokenize_max_len)
            loss_itc, loss_itm = model(x, tokenized_text, y, self_attn_mask)
            if i % 100 == 0:
                # get_last_lr() replaces get_lr(), which is deprecated and
                # warns when called outside scheduler internals.
                print("epoch = {}, i = {}, lr={}".format(e, i, lr_scheduler.get_last_lr()))
                print("loss_itc:", loss_itc)
                print("loss_itm:", loss_itm)
            loss = loss_itc + loss_itm
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            lr_scheduler.step()

        model.eval()
        acc_i2t, acc_t2i = evaluation(model, test_loader, tokenized_all_class, all_class_self_attn_mask)
        print("epoch {}, acc_i2t = {}, acc_t2i = {}".format(e, acc_i2t, acc_t2i))

    # Save model AND optimizer state so the resume logic above works.
    torch.save({'model_state': model.state_dict(),
                'optimizer_state': optimizer.state_dict()}, "./model.pth")
