import os
import time

import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

import arguments
import data
from arguments import getparser
from data import MyDataset
from mymodel import MyModule
from utils import WriterSingleton, LoggerSingleton, save_checkpoint
from valtest import encode_data_u, compute_sim_i2t, recall_at_k, calc_sims_metric, compute_sim_t2i
from vocab import deserialize_vocab

# Restrict the process to GPU 0 only (set before any CUDA work happens below).
os.environ["CUDA_VISIBLE_DEVICES"] = "0"


def main(opt):
    """Train and validate the retrieval model described by ``opt``.

    Builds the vocabulary and data loaders, constructs the model,
    optionally resumes from a checkpoint, then runs the epoch loop:
    train one epoch, validate, track the best R@sum, and save a
    checkpoint after every epoch.

    Args:
        opt: parsed argument namespace from ``arguments.getparser()``.
    """
    if not os.path.exists(opt.logs_dir):
        os.makedirs(opt.logs_dir)
    writer = WriterSingleton(opt.logs_dir)
    logger = LoggerSingleton(opt.logs_dir, opt.suffix)

    # Load the vocabulary and add the mask token (used for cloze testing).
    vocab = deserialize_vocab(os.path.join(opt.vocab_path, f'{opt.data_name}.json'))
    vocab.add_word('<mask>')  # add the mask, for testing cloze
    opt.vocab_size = len(vocab)
    arguments.save_parameters(opt, opt.logs_dir)

    collate_fn = data.Collate()
    # NOTE(review): the *training* loader is built from the 'val' split with
    # train=False — this looks like a debugging leftover; confirm whether it
    # should be MyDataset(opt, 'train', vocab, train=True).
    traindatas = MyDataset(opt, 'val', vocab, train=False)
    trainloader = DataLoader(dataset=traindatas, batch_size=opt.batch_size, shuffle=True, collate_fn=collate_fn,
                             pin_memory=True, drop_last=True)
    valdatas = MyDataset(opt, 'val', vocab, train=False)
    valloader = DataLoader(dataset=valdatas, batch_size=opt.batch_size, shuffle=False, collate_fn=collate_fn,
                           pin_memory=True, drop_last=False)
    logger.info(f'train data len:{len(traindatas)}')
    logger.info(f'val data len:{len(valdatas)}')

    # Build the model.
    model = MyModule(opt, logger, writer)
    model.log_structure()

    lr_schedules = [opt.lr_update, ]
    start_epoch = 0
    # BUGFIX: initialize best_rsum/best_epoch BEFORE the resume block.
    # Previously best_rsum was reset to 0 *after* being loaded from the
    # checkpoint, so a resumed run always discarded the saved best score.
    best_rsum = 0
    best_epoch = 0

    # Optionally resume training from a checkpoint.
    if opt.resume:
        if os.path.isfile(opt.resume):
            logger.info("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            start_epoch = checkpoint['epoch']
            best_rsum = checkpoint['best_rsum']
            model.load_state_dict(checkpoint['model'])
            # eiter is used so logs continue from the previous run's iteration count.
            model.eiter = checkpoint['eiter']
            logger.info(f"=> loaded checkpoint '{opt.resume}' (epoch {start_epoch}, best_rsum {best_rsum})")
            if opt.reset_start_epoch:
                start_epoch = 0
        else:
            logger.info(f"no checkpoint found at '{opt.resume}'")

    num_epoches = opt.epoches
    # Train / validate loop.
    for epoch in range(start_epoch, num_epoches):
        logger.info(50 * '-' + f'Epoch:{epoch + 1}' + 50 * '-')
        # First epoch trains without max-violation; all later epochs use it.
        model.set_max_violation(epoch != 0)
        adjust_learning_rate(opt, model.optimizer, epoch, lr_schedules)
        time.sleep(1.0)
        train(opt, trainloader, model, epoch, num_epoches, logger, writer)

        val_maps = valdatas.l_map
        rsum = validate(opt, valloader, model, val_maps, epoch, logger, writer)

        # Remember best R@sum and save checkpoint.
        is_best = rsum > best_rsum
        best_rsum = max(rsum, best_rsum)
        if is_best:
            best_epoch = epoch + 1
        save_checkpoint({
            'epoch': epoch + 1,
            'model': model.state_dict(),
            'best_rsum': best_rsum,
            'opt': opt,
            'eiter': model.eiter,
        }, is_best, prefix=opt.result_model, filename=f'ck_{epoch + 1}.pth', )
        logger.info(f'best epoch: {best_epoch}')
        logger.info(f'best rsum: {best_rsum}')


def train(opt, train_loader, model, epoch, epoches, log, writer):
    """Run one training epoch over ``train_loader``.

    Logs trainable-parameter counts once, then iterates batches, calling
    ``model.train_emb`` and recording loss/iteration to TensorBoard.

    Args:
        opt: argument namespace (unused directly here; kept for call symmetry).
        train_loader: DataLoader yielding
            (images, img_len, captions, cap_len, _, _, _) tuples.
        model: project model exposing train_start/train_emb/eiter.
        epoch: 0-based current epoch index.
        epoches: total number of epochs (display only).
        log: logger instance.
        writer: TensorBoard-style summary writer.
    """
    log.info(f'img encoder trainable parameters: \t{count_params(model.img_enc)}')
    log.info(f'txt encoder trainable parameters: \t{count_params(model.txt_enc)}')
    bs = train_loader.batch_size
    # BUGFIX: use the exact number of batches. With drop_last=True this is
    # floor(len(dataset) / bs); the previous round(len(dataset)/bs, 1) gave a
    # fractional float that skewed both the progress display and warmup_alpha.
    batch_num = len(train_loader)
    loop = tqdm(train_loader, desc='Train', ncols=120)
    for i, train_data in enumerate(loop):
        model.train_start()
        (images, img_len,
         captions, cap_len,
         _, _, _) = train_data
        if epoch == 1:
            # Ramp a warmup coefficient linearly over this epoch —
            # presumably a loss weight; semantics live in model.train_emb.
            warmup_alpha = float(i) / batch_num
            loss = model.train_emb(images, captions, cap_len, img_len, warmup_alpha=warmup_alpha, )
        else:
            loss = model.train_emb(images, captions, cap_len, img_len)

        # Record training progress.
        writer.add_scalar('Train/epoch', epoch, model.eiter)
        writer.add_scalar('Train/iter', i, model.eiter)
        writer.add_scalar('Train/loss', round(loss, 2), model.eiter)
        loop.set_description(f'Train:\t[{epoch + 1}/{epoches}][{i + 1}/{batch_num}][{model.eiter}][{bs}]')
        loop.set_postfix({'loss': round(loss, 2)})


def adjust_learning_rate(opt, optimizer, epoch, lr_schedules):
    """Decay every param group's learning rate by 10x at scheduled epochs.

    Does nothing unless ``epoch`` appears in ``lr_schedules``.
    """
    log = LoggerSingleton(opt.logs_dir, opt.suffix)
    if epoch not in lr_schedules:
        return
    log.info(f'Current epoch num is {epoch}, decrease all lr by 10')
    for group in optimizer.param_groups:
        new_lr = group['lr'] * 0.1
        group['lr'] = new_lr
        log.info(f'new lr {new_lr}')


def count_params(model):
    """Return the number of trainable parameters in ``model``.

    Counts only parameters with ``requires_grad=True``. Uses
    ``Tensor.numel()`` instead of ``np.prod(p.size())``: for a 0-d
    (scalar) parameter the shape is empty and ``np.prod(())`` yields the
    float ``1.0``, which silently turned the total into a float. This
    version always returns a plain ``int``.
    """
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


def validate(opt, loader, model, labels, epoch, log=None, writer=None):
    """Evaluate retrieval recall on the validation set.

    Encodes all images and captions, builds similarity matrices for both
    retrieval directions, and computes R@1/5/10 for i2t and t2i.

    Args:
        opt: argument namespace (unused directly here).
        loader: validation DataLoader.
        model: project model exposing val_start and the encoders.
        labels: label map used to build the 0/1 relevance matrix.
        epoch: 0-based epoch index (used as the TensorBoard step).
        log: optional logger; metrics are only logged when provided.
        writer: optional summary writer; scalars only written when provided.

    Returns:
        The sum of the six recall values (R@sum).
    """
    model.val_start()
    with torch.no_grad():
        img_embs, cap_embs = encode_data_u(model, loader, log, writer)
    # Ground-truth 0/1 relevance matrix derived from the labels.
    sims_01 = calc_sims_metric(labels)
    # Similarity matrices — the end goal for both retrieval directions.
    sims_i2t = compute_sim_i2t(img_embs, cap_embs)
    sims_t2i = compute_sim_t2i(img_embs, cap_embs)
    r10 = recall_at_k(sims_i2t, sims_01, 10, 'i2t')
    r5 = recall_at_k(sims_i2t, sims_01, 5, 'i2t')
    r1 = recall_at_k(sims_i2t, sims_01, 1, 'i2t')
    r10_ = recall_at_k(sims_t2i, sims_01, 10, 't2i')
    r5_ = recall_at_k(sims_t2i, sims_01, 5, 't2i')
    r1_ = recall_at_k(sims_t2i, sims_01, 1, 't2i')
    rsum = r10 + r10_ + r5_ + r5 + r1 + r1_
    # BUGFIX: log/writer default to None but were used unconditionally,
    # which raised AttributeError when the defaults were taken.
    if writer is not None:
        writer.add_scalar('Val/R1 i2t(%)', r1, epoch)
        writer.add_scalar('Val/R5 i2t(%)', r5, epoch)
        writer.add_scalar('Val/R10 i2t(%)', r10, epoch)
        writer.add_scalar('Val/R1 t2i(%)', r1_, epoch)
        writer.add_scalar('Val/R5 t2i(%)', r5_, epoch)
        writer.add_scalar('Val/R10 t2i(%)', r10_, epoch)
        writer.add_scalar('Val/RSUM (%)', rsum, epoch)
    if log is not None:
        log.info(f"------------ Val:{epoch + 1} ------------")
        log.info(f"i2t R10: {r10}%\tR5: {r5}%\tR1: {r1}%")
        log.info(f"t2i R10: {r10_}%\tR5: {r5_}%\tR1: {r1_}%\tRSUM: {rsum}")
    return rsum


# Script entry point: parse CLI options, tag the run as 'train', and launch.
if __name__ == '__main__':
    cli_opts = getparser()
    cli_opts.suffix = 'train'
    main(cli_opts)
