from options.base_options import print_options_with_logging
from util.util import init_random_seed
from util import log
from options.train_options import TrainOptions
import logging
from data_loader.msvd_dataloader import MSVDDataLoader
from utils import todevice, loss_dependence, common_loss, batch_accuracy, step_decay, save_checkpoint
from model.msvqa import MSVQA
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import torch
import sys
from termcolor import colored
import os
from validate import validate

###############################
# Initialize options, logging, seed, tensorboard
###############################
opt = TrainOptions().parse()  # parse command-line training options
log.init_logger(opt.save_dir, opt.exp_name)  # initialize the logger (file + console)
print_options_with_logging(opt)  # dump all parsed options to the log
init_random_seed(opt.random_seed)  # seed RNGs for reproducibility (see util.init_random_seed)
writer = SummaryWriter('/root/tf-logs/' + opt.exp_name)  # tensorboard writer (NOTE(review): hard-coded log root)


###############################
# Initialize dataloaders
###############################
def _loader_kwargs(question_pt, annotation_file):
    """Build the MSVDDataLoader kwargs shared by the train and val loaders.

    Only the question .pt file and the annotation file differ between the
    two loaders; everything else comes from the parsed options.
    """
    return {
        'question_pt': question_pt,
        'vocab_json': opt.vocab_json,
        'batch_size': opt.batch_size,
        'annotation_file': annotation_file,
        'video_name_mapping': opt.video_name_mapping,
        'video_dir': opt.video_dir,
        'num_clips': opt.num_clips,
        'num_frames_per_clip': opt.num_frames_per_clip,
        'image_height': opt.image_height,
        'image_width': opt.image_width,
        # NOTE(review): shuffle=True on the *validation* loader is unusual —
        # confirm it is intentional (stage-1 validation below only looks at
        # the first 11 batches, so shuffling changes which samples it sees).
        'shuffle': True,
        'is_video_preprocessed': opt.is_video_preprocessed,
    }

logging.info("Create train_loader")
train_loader = MSVDDataLoader(**_loader_kwargs(opt.train_question_pt, opt.train_annotation_file))
logging.info("number of train instances: {}".format(len(train_loader.dataset)))  # 30933
logging.info("number of train batches: {}".format(len(train_loader)))
logging.info("Create val_loader")
val_loader = MSVDDataLoader(**_loader_kwargs(opt.val_question_pt, opt.val_annotation_file))
logging.info("number of val instances: {}".format(len(val_loader.dataset)))
logging.info("number of val batches: {}".format(len(val_loader)))


###############################
# Initialize model (stage one)
###############################
def _num_trainable_params(module):
    """Count the trainable (requires_grad) parameters of *module*."""
    total = 0
    for p in module.parameters():
        if p.requires_grad:
            total += p.numel()
    return total

model = MSVQA(
    resnext101_pretrained_path=opt.resnext101_pretrained_path,
    vocab=train_loader.vocab,
    batch_size=opt.batch_size,
    num_clips=opt.num_clips,
    num_frames_per_clip=opt.num_frames_per_clip,
    image_height=opt.image_height,
    image_width=opt.image_width,
    num_of_nodes=opt.num_of_nodes,
    graph_module=opt.graph_module,
    graph_layers=opt.graph_layers,
    vision_dim=opt.vision_dim,
    module_dim=opt.module_dim,
    word_dim=opt.word_dim,
    unit_layers=opt.unit_layers
).to('cuda')

# Log parameter counts for the major sub-modules and for the whole model.
pytorch_total_params = _num_trainable_params(model)
resnet_101_params = _num_trainable_params(model.resnet101_model)
logging.info('num of resnet_101 params: {}'.format(resnet_101_params))
resnext_101_params = _num_trainable_params(model.resnext101_model)
logging.info('num of resnext_101 params: {}'.format(resnext_101_params))
AEDecoder_params = _num_trainable_params(model.AE)
logging.info('num of AEDecoder params: {}'.format(AEDecoder_params))
logging.info('num of params: {}'.format(pytorch_total_params))
logging.info(model)

# Copy the pre-trained GloVe vectors into the word-embedding layer in place.
logging.info('load glove vectors')
train_loader.glove_matrix = torch.FloatTensor(train_loader.glove_matrix).to('cuda')
with torch.no_grad():
    model.linguistic_input_unit.encoder_embed.weight.set_(train_loader.glove_matrix)

MSELoss = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))


if opt.stage_one and not opt.restore:  # run stage-1 pre-training only when stage_one is set AND we are not resuming from a checkpoint
    ###############################
    # Stage 1: clip-reconstruction pre-training
    ###############################
    logging.info("Start training stage 1 ........")
    for i, batch in enumerate(iter(train_loader)):
        model.train()  # training mode: enables BatchNorm updates and Dropout
        optimizer.zero_grad() # clear accumulated gradients

        # Unpack one batch and move every tensor to the GPU
        video_idx, question_idx, answers, appearance_clips, motion_clips, question, question_len = [ todevice(x, 'cuda') for x in batch ]
        answers = answers.cuda().squeeze()  # drop singleton dims (NOTE(review): squeeze() would also drop the batch dim when batch_size == 1 — confirm)

        # Forward pass: the default (stage-1) path returns reconstructed clips
        rebuild_appearance, rebuild_motion = model(appearance_clips, motion_clips, question, question_len) # tensor[2,768]

        # Flatten the clips to [batch, -1] so MSE compares them element-wise,
        # e.g. [2, 8, 16, 3, 128, 128] -> [2, 8*16*3*128*128]
        real_appearance_clips = appearance_clips.view(appearance_clips.shape[0], -1)
        real_motion_clips = motion_clips.view(motion_clips.shape[0], -1)
        
        rebuild_appearance = rebuild_appearance.view(rebuild_appearance.shape[0], -1)
        rebuild_motion = rebuild_motion.view(rebuild_motion.shape[0], -1)

        # Cast all four tensors to float32 so MSELoss sees matching dtypes
        real_appearance_clips = real_appearance_clips.to(torch.float32)
        real_motion_clips = real_motion_clips.to(torch.float32)
        rebuild_appearance = rebuild_appearance.to(torch.float32)
        rebuild_motion = rebuild_motion.to(torch.float32)

        # Reconstruction loss: appearance MSE + motion MSE
        total_loss = MSELoss(rebuild_appearance, real_appearance_clips) + MSELoss(rebuild_motion, real_motion_clips)
        logging.info('train batch: {}, loss: {}'.format(i, total_loss.item()))
        writer.add_scalar('ae_loss', total_loss.item(), i)
        total_loss.backward()  # backpropagate
        optimizer.step()  # update parameters


        ###############################
        # Validate every 50 batches
        ###############################
        if i % 50 == 0:
            model.eval()  # eval mode: freezes BatchNorm statistics and disables Dropout
            total_val_loss = 0
            count = 0
            with torch.no_grad():
                for j, batch in enumerate(iter(val_loader)):
                    video_idx, question_idx, answers, appearance_clips, motion_clips, question, question_len = [ todevice(x, 'cuda') for x in batch ]
                    answers = answers.cuda().squeeze()  # drop singleton dims

                    # Forward pass, then the same flatten/cast as in training
                    rebuild_appearance, rebuild_motion = model(appearance_clips, motion_clips, question, question_len)
                    real_appearance_clips = appearance_clips.view(appearance_clips.shape[0], -1)
                    real_motion_clips = motion_clips.view(motion_clips.shape[0], -1)

                    rebuild_appearance = rebuild_appearance.view(rebuild_appearance.shape[0], -1)
                    rebuild_motion = rebuild_motion.view(rebuild_motion.shape[0], -1)

                    real_appearance_clips = real_appearance_clips.to(torch.float32)
                    real_motion_clips = real_motion_clips.to(torch.float32)
                    rebuild_appearance = rebuild_appearance.to(torch.float32)
                    rebuild_motion = rebuild_motion.to(torch.float32)

                    # Reconstruction loss on the validation batch
                    total_loss = MSELoss(rebuild_appearance, real_appearance_clips) + MSELoss(rebuild_motion, real_motion_clips)
                    total_val_loss += total_loss.item()
                    count += opt.batch_size
                    logging.info('val batch: {}, loss: {}'.format(j, total_loss.item()))
                    if j == 10:  # cap validation at 11 batches
                        break
            # NOTE(review): total_val_loss sums per-batch *mean* losses while
            # the divisor counts *samples* (and assumes full batches), so the
            # logged average is on an odd scale — confirm this is intended.
            logging.info('avg val loss: {}'.format(total_val_loss / count))
            writer.add_scalar('ae_val_loss', total_val_loss / count, i)

            # Stop stage 1 after batch 300. This check only executes on
            # validation iterations, which works because 300 % 50 == 0.
            if i >= 300:
                break


###############################
# Initialize stage two
###############################
CrossEntropyLoss = torch.nn.CrossEntropyLoss().to('cuda')

# Running bests: overall validation accuracy plus per-question-type accuracy.
start_epoch = 0
best_val = 0.
best_what = 0.   # best accuracy on "what" questions
best_how = 0.    # best accuracy on "how" questions
best_when = 0.   # best accuracy on "when" questions
best_who = 0.    # best accuracy on "who" questions
best_where = 0.  # best accuracy on "where" questions

model_kwargs = {
    'vision_dim': opt.vision_dim,
    'module_dim': opt.module_dim,
    'word_dim': opt.word_dim,
    'vocab': train_loader.vocab,
    'num_of_nodes': opt.num_of_nodes,
    'graph_module': opt.graph_module,
    'graph_layers': opt.graph_layers
}
# Exclude the vocab object from the kwargs that get stored in checkpoints.
model_kwargs_tosave = {key: value for key, value in model_kwargs.items() if key != 'vocab'}


if opt.restore:
    # Resume model and optimizer state from a previous run's checkpoint.
    logging.info("Restore checkpoint and optimizer...")
    ckpt_path = os.path.join(opt.restore_path, 'ckpt', 'model.pt')
    ckpt = torch.load(ckpt_path, map_location=lambda storage, loc: storage)  # load onto CPU first
    start_epoch = ckpt['epoch'] + 1
    model.load_state_dict(ckpt['state_dict'])
    optimizer.load_state_dict(ckpt['optimizer'])


###############################
# Stage 2: VQA training loop
###############################
logging.info("Start training stage 2 ........")
for epoch in range(start_epoch, opt.max_epochs):
    logging.info('>>>>>> epoch {epoch} <<<<<<'.format(epoch=colored("{}".format(epoch), "green", attrs=["bold"])))
    model.train()  # training mode: enables BatchNorm updates and Dropout
    total_acc, count = 0, 0          # running number correct / number seen
    total_loss, avg_loss = 0.0, 0.0  # running loss sum / running mean loss
    train_accuracy = 0
    for i, batch in enumerate(iter(train_loader)):
        video_idx, question_idx, answers, appearance_clips, motion_clips, question, question_len = [ todevice(x, 'cuda') for x in batch ]
        answers = answers.cuda().squeeze()  # drop singleton dims (NOTE(review): would also drop the batch dim when batch_size == 1 — confirm)
        optimizer.zero_grad()  # clear accumulated gradients
        # Forward pass (stage 2): answer logits plus the per-layer graph
        # embeddings consumed by the regularizers below.
        logits, aq_embed, mq_embed, com_app, com_motion, aq_fusion, mq_fusion = model(appearance_clips, motion_clips, question, question_len, stage=2)
        loss = CrossEntropyLoss(logits, answers)  # classification loss

        # Regularizers, averaged over the len(aq_fusion) layers:
        #  - loss_dependence between each fused embedding and its common one
        #  - common_loss between the appearance and motion common embeddings
        loss_dep = 0
        loss_com = 0
        num_layers = len(aq_fusion)
        for j in range(num_layers):
            loss_dep += (loss_dependence(aq_fusion[j].cuda(), com_app[j].cuda(), opt.num_of_nodes)
                                 + loss_dependence(mq_fusion[j].cuda(), com_motion[j].cuda(), opt.num_of_nodes))
            loss_com += common_loss(com_app[j].cuda(), com_motion[j].cuda())
        loss = loss + opt.alpha * loss_com / num_layers + opt.beta * loss_dep / num_layers

        loss.backward()  # backpropagate
        total_loss += loss.detach()  # detach: accumulate the value without keeping the graph alive
        avg_loss = total_loss / (i + 1)
        nn.utils.clip_grad_norm_(model.parameters(), max_norm=12)  # guard against exploding gradients
        optimizer.step()  # update parameters
        aggreeings = batch_accuracy(logits, answers)  # per-sample correctness

        total_acc += aggreeings.sum().item()  # number of correct answers
        count += answers.size(0)  # number of answers seen
        train_accuracy = total_acc / count

        global_step = epoch * len(train_loader) + i
        logging.info('train epoch: {}, batch: {}, ce_loss: {}, avg_loss: {}, train_acc: {}, avg_acc: {}'.format(epoch, i, loss.item(), avg_loss, aggreeings.float().mean().cpu().numpy(), train_accuracy))
        writer.add_scalar('ce_loss', loss.item(), global_step)
        writer.add_scalar('avg_loss', avg_loss, global_step)
        writer.add_scalar('train_acc', aggreeings.float().mean().cpu().numpy(), global_step)
        writer.add_scalar('avg_acc', train_accuracy, global_step)

    # Decay the learning rate every 10 epochs.
    if (epoch + 1) % 10 == 0:
        optimizer = step_decay(opt, optimizer)
    logging.info("Epoch = %s   avg_loss = %.3f    avg_acc = %.3f" % (epoch, avg_loss, train_accuracy))

    ###############################
    # Stage-2 validation
    ###############################

    # Ensure the prediction output directory exists.
    output_dir = os.path.join(opt.save_dir, opt.exp_name)
    assert os.path.isdir(output_dir), "Directory %s does not exist" % output_dir
    output_dir = os.path.join(output_dir, 'preds')
    os.makedirs(output_dir, exist_ok=True)
    valid_acc, *valid_output = validate(opt.batch_size, model, val_loader, 'cuda', write_preds=False)

    # Track the best overall validation accuracy (with its per-type breakdown)
    # and checkpoint the model whenever it improves.
    # valid_output order: [what, who, how, when, where]
    if valid_acc > best_val:
        best_val = valid_acc
        best_what = valid_output[0]
        best_who = valid_output[1]
        best_how = valid_output[2]
        best_when = valid_output[3]
        best_where = valid_output[4]
        ckpt_dir = os.path.join(opt.save_dir, opt.exp_name, 'ckpt')
        os.makedirs(ckpt_dir, exist_ok=True)
        save_checkpoint(epoch, model, optimizer, model_kwargs_tosave, os.path.join(ckpt_dir, 'model.pt'))
        logging.info('save to %s' % (ckpt_dir))

    logging.info('~~~~~~ Valid Accuracy: %.4f ~~~~~~~' % valid_acc)
    writer.add_scalar('valid_acc', valid_acc, epoch)
    logging.info('~~~~~~ Valid What Accuracy: %.4f ~~~~~~~' % valid_output[0])
    logging.info('~~~~~~ Valid Who Accuracy: %.4f ~~~~~~' % valid_output[1])
    logging.info('~~~~~~ Valid How Accuracy: %.4f ~~~~~~' % valid_output[2])
    logging.info('~~~~~~ Valid When Accuracy: %.4f ~~~~~~' % valid_output[3])
    logging.info('~~~~~~ Valid Where Accuracy: %.4f ~~~~~~' % valid_output[4])

    logging.info('~~~~~ Best Valid Accuracy: %.4f ~~~~~' % best_val)
    writer.add_scalar('best_valid_acc', best_val, epoch)
    logging.info('~~~~~ Best What Accuracy: %.4f ~~~~~' % best_what)
    logging.info('~~~~~ Best How Accuracy: %.4f ~~~~~' % best_how)
    logging.info('~~~~~ Best When Accuracy: %.4f ~~~~~' % best_when)
    logging.info('~~~~~ Best Where Accuracy: %.4f ~~~~~' % best_where)
    logging.info('~~~~~ Best Who Accuracy: %.4f ~~~~~~' % best_who)


writer.close()
