import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import opts
from dataloader import *
import time
from six.moves import cPickle
from models.Swin_TransformerModel import *
import misc.utils as utils
import eval_utils
import models
# from script import convert_to_caption
from misc.rewards import init_score, get_self_critical_reward
from get_caption import get_caption

try:
    import tensorboardX as tb
except ImportError:
    print("tensorboardX is not installed")
    tb = None


def add_summary_value(writer, key, value, iteration):
    """Log a scalar to tensorboard, silently skipping when no writer exists.

    Args:
        writer: a tensorboardX SummaryWriter, or a falsy value (e.g. None
            when tensorboardX is not installed).
        key: tag name for the scalar.
        value: scalar value to record.
        iteration: global step the value belongs to.
    """
    if not writer:
        return
    writer.add_scalar(key, value, iteration)


def train(opt):
    """Run the full training loop for the captioning model.

    Handles checkpoint resumption, learning-rate decay, scheduled
    sampling, self-critical (RL) training after a warm-up phase,
    periodic validation, and checkpointing of the best model.

    Args:
        opt: parsed command-line options (see opts.py). Mutated in place:
            vocab_size, seq_length, current_lr and ss_prob are set here.
    """
    loader = DataLoader(opt)
    opt.vocab_size = loader.vocab_size
    opt.seq_length = loader.seq_length

    # None when tensorboardX is unavailable; add_summary_value tolerates that.
    tb_summary_writer = tb and tb.SummaryWriter(opt.checkpoint_path)

    # Restore training state (counters, histories) when resuming from a
    # previous checkpoint directory.
    infos = {}
    histories = {}
    if opt.start_from is not None:
        with open(os.path.join(opt.start_from, 'infos_' + opt.id + '.pkl'), 'rb') as f:
            infos = cPickle.load(f)
            saved_model_opt = infos['opt']
            # These options define the model architecture and must match the
            # checkpoint, otherwise the saved weights cannot be reused.
            need_be_same = ["rnn_size", "encode_num_layers"]
            for checkme in need_be_same:
                assert vars(saved_model_opt)[checkme] == vars(opt)[
                    checkme], "Command line argument and saved model disagree on '%s' " % checkme

        if os.path.isfile(os.path.join(opt.start_from, 'histories_' + opt.id + '.pkl')):
            with open(os.path.join(opt.start_from, 'histories_' + opt.id + '.pkl'), 'rb') as f:
                histories = cPickle.load(f)

    iteration = infos.get('iter', 0)
    epoch = infos.get('epoch', 0)
    val_result_history = histories.get('val_result_history', {})
    loss_history = histories.get('loss_history', {})
    lr_history = histories.get('lr_history', {})
    ss_prob_history = histories.get('ss_prob_history', {})

    # BUG FIX: best_val_score was only bound when load_best_score == 1,
    # which raised a NameError at the first checkpoint otherwise.
    best_val_score = None
    if opt.load_best_score == 1:
        best_val_score = infos.get('best_val_score', None)

    model = models.setup(opt).cuda()

    epoch_done = True

    model.train()

    crit = utils.LanguageModelCriterion()
    rl_crit = utils.RewardCriterion()

    # The BERT encoder is frozen regardless of which optimizer is used
    # (previously duplicated in both branches below).
    for param in model.bert_encoder.parameters():
        param.requires_grad = False
    if opt.noamopt:
        optimizer = utils.get_std_opt(model, factor=opt.noamopt_factor, warmup=opt.noamopt_warmup)
        optimizer._step = iteration
    else:
        optimizer = utils.build_optimizer(filter(lambda p: p.requires_grad, model.parameters()), opt)

    if vars(opt).get('start_from', None) is not None and os.path.isfile(os.path.join(opt.start_from, "optimizer.pth")):
        optimizer.load_state_dict(torch.load(os.path.join(opt.start_from, 'optimizer.pth')))

    # BUG FIX: current_lr is read by the logging code below even when
    # neither the manual decay branch nor noamopt sets it (e.g. when
    # reduce_on_plateau is enabled); give it a safe default so logging
    # cannot raise AttributeError.
    opt.current_lr = getattr(opt, 'current_lr', opt.learning_rate)

    while True:
        if epoch_done:
            if not opt.noamopt and not opt.reduce_on_plateau:
                # Stepwise exponential learning-rate decay.
                if epoch > opt.learning_rate_decay_start and opt.learning_rate_decay_start >= 0:
                    frac = (epoch - opt.learning_rate_decay_start) // opt.learning_rate_decay_every
                    decay_factor = opt.learning_rate_decay_rate ** frac
                    opt.current_lr = opt.learning_rate * decay_factor
                else:
                    opt.current_lr = opt.learning_rate
                utils.set_lr(optimizer, opt.current_lr)

            # Scheduled sampling: increase the probability of feeding the
            # model its own predictions instead of ground truth.
            if epoch > opt.scheduled_sampling_start and opt.scheduled_sampling_start >= 0:
                frac = (epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every
                opt.ss_prob = min(opt.scheduled_sampling_increase_prob * frac, opt.scheduled_sampling_max_prob)
                model.ss_prob = opt.ss_prob

            # Switch to self-critical (RL) training after warm-up epochs.
            if opt.self_critical_after != -1 and epoch >= opt.self_critical_after:
                sc_flag = True
                init_score(opt.cached_tokens)
            else:
                sc_flag = False

            epoch_done = False

        start = time.time()
        # Load data from train split
        data = loader.get_batch('train')

        tokens, token_masks = get_caption(data)
        tokens = tokens.cuda()
        token_masks = token_masks.cuda()

        tmp = [data['img'], data['fc_feats'], data['att_feats'], data['labels'], data['masks'], data['att_masks']]
        tmp = [_ if _ is None else torch.from_numpy(_).cuda() for _ in tmp]
        # img: image features; fc_feats: 2048-d features extracted by ResNet;
        # att_feats: 2048-d features for each bounding box;
        # labels: one-hot representation of the caption.
        img, fc_feats, att_feats, labels, masks, att_masks = tmp
        # NOTE(review): `boxes` is moved to the GPU but never used below —
        # kept for side-effect parity; confirm whether it can be dropped.
        boxes = data['boxes'] if data['boxes'] is None else torch.from_numpy(data['boxes']).cuda()

        optimizer.zero_grad()

        if not sc_flag:
            # Cross-entropy training against ground-truth captions; the
            # first label token is the BOS marker, hence the [:, 1:] shift.
            scores = model(img, fc_feats, att_feats, labels, masks, tokens, token_masks, att_masks)
            loss = crit(scores, labels[:, 1:], masks[:, 1:])
        else:
            # Self-critical training: sample captions and weight their
            # log-probabilities by the sequence-level reward.
            gen_result, sample_logprobs = model(img, fc_feats, att_feats, tokens, token_masks, att_masks, opt={'sample_max': 0}, mode='sample')
            reward = get_self_critical_reward(model, img, fc_feats, att_feats, tokens, token_masks, att_masks, data, gen_result, opt)

            loss = rl_crit(sample_logprobs, gen_result.data, torch.from_numpy(reward).float().cuda())

        loss.backward()
        utils.clip_gradient(optimizer, opt.grad_clip)
        optimizer.step()
        train_loss = loss.item()
        torch.cuda.synchronize()
        end = time.time()

        if not sc_flag:
            # FIX: corrected "Preplexity" typo in the log message.
            print("iter {} (epoch {}), train_loss = {:.3f}, Perplexity: = {:.3f}, time/batch = {:.3f}"\
                            .format(iteration, epoch, train_loss, np.exp(train_loss), end - start))
        else:
            print("iter {} (epoch {}), avg_reward = {:.3f}, time/batch = {:.3f}"\
                            .format(iteration, epoch, np.mean(reward[:, 0]), end - start))
        iteration += 1

        # The loader wraps around at the end of each epoch.
        if data['bounds']['wrapped']:
            epoch += 1
            epoch_done = True

        # Periodic scalar logging.
        if iteration % opt.losses_log_every == 0:
            add_summary_value(tb_summary_writer, "train_loss", train_loss, iteration)
            if opt.noamopt:
                opt.current_lr = optimizer.rate()
            add_summary_value(tb_summary_writer, 'learning_rate', opt.current_lr, iteration)
            if sc_flag:
                add_summary_value(tb_summary_writer, 'avg_reward', np.mean(reward[:, 0]), iteration)

            loss_history[iteration] = train_loss
            lr_history[iteration] = opt.current_lr

        # Periodic validation and checkpointing.
        if iteration % opt.save_checkpoint_every == 0:
            eval_kwargs = {'split': 'val',
                           'dataset': opt.input_json}
            eval_kwargs.update(vars(opt))
            val_loss, predictions, lang_stats = eval_utils.eval_split(model, crit, loader, eval_kwargs)

            if opt.reduce_on_plateau:
                # Scheduler minimizes its input, so negate CIDEr (higher is better).
                if 'CIDEr' in lang_stats:
                    optimizer.scheduler_step(-lang_stats['CIDEr'])
                else:
                    optimizer.scheduler_step(val_loss)
            add_summary_value(tb_summary_writer, 'validation loss', val_loss, iteration)
            if lang_stats:
                for k, v in lang_stats.items():
                    add_summary_value(tb_summary_writer, k, v, iteration)
            val_result_history[iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}

            # Model-selection score: CIDEr when language evaluation is on,
            # otherwise negative validation loss (higher is better either way).
            if opt.language_eval == 1:
                current_score = lang_stats['CIDEr']
            else:
                current_score = - val_loss

            best_flag = False
            if best_val_score is None or current_score > best_val_score:
                best_val_score = current_score
                best_flag = True

            if not os.path.isdir(opt.checkpoint_path):
                os.makedirs(opt.checkpoint_path)
            checkpoint_path = os.path.join(opt.checkpoint_path, 'model.pth')
            torch.save(model.state_dict(), checkpoint_path)
            print("model saved to {}".format(checkpoint_path))
            optimizer_path = os.path.join(opt.checkpoint_path, 'optimizer.pth')
            torch.save(optimizer.state_dict(), optimizer_path)

            # Dump miscellaneous information needed to resume training.
            infos['iter'] = iteration
            infos['epoch'] = epoch
            infos['iterators'] = loader.iterators
            infos['split_ix'] = loader.split_ix
            infos['best_val_score'] = best_val_score
            infos['opt'] = opt
            infos['vocab'] = loader.get_vocab()

            histories['val_result_history'] = val_result_history
            histories['loss_history'] = loss_history
            histories['lr_history'] = lr_history
            histories['ss_prob_history'] = ss_prob_history
            with open(os.path.join(opt.checkpoint_path, 'infos_' + opt.id + '.pkl'), 'wb') as f:
                cPickle.dump(infos, f)
            with open(os.path.join(opt.checkpoint_path, 'histories_' + opt.id + '.pkl'), 'wb') as f:
                cPickle.dump(histories, f)

            # Keep a separate copy of the best-scoring model.
            if best_flag:
                checkpoint_path = os.path.join(opt.checkpoint_path, 'model-best.pth')
                torch.save(model.state_dict(), checkpoint_path)
                print("model saved to {}".format(checkpoint_path))
                with open(os.path.join(opt.checkpoint_path, 'infos_' + opt.id + '-best.pkl'), 'wb') as f:
                    cPickle.dump(infos, f)

        # Stop if reaching max epochs
        if epoch >= opt.max_epochs and opt.max_epochs != -1:
            break


# Guard the script entry point so importing this module (e.g. for reuse of
# train() or add_summary_value()) does not immediately start training.
if __name__ == "__main__":
    opt = opts.parse_opt()
    train(opt)
