"""
@author: 石沙
@date: 2020-10-27
@reference:
"""

from models import Seq2SeqGRU
import configs.settings as conf
from torch.utils.data import DataLoader
from site_packages.utils.job import DataOp
from features import MarketingData, sentences_from_indices
import torch
from torch.optim import Adam
import os
from torch.nn.utils import clip_grad_norm_
import matplotlib.pyplot as plt


# Disable cuDNN globally — presumably a workaround for a cuDNN issue with this
# RNN configuration; TODO confirm whether this is still required.
torch.backends.cudnn.enabled = False


# Vocabulary dictionary produced during preprocessing, loaded through the
# project's DataOp helper; presumably maps tokens <-> indices — verify
# against the features module.
dic = DataOp.load_data('dictionary')


# Training hyper-parameters (architecture settings live in conf.MODEL_CONF).
TRAIN_CONF = {
    'learning_ratio': 0.0005,               # initial Adam learning rate
    'teacher_forcing': 0.9,                 # initial teacher-forcing ratio
    'teacher_forcing_decay_factor': 0.95,   # applied after each epoch when scheduled_sampling is on
    'scheduled_sampling': True,             # decay teacher forcing between epochs
    'save_frequency': 50,                   # checkpoint every N iterations
    'learning_decay_factor': 0.9,           # LR multiplier applied every decay_frequency iterations
    'decay_frequency': 50,
    'clip': 1,                              # max gradient norm for clip_grad_norm_
    'print_loss': 25,                       # print progress every N iterations
}


# Running per-iteration loss history; appended to in train_iters and stored
# inside every saved checkpoint.
LOSS_SEQ = []


def reduce_learning_ratio(optimizer):
    """Decay the optimizer's learning rate by TRAIN_CONF['learning_decay_factor'].

    Bug fix: the original tested ``hasattr(optimizer, 'lr')`` — torch optimizers
    never expose an ``lr`` attribute (the rate lives in ``param_groups``), so the
    decay silently never ran; and the ``setattr`` call was malformed (one argument)
    and would have raised TypeError had it been reached.

    :param optimizer: any torch.optim optimizer (Adam here)
    """
    for param_group in optimizer.param_groups:
        param_group['lr'] *= TRAIN_CONF['learning_decay_factor']


def save_checkpoint(iteration, seq2seq, optimizer, loss, LOSS_SEQ, final=False, trained_file_name='trained'):
    """Persist the full training state to disk with torch.save.

    Intermediate checkpoints go under
    ``<save_model_path>/checkpoints/<enc_layers>-<dec_layers>-<hidden_size>/chp_<iteration>.tar``;
    the final model is written to ``<save_model_path>/<trained_file_name>.tar``.

    :param iteration: current iteration index, stored for later resumption
    :param seq2seq: the model object (saved whole, not just state_dict)
    :param optimizer: the optimizer object (saved whole)
    :param loss: last batch loss tensor
    :param LOSS_SEQ: full loss history so far
    :param final: True to write the final model instead of a checkpoint
    :param trained_file_name: basename for the final model file
    """
    # Resolve the target directory and file name for this save.
    if final:
        save_dir = os.path.join(conf.MODEL_CONF['save_model_path'])
        target_name = '{}.tar'.format(trained_file_name)
    else:
        arch_tag = '{}-{}-{}'.format(
            conf.MODEL_CONF['encoder_num_layers'],
            conf.MODEL_CONF['decoder_num_layers'],
            conf.MODEL_CONF['hidden_size'],
        )
        save_dir = os.path.join(conf.MODEL_CONF['save_model_path'], 'checkpoints', arch_tag)
        target_name = '{}_{}.tar'.format('chp', iteration)

    os.makedirs(save_dir, exist_ok=True)

    # Everything needed to resume training from this point.
    state = {
        'iteration': iteration,
        'seq2seq': seq2seq,
        'optimizer': optimizer,
        'loss': loss,
        'loss_seq': LOSS_SEQ,
    }
    torch.save(state, os.path.join(save_dir, target_name))


def negative_log_loss_by_mask(seq_outputs, mask):
    """Mean negative log-likelihood over the unmasked (non-PADDING) positions.

    :param seq_outputs: seq2seq outputs in training mode
    :param mask: boolean mask selecting valid positions of seq_outputs,
        shape (batch_size * max_len of targets,) after indexing
    :return: scalar loss tensor
    """
    # conf.epsilon guards against log(0) on zero-probability outputs.
    selected = seq_outputs[mask]
    return torch.log(selected + conf.epsilon).mean().neg()


def reduce_teacher_forcing():
    """Decay the scheduled-sampling teacher-forcing ratio and push it onto the model.

    Bug fix: the original called ``setattr(seq2seq.teacher_forcing, value)`` —
    ``setattr`` requires (object, name, value), so this raised TypeError at the
    end of every epoch when scheduled sampling was enabled. A plain attribute
    assignment is the correct form.

    NOTE(review): like the original, this relies on the module-global ``seq2seq``
    bound in the ``__main__`` block — consider passing the model explicitly.
    """
    TRAIN_CONF['teacher_forcing'] *= TRAIN_CONF['teacher_forcing_decay_factor']
    seq2seq.teacher_forcing = TRAIN_CONF['teacher_forcing']


def train_iters(data_loader, seq2seq, optimizer, epoch_nums, epoch, iteration=0, pointer=False):
    """
    Run one epoch of training over ``data_loader``.

    :param data_loader: DataLoader yielding batches; layout depends on ``pointer``
    :param seq2seq: the seq2seq model
    :param optimizer: a configured Adam optimizer
    :param epoch_nums: total epoch count (progress printing only)
    :param epoch: zero-based index of the current epoch (progress printing only)
    :param iteration: starting iteration count; needed when resuming a loaded model
    :param pointer: if True, batches also carry expanded inputs/targets for the
        pointer mechanism
    :return: seq2seq, optimizer

    Key tensor shapes during training:
    inputs: input sequences (batch_size, max_len of this batch's inputs); the global
        maximum input length is 100, but each batch is only padded to the longest
        sentence it contains.
    input_lens: per-sentence input lengths (batch_size,)
    targets: target sequences (batch_size, max_len of this batch's targets); the global
        maximum target length is 50, padded per batch like the inputs.
    target_lens: per-sentence target lengths (batch_size,)
    mask: mask over targets (batch_size, max_len of this targets)
    """

    # NOTE(review): total_batch is len(data_loader) - 1, and the final-save branch
    # below triggers at iteration == total_batch - 1 — i.e. one batch before the
    # actual last batch when starting from iteration 0. Confirm this off-by-one
    # is intentional.
    total_batch = len(data_loader) - 1
    for iteration, batch in enumerate(data_loader, iteration):
        optimizer.zero_grad()

        # Pointer batches carry two extra tensors with OOV-extended vocab ids.
        if not pointer:
            inputs, input_lens, input_mask, targets, target_lens, target_mask = batch
        else:
            inputs, input_lens, input_mask, expanded_inputs, targets, target_lens, target_mask, expanded_targets = batch

        inputs = inputs.to(conf.DEVICE)
        input_lens = input_lens.to(conf.DEVICE)
        input_mask = input_mask.to(conf.DEVICE)
        targets = targets.to(conf.DEVICE)
        max_target_len = target_lens.max().to(conf.DEVICE)
        target_mask = target_mask.to(conf.DEVICE)

        if pointer:
            expanded_inputs = expanded_inputs.to(conf.DEVICE)
            expanded_targets = expanded_targets.to(conf.DEVICE)
        else:
            expanded_inputs = None
            expanded_targets = None

        seq_outputs, mask, coverages = seq2seq(inputs, input_lens, input_mask, targets, max_target_len, target_mask, expanded_inputs=expanded_inputs, expanded_targets=expanded_targets)

        loss = negative_log_loss_by_mask(seq_outputs, mask)
        if conf.MODEL_CONF['is_coverage']:
            # Coverage penalty (See/Liu/Manning-style) weighted into the NLL loss.
            coverage_loss = torch.sum(coverages[mask])
            # print('coverage_loss.item():', coverage_loss.item())
            loss += conf.MODEL_CONF['coverage_loss_weight'] * coverage_loss
        # print('loss:', loss.item())
        LOSS_SEQ.append(loss.cpu().detach().numpy())
        # Run the backward pass
        loss.backward()

        # Gradient clipping
        clip_grad_norm_(seq2seq.parameters(), TRAIN_CONF['clip'])

        # Update parameters
        optimizer.step()

        # print(loss.item())
        if (iteration + 1) % TRAIN_CONF['print_loss'] == 0:
            print('共{}轮Epoch, 本轮为第{}轮Epoch迭代总次数{}，当前迭代第{}次, loss={}, 进度{:.2%}'.format(
                epoch_nums, epoch + 1, total_batch, iteration + 1, loss.item(), (iteration + 1) / total_batch))

        if (iteration + 1) % TRAIN_CONF['decay_frequency'] == 0:
            reduce_learning_ratio(optimizer)

        if (iteration + 1) % TRAIN_CONF['save_frequency'] == 0:
            save_checkpoint(iteration, seq2seq, optimizer, loss, LOSS_SEQ)

        if iteration == total_batch - 1:
            save_checkpoint(iteration, seq2seq, optimizer, loss, LOSS_SEQ, final=True)
            break
    if TRAIN_CONF['scheduled_sampling']:
        reduce_teacher_forcing()
    return seq2seq, optimizer


def train_epochs(seq2seq, optimizer, epoch_nums=10, iteration=0, pointer=False):
    """Train the model for ``epoch_nums`` epochs over the marketing dataset.

    :param seq2seq: the seq2seq model to train
    :param optimizer: a configured optimizer
    :param epoch_nums: number of epochs to run
    :param iteration: starting iteration count passed to each epoch
    :param pointer: whether batches should include pointer-mechanism tensors
    """
    dataset = MarketingData(pointer=pointer)
    loader = DataLoader(dataset, batch_size=conf.MODEL_CONF['batch_size'])

    for epoch_idx in range(epoch_nums):
        seq2seq, optimizer = train_iters(
            loader, seq2seq, optimizer, epoch_nums, epoch_idx, iteration, pointer=pointer)


def build_model(training=True, load_checkpoint=False, file_name='trained.tar', keep_iter=False, use_loss_seq=False):
    """Create a fresh model, or restore one from a saved checkpoint / trained file.

    :param training: True returns (seq2seq, optimizer, iteration[, loss_seq]) for
        training; False loads the finished model and returns only the network.
    :param load_checkpoint: when training, restore state from disk instead of
        building a new model.
    :param file_name: 'trained.tar' loads from the save root; any other name is
        looked up under the checkpoints/2-2-500 subdirectory.
    :param keep_iter: keep the saved iteration counter (otherwise restart at 0).
    :param use_loss_seq: also return the loss history.
    :raises: whatever torch.load raises if the file is missing/corrupt.
    """
    if not training:
        # Inference path: load the final model bundle and return just the network.
        model = torch.load(os.path.join(conf.MODEL_CONF['save_model_path'], file_name))
        return model['seq2seq']

    if not load_checkpoint:
        seq2seq = Seq2SeqGRU(teacher_forcing=TRAIN_CONF['teacher_forcing'], dropout=conf.MODEL_CONF['dropout'])
        seq2seq = seq2seq.to(conf.DEVICE)
        optimizer = Adam(seq2seq.parameters(), lr=TRAIN_CONF['learning_ratio'])
        iteration = 0
        # Bug fix: the original left ``loss_seq`` unbound on this branch, so
        # use_loss_seq=True with a fresh model raised NameError.
        loss_seq = []
    else:
        if file_name == 'trained.tar':
            path = os.path.join(conf.MODEL_CONF['save_model_path'], file_name)
        else:
            # Portability fix: the original hard-coded a Windows-only separator
            # (r'checkpoints\2-2-500'); joining the components yields the same
            # path on Windows and a valid one elsewhere.
            path = os.path.join(conf.MODEL_CONF['save_model_path'], 'checkpoints', '2-2-500', file_name)
        checkpoint = torch.load(path)
        seq2seq = checkpoint['seq2seq'].to(conf.DEVICE)
        optimizer = checkpoint['optimizer']
        iteration = checkpoint['iteration'] if keep_iter else 0
        loss = checkpoint['loss']
        loss_seq = checkpoint['loss_seq']
        print('当前载入模型迭代轮次为{}，loss为{:.4f}'.format(iteration + 1, loss.item()))

    if use_loss_seq:
        return seq2seq, optimizer, iteration, loss_seq
    return seq2seq, optimizer, iteration


if __name__ == '__main__':
    # To resume from a saved checkpoint instead, use e.g.:
    # seq2seq, optimizer, iteration, LOSS_SEQ = build_model(load_checkpoint=True, file_name='chp_299.tar', use_loss_seq=True)
    seq2seq, optimizer, iteration = build_model()
    seq2seq.train()
    train_epochs(seq2seq, optimizer, epoch_nums=30, iteration=0, pointer=conf.MODEL_CONF['pointer'])

    # Plot the loss curve
    plt.plot(range(len(LOSS_SEQ)), LOSS_SEQ)
    plt.show()
