import random
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from src.Networks.EncoderRNN import EncoderRNN
from src.Main.config import *
from torch import optim
import os
import itertools
from src.Networks.Attn import *
from src.LoadData.dataLoader import Voc
import numpy as np

from gensim.models.word2vec import Word2Vec

# Load the pre-trained gensim Word2Vec model once at import time
# (word2vec_model_path comes from src.Main.config); it is used by
# getWord2vecLoss/maskNLLLoss to scale the training loss.
word2vec_model = Word2Vec.load(word2vec_model_path)


def loadData(voc_path, pairs_path):
    """
    Load a serialized vocabulary and training sentence pairs from disk.

    :param voc_path: path to the torch-saved vocabulary object
    :param pairs_path: path to the torch-saved list of sentence pairs
    :return: (voc, pairs) tuple
    """
    return torch.load(voc_path), torch.load(pairs_path)


def printLoss(iteration, print_loss, iter_list, loss_list):
    """
    Report training progress and refresh the saved loss curve.

    :param iteration: current iteration number
    :param print_loss: loss accumulated since the last report
    :param iter_list: iteration history (x-axis of the loss plot), one entry per print_every
    :param loss_list: average-loss history (y-axis of the loss plot)
    :return: None
    """
    avg_loss = print_loss / print_every
    progress = iteration / MAX_ITERATION * 100
    print("Iteration: {}; Percent complete: {:.1f}%; Average loss: {:.4f}".format(
        iteration, progress, avg_loss))
    iter_list.append(iteration)
    loss_list.append(avg_loss)
    draw(iter_list, loss_list)


def saveModel(iteration, embedding_layer,
              encoder, decoder,
              encoder_optimizer, decoder_optimizer,
              loss, voc, save_dir='../Models/LCCC_data_word2vec/'):
    """
    Persist a full training checkpoint every `save_every` iterations.

    The checkpoint bundles model/optimizer/embedding state dicts, the current
    loss, and the vocabulary's __dict__ so training can be resumed later.
    """
    if iteration % save_every != 0:
        return
    directory = os.path.join(save_dir,
                             '{}-{}_{}'.format(encoder_n_layers, decoder_n_layers, hidden_size))
    os.makedirs(directory, exist_ok=True)
    checkpoint = {
        'iteration': iteration,
        'encoder': encoder.state_dict(),
        'decoder': decoder.state_dict(),
        'encoder_opt': encoder_optimizer.state_dict(),
        'decoder_opt': decoder_optimizer.state_dict(),
        'embedding_layer': embedding_layer.state_dict(),
        'loss': loss,
        'voc_dict': voc.__dict__,
    }
    torch.save(checkpoint, os.path.join(directory, '{}_{}.tar'.format(iteration, 'checkpoint')))


def draw(x_axis, y_axis):
    """
    Plot the loss curve and save it to loss.jpg.

    :param x_axis: iteration numbers
    :param y_axis: average losses
    :return: None
    """
    # Start a fresh figure on every call: the original plotted onto the
    # implicit current figure, so each call stacked a new line on top of all
    # previous ones, and figures were never closed (memory growth over a
    # long training run).
    fig = plt.figure()
    plt.plot(x_axis, y_axis)
    plt.xlabel('iter')
    plt.ylabel('loss')
    plt.title('loss figure')
    plt.savefig('loss.jpg')
    plt.close(fig)


def run(load_model=False, checkpoint_path=None):
    """
    Build (or restore) the encoder/decoder and start training.

    :param load_model: if True, resume training from the given checkpoint
    :param checkpoint_path: checkpoint .tar file produced by saveModel
    :return: None
    """

    # Load the vocabulary and the padded/indexed training pairs.
    voc, pairs = loadData(voc_path=VOC_PATH,
                          pairs_path=TRAIN_PAIRS_PATH)
    start_iteration = 0

    # Embedding layer shared by the encoder and the decoder.
    embedding_layer = nn.Embedding(voc.num_words, embedding_dim=embedding_size)
    if load_model:
        checkpoint = torch.load(checkpoint_path)
        start_iteration = checkpoint['iteration']
        encoder_state_dict = checkpoint['encoder']
        decoder_state_dict = checkpoint['decoder']
        encoder_optim_dict = checkpoint['encoder_opt']
        decoder_optim_dict = checkpoint['decoder_opt']
        embedding_layer.load_state_dict(checkpoint['embedding_layer'])
        voc.__dict__ = checkpoint['voc_dict']

    # Encoder
    encoder = EncoderRNN(input_size=voc.num_words,
                         hidden_size=hidden_size,
                         embedding_layer=embedding_layer,
                         n_layer=encoder_n_layers,
                         dropout=dropout)
    # Decoder with Luong attention
    decoder = LuongAttnDecoderRNN(attn_model=attn_model,
                                  embedding=embedding_layer,
                                  hidden_size=hidden_size,
                                  output_size=voc.num_words,
                                  n_layers=decoder_n_layers,
                                  drop_out=dropout)
    # Optimizers (decoder learns faster by decoder_learning_ratio).
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio)

    if load_model:
        encoder.load_state_dict(encoder_state_dict)
        decoder.load_state_dict(decoder_state_dict)
        encoder_optimizer.load_state_dict(encoder_optim_dict)
        decoder_optimizer.load_state_dict(decoder_optim_dict)

    # Move restored optimizer state tensors onto the configured device.
    # The original called v.cuda(), which crashes on CPU-only machines;
    # .to(device) matches how the rest of this file places tensors.
    for optimizer in (encoder_optimizer, decoder_optimizer):
        for state in optimizer.state.values():
            for k, v in state.items():
                if isinstance(v, torch.Tensor):
                    state[k] = v.to(device)

    encoder.to(device=device), decoder.to(device=device)
    # Switch to training mode and start the loop.
    encoder.train(), decoder.train()
    train(voc=voc,
          pairs=pairs,
          embedding_layer=embedding_layer,
          encoder=encoder,
          encoder_optimizer=encoder_optimizer,
          decoder=decoder,
          decoder_optimizer=decoder_optimizer,
          start_iteration=start_iteration)


# l is a 2-D (already padded) list of token indices.
# Returns a matrix of the same shape as l: 0 where an entry is padding,
# 1 otherwise.
def binaryMatrix(l, value=PAD_token):
    """
    :param l: 2-D list of token indices (already padded)
    :param value: the padding token to mask out (default PAD_token)
    :return: list of lists of 0/1 flags, same shape as l
    """
    m = []
    for i, seq in enumerate(l):
        m.append([])
        for token in seq:
            # Compare against the caller-supplied padding value; the original
            # accepted `value` but ignored it and hard-coded PAD_token.
            if token == value:
                m[i].append(0)
            else:
                m[i].append(1)
    return m


def embeddingWord(batch_pairs):
    """
    Turn a batch of (input, output) index-sequence pairs into padded tensors.

    The batch is sorted by descending input length (in place), inputs and
    outputs are padded with PAD_token and transposed to (max_len, batch).

    :param batch_pairs: list of [input_indices, output_indices] pairs
    :return: (pad_inp, input_lengths, pad_output, mask, max_target_len)
    """
    batch_pairs.sort(key=lambda pair: len(pair[0]), reverse=True)
    inputs = [pair[0] for pair in batch_pairs]
    outputs = [pair[1] for pair in batch_pairs]

    # Per-sentence input lengths (needed by pack_padded_sequence).
    input_lengths = torch.tensor([len(seq) for seq in inputs])

    # Pad and transpose the inputs to (max_len, batch).
    pad_inp = torch.LongTensor(list(itertools.zip_longest(*inputs, fillvalue=PAD_token)))

    # Longest target sequence in the batch.
    max_target_len = max(len(seq) for seq in outputs)

    # Pad and transpose the targets, and derive the padding mask from them.
    padded_out = list(itertools.zip_longest(*outputs, fillvalue=PAD_token))
    mask = torch.BoolTensor(binaryMatrix(padded_out))
    pad_output = torch.LongTensor(padded_out)

    return pad_inp, input_lengths, pad_output, mask, max_target_len


def getRandomData(voc, pairs):
    """
    Pre-sample one random mini-batch per training iteration.

    :param voc: vocabulary (unused here; kept for interface compatibility)
    :param pairs: list of [question, answer] index-sequence pairs
    :return: list of MAX_ITERATION tensorized batches, each the tuple
             produced by embeddingWord for batch_size random pairs
    """
    return [
        embeddingWord([random.choice(pairs) for _ in range(batch_size)])
        for _ in range(MAX_ITERATION)
    ]


def getWord2vecLoss(word2vec_model, max_input_index, target, mask, voc):
    """
    Average word2vec similarity between predicted and target words over the
    non-padding positions of one decoding step.

    :param word2vec_model: gensim Word2Vec model exposing wv.similarity
    :param max_input_index: 1-D tensor of argmax word indices from the decoder
    :param target: 1-D tensor of gold word indices
    :param mask: 1-D bool tensor, True at non-padding positions
    :param voc: vocabulary with an index2word mapping
    :return: mean similarity as a float; 0.0 if every position is padding
    """
    n_total = mask.sum().item()
    if n_total == 0:
        # Guard: the original divided by n_total unconditionally and also
        # called .item() on an int 0 accumulator, both of which fail here.
        return 0.0
    total = 0.0
    for i in range(max_input_index.shape[0]):
        if mask[i]:
            predicted = voc.index2word[max_input_index[i].item()]
            gold = voc.index2word[target[i].item()]
            # float() accepts both numpy scalars (gensim's return type) and
            # plain Python floats.
            total += float(word2vec_model.wv.similarity(predicted, gold))
    return total / n_total


def maskNLLLoss(inp, target, mask, voc):
    """
    Masked negative log-likelihood loss, scaled down when the decoder's
    predictions are already word2vec-similar to the targets.

    :param inp: decoder output scores for one time step (batch x vocab)
    :param target: gold token indices for this step
    :param mask: True at non-padding positions, False at padding
    :param voc: vocabulary for index -> word lookups
    :return: (loss, nTotal) — scaled mean loss tensor and the count of
             non-padding positions
    """
    nTotal = mask.sum()
    # All-padding step: contribute a zero loss and a zero token count.
    if nTotal == 0:
        return torch.zeros(1).mean(), 0

    predicted_indices = torch.argmax(inp, dim=1)

    # word2vec similarity between predictions and targets; the closer they
    # already are (>= 0.7), the smaller the loss scale, down to zero.
    similarity = getWord2vecLoss(word2vec_model, predicted_indices, target, mask, voc)
    scale = 0 if similarity >= 0.7 else 0.7 - similarity

    # Pick out the probability assigned to each gold token, take -log,
    # average over the real (non-padding) positions, then apply the scale.
    picked = torch.gather(inp, 1, target.view(-1, 1)).squeeze(1)
    crossEntropy = -torch.log(picked)
    loss = (scale * crossEntropy.masked_select(mask).mean()).to(device)

    return loss, nTotal.item()


def train(voc, pairs, embedding_layer, encoder, encoder_optimizer, decoder, decoder_optimizer, start_iteration=0):
    """
    Run the main training loop.

    :param voc: vocabulary (index <-> word mappings)
    :param pairs: list of [question, answer] index-sequence pairs
    :param embedding_layer: embedding shared by encoder and decoder
    :param encoder: encoder network
    :param encoder_optimizer: optimizer for the encoder
    :param decoder: decoder network
    :param decoder_optimizer: optimizer for the decoder
    :param start_iteration: iteration to resume from (default 0)
    :return: None
    """

    # Pre-sample one padded/tensorized mini-batch per iteration.
    train_batches = getRandomData(voc, pairs)

    print_loss = 0
    iter_list = list()
    loss_list = list()

    # NOTE(review): starting at start_iteration + 1 means batch index 0 is
    # never consumed, and range(..., MAX_ITERATION) skips the final batch.
    for i_iteration in range(start_iteration + 1, MAX_ITERATION):
        input_variable, input_lengths, target_variable, mask, max_target_length = train_batches[i_iteration]

        # Clear accumulated gradients.
        encoder_optimizer.zero_grad()
        decoder_optimizer.zero_grad()
        # Move the batch tensors onto the training device.
        input_variable = input_variable.to(device)
        target_variable = target_variable.to(device)
        mask = mask.to(device)
        # Lengths must stay on the CPU (pack_padded_sequence requirement).
        input_lengths = input_lengths.to("cpu")

        # Per-iteration accumulators.
        loss = 0
        print_losses = []
        n_totals = 0

        # Encoder forward pass over the whole padded batch.
        encoder_outputs, encoder_hidden = encoder(input_seq=input_variable, input_lengths=input_lengths, hidden=None)

        # The decoder starts every sentence from an SOS token.
        decoder_input = torch.LongTensor([[SOS_token for _ in range(batch_size)]])

        decoder_input = decoder_input.to(device)  # place on the device

        # Initialize the decoder hidden state from the encoder's final state.
        decoder_hidden = encoder_hidden[:decoder.n_layers]

        # Decide per iteration whether to use teacher forcing.
        use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
        # Feed the decoder one time step at a time.
        if use_teacher_forcing:
            for t in range(max_target_length):
                decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_outputs)
                # Teacher forcing: the ground-truth token is the next input.
                decoder_input = target_variable[t].view(1, -1)
                # Accumulate the masked, similarity-scaled loss for this step.
                mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t], voc)
                loss += mask_loss
                print_losses.append(mask_loss.item() * nTotal)
                n_totals += nTotal
        else:
            for t in range(max_target_length):
                decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden, encoder_outputs)
                # No teacher forcing: the decoder's own top prediction feeds back.
                _, topi = decoder_output.topk(1)
                decoder_input = torch.LongTensor([[topi[i][0] for i in range(batch_size)]])
                decoder_input = decoder_input.to(device)
                # Accumulate the masked, similarity-scaled loss for this step.
                mask_loss, nTotal = maskNLLLoss(decoder_output, target_variable[t], mask[t], voc)
                loss += mask_loss
                print_losses.append(mask_loss.item() * nTotal)
                n_totals += nTotal

        loss.backward()

        # Clip gradients to mitigate exploding gradients.
        _ = nn.utils.clip_grad_norm_(encoder.parameters(), clip)
        _ = nn.utils.clip_grad_norm_(decoder.parameters(), clip)

        # Apply the parameter updates.
        encoder_optimizer.step()
        decoder_optimizer.step()

        # NOTE(review): if a batch consisted entirely of padding, n_totals
        # would be 0 and this division would raise ZeroDivisionError —
        # confirm target sequences are always non-empty.
        print_loss += sum(print_losses) / n_totals
        if i_iteration % print_every == 0:
            printLoss(iteration=i_iteration,
                      print_loss=print_loss,
                      loss_list=loss_list,
                      iter_list=iter_list
                      )
            print_loss = 0
        if i_iteration % save_every == 0:
            saveModel(iteration=i_iteration,
                      embedding_layer=embedding_layer,
                      encoder=encoder,
                      decoder=decoder,
                      encoder_optimizer=encoder_optimizer,
                      decoder_optimizer=decoder_optimizer,
                      voc=voc,
                      loss=sum(print_losses) / n_totals)


# Script entry point: train from scratch by default.
if __name__ == '__main__':
    run(load_model=False, checkpoint_path=None)
    # To resume from a saved checkpoint instead:
    # run(load_model=True, checkpoint_path="../Models/qingyun_data/2-2_800/35000_checkpoint.tar")
