import random
import torch
from torch.utils.data import DataLoader
import torch.nn as nn
import time
import torch.optim as optim
from tqdm import tqdm
import json

from until import device, SOS_token, MAX_LENGTH, teacher_forcing_ratio, EOS_token, my_lr, epochs
from encoder import MyEncoderGRU
from attentiondecoder import AttentionDecoderGRU
from get_dict import get_data
from dataset import MyPairDataset

# Load vocabularies and parallel sentence pairs once at module import time.
english_word2index, english_index2word, english_word_n, \
    french_word2index, french_index2word, french_word_n, my_pairs = get_data()


def train_iter(x, y, my_encoder_gru, my_decoder_gru, encoder_adam, decoder_adam, criterion):
    """Run one optimization step on a single (source, target) sentence pair.

    Args:
        x: source-sentence index tensor, shape [1, src_len] — assumed batch size 1.
        y: target-sentence index tensor, shape [1, tgt_len].
        my_encoder_gru / my_decoder_gru: encoder and attention-decoder modules.
        encoder_adam / decoder_adam: their respective optimizers.
        criterion: NLLLoss over the decoder's log-probabilities.

    Returns:
        float: accumulated loss divided by the target length (note: the divisor
        is always tgt_len, even if decoding stopped early on EOS — kept as-is
        to preserve the original reporting behavior).
    """
    # 1. Encode the whole source sentence in one pass.
    # x: [1, src_len] -> encoder_output: [1, src_len, 256], encoder_hidden: [1, 1, 256]
    h0 = my_encoder_gru.init_hidden().to(device)
    encoder_output, encoder_hidden = my_encoder_gru(x, h0)

    # 2. Prepare decoder inputs (Q, K, V for attention).
    # Q: initial decoder input is the SOS token, shape [1, 1].
    input_y = torch.tensor([[SOS_token]]).to(device)
    # K: decoder starts from the encoder's final hidden state, [1, 1, 256].
    decoder_hidden = encoder_hidden
    # V: pad/truncate encoder outputs to a fixed [MAX_LENGTH, 256] matrix
    # (single slice assignment replaces the original element-wise copy loop).
    encoder_output_c = torch.zeros(MAX_LENGTH, my_encoder_gru.hidden_size).to(device)
    valid_len = min(encoder_output.shape[1], MAX_LENGTH)
    encoder_output_c[:valid_len] = encoder_output[0, :valid_len]

    my_loss = 0.0
    # With probability teacher_forcing_ratio, feed the ground-truth token as
    # the next decoder input; otherwise feed the decoder's own prediction.
    use_teacher_forcing = random.random() < teacher_forcing_ratio

    # 3. Decode one token at a time. The two modes share everything except
    # how the next input is chosen, so a single loop handles both.
    for idx in range(y.shape[1]):
        # output_y: [1, vocab], decoder_hidden: [1, 1, 256], attn: [1, MAX_LENGTH]
        output_y, decoder_hidden, _attention_weights = my_decoder_gru(
            input_y, decoder_hidden, encoder_output_c)

        target_y = y[0][idx].view(1)  # scalar target -> shape [1] for NLLLoss
        my_loss = my_loss + criterion(output_y, target_y)

        if use_teacher_forcing:
            # Ground-truth token becomes the next input, shape [1, 1].
            input_y = y[0][idx].view(1, -1)
        else:
            # Greedy prediction becomes the next input; stop early on EOS.
            _topv, topi = torch.topk(output_y, k=1)  # topi: [1, 1]
            if topi.item() == EOS_token:
                break
            input_y = topi.detach()  # detach: don't backprop through the input

    # 4. Standard optimizer step for both models.
    encoder_adam.zero_grad()
    decoder_adam.zero_grad()
    my_loss.backward()
    encoder_adam.step()
    decoder_adam.step()

    return my_loss.item() / y.shape[1]


def train_model():
    """Train the encoder/attention-decoder pair on the English-French pairs.

    Side effects:
        - saves both models' state dicts to ./model/ after every epoch
          (directories are assumed to exist — TODO confirm or create them);
        - writes plotting losses and total wall time to ./result/result.json.
    """
    # How often (in steps) to print a log line / record a plotting point.
    LOG_EVERY = 1000
    PLOT_EVERY = 100

    my_dataset = MyPairDataset(my_pairs)
    # batch_size=1: train_iter decodes one sentence pair at a time.
    dataloader = DataLoader(dataset=my_dataset, batch_size=1, shuffle=True)

    # Instantiate the encoder.
    my_encoder_gru = MyEncoderGRU(vocab_size=english_word_n, hidden_size=256)
    my_encoder_gru = my_encoder_gru.to(device)

    # Instantiate the attention decoder.
    my_attention_decoder = AttentionDecoderGRU(vocab_size=french_word_n, hidden_size=256, dropout_p=0.1,
                                               max_len=MAX_LENGTH)
    my_attention_decoder = my_attention_decoder.to(device)

    # NLLLoss pairs with the decoder's log-softmax output.
    criterion = nn.NLLLoss()

    # One optimizer per model.
    encoder_optim = optim.Adam(params=my_encoder_gru.parameters(), lr=my_lr)
    decoder_optim = optim.Adam(params=my_attention_decoder.parameters(), lr=my_lr)

    total_loss = 0.0
    total_item = 0
    avg_plot_loss_list = []
    start_time = time.time()

    for epoch in range(1, epochs + 1):
        print_loss_total = 0.0
        plot_loss_total = 0.0
        epoch_time = time.time()
        for item, (x, y) in enumerate(tqdm(dataloader), start=1):
            x = x.to(device)
            y = y.to(device)
            my_loss = train_iter(x, y, my_encoder_gru, my_attention_decoder, encoder_optim, decoder_optim, criterion)
            total_loss += my_loss
            print_loss_total += my_loss
            plot_loss_total += my_loss
            total_item += 1

            # Print a progress line every LOG_EVERY steps.
            if item % LOG_EVERY == 0:
                avg_print_loss = print_loss_total / LOG_EVERY
                print_loss_total = 0.0
                use_time = time.time() - epoch_time
                avg_loss = total_loss / total_item
                print("当前训练的轮次: %d, 平均总损失:%.4f, 每1000步平均损失:%.4f, 用时:%ds" % (
                    epoch, avg_loss, avg_print_loss, use_time))

            # Record an averaged loss point every PLOT_EVERY steps for plotting.
            if item % PLOT_EVERY == 0:
                avg_plot_loss_list.append(plot_loss_total / PLOT_EVERY)
                plot_loss_total = 0.0

        # Save both models after every epoch (the original `epoch % 1 == 0`
        # guard was always true, contradicting its "every 2 epochs" comment).
        torch.save(my_encoder_gru.state_dict(), './model/encoder_gru_%d.bin' % (epoch))
        torch.save(my_attention_decoder.state_dict(), './model/decoder_gru_%d.bin' % (epoch))

    # Persist plotting data and total training time.
    total_time = int(time.time() - start_time)
    result = {
        'avg_plot_loss_list': avg_plot_loss_list,
        'total_time': total_time,
    }
    with open('./result/result.json', 'w') as f:
        json.dump(result, f)

if __name__ == '__main__':
    # Train the model.
    train_model()