import os
import random

import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm

from Transformer import Transformer
from config import *
from get_parm import *

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# trainning pram
optim_l_r = 1e-7
epochs = 90


def Show_sentence(sentence_list):
    for word in sentence_list:
        if word == "<EOS>":
            print(word, end="")
            break
        print(word, end="")
    print()


def get_loss(decoder_outputs, target):
    decoder_outputs = torch.log(decoder_outputs)
    # torch.nn.CrossEntropyLoss==torch.nn.NLLLoss+torch.nn.LogSoftmax，因此这里需要取对数
    target = target.view(-1)  # [batch_size*max_len]
    decoder_outputs = decoder_outputs.view(batch_size * max_len_ch, -1)
    return criterion(decoder_outputs, target)


transformer = Transformer()
if os.path.exists("./models/transformer_e2c.pkl"):
    transformer.load_state_dict(torch.load("./models/transformer_e2c.pkl"))
optimizer = optim.Adam(transformer.parameters(), optim_l_r)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[30, 60], gamma=0.1)
criterion = nn.NLLLoss(ignore_index=vocab_en.word2idx("<PAD>"), reduction="mean")

transformer.train()
transformer.to(device)
Loss_list = []
loss_min = float('inf')
for epoch in range(epochs):
    progress_bar = tqdm(total=len(trainDataLoader), desc='Train Epoch {}'.format(epoch), unit='batch')
    loss_all = 0
    n = 0
    for idx, (en_text, ch_text) in enumerate(trainDataLoader):
        n += 1
        optimizer.zero_grad()
        en_text = en_text.to(device)
        ch_text = ch_text.to(device)
        output = transformer(en_text, ch_text)
        loss_t = get_loss(output, ch_text)
        loss_all += loss_t.item()
        loss_t.backward()
        optimizer.step()

        if idx == 0:
            predict = torch.argmax(output.data, dim=-1)
            print("预测：", end="")
            number = random.randint(0, int(batch_size // 2))
            Show_sentence(mapping(predict[number], vocab_ch, reverse=True))
            print("真实：", end="")
            Show_sentence(mapping(ch_text[number], vocab_ch, reverse=True))

        progress_bar.set_postfix({'loss': loss_all / (idx + 1), "lr": optimizer.param_groups[0]['lr']})
        progress_bar.update()
    Loss_list.append(loss_all / n)
    with open("loss/loss_data.txt", "w", encoding="utf8") as f:
        f.write(str(Loss_list))
    if (loss_all / n) < loss_min:
        torch.save(transformer.state_dict(), "./models/transformer_e2c.pkl")
        loss_min = loss_all / n
    progress_bar.close()
    scheduler.step()
