import time
from itertools import chain
from torch.utils.tensorboard import SummaryWriter
import torch
import torch.nn as nn
from torch.optim import AdamW
from model import TranslationEncoder, TranslationDecoder
import config
from tqdm import tqdm
from dataset import get_dataloader
from tokenizer import ChineseTokenizer, EnglishTokenizer


def train_one_epoch(encoder, decoder, loss, optimizer, dataloader, device):
    """
    Run one training epoch with full teacher forcing.

    :param encoder: source-side model; maps (batch, seq_len) token ids to a
        context vector — assumed shape (batch, hidden); TODO confirm against model
    :param decoder: target-side model; consumes one target token per step
        together with the running hidden state
    :param loss: criterion applied to flattened logits vs. flattened target ids
    :param optimizer: optimizer covering both models' parameters
    :param dataloader: yields (inputs, targets) id tensors, each (batch, seq_len)
    :param device: torch device each batch is moved to
    :return: mean batch loss over the epoch
    """
    encoder.train()
    decoder.train()

    running_loss = 0.0

    for inputs, targets in tqdm(dataloader, desc="训练中"):
        inputs = inputs.to(device)
        targets = targets.to(device)

        optimizer.zero_grad()

        # Encode the whole source sequence into one context vector, then add
        # a leading layer axis so it can serve as the decoder's initial
        # hidden state: (batch, hidden) -> (1, batch, hidden).
        hidden = encoder(inputs).unsqueeze(0)

        # Teacher forcing: the decoder always receives the ground-truth token
        # from the previous step. Position 0 is the start-of-sequence token,
        # so predictions begin at step 1.
        step_logits = []
        step_input = targets[:, 0:1]  # (batch, 1)
        for step in range(1, targets.shape[1]):
            logits, hidden = decoder(step_input, hidden)
            # logits: (batch, 1, vocab_size); hidden carries over to next step
            step_logits.append(logits)
            step_input = targets[:, step:step + 1]

        # Stack per-step logits along time, then flatten for the criterion:
        # (batch, seq_len-1, vocab) -> (batch*(seq_len-1), vocab)
        flat_logits = torch.cat(step_logits, dim=1)
        flat_logits = flat_logits.reshape(-1, flat_logits.shape[-1])

        # Expected ids, flattened the same way: (batch*(seq_len-1),)
        flat_targets = targets[:, 1:].reshape(-1)

        batch_loss = loss(flat_logits, flat_targets)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()

    return running_loss / len(dataloader)


def train():
    """
    Train the translation encoder/decoder and checkpoint the best weights.

    Builds tokenizers, dataloader, models, loss and optimizer from project
    config, trains for ``config.EPOCHS`` epochs, logs the per-epoch average
    loss to TensorBoard, and saves both state dicts whenever the loss
    improves on the best seen so far.
    """
    # Prefer CUDA, then Apple MPS, then CPU.
    device = torch.device(
        'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu')

    chinese_tokenizer = ChineseTokenizer.from_vocab(config.PROCESSED_DIR / 'zh_vocab.txt')
    english_tokenizer = EnglishTokenizer.from_vocab(config.PROCESSED_DIR / 'en_vocab.txt')

    # Training data
    dataloader = get_dataloader(train=True)

    # Source-side (Chinese) encoder, target-side (English) decoder.
    encoder = TranslationEncoder(vocab_size=chinese_tokenizer.vocab_size,
                                 padding_index=chinese_tokenizer.pad_token_id).to(device)
    decoder = TranslationDecoder(vocab_size=english_tokenizer.vocab_size,
                                 padding_index=english_tokenizer.pad_token_id).to(device)

    # Ignore padding positions when computing the loss.
    loss_func = nn.CrossEntropyLoss(ignore_index=english_tokenizer.pad_token_id)

    # A single optimizer over the parameters of both models.
    optimizer = AdamW(chain(encoder.parameters(), decoder.parameters()), lr=config.LEARNING_RATE)

    # TensorBoard writer: one timestamped run directory per launch.
    writer = SummaryWriter(log_dir=config.LOGS_DIR / time.strftime('%Y-%m-%d_%H-%M-%S'))

    try:
        best_loss = float('inf')
        for epoch in range(1, 1 + config.EPOCHS):
            print(f'========== Epoch: {epoch} ==========')
            avg_loss = train_one_epoch(encoder, decoder, loss_func, optimizer, dataloader, device)
            print(f'Loss: {avg_loss:.4f}')

            writer.add_scalar('Loss', avg_loss, epoch)

            # Checkpoint only when this epoch improves on the best loss.
            if avg_loss < best_loss:
                best_loss = avg_loss
                torch.save(encoder.state_dict(), config.MODELS_DIR / 'encoder.pt')
                torch.save(decoder.state_dict(), config.MODELS_DIR / 'decoder.pt')
                print('模型保存成功')
            else:
                print('模型无需保存')
    finally:
        # Flush pending events and release the writer even if training
        # raises — previously the SummaryWriter was never closed.
        writer.close()


# Script entry point: start training only when run directly, not on import.
if __name__ == '__main__':
    train()


