import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
import time
from dataset import get_dataloader
from tokenizer import ChineseTokenizer, EnglishTokenizer
from model import TranslationModel
from config import PROCESSED_DATA_DIR, LOGS_DIR, MODELS_DIR, LEARNING_RATE, EPOCHS, SEQ_LEN


def train_one_epoch(dataloader, model, loss_func, optimizer, device):
    """Run one full pass over the dataloader and return the mean batch loss.

    Args:
        dataloader: yields (src, tgt) index tensors of shape [batch, seq_len].
        model: translation model exposing src_embedding/tgt_embedding (for
            padding_idx) and a .transformer (for the causal mask helper).
        loss_func: criterion applied to flattened logits vs. target indices.
        optimizer: optimizer stepping the model parameters.
        device: device the batches are moved to.

    Returns:
        float: total loss divided by the number of batches.
    """
    model.train()
    running_loss = 0.0

    for src, tgt in tqdm(dataloader, desc="train"):
        src, tgt = src.to(device), tgt.to(device)  # each [batch, seq_len]

        # Mark padding positions so attention can ignore them.
        src_pad_mask = src.eq(model.src_embedding.padding_idx)
        tgt_pad_mask = tgt.eq(model.tgt_embedding.padding_idx)

        # Teacher forcing: the decoder consumes the target shifted right,
        # and is supervised on the target shifted left.
        decoder_input = tgt[:, :-1]   # [batch, tgt_len - 1]
        expected = tgt[:, 1:]         # [batch, tgt_len - 1]

        # Causal mask: each position may only attend to earlier positions.
        causal_mask = model.transformer.generate_square_subsequent_mask(
            decoder_input.shape[1]
        ).to(device)

        optimizer.zero_grad()
        # logits: [batch, tgt_len - 1, en_vocab_size]
        logits = model(src, decoder_input, src_pad_mask,
                       tgt_pad_mask[:, :-1], causal_mask)
        batch_loss = loss_func(
            logits.reshape(-1, logits.shape[-1]),  # [batch * (tgt_len-1), vocab]
            expected.reshape(-1)                   # [batch * (tgt_len-1)]
        )
        batch_loss.backward()
        optimizer.step()

        running_loss += batch_loss.item()

    return running_loss / len(dataloader)


def train():
    """Main training loop: fit the model and checkpoint the best epoch.

    Side effects:
        - Writes per-epoch training loss to TensorBoard under
          LOGS_DIR/<timestamp>.
        - Saves the weights with the lowest training loss so far to
          MODELS_DIR / 'model.pt'.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    dataloader = get_dataloader()  # data loader

    # Rebuild tokenizers from the saved vocabulary files.
    zh_tokenizer = ChineseTokenizer.from_vocab(PROCESSED_DATA_DIR / 'zh_vocab.txt')
    en_tokenizer = EnglishTokenizer.from_vocab(PROCESSED_DATA_DIR / 'en_vocab.txt')

    # Model: Chinese source -> English target.
    model = TranslationModel(zh_tokenizer.vocab_size, en_tokenizer.vocab_size,
                             zh_tokenizer.pad_token_index, en_tokenizer.pad_token_index).to(device)

    # Padding positions in the target contribute nothing to the loss.
    loss_func = nn.CrossEntropyLoss(ignore_index=en_tokenizer.pad_token_index)
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)

    writer = SummaryWriter(log_dir=LOGS_DIR / time.strftime("%Y-%m-%d-%H-%M-%S"))  # TensorBoard logs

    best_loss = float('inf')  # +inf so the first epoch always checkpoints

    try:
        for epoch in range(1, EPOCHS + 1):
            # Train one epoch.
            avg_loss = train_one_epoch(dataloader, model, loss_func, optimizer, device)
            print(f'epoch: {epoch}, avg_loss: {avg_loss:.4f}')

            # Log to TensorBoard.
            writer.add_scalar('Loss/train', avg_loss, epoch)

            # Checkpoint whenever this epoch improves on the best loss so far.
            if avg_loss < best_loss:
                best_loss = avg_loss
                torch.save(model.state_dict(), MODELS_DIR / 'model.pt')
    finally:
        # Fix: the writer was never closed, so buffered events could be
        # lost if the process exited (or an epoch raised) before flushing.
        writer.close()

# Entry point: run training only when executed as a script, not on import.
if __name__ == '__main__':
    train()
