import os.path
import time
from copy import deepcopy

from utils.data_helper import tokenizer_dict, LoadEnglishGermanDataset
from config.config import cfg
from model.TranslationModel import TranslationModel

import torch
from torch import nn

from tqdm import tqdm
from utils.log_helper import log_init


class CustomerSchedule(nn.Module):
    """Noam learning-rate schedule from "Attention Is All You Need".

        lr = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)

    The rate rises linearly for ``warmup_steps`` optimizer steps, then
    decays with the inverse square root of the step count.
    NOTE(review): the name is presumably a typo for "CustomSchedule";
    kept unchanged for backward compatibility with callers.
    """

    def __init__(self, d_model, warmup_steps=4000, optimizer=None):
        """
        :param d_model: model hidden size; scales the whole schedule.
        :param warmup_steps: number of linear warm-up steps.
        :param optimizer: optimizer whose param groups receive each new lr.
        """
        super(CustomerSchedule, self).__init__()
        # Plain float instead of a torch tensor (bug fix): the original kept
        # d_model as a 0-d tensor, so every computed lr was a tensor object;
        # storing tensors in param_groups['lr'] allocates on every step and
        # can break lr logging/serialization downstream.
        self.d_model = float(d_model)
        self.warmup_steps = warmup_steps
        self.steps = 0.
        self.optimizer = optimizer

    def step(self):
        """Advance one step, write the new lr into every param group, return it."""
        self.steps += 1.
        arg1 = self.steps ** -0.5
        arg2 = self.steps * (self.warmup_steps ** -1.5)

        lr = (self.d_model ** -0.5) * min(arg1, arg2)
        for group in self.optimizer.param_groups:
            group['lr'] = lr
        return lr


def accuracy(logits, y_true, PAD_IDX):
    """Token-level accuracy over the non-padding target positions.

    :param logits: model output, shape [tgt_len, batch_size, tgt_vocab_size]
    :param y_true: gold targets, shape [tgt_len, batch_size]
    :param PAD_IDX: padding token index, excluded from the statistics
    :return: (accuracy, num_correct, num_valid_tokens)
    """
    # Flatten predictions and targets to 1-D, batch-major.
    preds = logits.argmax(dim=2).transpose(0, 1).reshape(-1)
    targets = y_true.transpose(0, 1).reshape(-1)

    # Padding positions count toward neither numerator nor denominator.
    valid = torch.logical_not(targets.eq(PAD_IDX))
    hits = preds.eq(targets) & valid

    correct = hits.sum().item()
    total = valid.sum().item()
    return float(correct) / total, correct, total


def evaluate(cfg, val_iter, model, data_loader):
    """Measure token accuracy of ``model`` over the validation iterator.

    Switches the model to eval mode for the pass and restores train mode
    before returning.

    :param cfg: config object providing ``device``.
    :param val_iter: iterable of (src, tgt) batches, shape [seq_len, batch_size].
    :param model: the translation model.
    :param data_loader: provides ``create_mask`` and ``PAD_IDX``.
    :return: fraction of correctly predicted non-padding target tokens.
    """
    model.eval()
    n_correct = 0
    n_total = 0
    with torch.no_grad():
        for src, tgt in val_iter:
            src, tgt = src.to(cfg.device), tgt.to(cfg.device)
            # Teacher forcing: decoder input is the target shifted right by one.
            decoder_in = tgt[:-1, :]
            src_mask, tgt_mask, src_pad_mask, tgt_pad_mask = data_loader.create_mask(
                src, decoder_in, device=cfg.device)
            logits = model(src=src,
                           tgt=decoder_in,
                           src_mask=src_mask,
                           tgt_mask=tgt_mask,
                           src_key_padding_mask=src_pad_mask,
                           tgt_key_padding_mask=tgt_pad_mask,
                           memory_key_padding_mask=src_pad_mask)
            # Gold outputs: target shifted left by one, shape [tgt_len, batch_size].
            _, n_hit, n_tok = accuracy(logits, tgt[1:, :], data_loader.PAD_IDX)
            n_correct += n_hit
            n_total += n_tok
    model.train()
    return float(n_correct) / n_total


def train_model(cfg):
    """Run the full training loop for the translation model.

    Builds the dataset and model from ``cfg``, Xavier-initializes the
    weights, resumes from a saved checkpoint when one exists, then trains
    with a warm-up learning-rate schedule. Every 10 epochs the model is
    evaluated on the validation set and checkpointed.

    :param cfg: configuration object (corpus paths, hyper-parameters, device,
        model-save directory).
    """
    data_loader = LoadEnglishGermanDataset(cfg.train_corpus_file_paths,
                                           batch_size=cfg.batch_size,
                                           tokenizer=tokenizer_dict)
    train_iter, valid_iter, test_iter = data_loader.load_train_val_test_data(cfg.train_corpus_file_paths,
                                                                             cfg.val_corpus_file_paths,
                                                                             cfg.test_corpus_file_paths)

    translation_model = TranslationModel(src_vocab_size=len(data_loader.de_vocab),
                                         tgt_vocab_size=len(data_loader.en_vocab),
                                         d_model=cfg.d_model,
                                         nhead=cfg.num_head,
                                         num_encoder_layers=cfg.num_encoder_layers,
                                         num_decoder_layers=cfg.num_decoder_layers,
                                         dim_feedforward=cfg.dim_feedforward,
                                         dropout=cfg.dropout)

    # Xavier-initialize all weight matrices BEFORE any checkpoint restore.
    # (Bug fix: the original re-initialized parameters *after*
    # load_state_dict, silently discarding restored weights on resume.)
    for p in translation_model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)

    # Resume from an existing checkpoint if one is on disk; map_location
    # lets a CUDA-saved checkpoint load on a CPU-only machine (the model is
    # moved to cfg.device right after).
    model_save_path = os.path.join(cfg.model_save_dir, 'model.pkl')
    if os.path.exists(model_save_path):
        loaded_paras = torch.load(model_save_path, map_location='cpu')
        translation_model.load_state_dict(loaded_paras)
        logger.debug("#### 成功载入已有模型，进行追加训练...")

    # Move the model to the configured device (CPU or CUDA).
    translation_model = translation_model.to(cfg.device)

    # Padding positions contribute nothing to the loss.
    loss_fn = nn.CrossEntropyLoss(ignore_index=data_loader.PAD_IDX)

    optimizer = torch.optim.Adam(translation_model.parameters(),
                                 lr=cfg.warm_up_learning_rate,
                                 betas=(cfg.beta1, cfg.beta2),
                                 eps=cfg.epsilon)
    lr_schedule = CustomerSchedule(cfg.d_model, optimizer=optimizer, warmup_steps=cfg.warmup_steps)
    translation_model.train()

    acc = 0.0  # last-batch train accuracy; defined even if train_iter is empty
    for epoch in range(1, cfg.epochs + 1):
        losses = 0
        start_time = time.time()
        # Iterate lazily (bug fix: the original wrapped list(enumerate(...)),
        # materializing every batch up front); tqdm still shows a total
        # because the iterator exposes __len__ (used below as well).
        for src, tgt in tqdm(train_iter, ncols=100):
            # src/tgt shape: [seq_len, batch_size]
            src = src.to(cfg.device)
            tgt = tgt.to(cfg.device)

            # Decoder input: target shifted right by one token, [tgt_len, batch_size].
            tgt_input = tgt[:-1, :]
            src_mask, tgt_mask, src_padding_mask, tgt_padding_mask = data_loader.create_mask(src, tgt_input,
                                                                                             cfg.device)
            # logits shape: [tgt_len, batch_size, tgt_vocab_size]
            logits = translation_model(src=src,
                                       tgt=tgt_input,
                                       src_mask=src_mask,
                                       tgt_mask=tgt_mask,
                                       src_key_padding_mask=src_padding_mask,
                                       tgt_key_padding_mask=tgt_padding_mask,
                                       memory_key_padding_mask=src_padding_mask)
            optimizer.zero_grad()
            # Gold outputs: target shifted left by one token, [tgt_len, batch_size].
            tgt_out = tgt[1:, :]

            # Loss over [tgt_len * batch_size, tgt_vocab_size] vs [tgt_len * batch_size].
            loss = loss_fn(logits.reshape(-1, logits.shape[-1]), tgt_out.reshape(-1))
            loss.backward()
            lr_schedule.step()  # set the warmed-up lr before the optimizer update
            optimizer.step()
            losses += loss.item()
            acc, _, _ = accuracy(logits, tgt_out, data_loader.PAD_IDX)

        end_time = time.time()
        train_loss = losses / len(train_iter)
        logger.info(f'Epoch: {epoch}/{cfg.epochs}, Train loss: {train_loss}, Train acc: {acc}, Epoch time：{(end_time - start_time):.3f}s')
        if epoch % 10 == 0:
            acc = evaluate(cfg, valid_iter, translation_model, data_loader)
            logger.info(f'Accuracy on validation {acc:.3f}')
            # Deep-copy so the saved state is a snapshot, not live tensors.
            stat_dict = deepcopy(translation_model.state_dict())
            torch.save(stat_dict, model_save_path)


if __name__ == '__main__':
    # `logger` is created here as a module-level global; train_model and
    # evaluate reference it directly, so importing this module and calling
    # train_model without first defining `logger` would raise NameError.
    logger = log_init('train_model', level='INFO')
    train_model(cfg)
