from utils.data_helper import LoadSentenceClassificationDataset, tokenizer_en
from model.TextClassifier import Classification
from config.config import cfg
from loguru import logger
import time
from copy import deepcopy
import os

import torch
from torch import nn


class CustomSchedule(nn.Module):
    """Noam warmup learning-rate schedule ("Attention Is All You Need").

    The rate rises linearly for `warmup_steps` steps and then decays
    proportionally to the inverse square root of the step number, scaled
    by d_model ** -0.5.  Each call returns the lr for the next step and
    advances the internal step counter.
    """

    def __init__(self, d_model, warmup_steps=4000):
        super().__init__()
        # Kept as a float32 tensor so the returned lr is a tensor as well.
        self.d_model = torch.tensor(d_model, dtype=torch.float32)
        self.warmup_steps = warmup_steps
        self.step = 1.

    def __call__(self):
        # During warmup the linear term is smaller; afterwards the
        # inverse-sqrt decay term takes over via min().
        decay_term = self.step ** -0.5
        warmup_term = self.step * (self.warmup_steps ** -1.5)
        self.step += 1.
        return (self.d_model ** -0.5) * min(decay_term, warmup_term)


def evaluate(model, test_loader, loss_fn, pad_idx=None):
    """Compute and log the classification accuracy of `model` on `test_loader`.

    Args:
        model: classifier; called as ``model(text)`` or, when `pad_idx` is
            given, ``model(text, src_key_padding_mask=...)`` to mirror the
            training-time call.
        test_loader: iterable of ``(text, label)`` batches.
        loss_fn: kept for backward compatibility with existing callers; the
            per-batch loss is not needed for accuracy and is no longer computed.
        pad_idx: optional padding token id.  When provided, a key-padding mask
            is built exactly as during training so evaluation sees the same
            attention masking (the original code evaluated without the mask,
            diverging from training on padded batches).

    Returns:
        float: overall accuracy (0.0 if the loader yields no samples).
    """
    model.eval()
    total_acc, total_count = 0, 0
    with torch.no_grad():
        for text, label in test_loader:
            label = label.to(cfg.device)
            text = text.to(cfg.device)
            if pad_idx is not None:
                # True at padded positions; transposed to [batch_size, src_len].
                padding_mask = (text == pad_idx).transpose(0, 1)
                predicted_label = model(text, src_key_padding_mask=padding_mask)
            else:
                predicted_label = model(text)
            total_acc += (predicted_label.argmax(1) == label).sum().item()
            total_count += label.size(0)
    # Guard against an empty loader instead of raising ZeroDivisionError.
    test_acc = total_acc / total_count if total_count else 0.0
    logger.info(f'Test Accuracy: {test_acc}')
    return test_acc


def train_model(config):
    """Train the Transformer text classifier described by `config`.

    Builds the dataset and vocabulary, constructs the model, resumes from a
    saved checkpoint when one exists, trains for ``config.epochs`` epochs
    using the Noam warmup schedule, evaluates on the test set, and saves the
    checkpoint only when it beats the previously saved accuracy.
    """
    data_loader = LoadSentenceClassificationDataset(train_file_path=config.train_corpus_file_paths,
                                                    tokenizer=tokenizer_en,
                                                    min_freq=config.min_freq,
                                                    batch_size=config.batch_size,
                                                    max_sen_len=config.max_sen_len)

    train_iter, test_iter = data_loader.load_train_val_test_data(config.train_corpus_file_paths,
                                                                 config.test_corpus_file_paths)
    model = Classification(
        vocab_size=len(data_loader.vocab),
        d_model=config.d_model,
        nhead=config.num_head,
        num_encoder_layers=config.num_encoder_layers,
        dim_feedforward=config.dim_feedforward,
        dim_classification=config.dim_classification,
        num_classification=config.num_class,
        dropout=config.dropout)

    # BUG FIX: initialize BEFORE restoring a checkpoint.  The original code
    # loaded the saved state dict and then re-ran xavier_uniform_ on every
    # weight, silently wiping out the restored parameters.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)

    model_file_path = os.path.join(cfg.model_save_dir, 'model.pkl')
    old_model_acc = 0.
    if os.path.exists(model_file_path):
        # map_location='cpu' lets a GPU-saved checkpoint load on a CPU-only
        # machine; the model is moved to config.device below regardless.
        loaded_paras = torch.load(model_file_path, map_location='cpu')
        old_model_acc, state_dict = loaded_paras['acc'], loaded_paras['model_state_dict']
        model.load_state_dict(state_dict)
        logger.info(f'#### Successfully load model dict from {model_file_path}...')

    loss_fn = torch.nn.CrossEntropyLoss()
    # lr starts at 0 and is overwritten on every step by the warmup schedule.
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.,
                                 betas=(config.beta1, config.beta2),
                                 eps=config.epsilon)

    learning_rate = CustomSchedule(config.d_model)
    model = model.to(config.device)
    lr = 0.  # defined up front so the epoch-summary log is safe even if train_iter is empty
    for epoch in range(1, config.epochs + 1):
        losses = 0
        start_time = time.time()
        for idx, (sample, label) in enumerate(train_iter):
            # sample: [src_len, batch_size]
            sample = sample.to(config.device)
            label = label.to(config.device)
            # True at padded positions; transposed to [batch_size, src_len].
            padding_mask = (sample == data_loader.PAD_IDX).transpose(0, 1)
            logits = model(sample, src_key_padding_mask=padding_mask)
            # logits: [batch_size, num_class]
            optimizer.zero_grad()
            loss = loss_fn(logits, label)
            loss.backward()
            lr = learning_rate()
            for p in optimizer.param_groups:
                p['lr'] = lr
            optimizer.step()
            losses += loss.item()
            acc = (logits.argmax(1) == label).float().mean()
            if idx % 20 == 0:
                logger.info(
                    f"Epoch: {epoch}, Batch[{idx}/{len(train_iter)}], Train loss :{loss.item():.3f}, Train acc:{acc:.3f}, lr = {lr:.9f}")
        end_time = time.time()
        train_loss = losses / len(train_iter)
        logger.info(
            f"Epoch: {epoch}, Train loss: {train_loss:.3f}, Epoch time ={(end_time - start_time): .3f}s, lr = {lr:.9f}")
    # Evaluate on the test set once training has finished.
    accu_val = evaluate(model, test_iter, loss_fn=loss_fn)

    # Persist the checkpoint only when it improves on the previously saved one.
    if accu_val > old_model_acc:
        state_dict = deepcopy(model.state_dict())
        torch.save({
            'model_state_dict': state_dict,
            'epoch': epoch,
            'acc': accu_val
        }, model_file_path)
        logger.debug(f'model checkpoints saved to {model_file_path}')


if __name__ == '__main__':
    # Script entry point: train using the project-wide config object.
    train_model(config=cfg)
