import os
import time
from copy import deepcopy
from loguru import logger
from tqdm import tqdm

import torch
from torchtext.data.utils import get_tokenizer

from config.config import cfg
from model.TextSentiment import TextClassificationModel
from utils.data_helper import NewsDataset
from utils.utils import build_vocab, load_dataset, log_init


def train(dataloader):
    """Run one training epoch over ``dataloader``.

    Uses the module-level ``model``, ``optimizer``, ``criterion`` and the
    current ``epoch`` counter. Logs the running accuracy every
    ``log_interval`` batches and resets the running counters afterwards.
    """
    model.train()
    total_acc, total_count = 0, 0
    log_interval = 500

    # Iterate lazily: wrapping enumerate(...) in list() would pull every
    # batch into memory up front just to give tqdm a length. Passing
    # total=len(dataloader) keeps the progress bar without materializing.
    for idx, (label, text, offsets) in tqdm(enumerate(dataloader),
                                            total=len(dataloader), ncols=100):
        optimizer.zero_grad()
        predicted_label = model(text, offsets)
        loss = criterion(predicted_label, label)
        loss.backward()
        # Clip gradients to stabilize SGD on this embedding-bag model.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
        optimizer.step()
        total_acc += (predicted_label.argmax(1) == label).sum().item()
        total_count += label.size(0)
        if idx % log_interval == 0 and idx > 0:
            logger.debug(
                f'| epoch {epoch:3d} | {idx:5d}/{len(dataloader):5d} batches | accuracy {(total_acc / total_count):8.3f}')
            # Reset the running-accuracy window for the next interval.
            total_acc, total_count = 0, 0


def evaluate(dataloader):
    """Evaluate the module-level ``model`` on ``dataloader``.

    Runs under ``torch.no_grad()`` so no graph is built. Returns the overall
    classification accuracy in [0, 1]; returns 0.0 for an empty dataloader.
    """
    model.eval()
    total_acc, total_count = 0, 0

    with torch.no_grad():
        for label, text, offsets in dataloader:
            predicted_label = model(text, offsets)
            # The original also computed the loss here but never used it;
            # dropped to avoid wasted work during evaluation.
            total_acc += (predicted_label.argmax(1) == label).sum().item()
            total_count += label.size(0)
    # Guard against ZeroDivisionError on an empty dataloader.
    return total_acc / total_count if total_count else 0.0


if __name__ == '__main__':
    # Initialize the logger.
    log_init('train_model', log_dir=cfg.log_dir)

    # Create the tokenizer.
    tokenizer = get_tokenizer('basic_english')
    # Build the vocabulary from the training split.
    vocab = build_vocab(cfg.dataset_dir, split='train', tokenizer=tokenizer)
    # Instantiate the text-classification model.
    model = TextClassificationModel(vocab_size=len(vocab),
                                    embed_dim=cfg.embed_dim,
                                    num_class=cfg.num_class).to(cfg.device)
    # Loss function.
    criterion = torch.nn.CrossEntropyLoss()
    # Optimizer.
    optimizer = torch.optim.SGD(model.parameters(), lr=cfg.lr)
    # Decays the LR by 10x each time scheduler.step() is called (below,
    # only when validation accuracy degrades).
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.1)

    # Load the training data and split it into train/validation loaders.
    train_dataset = load_dataset(cfg.dataset_dir, split='train')
    news_dataset = NewsDataset(tokenizer=tokenizer, vocab=vocab, config=cfg)
    train_dataloader, valid_dataloader = news_dataset.train_valid_split(train_dataset)

    total_accu = None
    # Main training loop.
    for epoch in range(1, cfg.epochs + 1):
        epoch_start_time = time.time()
        # Train for one epoch.
        train(train_dataloader)
        # Evaluate on the validation split after each epoch.
        accu_val = evaluate(valid_dataloader)
        # Step the LR scheduler only when validation accuracy got worse;
        # otherwise record the new best accuracy.
        if total_accu is not None and total_accu > accu_val:
            scheduler.step()
        else:
            total_accu = accu_val
        logger.debug("-" * 59)
        logger.debug(
            f'end of epoch {epoch:3d} | time: {(time.time() - epoch_start_time):5.2f}s | valid accuracy {accu_val:8.3f}')
        # Was print("-" * 59); use the logger consistently with the lines above.
        logger.debug("-" * 59)

    # Save the final checkpoint; make sure the target directory exists first,
    # otherwise torch.save raises FileNotFoundError.
    os.makedirs(cfg.model_save_dir, exist_ok=True)
    state_dict = deepcopy(model.state_dict())
    model_file_path = os.path.join(cfg.model_save_dir, 'model.pkl')
    torch.save({
        'model_state_dict': state_dict,
        'epoch': epoch,
        # NOTE(review): the 'loss' key actually stores validation accuracy,
        # not a loss value. Key name kept for compatibility with existing
        # checkpoint loaders — confirm and consider renaming to 'accuracy'.
        'loss': accu_val
    }, model_file_path)
    logger.debug(f'model checkpoints saved to {model_file_path}')
