# -*- coding: utf-8 -*-
"""
@project: bert-text-classification

@author: bruce zhang

@file: main.py

@desc: Training and evaluation entry point for the BERT text classifier.
"""
import torch
from torch.utils.data import TensorDataset, DataLoader
from utils import load_vocab, read_corpus, load_model, save_model, build_dataset, get_time_diff
import fire
from config import Config
from model import Bert_Text_Classfication
import torch.optim as optim
import torch.nn.functional as F
from sklearn import metrics
import time

def train(**kwargs):
    """Train the BERT text classifier.

    Loads vocab/labels and the train/dev/test corpora, trains for up to
    ``config.num_epoch`` epochs, evaluates on the dev set every 50 steps,
    saves the model whenever dev loss improves, early-stops after
    ``config.require_improvement`` steps without improvement, and finally
    reports metrics on the test set.

    Any keyword argument overrides the corresponding ``Config`` field
    (e.g. ``train(use_cuda=True, batch_size=1024)``).
    """
    config = Config()
    config.update(**kwargs)
    print('当前设置为:\n', config)
    if config.use_cuda:
        torch.cuda.set_device(config.gpu)
    print('loading corpus')
    vocab = load_vocab(config.vocab)
    label_dic = load_vocab(config.label_file)

    train_data = read_corpus(config.train_file, config.max_length, label_dic, vocab)
    dev_data = read_corpus(config.dev_file, config.max_length, label_dic, vocab)
    test_data = read_corpus(config.test_file, config.max_length, label_dic, vocab)

    train_dataset = build_dataset(train_data)
    train_loader = DataLoader(train_dataset, shuffle=True, batch_size=config.batch_size)

    dev_dataset = build_dataset(dev_data)
    dev_loader = DataLoader(dev_dataset, shuffle=True, batch_size=config.batch_size)

    test_dataset = build_dataset(test_data)
    test_loader = DataLoader(test_dataset, shuffle=True, batch_size=config.batch_size)

    model = Bert_Text_Classfication(config.bert_path, config.hidden_dim, len(label_dic))
    if config.load_model:
        assert config.load_path is not None
        model = load_model(model, name=config.load_path)
    if config.use_cuda:
        model.cuda()
    # Resolve the optimizer class by name (e.g. "Adam") from torch.optim.
    optimizer = getattr(optim, config.optim)
    optimizer = optimizer(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
    eval_loss = float('inf')  # best (lowest) dev loss seen so far
    step = 0                  # total number of training iterations
    last_improved = 0         # step at which dev loss last improved
    start_time = time.time()
    flag = False              # set True when early stopping triggers
    model.train()
    for epoch in range(config.num_epoch):
        if hasattr(torch.cuda, 'empty_cache'):  # release cached CUDA memory
            torch.cuda.empty_cache()
        for i, batch in enumerate(train_loader):
            # BUG FIX: the original unpacked the batch into ``input_mask`` but
            # passed ``input_masks`` to the model; ``input_masks`` was only
            # assigned on the CUDA branch, so CPU runs raised NameError.
            # The name is now consistent on both paths.
            input_ids, input_masks, labels = batch
            if config.use_cuda:
                input_ids, input_masks, labels = input_ids.cuda(), input_masks.cuda(), labels.cuda()
            outputs = model(input_ids, input_masks)
            model.zero_grad()
            loss = F.cross_entropy(outputs, labels)
            loss.backward()
            optimizer.step()
            if step % 5 == 0:
                print('step: {} |  epoch: {}|  loss: {}'.format(step, epoch, loss.item()))
            if step % 50 == 0:  # every 50 steps: evaluate on dev set, save the model when it improves
                pred = torch.argmax(outputs.data, 1).cpu().tolist()
                true = labels.data.cpu().tolist()
                train_acc = metrics.accuracy_score(true, pred)
                dev_acc, dev_loss = evaluate(model, dev_loader, config)
                if dev_loss < eval_loss:
                    eval_loss = dev_loss
                    save_model(model, epoch)
                    improve = '*'  # '*' marks an improvement on the dev set
                    last_improved = step
                else:
                    improve = ''
                time_diff = get_time_diff(start_time)
                msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' \
                      ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6}'
                print(msg.format(step, loss.item(), train_acc, dev_loss, dev_acc, time_diff, improve))
                model.train()  # evaluate() switched the model to eval mode; switch back
            step += 1
            # Early stopping: no dev improvement for too long.
            if step - last_improved > config.require_improvement:
                print('No optimization for a long time, auto-stopping...')
                flag = True
                break  # leave the batch loop
        if flag:  # early stop triggered: leave the epoch loop as well
            break
    test(model, test_loader, config)

def test(model, test_loader, config):
    """Evaluate the trained model on the test set and print a full report
    (loss, accuracy, per-class precision/recall/F1, confusion matrix)."""
    model.eval()
    started = time.time()
    acc, loss, report, confusion = evaluate(model, test_loader, config, test=True)
    print('Test Loss: {0:>5.2},  Test Acc: {1:>6.2%}'.format(loss, acc))
    print("Precision, Recall and F1-Score...")
    print(report)
    print("Confusion Matrix...")
    print(confusion)
    elapsed = get_time_diff(started)
    print("Time usage on testing: ", elapsed)

def evaluate(model, dev_loader, config, test=False):
    """Run the model over ``dev_loader`` without gradients and compute metrics.

    Returns ``(accuracy, mean_loss)``; when ``test`` is True also returns a
    per-class classification report and the confusion matrix:
    ``(accuracy, mean_loss, report, confusion)``.
    """
    model.eval()
    total_loss = 0
    all_pred = []  # predicted labels over the whole loader
    all_true = []  # ground-truth labels over the whole loader
    with torch.no_grad():
        for batch in dev_loader:
            input_ids, input_masks, labels = batch
            if config.use_cuda:
                input_ids = input_ids.cuda()
                input_masks = input_masks.cuda()
                labels = labels.cuda()
            outputs = model(input_ids, input_masks)
            total_loss += F.cross_entropy(outputs, labels).item()
            all_true.extend(labels.data.cpu().tolist())
            all_pred.extend(torch.argmax(outputs.data, 1).cpu().tolist())
    acc = metrics.accuracy_score(all_true, all_pred)
    mean_loss = total_loss / len(dev_loader)  # average per-batch loss
    if test:
        report = metrics.classification_report(all_true, all_pred, target_names=config.class_list, digits=4)
        confusion = metrics.confusion_matrix(all_true, all_pred)
        return acc, mean_loss, report, confusion
    return acc, mean_loss


if __name__ == '__main__':
    # Expose this module's functions (e.g. `train`) as CLI commands via python-fire;
    # see the usage example at the bottom of the file.
    fire.Fire()

# python main.py train --use_cuda=True --batch_size=1024
