#! -*- coding: utf-8 -*-
"""
@Create Time: 20240625
@Info: 训练模块
"""
from datetime import datetime
import os.path
import time
import torch
from sklearn.metrics import accuracy_score
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import BertTokenizer, AdamW, get_cosine_schedule_with_warmup
import warnings
from process import ClassifyDataSet, collate

from model import ClassifyModel, ClassifyBertLSTM
import arguments as args

warnings.filterwarnings('ignore')


def get_parameter_number(model):
    """Return a human-readable summary of the model's total vs. trainable parameter counts."""
    total = 0
    trainable = 0
    for param in model.parameters():
        count = param.numel()
        total += count
        if param.requires_grad:
            trainable += count
    return 'Total parameters: {}, Trainable parameters: {}'.format(total, trainable)


def evaluate(model, data_loader, device):
    """Run the model over a labelled dataloader and return the accuracy.

    Batches are dicts with 'input_ids', 'token_type_ids', 'attention_mask'
    and 'label'; predictions are the argmax over the model's class logits.
    """
    model.eval()
    labels, predictions = [], []
    with torch.no_grad():
        for batch in data_loader:
            logits = model(
                input_ids=batch['input_ids'].to(device),
                token_type_ids=batch['token_type_ids'].to(device),
                attention_mask=batch['attention_mask'].to(device),
            )
            # .detach() is redundant under no_grad; .cpu().tolist() gives the same
            # Python ints as .cpu().numpy().tolist()
            predictions.extend(torch.argmax(logits, dim=1).cpu().tolist())
            labels.extend(batch['label'].cpu().tolist())

    return accuracy_score(labels, predictions)  # fraction of correct predictions


# The test set has no labels; predictions are produced for submission.
def predict(model, data_loader, device):
    """Run inference on an unlabelled dataloader and return predicted class ids.

    NOTE(review): this expects the loader to yield (ids, att, tpe) tuples,
    unlike the dict batches used by evaluate/train_and_eval — confirm the
    test-set loader really uses a tuple collate function.
    """
    model.eval()
    predictions = []
    with torch.no_grad():
        for _, (ids, att, tpe) in tqdm(enumerate(data_loader)):
            logits = model(ids.to(device), att.to(device), tpe.to(device))
            predictions.extend(torch.argmax(logits, dim=1).cpu().tolist())
    return predictions


def train_and_eval(model, train_loader, valid_loader, optimizer, scheduler, device, epoch, config):
    """Train for `epoch` epochs, validating after each one and checkpointing the best weights.

    Args:
        model: classifier producing (batch, num_classes) logits.
        train_loader / valid_loader: DataLoaders yielding dict batches with
            'input_ids', 'token_type_ids', 'attention_mask', 'label'.
        optimizer: optimizer over model.parameters().
        scheduler: per-step learning-rate scheduler (stepped after every batch).
        device: torch device to move batches to.
        epoch: number of training epochs.
        config: dict with 'save_epoch' (earliest epoch eligible for a
            checkpoint) and 'model_save_path' (checkpoint directory).
    """
    best_acc = 0.0
    criterion = nn.CrossEntropyLoss()
    for i in range(epoch):
        # --- training phase ---
        start = time.time()
        model.train()
        print("***** Running training epoch {} *****".format(i + 1))
        train_loss_sum = 0.0
        for batch in train_loader:
            inputs = {'input_ids': batch['input_ids'].to(device),
                      'token_type_ids': batch['token_type_ids'].to(device),
                      'attention_mask': batch['attention_mask'].to(device)}
            y_true = batch['label'].to(device)
            y_pred = model(**inputs)
            loss = criterion(y_pred, y_true)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()  # LR schedule advances per batch, not per epoch

            train_loss_sum += loss.item()
        print("epoch {} mean train loss = {:.4f}".format(i + 1, train_loss_sum / max(len(train_loader), 1)))

        # --- validation phase ---
        # evaluate() switches the model to eval mode itself.
        acc = evaluate(model, valid_loader, device)
        # Keep only the best checkpoint, and not before `save_epoch` epochs.
        if acc > best_acc and i + 1 >= config['save_epoch']:
            best_acc = acc
            model_save_path = config['model_save_path']
            # makedirs (not mkdir): the configured save path is nested, so
            # intermediate directories may not exist yet.
            os.makedirs(model_save_path, exist_ok=True)
            print('模型保存在：', model_save_path)
            torch.save(model.state_dict(), os.path.join(model_save_path, 'pytorch_model.bin'))

        print("current acc is {:.4f}, best acc is {:.4f}".format(acc, best_acc))
        print("time costed = {}s \n".format(round(time.time() - start, 5)))


def train(config):
    """Build the tokenizer, model and dataloaders from `config` and run training.

    Required config keys: 'pretrain_model_path', 'train_data_path',
    'eval_data_path', 'classify_type', 'batch_size', 'model_save_path',
    'save_epoch'. Optional keys (with backward-compatible defaults):
    'epochs' (1), 'lr' (2e-5), 'weight_decay' (1e-4).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    epochs = config.get('epochs', 1)  # previously hard-coded to 1
    tokenizer = BertTokenizer.from_pretrained(config['pretrain_model_path'])
    config['class_num'] = len(args.OPERATE_TYPE)  # output dimension of the classifier head
    model = ClassifyBertLSTM(config).to(device)
    print(get_parameter_number(model))
    train_data = ClassifyDataSet(file_path=config['train_data_path'],
                                 tokenizer=tokenizer,
                                 classify_type=config['classify_type'],
                                 is_train=True)
    valid_data = ClassifyDataSet(file_path=config['eval_data_path'],
                                 tokenizer=tokenizer,
                                 classify_type=config['classify_type'],
                                 is_train=True)
    train_data_loader = DataLoader(train_data, batch_size=config['batch_size'], shuffle=True, collate_fn=collate)
    valid_data_loader = DataLoader(valid_data, batch_size=config['batch_size'], collate_fn=collate)
    optimizer = AdamW(model.parameters(),
                      lr=config.get('lr', 2e-5),
                      weight_decay=config.get('weight_decay', 1e-4))
    # Warm up for exactly one epoch's worth of steps, then cosine-decay
    # over the full training run.
    scheduler = get_cosine_schedule_with_warmup(optimizer,
                                                num_warmup_steps=len(train_data_loader),
                                                num_training_steps=epochs * len(train_data_loader))
    train_and_eval(model, train_data_loader, valid_data_loader, optimizer, scheduler, device, epochs,
                   config)


if __name__ == '__main__':
    # A timestamped save directory keeps each run's checkpoint separate.
    run_config = dict(
        pretrain_model_path=args.bash_path + '/model/chinese-roberta-wwm',
        train_data_path='./data/all.jsonl',
        eval_data_path='./data/all_test.jsonl',
        classify_type='操作',  # either '操作' (operation) or '业务二级' (secondary business)
        batch_size=24,
        model_save_path='../model_saved/operate_type/' + datetime.now().strftime("%Y%m%d_%H%M%S"),
        save_epoch=1,
    )
    train(run_config)