import torch
import torch.nn as nn
from matplotlib.pyplot import margins
from torch.optim import AdamW
from bert_config_zc import Bert_Config_ZC
from bert_dataloader_zc import create_dataloader
from pediatric_model_zc import PediatricClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report
from tqdm import tqdm

# Shared project configuration: device, learning rate, epochs, metric
# averaging mode, and checkpoint/report save paths (see Bert_Config_ZC).
conf = Bert_Config_ZC()

def model2train():
    """Train the pediatric classifier.

    Runs `conf.epochs` epochs over the training set, printing running
    loss/accuracy/F1 every `margin_print` batches, evaluating on the dev
    set every `margin_dev` batches (and at the end of each epoch), and
    saving the model weights plus a metrics report whenever the dev F1
    improves on the best seen so far.
    """
    # Data loaders for the three splits (the test split is unused here).
    train_dataloader, dev_dataloader, test_dataloader = create_dataloader()
    # Model, loss function and optimizer.
    model = PediatricClassifier().to(conf.device)
    loss_fn = nn.CrossEntropyLoss()
    optimizer = AdamW(model.parameters(), lr=conf.lr)

    best_f1 = 0.0
    margin_print = 20  # print training metrics every N batches
    # Dev-eval interval; must stay a multiple of margin_print because the
    # dev check below is nested inside the print check.
    margin_dev = 80
    len_train = len(train_dataloader)

    # Outer loop: epochs.
    for epoch in range(conf.epochs):
        model.train()
        # Loss and predictions accumulated since the last print/reset.
        total_loss = 0.0
        train_preds, train_labels = [], []
        # Inner loop: batches.
        for idx, (input_ids, attention_mask, labels) in enumerate(tqdm(train_dataloader, desc='正在训练中，请耐心等候...')):
            input_ids = input_ids.to(conf.device)
            attention_mask = attention_mask.to(conf.device)
            labels = labels.to(conf.device)

            # Forward pass: class scores.
            logits = model(input_ids, attention_mask)
            loss = loss_fn(logits, labels)
            total_loss += loss.item()
            # Track predictions for running accuracy/F1.
            preds = torch.argmax(logits, dim=-1)
            train_preds.extend(preds.cpu().tolist())
            train_labels.extend(labels.cpu().tolist())

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (idx + 1) % margin_print == 0 or idx == (len_train - 1):
                acc = accuracy_score(train_labels, train_preds)
                f1 = f1_score(train_labels, train_preds, average=conf.average)
                # BUGFIX: was `total_loss / (idx % 50 + 1)`, which disagreed
                # with the reset interval (margin_print = 20) and reported a
                # wrong average. Batches accumulated since the last reset is
                # `idx % margin_print + 1`.
                avg_loss = total_loss / (idx % margin_print + 1)
                print(f"\n轮次: {epoch + 1}, 批次: {idx + 1}, 损失: {avg_loss:.4f}, acc准确率:{acc:.4f}, f1分数:{f1:.4f}")
                total_loss = 0.0
                train_preds, train_labels = [], []

                # Dev evaluation every margin_dev batches and at epoch end.
                if (idx + 1) % margin_dev == 0 or idx == (len_train - 1):
                    rt_report, rt_acc, rt_precision, rt_recall, rt_f1 = model2dev(model, dev_dataloader)
                    report_str = f'验证集报告：{rt_report}'
                    print(report_str)
                    others_str = f'accuracy：{rt_acc}, precision：{rt_precision}, f1：{rt_f1}, recall：{rt_recall}'
                    # model2dev switched the model to eval mode; restore training mode.
                    model.train()
                    print(others_str)
                    # Checkpoint on dev-F1 improvement.
                    if rt_f1 > best_f1:
                        best_f1 = rt_f1
                        torch.save(model.state_dict(), conf.pediatric_model_save_path)
                        with open(conf.model_save_score_path, 'w', encoding='utf-8') as fw:
                            fw.write(report_str)
                            fw.write(others_str)


def model2dev(model, data_loader):
    """Evaluate `model` on `data_loader` without gradient tracking.

    Args:
        model: classifier taking (input_ids, attention_mask) and returning logits.
        data_loader: yields (input_ids, attention_mask, labels) batches.

    Returns:
        Tuple of (report, acc, precision, recall, f1) where `report` is the
        sklearn classification-report string and the metrics use
        `conf.average` for multi-class averaging.
    """
    model.eval()
    all_preds, all_labels = [], []
    with torch.no_grad():
        # Removed the unused `enumerate` index from the original loop.
        for input_ids, attention_mask, labels in tqdm(data_loader, desc='正在验证中，请耐心等候...'):
            input_ids = input_ids.to(conf.device)
            attention_mask = attention_mask.to(conf.device)
            labels = labels.to(conf.device)

            logits = model(input_ids, attention_mask)
            preds = torch.argmax(logits, dim=-1)
            all_preds.extend(preds.cpu().tolist())
            all_labels.extend(labels.cpu().tolist())

    report = classification_report(all_labels, all_preds)
    acc = accuracy_score(all_labels, all_preds)
    precision = precision_score(all_labels, all_preds, average=conf.average)
    recall = recall_score(all_labels, all_preds, average=conf.average)
    f1 = f1_score(all_labels, all_preds, average=conf.average)

    return report, acc, precision, recall, f1

if __name__ == '__main__':
    # Script entry point: launch training when run directly.
    model2train()


