# -*- coding: utf-8 -*-
# Program function: fine-tune a BERT classifier on text data, periodically
# evaluate on the dev set, and save the best checkpoint by macro-F1.
import torch
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report
from tqdm import tqdm

from config import *
from transformers import BertTokenizer, BertModel, BertConfig, AdamW
from a_data_pro import get_loader
import torch.nn as nn

class MyBertModel(nn.Module):
    """Classification head on top of a pretrained BERT encoder.

    Wraps the globally configured ``bert_model`` (from the star-import of
    config) and maps its pooled [CLS] representation to ``class_num`` logits.
    """

    def __init__(self):
        super().__init__()
        # `bert_model` and `class_num` are provided by `from config import *`.
        self.bert_model = bert_model
        self.out = nn.Linear(768, class_num)

    def forward(self, inputs, attention_masks):
        """Return raw classification logits of shape (batch, class_num)."""
        encoded = self.bert_model(inputs, attention_mask=attention_masks)
        logits = self.out(encoded.pooler_output)
        return logits

def bert_eval(model, dev_loader):
    """Evaluate `model` on the dev set and return macro-averaged metrics.

    Args:
        model: classifier called as ``model(inputs, attention_masks)``,
            returning logits of shape (batch, class_num).
        dev_loader: iterable yielding ``(inputs, attention_masks, labels)``
            batches.

    Returns:
        Tuple ``(acc, precision, recall, f1)`` computed over the whole
        dev set with macro averaging.
    """
    model.eval()
    pre_label_list, label_list = [], []
    with torch.no_grad():
        for inputs, attention_masks, labels in tqdm(dev_loader, desc='验证集'):
            # Match the training loop's convention: move batch tensors to
            # the configured device (previously missing, crashing on CUDA).
            inputs = inputs.to(device)
            attention_masks = attention_masks.to(device)
            model_result = model(inputs, attention_masks)
            result = torch.argmax(model_result, dim=-1)
            pre_label_list.extend(result.cpu().numpy().tolist())
            label_list.extend(labels.cpu().numpy().tolist())
    # Compute the metrics ONCE over the full dev set. The original recomputed
    # all four inside the loop (O(n^2) total) and raised UnboundLocalError on
    # an empty loader because the names were only bound inside the loop body.
    acc = accuracy_score(label_list, pre_label_list)
    f1 = f1_score(label_list, pre_label_list, average='macro')
    precision = precision_score(label_list, pre_label_list, average='macro')
    recall = recall_score(label_list, pre_label_list, average='macro')
    return acc, precision, recall, f1


if __name__ == '__main__':
    # Fine-tune the BERT classifier; config globals (device, lr, epochs,
    # bert_model_path, ...) come from `from config import *`.
    train_loader, test_loader, dev_loader = get_loader(10000)
    # Fix: the model must live on `device` too — batches were moved there
    # but the model stayed on CPU, which crashes when device is CUDA.
    my_bert_model = MyBertModel().to(device)
    optimizer = AdamW(my_bert_model.parameters(), lr=lr)
    loss_fn = nn.CrossEntropyLoss()
    best_f1 = 0
    loss_sum = 0
    # Accumulate predictions ACROSS batches; the original re-created these
    # lists every iteration, so the printed "10-batch" acc/f1 actually
    # covered only the most recent batch.
    pre_label_list, label_list = [], []
    for epoch in range(epochs):
        for i, (inputs, attention_masks, labels) in enumerate(tqdm(train_loader)):
            my_bert_model.train()
            # Loader tensors need no clone/detach — just move to device.
            inputs = inputs.to(device)
            attention_masks = attention_masks.to(device)
            # Fix: labels must share the logits' device for the loss.
            labels = labels.to(device)
            model_result = my_bert_model(inputs, attention_masks)
            # Clear stale gradients before backprop.
            optimizer.zero_grad()
            loss = loss_fn(model_result, labels)
            loss.backward()
            optimizer.step()
            loss_sum += loss.item()
            pre_result = torch.argmax(model_result, dim=-1)
            pre_label_list.extend(pre_result.cpu().numpy().tolist())
            label_list.extend(labels.cpu().numpy().tolist())
            # Report running loss / accuracy every 10 batches (and at epoch end).
            if (i + 1) % 10 == 0 or i == len(train_loader) - 1:
                acc = accuracy_score(label_list, pre_label_list)
                f1 = f1_score(label_list, pre_label_list, average='macro')
                print("训练第{}轮，第{}个batch，平均loss为{},acc:{},f1:{}".format(
                    epoch + 1, i + 1, loss_sum / (i % 10 + 1), acc, f1))
                # Reset the window AFTER reporting so the next window starts fresh.
                loss_sum = 0
                pre_label_list, label_list = [], []
            # Run dev-set evaluation every 200 batches (and at epoch end);
            # keep only the checkpoint with the best macro-F1 so far.
            if (i + 1) % 200 == 0 or i == len(train_loader) - 1:
                acc, precision, recall, f1 = bert_eval(my_bert_model, dev_loader)
                my_bert_model.train()
                print(f'f1:{f1},best:{best_f1}')
                if f1 > best_f1:
                    torch.save(my_bert_model.state_dict(), bert_model_path)
                    best_f1 = f1
                    print(f"保存模型参数，acc:{acc}，pre:{precision},rec:{recall},f1:{f1}")
