# Import project-local utility modules
import os

import torch
from sklearn.metrics import accuracy_score, f1_score
from torch import nn
from tqdm import tqdm
from transformers import AdamW

from __00__config import Config
from __01__dataloader_utils import get_dataloader
from __02__bert_classifer_model import BertClassifierModel
from __03__model2dev_utils import model2dev
# Suppress warning output (e.g. noisy deprecation messages from libraries).
import warnings

warnings.filterwarnings("ignore")
# Shared configuration object (data paths, device, hyperparameters, save path).
config = Config()
# print(config.model_save_path)


def train_model():
	"""Train the BERT classifier with per-epoch validation.

	Runs `config.num_epoch` epochs over the training data, logging the
	average loss / accuracy / macro-F1 every 100 batches, evaluating on
	the test dataloader at the end of each epoch, and saving the model
	state dict to `config.model_save_path` whenever the validation
	macro-F1 improves on the best seen so far.
	"""
	# Build the train/validation dataloaders from the configured paths.
	train_dataloader = get_dataloader(config.train_datapath)
	test_dataloader = get_dataloader(config.test_datapath)

	# Model, loss function and optimizer.
	model = BertClassifierModel().to(config.device)
	criterion = nn.CrossEntropyLoss()
	# NOTE(review): `transformers.AdamW` is deprecated/removed in recent
	# transformers releases; `torch.optim.AdamW` is the recommended
	# replacement — confirm against the pinned library version.
	optimizer = AdamW(
		params=model.parameters(),
		lr=config.learning_rate
	)

	# Best validation macro-F1 observed so far (checkpoint criterion).
	f1_best = 0
	# Hoist the loop-invariant batch count.
	num_batches = len(train_dataloader)
	for epoch in range(config.num_epoch):
		# Ensure training mode at the start of every epoch.
		model.train()
		total_loss = 0
		train_predict, train_label = [], []
		for i, batch in enumerate(tqdm(train_dataloader, desc='模型训练中...'), start=1):
			input_ids, attention_mask, label = batch
			# Move the batch tensors onto the target device.
			input_ids = input_ids.to(config.device)
			attention_mask = attention_mask.to(config.device)
			label = label.to(config.device)
			# Forward pass and loss.
			logits = model(input_ids, attention_mask)
			loss = criterion(logits, label)
			total_loss += loss.item()
			# Collect predictions and gold labels for the running metrics.
			y_predict_list = torch.argmax(logits, dim=-1)
			train_predict.extend(y_predict_list.cpu().tolist())
			train_label.extend(label.cpu().tolist())
			# Backward pass and parameter update.
			optimizer.zero_grad()
			loss.backward()
			optimizer.step()

			# Log training metrics every 100 batches and at the end of the epoch.
			if i % 100 == 0 or i == num_batches:
				accuracy = accuracy_score(train_label, train_predict)
				f1 = f1_score(train_label, train_predict, average='macro')
				# Number of batches accumulated since the last reset: a
				# full window of 100, or the final partial window at the
				# end of the epoch. (Fixes the original `i % 10 + 1`,
				# which misreported the average loss.)
				batch_count = i % 100 or 100
				avg_loss = total_loss / batch_count
				print(f"\n轮次: {epoch + 1}, 批次: {i}, 损失: {avg_loss:.4f}, acc准确率:{accuracy:.4f}, f1分数:{f1:.4f}")
				# 轮次: 1, 批次: 10, 损失: 2.0761, acc准确率:0.3406, f1分数:0.3434
				# Reset the running loss and metric buffers for the next window.
				total_loss = 0.0
				train_predict, train_label = [], []

			# Evaluate on the validation set once per epoch; instead of
			# saving only the final model, keep the checkpoint with the
			# best macro-F1 seen so far (train-and-validate loop).
			if i == num_batches:
				report, f1, accuracy, precision, recall = model2dev(model, test_dataloader, config.device)
				print("验证集评估报告：\n", report)
				print(
					f"验证集的f1: {f1:.4f}, accuracy:{accuracy:.4f}, precision:{precision:.4f}, recall:{recall:.4f}")
				# model2dev presumably switches the model to eval mode;
				# restore training mode before the next epoch.
				model.train()
				if f1 > f1_best:
					# New best score: record it and persist the weights.
					f1_best = f1
					torch.save(model.state_dict(), config.model_save_path)
					print("保存模型成功, 当前f1分数:", f1_best)


# Script entry point: run the full training loop when executed directly.
if __name__ == '__main__':
	# print(os.getcwd())
	train_model()
