import evaluate
import torch
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments, DataCollatorWithPadding, pipeline


# Entry-point guard: the code below runs only when the script is executed
# directly (required when dataloader workers re-import this module).
if __name__ == '__main__':
	# Release any cached GPU memory left over from previous runs.
	torch.cuda.empty_cache()
	# Pin all subsequent CUDA work to GPU 0 (assumes a CUDA device exists — TODO confirm).
	torch.cuda.set_device(0)

	# Load the Chinese hotel-review dataset from a local CSV file.
	# NOTE(review): expects columns "review" (text) and "label" (int) — verify against the CSV.
	dataset = load_dataset("csv", data_files = "./ChnSentiCorp_htl_all.csv", split = "train")
	# Drop rows whose review text is missing (None).
	dataset = dataset.filter(lambda x: x["review"] is not None)
	print(dataset)
	print("-------------------------------------------1----------------------------------------------")

	# Split into train/test; 10% of the rows form the test split.
	datasets = dataset.train_test_split(test_size = 0.1)
	print(datasets)
	print("-------------------------------------------2----------------------------------------------")

	# Preprocessing: load the tokenizer matching the pretrained Chinese MacBERT checkpoint.
	tokenizer = AutoTokenizer.from_pretrained("hfl/chinese-macbert-large")


	# Batch preprocessing function applied to every split via datasets.map.
	def process_function(examples):
		"""Tokenize a batch of examples and attach labels for the Trainer.

		Args:
			examples: batch dict with a "review" (str) column and a
				"label" (int) column.

		Returns:
			dict of tokenizer outputs (variable-length, truncated to 64
			tokens) plus a "labels" field.
		"""
		# Truncate to 64 tokens but do NOT pad here: the Trainer is given a
		# DataCollatorWithPadding, which pads each batch dynamically to its
		# longest member — padding every example to max_length up front only
		# wastes memory and compute.
		tokenized_examples = tokenizer(examples["review"], max_length = 64, truncation = True)
		# Propagate the gold labels under the key the Trainer expects.
		tokenized_examples["labels"] = examples["label"]
		return tokenized_examples


	# Tokenize both splits; drop the raw columns so only model inputs remain.
	raw_columns = datasets["train"].column_names
	tokenized_datasets = datasets.map(
		process_function,
		batched = True,
		remove_columns = raw_columns,
	)
	print(tokenized_datasets)
	print("-------------------------------------------3----------------------------------------------")


	# Helper used right after model creation to normalize parameter storage.
	def ensure_weights_contiguous(model):
		"""Make every parameter tensor of *model* contiguous in memory.

		Walks all named parameters and, for any whose storage is
		non-contiguous, replaces its data with a contiguous copy in place.
		"""
		for name, param in model.named_parameters():
			# Already-contiguous parameters need no work.
			if param.is_contiguous():
				continue
			print(f"Making {name} contiguous.")
			param.data = param.data.contiguous()


	# Load the pretrained Chinese MacBERT encoder with a freshly initialized
	# sequence-classification head.
	model = AutoModelForSequenceClassification.from_pretrained("hfl/chinese-macbert-large")
	# Normalize parameter storage before training/saving.
	ensure_weights_contiguous(model)
	print(model.config)
	print("-------------------------------------------4----------------------------------------------")

	# Evaluation metrics loaded from local metric scripts.
	acc_metric = evaluate.load("./metric_accuracy.py")
	f1_metric = evaluate.load("./metric_f1.py")  # fixed misspelled name "f1_metirc"


	def eval_metric(eval_predict):
		"""Compute accuracy and F1 for a Trainer evaluation step.

		Args:
			eval_predict: (predictions, labels) pair supplied by the Trainer;
				predictions are raw logits of shape (batch, num_labels).

		Returns:
			A single dict merging both metric results (presumably keys
			"accuracy" and "f1"; the "f1" key feeds metric_for_best_model —
			verify against the local metric scripts).
		"""
		predictions, labels = eval_predict
		# Convert logits to predicted class ids.
		predictions = predictions.argmax(axis = -1)
		acc = acc_metric.compute(predictions = predictions, references = labels)
		f1 = f1_metric.compute(predictions = predictions, references = labels)
		# Merge both metric dicts into one report without mutating either.
		return {**acc, **f1}


	print(eval_metric)
	print("-------------------------------------------5----------------------------------------------")

	# Training configuration. (A previously commented-out alternative config —
	# adamw_torch, step-based eval, warmup — was removed as dead code.)
	train_args = TrainingArguments(
		output_dir = "./checkpoints",  # where checkpoints and logs are written
		per_device_train_batch_size = 64,  # per-device train batch size
		per_device_eval_batch_size = 128,  # per-device eval batch size
		gradient_accumulation_steps = 4,  # effective train batch = 64 * 4 per device
		gradient_checkpointing = True,  # trade recompute for activation memory
		gradient_checkpointing_kwargs = {"use_reentrant": False},  # recommended non-reentrant mode
		optim = "adafactor",  # memory-light optimizer
		save_steps = 1000,  # NOTE(review): inert while save_strategy="epoch" — confirm intent
		fp16 = True,  # mixed-precision training
		logging_steps = 100,  # log every 100 optimizer steps
		num_train_epochs = 3,  # total training epochs
		eval_strategy = "epoch",  # evaluate at the end of every epoch
		save_strategy = "epoch",  # must match eval_strategy for load_best_model_at_end
		save_total_limit = 3,  # keep at most 3 checkpoints on disk
		learning_rate = 2e-5,  # peak learning rate
		weight_decay = 0.01,  # L2-style weight decay
		metric_for_best_model = "f1",  # key produced by eval_metric
		load_best_model_at_end = True,  # restore the best checkpoint after training
	)

	print(train_args)
	print("-------------------------------------------6----------------------------------------------")

	# Freeze the BERT backbone so only the classification head is trained.
	# NOTE(review): with the entire backbone frozen AND gradient_checkpointing
	# enabled, checkpointed segments may receive no inputs requiring grad —
	# confirm that gradients actually flow to the classifier head.
	for name, param in model.bert.named_parameters():
		param.requires_grad = False

	# Assemble the Trainer from model, config, data, collator and metrics.
	# NOTE(review): the `tokenizer=` argument is deprecated in newer
	# transformers releases in favour of `processing_class=` — verify the
	# installed version.
	trainer = Trainer(
		model = model,
		args = train_args,
		tokenizer = tokenizer,
		train_dataset = tokenized_datasets["train"],
		eval_dataset = tokenized_datasets["test"],
		data_collator = DataCollatorWithPadding(tokenizer = tokenizer),
		compute_metrics = eval_metric
	)
	print(trainer)
	print("-------------------------------------------7----------------------------------------------")

	# Train; with load_best_model_at_end=True the best checkpoint is restored
	# into `model` when training finishes.
	trainer.train()
	print("-------------------------------------------8----------------------------------------------")

	# Manual inference on a single test sentence, plus the label-name mapping.
	sen = "我觉得这家酒店不错，饭很好吃！"
	id2_label = {0: "差评！", 1: "好评！"}

	# Disable dropout etc. for deterministic inference.
	model.eval()
	# inference_mode() skips autograd bookkeeping entirely.
	with torch.inference_mode():
		inputs = tokenizer(sen, return_tensors = "pt")
		# Move inputs to whatever device the model ended up on after training —
		# more robust than hard-coding .cuda(), which fails on CPU-only hosts.
		inputs = {k: v.to(model.device) for k, v in inputs.items()}
		logits = model(**inputs).logits
		# Highest-logit class id is the prediction.
		pred = torch.argmax(logits, dim = -1)
		print(f"输入：{sen}\n模型预测结果:{id2_label.get(pred.item())}")

	# Same prediction through the high-level pipeline API; attach the label
	# names so the pipeline reports human-readable labels.
	model.config.id2label = id2_label
	# NOTE(review): device = 0 assumes GPU 0 is available — consider
	# device = model.device instead.
	pipe = pipeline("text-classification", model = model, tokenizer = tokenizer, device = 0)
	print(pipe(sen))
	print("-------------------------------------------9----------------------------------------------")