import torch
from datasets import load_dataset
from torch.optim import Adam
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, AutoModelForSequenceClassification, DataCollatorWithPadding, pipeline
# Free cached GPU memory and pin computation to GPU 0 — but only when a CUDA
# device is actually present, so the script also runs on CPU-only hosts.
# (The original called set_device(0) unconditionally, which raises without CUDA.)
if torch.cuda.is_available():
	torch.cuda.empty_cache()
	torch.cuda.set_device(0)

# Load the hotel-review sentiment dataset from a local CSV file.
dataset = load_dataset("csv", data_files = "./ChnSentiCorp_htl_all.csv", split = "train")
# Drop rows with missing review text; the tokenizer cannot handle None.
dataset = dataset.filter(lambda x: x["review"] is not None)
print(dataset)
print("--------------------------------------1---------------------------------------------")
# Split the data: 10% held out as a test set to estimate generalization,
# the remaining 90% used for training.
datasets = dataset.train_test_split(test_size = 0.1)
print(datasets)
print("--------------------------------------2---------------------------------------------")
# Tokenizer used to turn review text into model inputs.
tokenizer = AutoTokenizer.from_pretrained("hfl/rbt3")

def process_function(examples):
	"""Tokenize a batch of reviews and attach the labels.

	Relies on the module-level ``tokenizer``. Text is truncated to 128
	tokens; padding is deferred to the DataLoader's collator.
	"""
	encoded = tokenizer(examples["review"], max_length = 128, truncation = True)
	encoded["labels"] = examples["label"]
	return encoded


# Tokenize both splits; the original text columns are dropped so only model
# inputs remain in the processed datasets.
tokenized_datasets = datasets.map(process_function, batched = True, remove_columns = datasets["train"].column_names)
print(tokenized_datasets)
trainset = tokenized_datasets["train"]
validset = tokenized_datasets["test"]
# One shared collator pads each batch to its longest sequence on the fly.
collator = DataCollatorWithPadding(tokenizer)
trainloader = DataLoader(trainset, batch_size = 32, shuffle = True, collate_fn = collator)
validloader = DataLoader(validset, batch_size = 64, shuffle = False, collate_fn = collator)
# Peek at the first validation batch to sanity-check the pipeline.
print(next(enumerate(validloader))[1])
print("--------------------------------------3--------------------------------------------")
# Build the classifier and its optimizer.
model = AutoModelForSequenceClassification.from_pretrained("hfl/rbt3")
if torch.cuda.is_available():
	model = model.cuda()
optimizer = Adam(model.parameters(), lr = 2e-5)
print(optimizer)
print("--------------------------------------4--------------------------------------------")
# 训练与验证
def evaluate():
	"""Score the model on the validation set and return its accuracy.

	Uses the module-level ``model``, ``validloader`` and ``validset``.
	Returns a 0-dim float tensor: correct predictions / validation size.
	"""
	model.eval()
	correct = 0
	with torch.inference_mode():
		for batch in validloader:
			if torch.cuda.is_available():
				batch = {name: tensor.cuda() for name, tensor in batch.items()}
			logits = model(**batch).logits
			predictions = torch.argmax(logits, dim = -1)
			correct += (predictions.long() == batch["labels"].long()).float().sum()
	return correct / len(validset)


def train(epoch = 3, log_step = 10):
	"""Fine-tune the model for ``epoch`` passes over the training data.

	Logs the loss every ``log_step`` optimizer steps and prints the
	validation accuracy (via ``evaluate``) after each epoch. Uses the
	module-level ``model``, ``optimizer`` and ``trainloader``.
	"""
	global_step = 0
	for ep in range(epoch):
		model.train()
		for batch in trainloader:
			if torch.cuda.is_available():
				batch = {name: tensor.cuda() for name, tensor in batch.items()}
			optimizer.zero_grad()
			loss = model(**batch).loss
			loss.backward()
			optimizer.step()
			if global_step % log_step == 0:
				print(f"ep: {ep}, global_step: {global_step}, loss: {loss.item()}")
			global_step += 1
		acc = evaluate()
		print(f"ep: {ep}, acc: {acc}")


train()
print("--------------------------------------5--------------------------------------------")
# Model prediction on a single hand-written example.
sen = "我觉得这家酒店不错，饭很好吃！"
id2_label = {0: "差评！", 1: "好评！"}
model.eval()
with torch.inference_mode():
	inputs = tokenizer(sen, return_tensors = "pt")
	# Fix: move inputs to GPU only when CUDA is available; the original
	# unconditional .cuda() crashed on CPU-only machines, unlike the guarded
	# transfers used everywhere else in this script.
	if torch.cuda.is_available():
		inputs = {k: v.cuda() for k, v in inputs.items()}
	logits = model(**inputs).logits
	pred = torch.argmax(logits, dim = -1)
	print(f"输入：{sen}\n模型预测结果:{id2_label.get(pred.item())}")
# Expose human-readable labels through the model config so the pipeline
# reports them instead of generic LABEL_0 / LABEL_1.
model.config.id2label = id2_label
# Fix: select GPU 0 only when present; device=-1 keeps the pipeline on CPU
# (the original hard-coded device=0 and failed without CUDA).
pipe = pipeline("text-classification", model = model, tokenizer = tokenizer, device = 0 if torch.cuda.is_available() else -1)
print(pipe(sen))
print("--------------------------------------6--------------------------------------------")