import pandas as pd
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torch.utils.data import random_split
from transformers import AutoTokenizer, AutoModelForSequenceClassification
torch.cuda.empty_cache()

# Hotel-review sentiment data.
# Load the raw CSV and discard rows with missing review/label values.
data = pd.read_csv("./ChnSentiCorp_htl_all.csv").dropna()
print(data)
print("------------------------------1--------------------------------------")


# 创建Dataset
# Dataset wrapper around the hotel-review CSV.
class MyDataset(Dataset):
    """Map-style dataset yielding (review_text, label) pairs.

    Generalized: the CSV path is now a parameter (default preserves the
    original hard-coded behavior), so the class can load other files with
    the same `review`/`label` column layout.
    """

    def __init__(self, data_path: str = "./ChnSentiCorp_htl_all.csv") -> None:
        super().__init__()
        # Load once and drop rows with missing values.
        self.data = pd.read_csv(data_path).dropna()
        # Pre-extract columns so __getitem__ is plain O(1) list indexing
        # instead of repeated DataFrame access.
        self.reviews = self.data["review"].tolist()
        self.labels = self.data["label"].tolist()

    def __getitem__(self, index):
        # Returns (text, label) for one sample.
        return self.reviews[index], self.labels[index]

    def __len__(self):
        # Same count as len(self.data) after dropna().
        return len(self.labels)



dataset = MyDataset()
# Peek at the first few samples.
for i in range(5):
	print(dataset[i])
print("------------------------------2--------------------------------------")
# Split into 90% train / 10% validation.
trainset, validset = random_split(dataset, lengths = [0.9, 0.1])
# Bug fix: the bare tuple `len(trainset), len(validset)` was a no-op
# notebook leftover — print the split sizes as clearly intended.
print(len(trainset), len(validset))
for i in range(10):
	print(trainset[i])
print("------------------------------3--------------------------------------")
# Build the DataLoader pipeline: tokenizer for the 3-layer Chinese RoBERTa (hfl/rbt3).
tokenizer = AutoTokenizer.from_pretrained("hfl/rbt3")


def collate_func(batch):
	"""Collate a list of (review_text, label) pairs into model-ready tensors."""
	# Separate texts and labels with comprehensions instead of an append loop.
	texts = [sample[0] for sample in batch]
	labels = [sample[1] for sample in batch]
	# Pad/truncate every review to exactly 128 tokens and return PyTorch tensors.
	inputs = tokenizer(texts, max_length = 128, padding = "max_length", truncation = True, return_tensors = "pt")
	inputs["labels"] = torch.tensor(labels)
	return inputs


# Training batches are shuffled; validation batches are not.
trainloader = DataLoader(trainset, collate_fn = collate_func, batch_size = 32, shuffle = True)
validloader = DataLoader(validset, collate_fn = collate_func, batch_size = 64, shuffle = False)
# Sanity check: show the first validation batch.
print(next(iter(validloader)))
print("------------------------------4--------------------------------------")
# Model and optimizer: binary classification head on top of hfl/rbt3.
model = AutoModelForSequenceClassification.from_pretrained("hfl/rbt3", num_labels=2)

# Move to GPU when one is available.
model = model.cuda() if torch.cuda.is_available() else model
optimizer = Adam(model.parameters(), lr = 2e-5)
print(optimizer)
print("------------------------------5--------------------------------------")


#训练与验证
def evaluate():
	model.eval()
	acc_num = 0
	with torch.inference_mode():
		for batch in validloader:
			if torch.cuda.is_available():
				batch = {k: v.cuda() for k, v in batch.items()}
			output = model(**batch)
			pred = torch.argmax(output.logits, dim = -1)
			acc_num += (pred.long() == batch["labels"].long()).float().sum()
	return acc_num / len(validset)


def train(epoch = 3, log_step = 20):
	"""Fine-tune `model` on `trainloader` for `epoch` epochs.

	Logs the current and running-average loss every `log_step` steps and
	prints validation accuracy at the end of each epoch.
	"""
	global_step = 0
	for ep in range(epoch):
		model.train()
		running_loss = 0.0
		seen_batches = 0

		for batch in trainloader:
			# Move the batch to the GPU when one is available.
			if torch.cuda.is_available():
				batch = {key: val.cuda() for key, val in batch.items()}

			# Standard step: zero grads, forward, backward, update.
			optimizer.zero_grad()
			loss = model(**batch).loss
			loss.backward()
			optimizer.step()

			running_loss += loss.item()
			seen_batches += 1

			if global_step % log_step == 0:
				avg_loss = running_loss / seen_batches
				print(f"ep: {ep}, global_step: {global_step}, loss: {loss.item():.4f}, avg_loss: {avg_loss:.4f}")

			global_step += 1

		# Validation accuracy once per epoch.
		acc = evaluate()
		print(f"ep: {ep}, acc: {acc:.4f}")


# Model training: run fine-tuning (3 epochs by default).
train()

print("------------------------------6--------------------------------------")
# 模型保存
def save_model(path = "./fine_tuned_model"):
	# 确保模型在CPU上进行保存，避免非连续张量问题
	device = next(model.parameters()).device
	model.cpu()

	# 确保所有参数都是连续的
	for param in model.parameters():
		if not param.is_contiguous():
			param.data = param.data.contiguous()

	# 保存模型和tokenizer
	model.save_pretrained(path)
	tokenizer.save_pretrained(path)
	print(f"模型已保存到 {path}")

	# 恢复原来的设备
	if torch.cuda.is_available() and str(device).startswith('cuda'):
		model.cuda()

# Persist the fine-tuned model and tokenizer to disk.
save_model()
print("------------------------------7--------------------------------------")

# Model prediction on a single example.
sen = "我觉得这家酒店不错，饭很好吃！"
id2_label = {0: "差评！", 1: "好评！"}
model.eval()
with torch.inference_mode():
	inputs = tokenizer(sen, return_tensors = "pt")
	# Bug fix: the .cuda() move was unconditional and crashed on CPU-only
	# machines; guard it like every other device move in this script.
	if torch.cuda.is_available():
		inputs = {k: v.cuda() for k, v in inputs.items()}
	logits = model(**inputs).logits
	pred = torch.argmax(logits, dim = -1)
	print(f"输入：{sen}\n模型预测结果:{id2_label.get(pred.item())}")
print("------------------------------7--------------------------------------")