# 导入相关的包
from transformers import AutoTokenizer,AutoModelForSequenceClassification
import torch
from torch.utils.data import DataLoader
import pandas as pd
from torch.utils.data import Dataset
# 划分验证集和训练集
from torch.utils.data import random_split
from tqdm import tqdm
# Load the hotel-review sentiment dataset (ChnSentiCorp; assumed columns: label, review — TODO confirm schema)
data=pd.read_csv('ChnSentiCorp_htl_all.csv')
# Print the first few rows as a quick sanity check
print(data.head())

# Drop rows with missing values
data=data.dropna()


# 创建dataset


class MyDataset(Dataset):
    """Hotel-review sentiment dataset backed by a CSV file.

    Each item is a ``(review_text, label)`` pair; rows containing
    missing values are dropped at load time.
    """

    def __init__(self, csv_path='ChnSentiCorp_htl_all.csv'):
        # `csv_path` defaults to the original hard-coded file, so the
        # existing call `MyDataset()` keeps working unchanged.
        super().__init__()
        self.data = pd.read_csv(csv_path).dropna()

    def __getitem__(self, item):
        # Fetch the row once instead of two separate iloc lookups.
        row = self.data.iloc[item]
        return row["review"], row["label"]

    def __len__(self):
        return len(self.data)

# Smoke-test the dataset: print the first (review, label) pair
dataset=MyDataset()
print(dataset[0])

# Split into train/validation sets (80/20).
# NOTE(review): fractional lengths require torch >= 1.13 — confirm environment.
train_dataset,val_dataset=random_split(dataset,[0.8,0.2])
print(len(train_dataset),len(val_dataset))

# hfl/rbt3 is a 3-layer Chinese RoBERTa checkpoint (downloaded from the HF hub)
tokenizer = AutoTokenizer.from_pretrained("hfl/rbt3")
# Collate function for the DataLoader
def collect_fn(batch):
    """Collate a list of (text, label) pairs into tokenized model inputs.

    Returns a dict of padded/truncated token tensors with an added
    "labels" tensor, ready to be passed to the model via ``model(**inputs)``.
    """
    texts, labels = [], []
    for text, label in batch:
        texts.append(text)
        labels.append(label)

    encoded = tokenizer(
        texts,
        max_length=128,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    encoded["labels"] = torch.tensor(labels, dtype=torch.long)
    return encoded

# Create the dataloaders.
# Fix: the validation loader was shuffled (shuffle=True); shuffling has no
# effect on accuracy and makes evaluation order non-deterministic, so it is
# disabled here.
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, collate_fn=collect_fn)
val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False, collate_fn=collect_fn)


# Sanity-check: print the first few batches to verify the collate output
for i, inputs in enumerate(train_loader):
    print(inputs)
    if i > 1:
        break

# Create the model and optimizer.
# AutoModelForSequenceClassification puts a 2-class classification head on hfl/rbt3.
model=AutoModelForSequenceClassification.from_pretrained("hfl/rbt3",num_labels=2)
# Small learning rate, typical for fine-tuning pretrained transformers
optimizer=torch.optim.Adam(model.parameters(),lr=2e-5)
def train(model, optimizer, train_loader, val_loader, epochs=5):
    """Fine-tune `model` on `train_loader`, evaluating accuracy on `val_loader` each epoch.

    Args:
        model: sequence-classification model whose output carries `loss` and `logits`
            (loss is computed internally because each batch contains "labels").
        optimizer: optimizer constructed over ``model.parameters()``.
        train_loader: yields dict batches suitable for ``model(**inputs)``.
        val_loader: validation batches in the same format.
        epochs: number of passes over the training data (default 5).
    """
    for epoch in range(epochs):
        model.train()
        for step, inputs in enumerate(tqdm(train_loader)):
            optimizer.zero_grad()
            outputs = model(**inputs)
            outputs.loss.backward()
            optimizer.step()
            if step % 10 == 0:
                print("Epoch:{},Step:{},Loss:{}".format(epoch, step, outputs.loss.item()))

        # Evaluation — fix: wrap in no_grad() so no autograd graph is built
        # (the original tracked gradients during validation, wasting memory).
        model.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for inputs in tqdm(val_loader):
                outputs = model(**inputs)
                preds = torch.argmax(outputs.logits, dim=-1)
                correct += (preds == inputs["labels"]).sum().item()
                total += inputs["labels"].size(0)
        # Fix: the original divided by the *global* `val_dataset`; counting the
        # evaluated samples keeps the function self-contained and correct even
        # if the loader drops/filters batches.
        acc = correct / total if total else 0.0
        print("Epoch:{},Accuracy:{}".format(epoch, acc))
# NOTE(review): `criterion` is never used — the model computes cross-entropy
# internally when "labels" are present in the inputs. Kept as-is to avoid
# changing module-level state; consider deleting.
criterion=torch.nn.CrossEntropyLoss()

# Run training
epochs=5
train(model,optimizer,train_loader,val_loader,epochs)

# Run inference on a single example.
model.eval()

sen="这家酒店服务态度非常好，房间很干净，位置也很好， overall very good"
inputs = tokenizer(sen, max_length=128, padding="max_length", truncation=True, return_tensors="pt")
# Fix: disable gradient tracking for inference (the original built an autograd graph).
with torch.no_grad():
    outputs = model(**inputs)
preds = torch.argmax(outputs.logits, dim=-1)
print(preds)
# Fix: compare the scalar value, not the tensor — `preds == 0` yields a tensor
# and only works in `if` because the batch happens to have one element.
if preds.item() == 0:
    print("负面评论")
else:
    print("正面评论")