import torch
from tqdm import tqdm
from  datasets import load_dataset,Dataset
from torch.utils.data import DataLoader, random_split
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import DataCollatorWithPadding

# Load the hotel-review CSV as a single "train" split and drop the rows
# whose review text is missing (the raw file contains a few null reviews).
dataset = load_dataset("csv", data_files="ChnSentiCorp_htl_all.csv", split="train")
dataset = dataset.filter(lambda example: example["review"] is not None)
print(len(dataset))

# Tokenizer matching the rbt3 (3-layer Chinese RoBERTa) checkpoint used below.
tokenizer = AutoTokenizer.from_pretrained("hfl/rbt3")
def process_function(example):
    """Tokenize a batch of reviews and attach their integer labels.

    Runs under ``Dataset.map(..., batched=True)``, so ``example`` is a dict of
    column-name -> list. Returns plain Python lists: ``datasets`` stores them
    in Arrow regardless, and ``DataCollatorWithPadding`` tensorizes each batch
    at load time. The previous ``return_tensors="pt"`` / ``torch.tensor(...)``
    calls built tensors that were immediately converted back to lists — pure
    wasted work, now removed.
    """
    token = tokenizer(
        example["review"],
        padding="max_length",
        max_length=128,
        truncation=True,
    )
    token["labels"] = example["label"]
    return token
# Pre-tokenize the whole corpus; the raw text columns are dropped so only
# model inputs (input_ids / attention_mask / labels) remain.
dataset = dataset.map(
    process_function,
    batched=True,
    remove_columns=dataset.column_names,
)
print(dataset[:3])

# Collator that pads each mini-batch dynamically and converts it to tensors.
coll = DataCollatorWithPadding(tokenizer=tokenizer)

# Hold out 10% of the data for validation.
datasets = dataset.train_test_split(test_size=0.1)
train_dataset, val_dataset = datasets["train"], datasets["test"]
print(len(train_dataset), len(val_dataset))

train_loader = DataLoader(train_dataset, batch_size=32, collate_fn=coll, shuffle=True)
print("加载数据集")
val_loader = DataLoader(val_dataset, batch_size=32, collate_fn=coll, shuffle=False)

print("开始加载模型")
# Binary sentiment head on top of the rbt3 backbone, optimized with Adam.
model = AutoModelForSequenceClassification.from_pretrained("hfl/rbt3", num_labels=2)
print("加载优化器")
optimizer = torch.optim.Adam(model.parameters(), lr=2e-5)
def train(model, optimizer, train_loader, val_loader, epochs=5):
    """Train ``model`` and print validation accuracy after every epoch.

    Args:
        model: sequence-classification model; ``model(**batch)`` must return
            an object exposing ``.loss`` and ``.logits``.
        optimizer: optimizer constructed over ``model.parameters()``.
        train_loader: yields dict batches containing a ``labels`` tensor.
        val_loader: validation batches in the same format.
        epochs: number of passes over ``train_loader``.
    """
    for epoch in range(epochs):
        model.train()
        for i, inputs in tqdm(enumerate(train_loader)):
            optimizer.zero_grad()
            outputs = model(**inputs)
            outputs.loss.backward()
            optimizer.step()
            if i % 10 == 0:
                print("Epoch:{},Step:{},Loss:{}".format(epoch, i, outputs.loss.item()))

        # Validation needs no gradients: no_grad() skips building the autograd
        # graph (the original ran eval with gradients enabled, wasting memory).
        model.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for i, inputs in tqdm(enumerate(val_loader)):
                outputs = model(**inputs)
                preds = torch.argmax(outputs.logits, dim=1)
                correct += (preds == inputs["labels"]).sum().item()
                total += inputs["labels"].size(0)
        # Count samples from the loader itself; the original divided by the
        # module-level ``val_dataset`` global, which silently produced a wrong
        # accuracy whenever this function was called with any other loader.
        print("Epoch:{},Accuracy:{}".format(epoch, correct / max(total, 1)))
# NOTE(review): this criterion is never used — the HF model computes its own
# cross-entropy loss internally (returned as ``outputs.loss``). Kept only so
# any external code referencing ``criterion`` keeps working.
criterion = torch.nn.CrossEntropyLoss()

# Run training.
epochs = 5
print("开始训练")
train(model, optimizer, train_loader, val_loader, epochs)

# Single-sentence inference demo.
model.eval()

sen = "这家酒店服务态度非常好，房间很干净，位置也很好， overall very good"
inputs = tokenizer(sen, max_length=128, padding="max_length", truncation=True, return_tensors="pt")
# Inference does not need gradients; the original forward pass ran with
# autograd enabled, building a throwaway graph.
with torch.no_grad():
    outputs = model(**inputs)
preds = torch.argmax(outputs.logits, dim=1)
print(preds)
# ``preds`` has shape [1]; compare its scalar value explicitly rather than
# relying on implicit truthiness of a one-element tensor.
if preds.item() == 0:
    print("负面评论")
else:
    print("正面评论")

