import numpy as np
import torch
from tqdm import tqdm
from  datasets import load_dataset,Dataset
from torch.utils.data import DataLoader, random_split
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments
from transformers import DataCollatorWithPadding
import wandb

# --- Experiment tracking -----------------------------------------------------
# SECURITY: the W&B API key was hard-coded in source control. Read it from the
# WANDB_API_KEY environment variable; the literal is kept only as a fallback so
# existing runs keep working. This key is leaked and should be rotated.
import os

wandb.login(key=os.environ.get("WANDB_API_KEY", "f8faa7a67ba54735a3e66f118d466640711b323d"))

# Use the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the ChnSentiCorp hotel-review CSV as a single "train" split and drop the
# rows whose review text is missing (the raw CSV contains a few null reviews).
dataset = load_dataset(
    "csv",
    data_files="/kaggle/input/nlp-chnsenticorp-htl-all/ChnSentiCorp_htl_all.csv",
    split="train",
)
dataset = dataset.filter(lambda x: x["review"] is not None)
print(len(dataset))

# Tokenizer matching the 3-layer Chinese RoBERTa used below.
tokenizer = AutoTokenizer.from_pretrained("hfl/rbt3")
def process_function(example):
    """Tokenize a batch of reviews and attach their integer labels.

    Runs under ``Dataset.map(batched=True)``, so ``example`` is a dict of
    column-name -> list of values.  Tensors are deliberately NOT created here:
    ``datasets`` stores Arrow lists regardless (the original
    ``return_tensors="pt"`` / ``torch.tensor`` work was converted straight
    back), and static ``padding="max_length"`` is dropped because the
    ``DataCollatorWithPadding`` used downstream pads each batch dynamically,
    which is cheaper than padding every row to 128 tokens.
    """
    tokenized = tokenizer(example["review"], max_length=128, truncation=True)
    tokenized["labels"] = example["label"]
    return tokenized
# Tokenize the whole corpus; drop the raw columns so only model inputs remain.
dataset = dataset.map(process_function, batched=True, remove_columns=dataset.column_names)

print(dataset[:3])

# Pads each batch to the longest sequence in it at collate time.
coll = DataCollatorWithPadding(tokenizer=tokenizer)

# Hold out 10% of the data for validation.
split_sets = dataset.train_test_split(test_size=0.1)
train_dataset = split_sets["train"]
val_dataset = split_sets["test"]
print(len(train_dataset), len(val_dataset))

train_loader = DataLoader(train_dataset, batch_size=32, collate_fn=coll, shuffle=True)
print("加载数据集")
val_loader = DataLoader(val_dataset, batch_size=32, collate_fn=coll, shuffle=False)

print("开始加载模型")
# Load a 3-layer Chinese RoBERTa (rbt3) with a 2-class sequence-classification head.
model=AutoModelForSequenceClassification.from_pretrained("hfl/rbt3",num_labels=2)
# NOTE(review): this message says "loading optimizer", but no optimizer is
# created here — the lines below load evaluation metrics instead.
print("加载优化器")
import  evaluate
# Metrics computed by eval_metric() during the Trainer's evaluation loops.
acc_metric=evaluate.load("accuracy")
f1_metric=evaluate.load("f1")

def eval_metric(eval_predict):
    """Compute accuracy and F1 for one Trainer evaluation pass.

    ``eval_predict`` is the ``(predictions, label_ids)`` pair supplied by the
    Trainer — normally numpy arrays, though torch tensors are accepted too.
    Returns a single dict holding both the ``accuracy`` and ``f1`` keys.
    """
    preds, labels = eval_predict
    # Metric computation is pure bookkeeping: keep it on the CPU instead of
    # round-tripping the arrays through the GPU as the original did.
    if isinstance(preds, torch.Tensor):
        preds = preds.detach().cpu().numpy()
    if isinstance(labels, torch.Tensor):
        labels = labels.detach().cpu().numpy()
    # Collapse logits to predicted class ids.
    preds = np.argmax(preds, axis=-1)
    metrics = acc_metric.compute(predictions=preds, references=labels)
    metrics.update(f1_metric.compute(predictions=preds, references=labels))
    return metrics

# Training configuration.  Eval and save both run once per epoch so checkpoints
# can be compared; load_best_model_at_end is required for metric_for_best_model
# to have any effect (without it the best-model setting is silently ignored).
train_args = TrainingArguments(
    output_dir="output",
    per_device_train_batch_size=128,
    per_device_eval_batch_size=32,
    num_train_epochs=5,
    eval_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True,  # restore the best checkpoint after training
    metric_for_best_model="eval_accuracy",
    learning_rate=2e-5,
)

from transformers import Trainer

# The Trainer drives the whole train/eval loop; the raw DataLoaders built
# earlier are unused here — the Trainer constructs its own from the datasets
# and the padding collator.
trainer = Trainer(
    model=model,
    args=train_args,
    train_dataset=train_dataset,
    eval_dataset=val_dataset,
    data_collator=coll,
    compute_metrics=eval_metric,
)
trainer.train()
# BUG FIX: the success message was printed *before* save_model ran; save first,
# then report.
trainer.save_model("./output")
print("模型保存成功")

# --- Smoke-test inference on a single sentence -------------------------------
model.eval()

sen = "这家酒店服务态度非常好，房间很干净，位置也很好， overall very good"
inputs = tokenizer(sen, max_length=128, truncation=True, return_tensors="pt")
# BUG FIX: after training the model typically lives on the GPU while these
# freshly tokenized tensors are on the CPU; move them to the model's device or
# the forward pass raises a device-mismatch RuntimeError.
inputs = {k: v.to(model.device) for k, v in inputs.items()}
with torch.no_grad():  # inference only — no autograd graph needed
    outputs = model(**inputs)
preds = torch.argmax(outputs.logits, dim=-1)
print(preds)
# Label 0 = negative review, 1 = positive review.
if preds.item() == 0:
    print("负面评论")
else:
    print("正面评论")

