from datasets import Dataset
import pandas as pd
import torch
import torch.nn as nn
#from torch.utils.data import Dataset,DataLoader
from transformers import BertTokenizer,BertForSequenceClassification,AdamW,pipeline
from transformers import AutoTokenizer, AutoModelForSequenceClassification
#from transformers import DistilBertTokenizerFast, DistilBertForSequenceClassification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,classification_report,precision_recall_fscore_support
from transformers import TrainingArguments,Trainer

# Use GPU when available; manual inference below moves tensors here.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#print(torch.cuda.is_available())

# Raw comments live in the 'Comment' column; coerce to str so the
# sentiment pipeline never receives NaN/float values.
df=pd.read_excel(r"C:\Users\Yooou\Desktop\com.xlsx")
df['text']=df['Comment'].astype(str)
#print(df['text'][1])
# Use a Hugging Face Chinese sentiment-analysis model
# (the commented alternatives below were earlier candidates).
#tokenizer = AutoTokenizer.from_pretrained("uer/roberta-base-finetuned-jd-binary-chinese")
#model = AutoModelForSequenceClassification.from_pretrained("uer/roberta-base-finetuned-jd-binary-chinese")
#classifier=pipeline("sentiment-analysis", model="uer/roberta-base-finetuned-jd-binary-chinese")
# Multilingual student model used to weakly label the comments below.
# Presumably emits 'positive'/'neutral'/'negative' labels — get_label()
# only checks for 'positive'.
classifier=pipeline("sentiment-analysis", model="lxyuan/distilbert-base-multilingual-cased-sentiments-student"
                   )


def get_label(text, clf=None):
    """Weakly label *text* with a binary sentiment.

    Args:
        text: The comment text to classify.
        clf: Optional classifier callable (a transformers pipeline or a
            compatible stub). Defaults to the module-level ``classifier``.
            Injected for testability; omitting it preserves the original
            behavior exactly.

    Returns:
        1 if the top-scoring prediction is labeled 'positive', else 0
        ('neutral' and 'negative' both collapse to 0).
    """
    if clf is None:
        clf = classifier
    results = clf(text)
    # Pipelines may return [dict], [[dict, ...]] (top-k mode), or a bare
    # dict depending on version/options; normalize to a single best dict.
    if isinstance(results, list) and len(results) > 0:
        if isinstance(results[0], list):
            # Nested list: pick the highest-scoring candidate.
            best_result = max(results[0], key=lambda x: x['score'])
        else:
            # Flat list: the first entry is the prediction.
            best_result = results[0]
    else:
        # Already a dict: use it directly.
        best_result = results

    return 1 if best_result['label'] == 'positive' else 0

# Weakly label every comment with the pretrained pipeline, then split
# into train/test sets for fine-tuning.
df['label'] = df['text'].apply(get_label)
texts = df['text']
labels = df['label']
#print(df.tail())
#print(texts.head())
#print(df[df['label']==1])
train_texts, test_texts, train_labels, test_labels = train_test_split(
    texts, labels, test_size=0.35, random_state=10
)
# BUG FIX: the tokenizer must match the model being fine-tuned below.
# The original used BertTokenizer('bert-base-chinese'), whose vocabulary
# (and therefore token ids) does not match the DistilBERT multilingual
# checkpoint, so embeddings would be looked up with the wrong ids.
tokenizer = AutoTokenizer.from_pretrained(
    'lxyuan/distilbert-base-multilingual-cased-sentiments-student'
)
#print(train_texts)
#print(train_texts.shape,test_texts.shape,train_labels.shape,test_labels.shape)
# Encode the raw text once; padding/truncation yield rectangular batches.
train_texts_encodings = tokenizer(list(train_texts), padding=True, truncation=True)
# Copy the encodings into a plain dict and attach the raw text plus the
# integer labels under the 'labels' key that Trainer expects.
enc_train_texts = dict(train_texts_encodings)
enc_train_texts['text'] = list(train_texts)
#print(pd.DataFrame(enc_train_texts))#.iloc[:,0:2]
test_texts_encodings = tokenizer(list(test_texts), padding=True, truncation=True)
enc_test_texts = dict(test_texts_encodings)
enc_test_texts['text'] = list(test_texts)
#print(pd.DataFrame(enc_test_texts))
#print(enc_test_texts.keys())

enc_train_texts['labels'] = list(train_labels)
enc_test_texts['labels'] = list(test_labels)

# Convert the dicts into Dataset objects (the extra 'text' column is
# dropped automatically by Trainer's remove_unused_columns).
train_dataset = Dataset.from_dict(enc_train_texts)
test_dataset = Dataset.from_dict(enc_test_texts)

# Check the number of distinct labels in the weak annotations (binary here).
# NOTE(review): this value is computed but never passed to the model below —
# the checkpoint's own head size is used instead; verify that is intended.
num_labels = len(set(labels))
#print(f"Number of labels: {num_labels}")

training_args=TrainingArguments(
    output_dir=r"C:\Users\Yooou\Desktop\日常使用数据集",
    do_train=True,
    do_eval=True,
    num_train_epochs=10,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=32,
    learning_rate=2e-5,
    warmup_steps=100,
    weight_decay=0.01,
    logging_strategy='steps',
    logging_steps=200,
    logging_dir=r"C:\Users\Yooou\Desktop\日常使用数据集",
    eval_strategy='steps',
    eval_steps=100,# evaluation interval in steps
    save_steps=200,# checkpoint interval; a multiple of eval_steps, as required by load_best_model_at_end
    load_best_model_at_end=True,
)

def compute_metrics(pred):
    """Compute evaluation metrics for the Trainer.

    Args:
        pred: An EvalPrediction carrying ``label_ids`` (true labels) and
            ``predictions`` (raw logits; argmax over the last axis gives
            the predicted class).

    Returns:
        Dict with accuracy plus weighted precision/recall/F1.
    """
    true_label = pred.label_ids
    pred_label = pred.predictions.argmax(-1)
    acc = accuracy_score(true_label, pred_label)
    # 'weighted' averages per-class scores by class support, which is
    # reasonable for potentially imbalanced weak labels.
    pre, rec, f1, _ = precision_recall_fscore_support(
        true_label, pred_label, average='weighted'
    )
    # FIX: the original computed precision/recall/F1 and then discarded
    # them (keys were commented out). Returning extra keys is
    # backward-compatible with Trainer's metric logging.
    return {
        'Accuracy': acc,
        'Precision': pre,
        'Recall': rec,
        'F1': f1,
    }

# Fine-tune the multilingual checkpoint on the weakly-labeled data.
# BUG FIX: the checkpoint ships a 3-way head (its pipeline emits
# positive/neutral/negative) whose class-index order need not match the
# binary labels produced by get_label() (1 == positive). Re-initialize
# the classification head with the dataset's own label count so the
# training targets are consistent with the head.
trainer = Trainer(
    model=AutoModelForSequenceClassification.from_pretrained(
        'lxyuan/distilbert-base-multilingual-cased-sentiments-student',
        num_labels=num_labels,
        ignore_mismatched_sizes=True,  # drop the incompatible 3-way head
    ),
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=test_dataset,
    compute_metrics=compute_metrics,
)

results = trainer.train()

# Report metrics on both splits side by side.
q = [trainer.evaluate(eval_dataset=data) for data in [train_dataset, test_dataset]]
print(pd.DataFrame(q, index=['train', 'test']).iloc[:, :5])

#%reload_ext tensorboard
#%tensorboard --logdir logs
# BUG FIX: the original reloaded the *pretrained* checkpoint here, which
# silently discards the fine-tuning just performed. Use the trained model
# instead (the best checkpoint, since load_best_model_at_end=True).
model = trainer.model.to(device)

def get_prediction(text):
    """Return (class probabilities, predicted class index) for *text*.

    Args:
        text: A string (or list of strings) to classify.

    Returns:
        Tuple of (softmax probabilities tensor, argmax index tensor).
    """
    inputs = tokenizer(text, padding=True, truncation=True, return_tensors='pt').to(device)
    # DistilBERT has no token-type embeddings; drop the field in case the
    # tokenizer in scope produced it (e.g. a BERT tokenizer).
    if 'token_type_ids' in inputs:
        del inputs['token_type_ids']
    outputs = model(**inputs)
    prob = outputs.logits.softmax(1)
    return prob, prob.argmax()

print(get_prediction('这电影拍得很差')[1].item())

# Standalone demo: rebuild a sentiment pipeline from the *base* checkpoint.
# NOTE(review): this reloads the pretrained weights and rebinds the
# module-level model/tokenizer, ignoring the fine-tuned model above —
# pass the trained model instead if that was the intent; confirm.
from transformers import pipeline,DistilBertForSequenceClassification,DistilBertTokenizerFast
model=DistilBertForSequenceClassification.from_pretrained("lxyuan/distilbert-base-multilingual-cased-sentiments-student")
tokenizer=DistilBertTokenizerFast.from_pretrained("lxyuan/distilbert-base-multilingual-cased-sentiments-student")
nlp=pipeline("sentiment-analysis",model=model,tokenizer=tokenizer)
print(nlp('我喜欢成龙在里面的武打表演'))