from datasets import Dataset
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import torch
import torch.nn as nn
import re
from jinja2.optimizer import optimize
from torch.optim.lr_scheduler import ReduceLROnPlateau
#from torch.utils.data import Dataset,DataLoader
from transformers import BertTokenizer,BertForSequenceClassification,AdamW,pipeline
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,classification_report,precision_recall_fscore_support
from sklearn.metrics import roc_auc_score,average_precision_score,roc_curve,auc
from transformers import TrainingArguments,Trainer
from transformers import EarlyStoppingCallback
from wordcloud import WordCloud,STOPWORDS

# Select GPU when available; the fine-tuned model is moved to this device later.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#print(torch.cuda.is_available())

# NOTE(review): hard-coded local path — confirm the spreadsheet exists on this machine.
df=pd.read_excel(r"C:\Users\Yooou\Desktop\dataset\dsfp\senticom.xlsx")
#def clean_text(text):
#    text=re.sub(r'\s+',' ',text)
#    text=re.sub(r'\w','',text)
#    return text.strip()
df['text']=df['Comment'].astype(str)#.apply(clean_text)
#print(df['text'][1])
# Hugging Face multilingual sentiment-analysis model, used to weakly label the comments
classifier=pipeline("sentiment-analysis",
                    model="lxyuan/distilbert-base-multilingual-cased-sentiments-student"
                   )

def get_label(text):
    """Weakly label *text* as 1 (positive) or 0 (anything else).

    The sentiment pipeline may return a dict, a flat list of dicts, or a
    nested list of per-class score dicts depending on configuration;
    normalise all three shapes before reading the label.
    """
    raw = classifier(text)
    best = raw  # fallback: pipeline returned a single dict
    if isinstance(raw, list) and len(raw) > 0:
        first = raw[0]
        if isinstance(first, list):
            # Nested list: one score dict per class — keep the highest score.
            best = max(first, key=lambda item: item['score'])
        else:
            # Flat list: the leading entry is already the top prediction.
            best = first
    # Binary target: 'positive' -> 1; 'negative'/'neutral' -> 0.
    return 1 if best['label'] == 'positive' else 0

# Pseudo-label every comment with the pretrained sentiment pipeline.
df['label']=df['text'].apply(get_label)
texts=df['text']
labels=df['label']
#print(df.tail())
print(texts.head())
#print(df[df['label']==1].iloc[:,-2:])
# Stratified 75/25 split keeps the class balance in both partitions.
train_texts,test_texts,train_labels,test_labels=train_test_split(
    texts,labels,test_size=0.25,random_state=10,stratify=labels
)
#print(train_texts)
#print(train_texts.shape,test_texts.shape,train_labels.shape,test_labels.shape)
# Encode the raw text with the tokenizer and wrap each split in a HF Dataset.
# NOTE(review): hard-coded local tokenizer path — confirm it exists on this machine.
tokenizer=BertTokenizer.from_pretrained(r"D:\final_model")

def _build_dataset(split_texts, split_labels):
    """Tokenize one data split and return a Dataset.

    The Dataset carries the tokenizer encodings (input_ids,
    attention_mask, ...), the original text, and integer targets under
    the key 'labels' — the field name the HF Trainer expects.
    """
    encodings = tokenizer(list(split_texts), padding=True, truncation=True)
    # Shallow copy of the encoding mapping instead of a manual key loop.
    columns = dict(encodings)
    columns['text'] = list(split_texts)
    columns['labels'] = list(split_labels)
    return Dataset.from_dict(columns)

# Identical construction for both splits (previously duplicated inline).
train_dataset = _build_dataset(train_texts, train_labels)
test_dataset = _build_dataset(test_texts, test_labels)

# Number of distinct classes in the weak labels (expected: 2).
num_labels = len(set(labels))
#print(f"Number of labels: {num_labels}")

class CustomTrainer(Trainer):
    """Trainer subclass that steps a torch ReduceLROnPlateau scheduler
    on the evaluation loss after every evaluation pass, printing any
    learning-rate change it triggers."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Created lazily in create_scheduler(); stays None for other scheduler types.
        self.custom_scheduler = None

    def create_scheduler(self, num_training_steps: int, optimizer=None):
        """Create the learning-rate scheduler(s).

        For lr_scheduler_type == "reduce_lr_on_plateau" a private
        ReduceLROnPlateau instance is kept (stepped manually in
        evaluation_loop); the parent scheduler is still created so the
        Trainer's own scheduler bookkeeping keeps working.
        """
        if self.args.lr_scheduler_type == "reduce_lr_on_plateau":
            if optimizer is None:
                optimizer = self.optimizer
            self.custom_scheduler = ReduceLROnPlateau(
                optimizer,
                mode='min',  # monitor eval_loss: lower is better (use 'max' for accuracy-style metrics)
                factor=0.5,  # halve the LR when the loss plateaus
                patience=5,  # evaluations to wait before reducing
                min_lr=1e-7,
                threshold=0.001,
                threshold_mode='rel'
            )
            # Also build the parent's scheduler to avoid conflicts with the
            # Trainer's internal scheduler handling.
            super().create_scheduler(num_training_steps, optimizer)
        else:
            super().create_scheduler(num_training_steps, optimizer)
        return self.lr_scheduler

    def evaluation_loop(self, *args, **kwargs):
        """Run the standard evaluation loop, then feed eval_loss to the
        plateau scheduler and report any resulting LR change."""
        eval_result = super().evaluation_loop(*args, **kwargs)
        if (self.custom_scheduler is not None and
                'eval_loss' in eval_result.metrics):
            # Remember the current learning rate ...
            old_lr = self.custom_scheduler.optimizer.param_groups[0]['lr']
            # ... let the scheduler react to the latest evaluation loss ...
            self.custom_scheduler.step(eval_result.metrics['eval_loss'])
            # ... and log only when the rate actually changed.
            new_lr = self.custom_scheduler.optimizer.param_groups[0]['lr']
            if old_lr != new_lr:
                print(f"Learning rate changed from {old_lr} to {new_lr}")
        return eval_result

# Training configuration: evaluate and checkpoint every 100 steps; keep the
# best checkpoint ranked by the custom 'Accuracy' metric from compute_metrics.
training_args=TrainingArguments(
    output_dir=r"C:\Users\Yooou\Desktop\dataset",
    do_train=True,
    do_eval=True,
    num_train_epochs=15,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    learning_rate=1e-5,
    #warmup_steps=100,
    warmup_ratio=0.1,
    weight_decay=0.1,
    logging_strategy='steps',
    logging_steps=100,
    logging_dir=r"C:\Users\Yooou\Desktop\dataset\dsfp\logs",
    eval_strategy='steps',
    eval_steps=100,# evaluation interval (steps)
    save_steps=100,# save interval; must equal eval_steps for load_best_model_at_end
    load_best_model_at_end=True,
    save_total_limit=2,
    metric_for_best_model='Accuracy',#'eval_loss'  # key returned by compute_metrics
    greater_is_better=True,#False  # would be False if tracking eval_loss instead
    lr_scheduler_type='reduce_lr_on_plateau',  # handled by CustomTrainer above
    fp16=torch.cuda.is_available(),
    report_to='tensorboard',
)

# Stop training after 5 evaluations without >= 0.01 improvement in the
# monitored metric.
early_stopping_callback=EarlyStoppingCallback(
    early_stopping_patience=5,
    early_stopping_threshold=0.01
)

def compute_metrics(pred):
    """Compute evaluation metrics from a Trainer EvalPrediction.

    Returns accuracy, binary precision/recall/F1 on the argmax labels,
    plus ROC-AUC and PR-AUC on the positive-class softmax probability
    (zero when only one class is present, where AUC is undefined).
    """
    y_true = pred.label_ids
    logits = pred.predictions
    probs = torch.nn.functional.softmax(torch.tensor(logits), dim=-1).numpy()
    y_pred = logits.argmax(-1)
    accuracy = accuracy_score(y_true, y_pred)
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true, y_pred, average='binary'
    )
    # AUC metrics are only meaningful when both classes appear in y_true.
    if len(np.unique(y_true)) > 1:
        roc = roc_auc_score(y_true, probs[:, 1])
        pr = average_precision_score(y_true, probs[:, 1])
    else:
        roc = pr = 0
    return {
        'Accuracy': accuracy,
        'Precision': precision,
        'Recall': recall,
        'F1': f1,
        'AUC_ROC': roc,
        'AUC_PR': pr
    }

# Fine-tune from a local checkpoint; early stopping watches the
# metric_for_best_model configured in training_args.
# NOTE(review): hard-coded checkpoint path — confirm it exists on this machine.
trainer=CustomTrainer(model=AutoModelForSequenceClassification.from_pretrained(r"D:\checkpoint-670"),
                      args=training_args,
                      train_dataset=train_dataset,
                      eval_dataset=test_dataset,
                      compute_metrics=compute_metrics,
                      callbacks=[early_stopping_callback]
)

results=trainer.train()
#trainer.save_model(r"C:\Users\Yooou\Desktop\dataset")
#tokenizer.save_pretrained(r"D:\final_model2")
# Evaluate on both splits and print the first five metric columns side by side.
q=[trainer.evaluate(eval_dataset=data) for data in [train_dataset,test_dataset]]
print(pd.DataFrame(q,index=['train','test']).iloc[:,:5])

def plot_roc_curve(true_label, pred_proba):
    """Plot the ROC curve for the positive class and return its AUC.

    Parameters
    ----------
    true_label : array-like of 0/1 ground-truth labels.
    pred_proba : 2-D array of class probabilities; column 1 holds the
        positive-class probability.

    Returns
    -------
    float : the ROC-AUC score shown in the plot legend.
    """
    auc_roc = roc_auc_score(true_label, pred_proba[:, 1])
    fpr, tpr, thresholds = metrics.roc_curve(
        true_label, pred_proba[:, 1], drop_intermediate=False
    )
    plt.figure(figsize=(5, 5))
    # Fixed: stray trailing commas previously turned each of these calls
    # into a one-element tuple expression (harmless at runtime, but a bug
    # magnet and misleading to readers).
    plt.plot(fpr, tpr, label='AUC=' + str(auc_roc))
    plt.legend(loc='lower right')
    plt.ylabel('True positive rate')
    plt.xlabel('False positive rate')
    plt.show()
    return auc_roc

# Reload the fine-tuned checkpoint and tokenizer for standalone inference.
model=AutoModelForSequenceClassification.from_pretrained(r"D:\checkpoint-670").to(device)
tokenizer=BertTokenizer.from_pretrained(r"D:\final_model")
# Decision threshold on the positive-class probability; presumably taken
# from a previous run of find_optimal_threshold (Youden's J) — TODO confirm.
optimal_threshold=0.151886
def predict_sentiment(text, threshold=None):
    """Classify *text* with the fine-tuned model at a probability threshold.

    When *threshold* is None the module-level ``optimal_threshold`` is
    used. Returns a dict with the predicted 'label' ('positive' /
    'negative') and the positive-class 'probability'.
    """
    if threshold is None:
        threshold = optimal_threshold
    encoded = tokenizer(text, return_tensors="pt", padding=True, truncation=True).to(device)
    # Forward only the tensors the model expects.
    model_inputs = {
        'input_ids': encoded['input_ids'],
        'attention_mask': encoded['attention_mask']
    }
    with torch.no_grad():
        logits = model(**model_inputs).logits
        probs = torch.nn.functional.softmax(logits, dim=-1)
        positive_prob = probs[0][1].item()
    is_positive = positive_prob >= threshold
    return {
        'label': 'positive' if is_positive else 'negative',
        'probability': positive_prob,
    }
# Quick smoke test on a few Chinese movie-review phrases.
sample_text=['纯粹烂片','勉强能看','很普通']
for text in sample_text:
    result = predict_sentiment(text)
    print(result)

def find_optimal_threshold(true_label, pred_proba):
    """Return the ROC threshold that maximizes Youden's J statistic.

    J = sensitivity + specificity - 1 = TPR - FPR; the threshold at the
    maximum J balances true- and false-positive rates.
    """
    fpr, tpr, thresholds = metrics.roc_curve(
        true_label, pred_proba[:, 1], drop_intermediate=False
    )
    j_scores = tpr - fpr
    best_idx = np.argmax(j_scores)
    return thresholds[best_idx]
# Derive the optimal decision threshold from held-out test predictions.
test_predictions = trainer.predict(test_dataset)
pred_probas=torch.nn.functional.softmax(torch.tensor(test_predictions.predictions),dim=-1).numpy()
true_labels = test_predictions.label_ids
#print(true_labels.shape)
optimal_thresholds = find_optimal_threshold(true_labels,pred_probas)
print("最佳阈值:",optimal_thresholds)

# Plotting is best-effort: a headless environment or malformed input should
# not abort the rest of the script.
try:
    auc_score = plot_roc_curve(true_labels, pred_probas)
    print(f"AUC Score: {auc_score:.4f}")
except Exception as e:
    print(f"Error plotting ROC curve: {e}")

# Build and display a word cloud over all comment texts.
stopwords=set(STOPWORDS)
import matplotlib.font_manager as fm  # NOTE(review): `fm` appears unused below — confirm before removing
# NOTE(review): hard-coded font path; SimHei is needed to render CJK glyphs.
font_path=r"C:\Users\Yooou\Downloads\SimHei.ttf"
wordcloud=WordCloud(width=800,height=800,
                    background_color='white',stopwords=stopwords,
                    min_font_size=10,font_path=font_path).generate(' '.join(texts))
# Matplotlib's default font cannot render Chinese; switch to SimHei and keep
# the minus sign displaying correctly under a non-ASCII font.
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
plt.figure(figsize=(8,8),facecolor=None)
plt.imshow(wordcloud)
plt.axis('off')
plt.show()