import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import pandas as pd
from sklearn.model_selection import train_test_split
import os
# Restrict this process to the first two GPUs; must be set before CUDA initializes.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

# Model path selection; earlier candidates are kept commented out for reference.
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/mota0user/roberta_rl_llama_model"

# model_name = "/home/ZJQ/.cache/modelscope/hub/models/iic/nlp_bert_document-segmentation_english-base"

model_name = "/home/ZJQ/.cache/modelscope/hub/models/iic/nlp_bert_sentiment-analysis_english-base"



# Load the tokenizer and the sequence-classification model.
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForSequenceClassification.from_pretrained(
    model_name,
    trust_remote_code=True,
    device_map="auto",  # let accelerate place the model on the visible GPUs automatically
    num_labels=3,       # NOTE(review): original comment said "binary task" but this configures 3 classes — confirm against the label set
    problem_type="single_label_classification"
)

# 加载数据集
def load_excel_dataset(file_path):
    """Load the sentiment-classification dataset from an XLSX file.

    Prints a summary (row count and column names) on success.

    Args:
        file_path: path to the .xlsx file.

    Returns:
        A pandas DataFrame, or None if reading the file fails
        (the error is printed rather than raised).
    """
    try:
        frame = pd.read_excel(file_path)
        print(f"成功加载 {len(frame)} 条数据")
        print(f"数据集列名: {list(frame.columns)}")
        return frame
    except Exception as e:
        print(f"加载XLSX数据失败: {e}")
        return None

# Evaluation data; expected to contain 'text' and 'lable' (spelling comes
# from the source file) columns.
dataset_path = "/home/ZJQ/pypro/data/data.xlsx"
dataset = load_excel_dataset(dataset_path)
if dataset is None:
    # Fail fast with a clear message instead of crashing below with an
    # opaque AttributeError on NoneType.
    raise SystemExit(f"Failed to load dataset: {dataset_path}")

# Build the mapping between emotion label strings and integer ids.
emotions = dataset['lable'].unique().tolist()
emotion_to_id = {emo: i for i, emo in enumerate(emotions)}
id_to_emotion = {i: emo for i, emo in enumerate(emotions)}

# Attach the numeric label column used by the evaluation loop.
dataset['label_id'] = dataset['lable'].map(emotion_to_id)

# Split the data; only the held-out half is used for evaluation.
_, test_data = train_test_split(dataset, test_size=0.5, random_state=42)

# Instruction prepended to every input text.
prompt = """ 请判断下面文本是消极还是积极："""

# 测试函数 - 逐条处理数据
# Evaluation - process the data one example at a time
def evaluate_model(model, tokenizer, test_data, prompt):
    """Compute classification accuracy of `model` on a labelled test set.

    Each text is prefixed with `prompt`, tokenized, and classified; the
    predicted class is the argmax over the output logits.

    Args:
        model: sequence-classification model whose forward pass returns an
            object with a `.logits` tensor of shape (1, num_labels).
        tokenizer: callable producing model inputs with a `.to(device)` method.
        test_data: DataFrame with 'text' and 'label_id' columns.
        prompt: instruction string prepended to every input text.

    Returns:
        Accuracy in [0, 1]; 0 if the test set is empty.
    """
    model.eval()  # evaluation mode (disables dropout etc.)
    device = next(model.parameters()).device  # send inputs to the model's device
    total_correct = 0
    total_samples = 0

    # Prepend the instruction prompt to every input text.
    test_texts = [f"{prompt}{text}" for text in test_data['text']]
    test_labels = test_data['label_id'].tolist()

    # Score one example at a time (no batching).
    for text, true_label in zip(test_texts, test_labels):
        inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512).to(device)

        with torch.no_grad():
            outputs = model(**inputs)

        # Predicted class = argmax over the class logits.
        prediction = torch.argmax(outputs.logits, dim=1).item()

        total_correct += int(prediction == true_label)
        total_samples += 1

    # Guard against an empty test set.
    return total_correct / total_samples if total_samples > 0 else 0

# Run the evaluation and report accuracy on the held-out split.
accuracy = evaluate_model(model, tokenizer, test_data, prompt)
print(f"\n测试集准确率: {accuracy:.4f}")