import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import pandas as pd

# Select the compute device: GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Load model and tokenizer from a local ModelScope cache.
# Alternative checkpoints kept for quick switching between model sizes:
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-0___5B-Instruct"
model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-3B-Instruct"
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-7B-Instruct"
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-14B-Instruct"

# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen3-0___6B"
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen3-30B-A3B"
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# BUG FIX: the original used .cuda(), which crashes on CPU-only machines even
# though `device` is chosen conditionally above. .to(device) honors that choice.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True
).to(device)

# 读取CSV数据集
# Read the CSV dataset.
def load_csv_dataset(file_path):
    """Load the emotion-classification dataset from a CSV file.

    Tries GBK first (common for files produced on Chinese Windows systems),
    then falls back to UTF-8 with BOM ("utf-8-sig") on a decode error.

    Args:
        file_path: Path to the CSV file.

    Returns:
        A pandas DataFrame on success, or None if the file could not be read.
    """
    try:
        df = pd.read_csv(file_path, encoding='gbk')
    except UnicodeDecodeError:
        # GBK failed to decode — retry with BOM-aware UTF-8.
        try:
            df = pd.read_csv(file_path, encoding='utf-8-sig')
        except Exception as e:
            print(f"尝试utf-8-sig编码失败: {e}")
            return None
    except Exception as e:
        # Non-encoding failure (missing file, malformed CSV, ...).
        print(f"加载数据失败: {e}")
        return None
    # Single success path (the original duplicated these lines in both branches).
    print(f"成功加载 {len(df)} 条数据")
    print(f"数据集列名: {list(df.columns)}")
    return df

# Path to the evaluation dataset.
dataset_path = "/home/ZJQ/pypro/data/Simplified_Chinese_Multi-Emotion_Dialogue_Dataset.csv"
dataset = load_csv_dataset(dataset_path)

# BUG FIX: load_csv_dataset returns None on failure, and the original code
# would then crash on len(None). An empty dataset would also raise
# ZeroDivisionError when computing the accuracy below — guard both cases.
if dataset is None or len(dataset) == 0:
    raise SystemExit("数据集加载失败或为空，无法评估")

correct_count = 0
total_count = len(dataset)
for index, row in dataset.iterrows():
    text = row['text']
    # BUG FIX: coerce to str — a NaN/numeric label would make the
    # `actual_label in generated_text` membership test raise TypeError.
    actual_label = str(row['label'])
    # Build the classification prompt (answer restricted to one of 8 emotions).
    prompt = f"这段文本表达了什么情绪：{text} 仅回复伤心、生气、关心、惊讶、开心、平静、厌恶、疑问中的一种."
    print(f"模型输入: {prompt}")
    # Tokenize and move the inputs to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(device)

    # Greedy decoding; no gradients needed for inference.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            do_sample=False,
            max_new_tokens=3,  # emotion labels are short — cap generation
            pad_token_id=tokenizer.eos_token_id
        )

    # Decode only the newly generated tokens (strip the echoed prompt).
    input_length = len(inputs.input_ids[0])
    generated_tokens = outputs[0][input_length:]
    generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)
    print(f"模型输出: {generated_text}")
    print(f"数据标签: {actual_label}")
    print("-"*22)

    # Substring match: count as correct if the gold label appears in the output.
    if actual_label in generated_text:
        correct_count += 1

accuracy = correct_count / total_count
print(f"模型预测的准确率为: {accuracy}")