import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import pandas as pd
import time
from tqdm import tqdm
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "1" 
# os.environ["CUDA_VISIBLE_DEVICES"] = "3,4" 

# Device selection: shard the model across GPUs only when more than one is visible.
gpu_count = torch.cuda.device_count()
if gpu_count > 1:
    print(f"发现 {gpu_count} 个GPU设备")
    device_map = "auto"
else:
    print(f"仅发现 {gpu_count} 个GPU设备，将使用单卡运行")
    device_map = None

# Load the model and tokenizer.
# Alternative local model checkpoints (swap model_name to evaluate another size):
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-0___5B-Instruct"

model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-3B-Instruct"

# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-7B-Instruct"

# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-14B-Instruct"

# model_name = "/home/ZJQ/.cache/modelscope/hub/models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# NOTE(review): the original passed do_sample=False to from_pretrained().
# Sampling flags belong to generate()/generation_config, not to model
# loading, so that kwarg was dropped here.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    device_map=device_map,       # enables automatic multi-GPU sharding when "auto"
    torch_dtype=torch.float16,   # half precision to reduce GPU memory usage
)

# Report how the model's modules were sharded across devices.
if device_map == "auto":
    print("模型分布情况:")
    # hf_device_map maps module name -> device (the original loop variables
    # were named the other way around, which made the printout misleading).
    for module_name, device in model.hf_device_map.items():
        print(f"  {module_name}: {device}")

# Dataset loading helper (reads the XLSX evaluation file).
def load_excel_dataset(file_path):
    """Load the sentiment-classification dataset from an Excel file.

    Args:
        file_path: path to an .xlsx/.xls file readable by pandas.

    Returns:
        A pandas DataFrame on success, or None when the file cannot be
        read (missing file, unsupported format, ...).
    """
    try:
        frame = pd.read_excel(file_path)
        print(f"成功加载 {len(frame)} 条数据")
        print(f"数据集列名: {list(frame.columns)}")
    except Exception as e:
        # Best-effort loader: report the failure and let the caller decide.
        print(f"加载XLSX数据失败: {e}")
        return None
    return frame

# Path to the XLSX evaluation dataset.
DATASET_FILE = "/home/ZJQ/pypro/data/data.xlsx"
dataset = load_excel_dataset(DATASET_FILE)

# Abort early when nothing usable was loaded.
if dataset is None or len(dataset) == 0:
    print("没有数据可处理，程序退出")
    exit()

# Batched evaluation loop.
def batch_inference(data, batch_size=1000):
    """Run batched sentiment classification and count correct predictions.

    Args:
        data: pandas DataFrame with a 'text' column and a 'lable' column
            ('lable' is the actual column spelling in the dataset file —
            TODO confirm against the XLSX header before "fixing" it).
        batch_size: number of rows per generation batch.

    Returns:
        (correct_count, total_count) — raw counts for accuracy computation.
    """
    correct_count = 0
    total_count = len(data)
    batches = [data[i:i + batch_size] for i in range(0, len(data), batch_size)]

    # Decoder-only models must be LEFT-padded for batched generation:
    # with the default right padding, short prompts end in pad tokens and
    # the completion slice below is misaligned.
    tokenizer.padding_side = "left"
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    samples_seen = 0  # global sample counter; gates the debug printout below
    start_time = time.time()

    for batch in tqdm(batches, desc="处理批次"):
        prompts = []
        actual_labels = []

        # Build one prompt per row of the batch.
        for _, row in batch.iterrows():
            text = row['text']
            actual_label = row['lable']  # dataset column is spelled 'lable'
            prompt = f"{text}. Only respond Positive or Negative."
            prompts.append(prompt)
            actual_labels.append(actual_label)

        # Tokenize the batch and move it to the model's device.
        inputs = tokenizer(prompts, return_tensors="pt", padding=True)
        inputs = inputs.to(model.device)

        # Greedy decoding for a reproducible benchmark. (The original passed
        # do_sample=True here, contradicting the deterministic intent stated
        # at model-load time and making the accuracy run-dependent.)
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                do_sample=False,
                max_new_tokens=6,  # the answer is a single word; 6 tokens suffice
                pad_token_id=tokenizer.pad_token_id,
            )

        # Decode only the newly generated tokens (everything past the
        # padded prompt length, which is uniform across the batch).
        prompt_length = inputs.input_ids.shape[1]
        for i, output in enumerate(outputs):
            generated_tokens = output[prompt_length:]
            generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)

            # Debug-print the first few samples overall. (The original
            # tested the leaked DataFrame row index `index`, which is the
            # index of the LAST row of the batch — not a sample counter.)
            if samples_seen < 5:
                print(f"模型输入: {prompts[i]}")
                print(f"模型输出: {generated_text}")
                print(f"数据标签: {actual_labels[i]}")
                print("-"*22)
            samples_seen += 1

            # Substring match against the gold label (case-sensitive).
            if actual_labels[i] in generated_text:
                correct_count += 1

    end_time = time.time()
    print(f"推理完成，耗时: {end_time - start_time:.2f}秒")
    return correct_count, total_count

# Run the batched evaluation (tune batch_size to available GPU memory).
n_correct, n_total = batch_inference(dataset, batch_size=4)

# Compute and report the overall accuracy.
accuracy = n_correct / n_total
print(f"模型预测的准确率为: {accuracy:.4f}")
print(f"正确预测数: {n_correct}, 总样本数: {n_total}")