import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import pandas as pd
import time
import json
from tqdm import tqdm
import os
import re
# --- Device setup ---
# Placement strategy: with multiple GPUs let accelerate shard the model
# ("auto"); with exactly one GPU pin the whole model to it; with none,
# stay on CPU (device_map=None).
num_gpus = torch.cuda.device_count()
if num_gpus > 1:
    print(f"发现 {num_gpus} 个GPU设备")
    device_map = "auto"
else:
    print(f"仅发现 {num_gpus} 个GPU设备，将使用单卡运行")
    # BUG FIX: device_map was previously None here, which left the model on
    # CPU even when one GPU was available. Pin to cuda:0 when a GPU exists.
    device_map = "cuda:0" if num_gpus == 1 else None

# --- Load model and tokenizer ---
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/openai-community/gpt2"
# Other models can be swapped in here:
model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-7B-Instruct"
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-3B-Instruct"
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-0___5B-Instruct"
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/LLM-Research/Meta-Llama-3___1-8B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    device_map=device_map,
    # float16 halves GPU memory; many float16 ops are slow or unsupported on
    # CPU, so fall back to float32 when no GPU is available.
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)

# Decoder-only models often ship without a pad token; reuse EOS so that
# batched (padded) tokenization works.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Show how the model was sharded (hf_device_map only exists for "auto").
if device_map == "auto":
    print("模型分布情况:")
    for device, param in model.hf_device_map.items():
        print(f"  {device}: {param}")

# Load the JSON multiple-choice dataset
def load_json_dataset(file_path):
    """Load a multiple-choice QA dataset from a JSON file.

    The file is expected to contain a list of records, each holding a
    ``question`` object (``{"stem": str, "choices": [{"label", "text"}, ...]}``)
    and a top-level ``answerKey`` string.

    Returns a DataFrame with flattened ``question_stem`` / ``choices``
    columns alongside the original fields, or ``None`` on any failure
    (missing file, malformed JSON, unexpected schema).
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        print(f"成功加载 {len(data)} 条选择题数据")

        # Flatten the nested question object into dedicated columns.
        df = pd.DataFrame(data)
        df['question_stem'] = df['question'].apply(lambda q: q['stem'])
        df['choices'] = df['question'].apply(lambda q: q['choices'])
        # NOTE: removed the no-op `df['answerKey'] = df['answerKey']`;
        # answerKey is already a top-level column of the DataFrame.
        return df
    except Exception as e:  # best-effort loader: report the problem, return None
        print(f"加载JSON数据失败: {e}")
        return None

# Batched inference (evaluates raw LLM capability only)
def batch_inference(data, batch_size=4, num_samples=None):
    """Evaluate the raw LLM on multiple-choice questions in batches.

    Args:
        data: DataFrame with ``question_stem``, ``choices`` and ``answerKey``
            columns (as produced by ``load_json_dataset``).
        batch_size: number of prompts run through one ``generate`` call.
        num_samples: if set and smaller than the dataset, evaluate on a
            fixed random subsample (seeded so runs are comparable).

    Returns:
        ``(correct_count, total_count)`` tuple.
    """
    # Optionally subsample for quick runs; fixed seed keeps runs reproducible.
    if num_samples is not None and num_samples < len(data):
        data = data.sample(n=num_samples, random_state=42)

    correct_count = 0
    total_count = len(data)
    batches = [data[i:i + batch_size] for i in range(0, len(data), batch_size)]

    # Prompt template: the model is asked to reply with a single option letter.
    multiple_choice_template = """
Question: {question}
Options:
A: {option_A}
B: {option_B}
C: {option_C}
D: {option_D}
Please choose the correct option (A, B, C, or D), Only output correct answer options.
"""

    # BUG FIX: decoder-only models must be padded on the LEFT for batched
    # generation; with the default right padding the model continues after
    # pad tokens and completion quality degrades (transformers warns on this).
    tokenizer.padding_side = "left"

    start_time = time.time()

    for batch in tqdm(batches, desc="处理批次"):
        prompts = []
        actual_answers = []

        # Build one prompt per row in the batch.
        for _, row in batch.iterrows():
            option_dict = {c['label']: c['text'] for c in row['choices']}
            prompts.append(multiple_choice_template.format(
                question=row['question_stem'],
                option_A=option_dict.get('A', ''),
                option_B=option_dict.get('B', ''),
                option_C=option_dict.get('C', ''),
                option_D=option_dict.get('D', ''),
            ))
            actual_answers.append(row['answerKey'])

        inputs = tokenizer(
            prompts,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=512
        ).to(model.device)

        # Deterministic greedy decoding; a handful of tokens suffices for
        # an option letter.
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                do_sample=False,
                max_new_tokens=15,
                pad_token_id=tokenizer.pad_token_id
            )

        # Every row shares the same padded prompt width, so the generated
        # continuation starts at one common offset (hoisted out of the loop).
        prompt_len = inputs.input_ids.shape[1]
        for output, actual in zip(outputs, actual_answers):
            generated_text = tokenizer.decode(
                output[prompt_len:],
                skip_special_tokens=True
            ).strip()

            # Extract the predicted option; letters are checked in A-D
            # priority order (original behavior), matched as whole words.
            predicted = None
            for letter in ('A', 'B', 'C', 'D'):
                if re.search(r'\b' + letter + r'\b', generated_text.upper()):
                    predicted = letter
                    break

            if predicted == actual:
                correct_count += 1

    end_time = time.time()
    print(f"\n推理完成，耗时: {end_time - start_time:.2f}秒")
    return correct_count, total_count

# --- Entry point ---
if __name__ == "__main__":
    # Path to the multiple-choice dataset (OpenBookQA-style JSON).
    json_dataset_path = "/home/ZJQ/pypro/data/dataOpenBook.json"
    dataset = load_json_dataset(json_dataset_path)

    if dataset is None or len(dataset) == 0:
        print("没有数据可处理，程序退出")
        # BUG FIX: the bare exit() builtin is injected by the `site` module and
        # is not guaranteed to exist (`python -S`, frozen apps); raise
        # SystemExit directly, with a nonzero code since this is an error path.
        raise SystemExit(1)

    # Run the evaluation (set num_samples, e.g. 100, for a quick smoke test).
    correct, total = batch_inference(
        dataset,
        batch_size=4,      # tune to available GPU memory
        num_samples=None   # None = evaluate the full dataset
    )

    # Report accuracy (guard against division by zero on an empty set).
    accuracy = correct / total if total > 0 else 0
    print(f"\nLLM选择题准确率: {accuracy:.4f}")
    print(f"正确数: {correct}, 总数: {total}")
