import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import pandas as pd
import time
import numpy as np
from tqdm import tqdm
import os
import re
import json  # added: for loading the JSON multiple-choice dataset

# GPU setup: restrict visibility to GPUs 0 and 1, then pick a device_map
# strategy based on how many devices are actually available.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
if torch.cuda.device_count() > 0:
    # At least one GPU visible: let transformers shard the model automatically.
    print(f"发现 {torch.cuda.device_count()} 个GPU设备")
    device_map = "auto"
else:
    # No GPU visible: device_map=None loads the whole model on CPU.
    # NOTE(review): the message says "will run on a single card", but this
    # branch is only reached when zero GPUs are found — confirm the wording.
    print(f"仅发现 {torch.cuda.device_count()} 个GPU设备，将使用单卡运行")
    device_map = None

# Load model and tokenizer.
# Alternative checkpoints (switch by uncommenting exactly one model_name):
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-3B-Instruct"
model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-7B-Instruct"
# model_name = "/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-0___5B-Instruct"

# model_name = "/home/ZJQ/.cache/modelscope/hub/models/openai-community/gpt2"

# model_name = "/home/ZJQ/.cache/modelscope/hub/models/LLM-Research/Meta-Llama-3___1-8B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map=device_map,      # "auto" shards across visible GPUs; None -> single device
    torch_dtype=torch.float16,  # half precision to reduce memory footprint
    trust_remote_code=True
)
# Some checkpoints ship without a pad token; reuse EOS so batched padding works.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
if device_map == "auto":
    # hf_device_map maps module name -> device; the original loop variables
    # (`device`, `param`) were named backwards. Output is unchanged.
    print("模型分布情况:")
    for module_name, device in model.hf_device_map.items():
        print(f"  {module_name}: {device}")

# Representation engineering: extract per-layer "fact-attention" directions
# from contrastive (factual vs. non-factual) prompt pairs.
class FactAttentionAnalyzer:
    """Extracts directions that separate factual from non-factual prompts.

    For each configured layer, last-token representations of paired
    fact/non-fact prompts are contrasted; the first PCA component of the
    difference vectors is returned as that layer's direction.
    """

    def __init__(self, model, tokenizer, layer_indices=None):
        # Avoid a mutable default argument; [1, 2, 3] preserves the old default.
        self.model = model
        self.tokenizer = tokenizer
        self.layer_indices = [1, 2, 3] if layer_indices is None else layer_indices
        self.original_state_dict = None

    def _save_model_state(self):
        # Full parameter/buffer snapshot so the model can be restored verbatim.
        # NOTE(review): nothing in this class mutates the model, so this
        # save/restore pair looks like pure overhead — confirm before removing.
        self.original_state_dict = {k: v.clone() for k, v in self.model.state_dict().items()}

    def _restore_model_state(self):
        if self.original_state_dict is not None:
            self.model.load_state_dict(self.original_state_dict)
            self.original_state_dict = None

    def _get_representation(self, prompt, layer_idx, token_pos=-1):
        """Return the hidden state of `prompt` at `layer_idx` as a numpy array.

        token_pos selects which token's state to take; the default -1 picks
        the last token (the original's explicit -1 branch was redundant).
        """
        inputs = self.tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to(self.model.device)
        with torch.no_grad():
            outputs = self.model(**inputs, output_hidden_states=True)
            hidden_states = outputs.hidden_states[layer_idx]
            representation = hidden_states[:, token_pos, :]
        return representation.cpu().numpy()

    def get_large_vectors_by_threshold_1d(self, contrast_vectors, threshold=1):
        """Sum, per dimension, the entries whose magnitude exceeds `threshold`.

        Vectorized replacement for the original per-dimension Python loop:
        entries with |value| <= threshold are zeroed, then columns are summed.
        """
        print(f"输入形状: {contrast_vectors.shape}")
        # float64 accumulation mirrors the original np.zeros(feature_dim) target.
        values = np.asarray(contrast_vectors, dtype=np.float64)
        result_vector = np.where(np.abs(values) > threshold, values, 0.0).sum(axis=0)
        print(f"输出向量形状: {result_vector.shape}")
        return result_vector

    def extract_fact_attention_vectors(self, fact_prompts, non_fact_prompts, n_components=1):
        """Return {layer: first PCA component of (fact - non_fact) diffs}.

        Prompts are paired positionally via zip, so only
        min(len(fact_prompts), len(non_fact_prompts)) pairs contribute.
        """
        self._save_model_state()
        try:
            # Hoisted out of the per-layer loop (original re-imported each pass).
            from sklearn.decomposition import PCA

            layer_attention_vector = {}
            for layer in self.layer_indices:
                fact_reprs = [self._get_representation(p, layer)[0] for p in fact_prompts]
                non_fact_reprs = [self._get_representation(p, layer)[0] for p in non_fact_prompts]
                contrast_vectors = np.vstack([f - n for f, n in zip(fact_reprs, non_fact_reprs)])
                # Kept for its diagnostic prints; the returned value was never
                # used downstream in the original (dead code removed: the
                # thresholded-sum dict, the per-layer mean, and their sum `ll`).
                self.get_large_vectors_by_threshold_1d(contrast_vectors, threshold=1)
                pca = PCA(n_components=n_components)
                pca.fit(contrast_vectors)
                layer_attention_vector[layer] = pca.components_[0]
            return layer_attention_vector
        finally:
            self._restore_model_state()

# Representation engineering: steer generation using the extracted directions.
class FactAttentionController:
    """Steers generation by adding per-layer "fact" directions to the
    last-token state before generating from input embeddings."""

    def __init__(self, model, tokenizer):
        self.model = model
        self.tokenizer = tokenizer
        self.original_state_dict = None

    def _save_model_state(self):
        # Full snapshot. NOTE(review): the weights are never modified in this
        # class, so the save/restore pair may be removable — confirm.
        self.original_state_dict = {k: v.clone() for k, v in self.model.state_dict().items()}

    def _restore_model_state(self):
        if self.original_state_dict is not None:
            self.model.load_state_dict(self.original_state_dict)
            self.original_state_dict = None

    def enhance_fact_attention(self, input_ids, layer_attention_vectors, coefficient=0.7):
        """Generate a short continuation with the last input embedding replaced
        by a steered hidden state.

        Bug fix vs. the original: `vector_tensor` was built but never applied
        (`enhanced_hidden = current_hidden` discarded it), which made
        `coefficient` a complete no-op. The direction is now added as
        hidden + coefficient * vector.

        NOTE(review): each iteration overwrites the same last-token embedding,
        so effectively only the last layer in the dict takes effect — confirm
        whether accumulation across layers was intended.
        """
        self._save_model_state()
        try:
            with torch.no_grad():
                outputs = self.model(
                    input_ids=input_ids,
                    output_hidden_states=True,
                    use_cache=True
                )
                hidden_states = outputs.hidden_states
                inputs_embeds = self.model.get_input_embeddings()(input_ids)
                for layer, vector in layer_attention_vectors.items():
                    current_hidden = hidden_states[layer]
                    vector_tensor = torch.tensor(
                        vector,
                        dtype=current_hidden.dtype,
                        device=current_hidden.device
                    ).unsqueeze(0).unsqueeze(0)
                    # Apply the steering direction (dropped in the original).
                    enhanced_hidden = current_hidden + coefficient * vector_tensor
                    inputs_embeds[:, -1, :] = enhanced_hidden[:, -1, :]
                # generate(inputs_embeds=...) returns only the new tokens.
                return self.model.generate(
                    inputs_embeds=inputs_embeds,
                    max_new_tokens=6
                )
        finally:
            self._restore_model_state()

# Load the multiple-choice JSON dataset (replaces the original Excel loader).
def load_json_dataset(file_path):
    """Load a JSON list of multiple-choice questions into a DataFrame.

    Each record is expected to look like:
        {"question": {"stem": str, "choices": [{"label": str, "text": str}, ...]},
         "answerKey": str}

    Returns a DataFrame with added `question_stem` and `choices` columns
    (`answerKey` comes straight from the records; the original's
    `df['answerKey'] = df['answerKey']` no-op was removed), or None on any
    failure — the caller checks for None and exits.
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        print(f"成功加载 {len(data)} 条选择题数据")
        df = pd.DataFrame(data)
        # Flatten the nested question dict into dedicated columns.
        df['question_stem'] = df['question'].apply(lambda q: q['stem'])
        df['choices'] = df['question'].apply(lambda q: q['choices'])
        return df
    except Exception as e:
        # Deliberately best-effort: report and signal failure with None.
        print(f"加载JSON数据失败: {e}")
        return None

# Batch inference over multiple-choice questions (optionally steered by
# representation engineering).
def batch_inference(data, batch_size=5, use_representation=True, coefficient=0.1, num_samples=None):
    """Run multiple-choice inference and return (correct_count, total_count).

    data: DataFrame with `question_stem`, `choices`, `answerKey` columns.
    batch_size: prompts per forward pass (tune to GPU memory).
    use_representation: when True, extract fact-attention vectors once and
        steer generation through FactAttentionController.
    coefficient: steering strength forwarded to the controller.
    num_samples: if given, evaluate a fixed random subset of that size.
    """
    if num_samples is not None:
        data = data.sample(n=num_samples, random_state=42)  # reproducible subset
    correct_count = 0
    total_count = len(data)
    batches = [data[i:i + batch_size] for i in range(0, len(data), batch_size)]

    # Set up the representation-enhancement components once, up front.
    layer_attention_vectors = None
    controller = None
    if use_representation:
        # Contrastive prompt pairs for the "factual / correct answer" direction.
        fact_prompts = [
            "correct answer",
            "factual statement",
            "true option"
        ]
        non_fact_prompts = [
            "wrong answer",
            "false statement",
            "incorrect option"
        ]
        print("正在提取事实关注向量...")
        analyzer = FactAttentionAnalyzer(model, tokenizer, layer_indices=[1, 2, 3])
        layer_attention_vectors = analyzer.extract_fact_attention_vectors(fact_prompts, non_fact_prompts)
        print(f"已提取事实关注向量的层: {list(layer_attention_vectors.keys())}")
        controller = FactAttentionController(model, tokenizer)

    # Prompt template for the multiple-choice task.
    multiple_choice_template = """
Question: {question}
Options:
A: {option_A}
B: {option_B}
C: {option_C}
D: {option_D}
Please choose the correct option (A, B, C, or D), Only output correct answer options.
"""

    start_time = time.time()
    for batch in tqdm(batches, desc="处理批次"):
        prompts = []
        actual_answers = []

        # Build one prompt per row and remember its gold answer.
        for _, row in batch.iterrows():
            option_dict = {choice['label']: choice['text'] for choice in row['choices']}
            prompts.append(multiple_choice_template.format(
                question=row['question_stem'],
                option_A=option_dict.get('A', ''),
                option_B=option_dict.get('B', ''),
                option_C=option_dict.get('C', ''),
                option_D=option_dict.get('D', '')
            ))
            actual_answers.append(row['answerKey'])

        inputs = tokenizer(
            prompts,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=512
        )
        input_ids = inputs.input_ids.to(model.device)
        attention_mask = inputs.attention_mask.to(model.device)

        with torch.no_grad():
            if use_representation and layer_attention_vectors is not None and controller is not None:
                # generate(inputs_embeds=...) already returns only new tokens.
                gen_tokens = controller.enhance_fact_attention(
                    input_ids,
                    layer_attention_vectors,
                    coefficient=coefficient
                )
            else:
                outputs = model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    do_sample=False,
                    max_new_tokens=10,  # enough to emit an option letter
                    pad_token_id=tokenizer.pad_token_id
                )
                # Bug fix: generate(input_ids=...) echoes the prompt, and the
                # prompt text itself contains "A:", "B:", ... and "(A, B, C,
                # or D)", so scanning the full decoded text made the parser
                # below almost always answer "A". Strip the prompt tokens so
                # only the model's continuation is parsed.
                gen_tokens = outputs[:, input_ids.shape[1]:]

        # Parse predictions and tally accuracy.
        for i, (token_seq, actual_answer) in enumerate(zip(gen_tokens, actual_answers)):
            generated_text = tokenizer.decode(token_seq, skip_special_tokens=True).strip()
            # First standalone A-D in the generation (the original looped over
            # letters in priority order, which contradicted its stated intent
            # of taking the first occurrence).
            match = re.search(r'\b([ABCD])\b', generated_text.upper())
            predicted_option = match.group(1) if match else None
            is_correct = predicted_option == actual_answer
            if is_correct:
                correct_count += 1
            # Print the first two results of each batch for debugging.
            if i < 2:
                print(f"\n问题: {prompts[i]}")
                print(f"生成结果: {generated_text}")
                print(f"预测选项: {predicted_option}")
                print(f"实际答案: {actual_answer}")
                print(f"判断: {'正确' if is_correct else '错误'}")

    end_time = time.time()
    print(f"\n推理完成，耗时: {end_time - start_time:.2f}秒")
    return correct_count, total_count

# Main program
if __name__ == "__main__":
    # Load the JSON dataset (replace with your own JSON file path).
    json_dataset_path = "/home/ZJQ/pypro/data/dataOpenBook.json"  
    dataset = load_json_dataset(json_dataset_path)
    
    # load_json_dataset returns None on any failure; bail out early.
    if dataset is None or len(dataset) == 0:
        print("没有数据可处理，程序退出")
        exit()
    
    # Number of samples to evaluate (e.g. 5 for a quick smoke run).
    num_samples_to_run = None  # any integer, or None to run the full set
    
    try:
        correct_count, total_count = batch_inference(
            dataset,
            batch_size=1,        # tune to available GPU memory
            use_representation=True,  # toggle representation enhancement
            coefficient=0.1,     # representation enhancement coefficient
            num_samples=num_samples_to_run  # caps how many items are evaluated
        )

        # Compute and report accuracy.
        accuracy = correct_count / total_count if total_count > 0 else 0
        print(f"\n【选择题测试准确率】: {accuracy:.4f}")
        print(f"正确预测数: {correct_count}, 测试样本数: {total_count}")
    except Exception as e:
        print(f"推理过程中发生错误: {e}")