import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import pandas as pd
import time
import numpy as np
from tqdm import tqdm
import os
import re
import json

# GPU setup: restrict which devices are visible, then choose a placement
# strategy — "auto" sharding when any GPU is present, single-device otherwise.
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
_gpu_count = torch.cuda.device_count()
device_map = "auto" if _gpu_count > 0 else None
if device_map is not None:
    print(f"发现 {_gpu_count} 个GPU设备")
else:
    print(f"仅发现 {_gpu_count} 个GPU设备，将使用单卡运行")

# Representation engineering: FactAttentionAnalyzer (threshold parameter passed dynamically)
class FactAttentionAnalyzer:
    """Extracts per-layer "fact attention" steering vectors by contrasting
    hidden states of factual vs non-factual prompts."""

    def __init__(self, model, tokenizer, layer_indices=(1, 2, 3)):
        """
        :param model: causal LM supporting forward(..., output_hidden_states=True)
        :param tokenizer: tokenizer matching the model
        :param layer_indices: hidden-state layer indices to analyze
        """
        self.model = model
        self.tokenizer = tokenizer
        # Copy into a fresh list so callers' sequences are never shared/mutated
        # (the original used a mutable default argument).
        self.layer_indices = list(layer_indices)
        self.original_state_dict = None

    def _save_model_state(self):
        # Clone every parameter/buffer so the model can be restored afterwards.
        self.original_state_dict = {k: v.clone() for k, v in self.model.state_dict().items()}

    def _restore_model_state(self):
        if self.original_state_dict is not None:
            self.model.load_state_dict(self.original_state_dict)
            self.original_state_dict = None

    def _get_representation(self, prompt, layer_idx, token_pos=-1):
        """Return the hidden state at `layer_idx` for one token position.

        :return: numpy array of shape (batch, hidden_dim)
        """
        inputs = self.tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to(self.model.device)
        with torch.no_grad():
            outputs = self.model(**inputs, output_hidden_states=True)
            hidden_states = outputs.hidden_states[layer_idx]
            # token_pos == -1 already selects the last token via negative
            # indexing, so no special-case branch is needed.
            representation = hidden_states[:, token_pos, :]
        return representation.cpu().numpy()

    def get_large_vectors_by_threshold_1d(self, contrast_vectors, threshold=1):
        """
        For each feature dimension, sum only the entries whose magnitude
        exceeds `threshold`; entries in [-threshold, threshold] are ignored.

        :param contrast_vectors: array-like of shape (n_vectors, feature_dim)
        :param threshold: magnitude cutoff
        :return: 1-D array of shape (feature_dim,)
        """
        contrast_vectors = np.asarray(contrast_vectors)
        # Vectorized replacement for the original per-dimension Python loop:
        # zero out small-magnitude entries, then sum down the vector axis.
        mask = np.abs(contrast_vectors) > threshold
        return np.where(mask, contrast_vectors, 0.0).sum(axis=0)

    def extract_fact_attention_vectors(self, fact_prompts, non_fact_prompts, n_components=1, threshold=1):
        """
        Build one steering vector per layer: the thresholded sum of contrast
        vectors plus the first PCA component of the contrasts.

        fact_prompts and non_fact_prompts are paired elementwise (zip), so
        they should have equal length — extras in the longer list are dropped.

        :return: {layer_index: 1-D numpy steering vector}
        """
        from sklearn.decomposition import PCA  # local import kept from original; hoisted out of the loop

        self._save_model_state()
        try:
            layer_contrast_vectors = {layer: [] for layer in self.layer_indices}
            for prompt in fact_prompts:
                for layer in self.layer_indices:
                    rep = self._get_representation(prompt, layer)  # renamed: `repr` shadowed the builtin
                    layer_contrast_vectors[layer].append(("fact", rep[0]))
            for prompt in non_fact_prompts:
                for layer in self.layer_indices:
                    rep = self._get_representation(prompt, layer)
                    layer_contrast_vectors[layer].append(("non_fact", rep[0]))

            combined = {}
            for layer in self.layer_indices:
                fact_reprs = [v for t, v in layer_contrast_vectors[layer] if t == "fact"]
                non_fact_reprs = [v for t, v in layer_contrast_vectors[layer] if t == "non_fact"]
                # Pairwise fact-minus-nonfact contrast directions.
                contrast_vectors = np.vstack([f - n for f, n in zip(fact_reprs, non_fact_reprs)])
                # Thresholded-magnitude component (dynamic threshold pass-through).
                thresholded = self.get_large_vectors_by_threshold_1d(contrast_vectors, threshold=threshold)
                # Dominant PCA direction of the contrasts.
                pca = PCA(n_components=n_components)
                pca.fit(contrast_vectors)
                combined[layer] = thresholded + pca.components_[0]
            return combined
        finally:
            self._restore_model_state()

# Representation engineering: FactAttentionController (fixed enhancement logic)
class FactAttentionController:
    """Injects per-layer 'fact attention' steering vectors into the last
    input embedding before generation.

    NOTE(review): the enhancement loop overwrites the same embedding slot on
    every iteration, so only the highest-indexed layer present in
    layer_attention_vectors actually takes effect — confirm this is intended.
    """
    def __init__(self, model, tokenizer):
        # Held by reference; the controller does not own or modify them here.
        self.model = model
        self.tokenizer = tokenizer
        # Weight snapshot populated by _save_model_state(); None when no snapshot is held.
        self.original_state_dict = None

    def _save_model_state(self):
        # Clone every parameter/buffer. As written, enhance_fact_attention never
        # mutates weights, so this snapshot/restore is defensive only.
        self.original_state_dict = {k: v.clone() for k, v in self.model.state_dict().items()}

    def _restore_model_state(self):
        if self.original_state_dict is not None:
            self.model.load_state_dict(self.original_state_dict)
            self.original_state_dict = None

    def enhance_fact_attention(self, input_ids, layer_attention_vectors, coefficient=0.7):
        """Generate a short continuation after steering the final input embedding.

        :param input_ids: token ids, shape (batch, seq_len)
        :param layer_attention_vectors: {layer_index: 1-D numpy steering vector}
        :param coefficient: scale applied to each steering vector
        :return: generated token ids. NOTE(review): generation from
            inputs_embeds typically yields only the new tokens (no prompt ids)
            in recent transformers versions — verify for the installed version.
        """
        self._save_model_state()
        try:
            with torch.no_grad():
                # One forward pass to collect hidden states at every layer.
                outputs = self.model(
                    input_ids=input_ids,
                    output_hidden_states=True,
                    use_cache=True
                )
                hidden_states = outputs.hidden_states
                embeddings = self.model.get_input_embeddings()
                inputs_embeds = embeddings(input_ids)
                for layer, vector in layer_attention_vectors.items():
                    if layer < len(hidden_states):
                        current_hidden = hidden_states[layer]
                        # Broadcast the 1-D steering vector to (1, 1, hidden_dim).
                        vector_tensor = torch.tensor(
                            vector,
                            dtype=current_hidden.dtype,
                            device=current_hidden.device
                        ).unsqueeze(0).unsqueeze(0)
                        # Fixed enhancement logic: add the weighted attention vector.
                        enhanced_hidden = current_hidden + coefficient * vector_tensor
                        # NOTE(review): this writes a *layer hidden state* (plus the
                        # steering vector) into the embedding-space slot of the last
                        # token, and each iteration overwrites the previous one, so
                        # only the last layer in the dict survives. Confirm intended.
                        inputs_embeds[:, -1, :] = enhanced_hidden[:, -1, :]
                # Generate from the steered embeddings.
                outputs = self.model.generate(
                    inputs_embeds=inputs_embeds,
                    max_new_tokens=6,
                    pad_token_id=self.tokenizer.pad_token_id
                )
                return outputs
        finally:
            self._restore_model_state()

# Load a JSON multiple-choice dataset (supports dynamic sample-size truncation)
def load_json_dataset(file_path, sample_size=None, random_sample=True):
    """
    Load a JSON dataset with dynamic sample-size control.

    :param file_path: path to a JSON file containing a list of records shaped
        like {"question": {"stem": ..., "choices": [...]}, "answerKey": ...}
    :param sample_size: number of samples to keep (None means all)
    :param random_sample: if True sample randomly, else take the first N
    :return: DataFrame with question_stem/choices/answerKey columns, or None
        on failure (errors are printed, not raised — deliberate best-effort)
    """
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)
        total_size = len(data)
        print(f"原始数据集共 {total_size} 条数据")

        # Dynamic sample-size control.
        if sample_size is not None and sample_size < total_size:
            if random_sample:
                # Sample indices rather than np.random.choice(data, ...):
                # choice() on a list of dicts coerces it into an object
                # ndarray, which is wasteful and fragile for nested records.
                chosen = np.random.choice(total_size, size=sample_size, replace=False)
                data = [data[i] for i in chosen]
            else:
                data = data[:sample_size]
            print(f"动态截取为 {len(data)} 条数据（{'随机采样' if random_sample else '前N条'}）")
        else:
            print(f"使用全部 {len(data)} 条数据")

        df = pd.DataFrame(data)
        # Extract question stem and options; answerKey is already a column
        # from the JSON records (the original had a no-op self-assignment).
        df['question_stem'] = df['question'].apply(lambda x: x['stem'])
        df['choices'] = df['question'].apply(lambda x: x['choices'])
        return df
    except Exception as e:
        print(f"加载JSON数据失败: {e}")
        return None

# Private helper: pull the first standalone option letter (A-D) out of model text.
def _extract_predicted_option(generated_text):
    """Return 'A'/'B'/'C'/'D' for the first standalone letter found in the
    (upper-cased) text, or None if no option letter appears."""
    match = re.search(r'\b([ABCD])\b', generated_text.upper())
    return match.group(1) if match else None


# Batch inference (adapted to dynamic parameters)
def batch_inference(model, tokenizer, data, layer_indices, threshold, batch_size=5, use_representation=True, coefficient=0.1):
    """Batched multiple-choice inference (supports dynamic model/layers/threshold).

    :param model: causal LM used for generation
    :param tokenizer: tokenizer matching the model
    :param data: DataFrame from load_json_dataset (question_stem/choices/answerKey)
    :param layer_indices: layers used to extract steering vectors
    :param threshold: magnitude threshold for steering-vector extraction
    :param batch_size: rows per generation batch
    :param use_representation: if True, steer generation with fact-attention vectors
    :param coefficient: steering strength passed to the controller
    :return: (correct_count, total_count)
    """
    correct_count = 0
    total_count = len(data)
    batches = [data[i:i + batch_size] for i in range(0, len(data), batch_size)]

    # Initialize the representation-enhancement components once per call.
    layer_attention_vectors = None
    controller = None
    if use_representation:
        fact_prompts = ["correct answer", "factual statement", "true option"]
        non_fact_prompts = ["wrong answer", "false statement", "incorrect option"]
        analyzer = FactAttentionAnalyzer(model, tokenizer, layer_indices=layer_indices)
        layer_attention_vectors = analyzer.extract_fact_attention_vectors(fact_prompts, non_fact_prompts, threshold=threshold)
        controller = FactAttentionController(model, tokenizer)

    # Multiple-choice prompt template.
    multiple_choice_template = """
Question: {question}
Options:
A: {option_A}
B: {option_B}
C: {option_C}
D: {option_D}
Please choose the correct option (A, B, C, or D), Only output correct answer options.
"""

    start_time = time.time()
    for batch in tqdm(batches, desc="处理批次"):
        prompts = []
        actual_answers = []

        for _, row in batch.iterrows():
            option_dict = {choice['label']: choice['text'] for choice in row['choices']}
            prompts.append(multiple_choice_template.format(
                question=row['question_stem'],
                option_A=option_dict.get('A', ''),
                option_B=option_dict.get('B', ''),
                option_C=option_dict.get('C', ''),
                option_D=option_dict.get('D', '')
            ))
            actual_answers.append(row['answerKey'])

        # Encode the batch of prompts.
        inputs = tokenizer(
            prompts,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=512
        )
        input_ids = inputs.input_ids.to(model.device)
        attention_mask = inputs.attention_mask.to(model.device)

        # Generate answers (steered or plain greedy decoding).
        with torch.no_grad():
            if use_representation and layer_attention_vectors is not None and controller is not None:
                outputs = controller.enhance_fact_attention(
                    input_ids,
                    layer_attention_vectors,
                    coefficient=coefficient
                )
            else:
                outputs = model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    do_sample=False,
                    max_new_tokens=10,
                    pad_token_id=tokenizer.pad_token_id
                )

        # Parse results.
        prompt_len = input_ids.shape[1]
        for output, actual_answer in zip(outputs, actual_answers):
            # BUG FIX: generate(input_ids=...) returns prompt + continuation,
            # and the prompt literally contains "A:", "B:", ... — decoding the
            # full sequence made the regex match the prompt's own labels (so
            # the parser almost always predicted 'A'). Strip the prompt tokens
            # when present; generation from inputs_embeds (steered path)
            # already returns only new tokens and is left untouched.
            if output.shape[0] > prompt_len:
                output = output[prompt_len:]
            generated_text = tokenizer.decode(output, skip_special_tokens=True).strip()
            # BUG FIX: take the first option letter that appears in the text
            # instead of always checking A before B/C/D.
            predicted_option = _extract_predicted_option(generated_text)
            if predicted_option == actual_answer:
                correct_count += 1

    end_time = time.time()
    print(f"批次推理耗时: {end_time - start_time:.2f}秒")
    return correct_count, total_count

# Helper for loading a local model
def load_local_model(model_path):
    """Load a tokenizer and causal LM strictly from local files,
    avoiding HF Hub validation errors."""
    # Normalize to an absolute path so a changed CWD cannot break loading.
    model_path = os.path.abspath(model_path)
    print(f"加载本地模型: {model_path}")

    # Shared flags: trust custom model code, never touch the network.
    local_kwargs = dict(trust_remote_code=True, local_files_only=True)

    tokenizer = AutoTokenizer.from_pretrained(model_path, **local_kwargs)

    # fp16 weights, placed per the module-level device_map, with reduced
    # CPU memory footprint during loading.
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        device_map=device_map,
        torch_dtype=torch.float16,
        low_cpu_mem_usage=True,
        **local_kwargs
    )

    # Some tokenizers ship without a pad token; fall back to EOS so that
    # batched padding works.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    return model, tokenizer

# Main program: grid-search over models x layer groups x thresholds on a
# multiple-choice dataset, recording accuracy for each combination.
if __name__ == "__main__":
    # ====================== Dynamic sample-size configuration ======================
    dataset_sample_size = 50  # adjustable: e.g. 100 uses 100 rows, None uses all
    random_sample = True      # random sampling (False takes the first N rows)
    # ==========================================================

    # Load the JSON dataset (applies the dynamic sample-size control above).
    json_dataset_path = "/home/ZJQ/pypro/data/dataOpenBook.json"  
    dataset = load_json_dataset(json_dataset_path, sample_size=dataset_sample_size, random_sample=random_sample)
    
    if dataset is None or len(dataset) == 0:
        print("没有数据可处理，程序退出")
        exit()
    
    # Test configuration: local ModelScope cache paths must exist on disk.
    model_configs = [
        ("/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-0___5-Instruct", "Qwen2.5-0.5B"),
        ("/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-3B-Instruct", "Qwen2.5-3B"),
        ("/home/ZJQ/.cache/modelscope/hub/models/Qwen/Qwen2___5-7B-Instruct", "Qwen2.5-7B")
    ]
    # Named groups of candidate layer indices; filtered against each model's
    # real depth before use.
    layer_configs = {
        "初始层": [1, 2, 3],
        "中间层-第一部分": [4, 5, 6],
        "中间层-第二部分": [7, 8, 9],
        "输出层": [10, 11, 12]  # automatically adapted to the model's actual layer count
    }
    thresholds = np.arange(0.1, 2.1, 0.1).round(1)  # 0.1 to 2.0, step 0.1

    # Accumulates one result dict per (model, layer group, threshold) run.
    results = []

    # Sweep every combination.
    for model_path, model_label in model_configs:
        print(f"\n{'='*50}")
        print(f"测试模型: {model_label}")
        print(f"{'='*50}")
        
        try:
            # Load the local model via the dedicated loader.
            model, tokenizer = load_local_model(model_path)
            
            # Determine the model's real layer count (handles different Qwen layouts).
            if hasattr(model, 'transformer') and hasattr(model.transformer, 'h'):
                total_layers = len(model.transformer.h)
            elif hasattr(model, 'model') and hasattr(model.model, 'layers'):
                total_layers = len(model.model.layers)
            else:
                total_layers = 12  # fallback default
            print(f"模型总层数: {total_layers}")

            # Try each layer configuration.
            for layer_label, layer_indices in layer_configs.items():
                print(f"\n{'-'*40}")
                print(f"测试层配置: {layer_label} (原始索引: {layer_indices})")
                
                # Keep only indices that exist in this model.
                valid_layers = [l for l in layer_indices if l < total_layers]
                if not valid_layers:
                    print(f"警告: 层配置 {layer_label} 无有效层，跳过")
                    continue
                print(f"有效层索引: {valid_layers}")

                # Try each threshold.
                for threshold in thresholds:
                    print(f"\n{'_'*30}")
                    print(f"测试阈值: {threshold}")
                    
                    try:
                        correct, total = batch_inference(
                            model=model,
                            tokenizer=tokenizer,
                            data=dataset,
                            layer_indices=valid_layers,
                            threshold=threshold,
                            batch_size=1,
                            coefficient=0.1
                        )
                        accuracy = correct / total if total > 0 else 0.0
                        print(f"结果: 正确={correct}, 总数={total}, 准确率={accuracy:.4f}")

                        # Record this run.
                        results.append({
                            "模型": model_label,
                            "层配置": layer_label,
                            "阈值": threshold,
                            "正确数": correct,
                            "总数": total,
                            "准确率": accuracy
                        })
                    except Exception as e:
                        # Best-effort: one failed combination must not stop the sweep.
                        print(f"测试失败: {str(e)[:100]}")
                        continue

            # Free GPU memory before loading the next model.
            del model, tokenizer
            torch.cuda.empty_cache()
            
        except Exception as e:
            print(f"加载模型失败: {str(e)[:100]}")
            continue

    # Print the final summary table.
    print(f"\n{'='*60}")
    print("最终测试结果汇总")
    print(f"{'='*60}")
    results_df = pd.DataFrame(results)
    print(results_df.to_string(index=False))

    # Save results to CSV (utf-8-sig so spreadsheet apps read the headers correctly).
    results_df.to_csv("model_test_results.csv", index=False, encoding='utf-8-sig')
    print("\n结果已保存到 model_test_results.csv")