import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import pandas as pd
import time
import numpy as np
from tqdm import tqdm
import os
import re
from sklearn.model_selection import train_test_split

# GPU selection: pin this process to a single physical GPU (device 1).
# os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# Device setup: if at least one GPU is visible, let transformers shard the
# model automatically ("auto"); otherwise use the default (CPU) placement.
# NOTE(review): the else-branch message says "single-card" operation, but this
# branch is only reached when zero GPUs are visible, i.e. CPU fallback.
if torch.cuda.device_count() > 0:
    print(f"发现 {torch.cuda.device_count()} 个GPU设备")
    device_map = "auto"
else:
    print(f"仅发现 {torch.cuda.device_count()} 个GPU设备，将使用单卡运行")
    device_map = None

# Local path to the Meta-Llama-3.1-8B-Instruct checkpoint (ModelScope cache).
model_name = "/home/ZJQ/.cache/modelscope/hub/models/LLM-Research/Meta-Llama-3___1-8B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map=device_map,
    torch_dtype=torch.float16,  # half precision to fit the 8B model in memory
    trust_remote_code=True
)

# Llama tokenizers ship without a pad token; reuse EOS so batched padding works.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
    print(f"已将pad_token设置为eos_token，pad_token_id: {tokenizer.pad_token_id}")

# Report how modules were distributed across devices by device_map="auto".
# NOTE(review): hf_device_map maps module-name -> device, so the loop variable
# names are swapped; the printed mapping is still correct.
if device_map == "auto":
    print("模型分布情况:")
    for device, param in model.hf_device_map.items():
        print(f"  {device}: {param}")

# Representation engineering: fact-attention vector extractor
class FactAttentionAnalyzer:
    """Extracts per-layer "fact attention" directions by contrasting hidden
    states of fact-related vs. non-fact-related prompts.

    The direction for each layer is the first PCA component of the pairwise
    difference vectors (fact representation minus non-fact representation).
    """

    def __init__(self, model, tokenizer, layer_indices=None):
        """
        Args:
            model: causal LM that can return hidden states.
            tokenizer: tokenizer matching the model.
            layer_indices: hidden-state layers to analyze (defaults to [1, 2, 3]).
        """
        self.model = model
        self.tokenizer = tokenizer
        # FIX: replaced the shared mutable default argument ([1, 2, 3]) with a
        # None sentinel; behavior for callers is unchanged.
        self.layer_indices = [1, 2, 3] if layer_indices is None else layer_indices
        self.original_state_dict = None

    def _save_model_state(self):
        # Clone every parameter so the model can be restored after analysis.
        self.original_state_dict = {k: v.clone() for k, v in self.model.state_dict().items()}

    def _restore_model_state(self):
        # Restore the snapshot taken by _save_model_state(), if any.
        if self.original_state_dict is not None:
            self.model.load_state_dict(self.original_state_dict)
            self.original_state_dict = None

    def _get_representation(self, prompt, layer_idx, token_pos=-1):
        """Return the hidden state at `layer_idx` / `token_pos` as a numpy
        array of shape (batch, hidden_dim); the default is the last token."""
        inputs = self.tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to(self.model.device)
        with torch.no_grad():
            outputs = self.model(**inputs, output_hidden_states=True)
            hidden_states = outputs.hidden_states[layer_idx]
            # token_pos == -1 already selects the last token, so the original
            # special-case branch was redundant.
            representation = hidden_states[:, token_pos, :]
        return representation.cpu().numpy()

    def get_large_vectors_by_threshold_1d(self, contrast_vectors, threshold=1):
        """Per dimension, sum only the entries whose magnitude strictly
        exceeds `threshold`; all other entries contribute 0.

        Args:
            contrast_vectors: 2-D array of shape (n_vectors, feature_dim).
            threshold: magnitude cutoff (exclusive).

        Returns:
            1-D float64 array of length feature_dim.
        """
        print(f"输入形状: {contrast_vectors.shape}")
        # Vectorized replacement of the original per-dimension Python loop;
        # dtype=float64 matches the original np.zeros accumulator.
        keep = np.abs(contrast_vectors) > threshold
        result_vector = np.where(keep, contrast_vectors, 0).sum(axis=0, dtype=np.float64)
        print(f"输出向量形状: {result_vector.shape}")
        return result_vector

    def extract_fact_attention_vectors(self, fact_prompts, non_fact_prompts, n_components=1):
        """Extract one fact-attention direction per configured layer.

        Args:
            fact_prompts: prompts representing factual/honest concepts.
            non_fact_prompts: prompts representing the contrasting concepts;
                paired element-wise with fact_prompts.
            n_components: number of PCA components to fit (only the first is used).

        Returns:
            dict {layer_index: first PCA component (1-D numpy array)}.
        """
        from sklearn.decomposition import PCA

        self._save_model_state()
        try:
            layer_contrast_vectors = {layer: [] for layer in self.layer_indices}

            # Collect last-token representations for both prompt groups.
            for prompt in fact_prompts:
                for layer in self.layer_indices:
                    rep = self._get_representation(prompt, layer)  # renamed: 'repr' shadowed a builtin
                    layer_contrast_vectors[layer].append(("fact", rep[0]))

            for prompt in non_fact_prompts:
                for layer in self.layer_indices:
                    rep = self._get_representation(prompt, layer)
                    layer_contrast_vectors[layer].append(("non_fact", rep[0]))

            # Dead code removed: the mean and thresholded variants were
            # computed but never returned or used by any caller.
            layer_attention_vector = {}
            for layer in self.layer_indices:
                fact_reprs = [v for t, v in layer_contrast_vectors[layer] if t == "fact"]
                non_fact_reprs = [v for t, v in layer_contrast_vectors[layer] if t == "non_fact"]

                # Pairwise contrast: fact representation minus non-fact one.
                contrast_vectors = np.vstack([f - n for f, n in zip(fact_reprs, non_fact_reprs)])

                # The first principal component of the contrasts is the direction.
                pca = PCA(n_components=n_components)
                pca.fit(contrast_vectors)
                layer_attention_vector[layer] = pca.components_[0]

            return layer_attention_vector

        finally:
            self._restore_model_state()


# Representation engineering: fact-attention enhancement controller
# (core change: the enhance_fact_attention method)
class FactAttentionController:
    """Generates answers while (intendedly) steering the model toward
    per-layer fact-attention directions.

    NOTE(review): as currently written the steering is never actually
    applied — see enhance_fact_attention below.
    """

    def __init__(self, model, tokenizer):
        self.model = model
        self.tokenizer = tokenizer
        # Snapshot of model weights, populated by _save_model_state().
        self.original_state_dict = None

    def _save_model_state(self):
        # Clone every parameter so the model can be restored afterwards.
        # NOTE(review): this clones the full 8B-parameter state dict on every
        # call, yet nothing below mutates the weights — likely unnecessary cost.
        self.original_state_dict = {k: v.clone() for k, v in self.model.state_dict().items()}

    def _restore_model_state(self):
        # Restore the snapshot taken by _save_model_state(), if any.
        if self.original_state_dict is not None:
            self.model.load_state_dict(self.original_state_dict)
            self.original_state_dict = None

    # Key change 1: accept attention_mask and pass it explicitly to generation
    def enhance_fact_attention(self, input_ids, attention_mask, layer_attention_vectors, coefficient=0.7):
        """Generate a continuation, intended to enhance attention to facts.

        Args:
            input_ids: padded token ids, shape (batch, seq) — TODO confirm.
            attention_mask: attention mask matching input_ids.
            layer_attention_vectors: {layer_index: 1-D direction vector}.
            coefficient: intended steering strength.
                NOTE(review): currently unused — see the loop below.

        Returns:
            Token ids from model.generate(). Since generation is driven by
            inputs_embeds (no input_ids), the output contains only the newly
            generated tokens, not the prompt.
        """
        self._save_model_state()
        try:
            with torch.no_grad():
                # Key change 2: forward pass receives the attention_mask
                outputs = self.model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    output_hidden_states=True,
                    use_cache=True
                )
                hidden_states = outputs.hidden_states

                # Raw input embeddings for the prompt tokens
                embeddings = self.model.get_input_embeddings()
                inputs_embeds = embeddings(input_ids)
  
                # Apply enhancement at each configured layer
                # NOTE(review): this loop is effectively a no-op — vector_tensor
                # is built but never added to any hidden state or to
                # inputs_embeds, and `coefficient` is never used. Generation
                # below therefore runs on the unmodified embeddings; the
                # intended steering is not applied. Confirm intent before fixing.
                for layer, vector in layer_attention_vectors.items():
                    current_hidden = hidden_states[layer]
                    vector_tensor = torch.tensor(
                        vector,
                        dtype=current_hidden.dtype,
                        device=current_hidden.device
                    ).unsqueeze(0).unsqueeze(0)  # broadcastable shape [1, 1, dim]

                # Generate the answer from the (unmodified) embeddings
                # Key change 3: pass attention_mask and pad_token_id explicitly
                # (silences the transformers warning about missing pad token)
                outputs = self.model.generate(
                    inputs_embeds=inputs_embeds,
                    attention_mask=attention_mask,
                    pad_token_id=self.tokenizer.pad_token_id,
                    max_new_tokens=16
                )
                return outputs

        finally:
            self._restore_model_state()


# Load the Excel sentiment-classification dataset
def load_excel_dataset(file_path):
    """Load a sentiment dataset from an XLSX file.

    Expects the columns 'text' (review text) and 'lable' (sentiment label,
    spelled as in the source data).

    Returns:
        The loaded DataFrame, or None if reading or validation fails.
    """
    required_cols = ['text', 'lable']
    try:
        df = pd.read_excel(file_path)
        print(f"成功加载 {len(df)} 条数据")
        print(f"数据集列名: {list(df.columns)}")
        # Validate that both mandatory columns are present.
        missing = [col for col in required_cols if col not in df.columns]
        if missing:
            raise ValueError(f"数据集缺少核心列！需包含 {required_cols}")
        return df
    except Exception as e:
        # Best-effort loader: report the problem and signal failure via None.
        print(f"加载XLSX数据失败: {e}")
        return None


# Batch inference (fixed to decode only newly generated tokens)
def batch_inference(data, batch_size=5, use_representation=True, coefficient=0.1):
    """Run batched sentiment-classification inference over `data`.

    Uses the module-level `model` and `tokenizer`. When `use_representation`
    is True, fact-attention vectors are extracted once and generation goes
    through FactAttentionController; otherwise plain greedy generation is used.

    Args:
        data: DataFrame with 'text' and 'lable' columns.
        batch_size: number of samples per generation batch.
        use_representation: enable the representation-engineering path.
        coefficient: enhancement strength forwarded to the controller.

    Returns:
        (correct_count, total_count) for accuracy computation.
    """
    correct_count = 0
    total_count = len(data)
    batches = [data[i:i + batch_size] for i in range(0, len(data), batch_size)]

    # Representation-enhancement components (built once, reused per batch).
    layer_attention_vectors = None
    controller = None
    if use_representation:
        # Contrast prompts: factual/honest concepts vs. their opposites.
        fact_prompts = [
            "integrity",
            "fact",
            "reality"
        ]

        non_fact_prompts = [
            "dishonest",
            "falsehood",
            "illusion"
        ]

        print("正在提取事实关注向量...")
        analyzer = FactAttentionAnalyzer(model, tokenizer, layer_indices=[1, 2, 3])
        layer_attention_vectors = analyzer.extract_fact_attention_vectors(fact_prompts, non_fact_prompts)
        print(f"已提取事实关注向量的层: {list(layer_attention_vectors.keys())}")

        controller = FactAttentionController(model, tokenizer)

    sentiment_prompt_template = "Please determine whether the movie reviews below are positive or negative. Movie reviews: {text}. Just answer positive or negative. "

    start_time = time.time()

    for batch in tqdm(batches, desc="处理批次"):
        prompts = []
        actual_labels = []

        # Build prompts and collect gold labels for this batch.
        for _, row in batch.iterrows():
            prompts.append(sentiment_prompt_template.format(text=row['text'].strip()))
            actual_labels.append(row['lable'].strip())

        # Tokenize with padding so the batch forms a rectangular tensor.
        inputs = tokenizer(
            prompts,
            return_tensors="pt",
            padding=True,
            truncation=True,
        )
        input_ids = inputs.input_ids.to(model.device)
        attention_mask = inputs.attention_mask.to(model.device)

        enhanced = use_representation and layer_attention_vectors is not None and controller is not None
        with torch.no_grad():
            if enhanced:
                outputs = controller.enhance_fact_attention(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    layer_attention_vectors=layer_attention_vectors,
                    coefficient=coefficient
                )
            else:
                outputs = model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    do_sample=False,
                    max_new_tokens=15,
                    pad_token_id=tokenizer.pad_token_id
                )

        # BUG FIX: generate() called with input_ids echoes the prompt in its
        # output, and the prompt itself contains both "positive" and
        # "negative" — so checking the full decoded text made every answer
        # count as correct. Slice off the (padded) prompt before decoding.
        # The enhanced path generates from inputs_embeds and already returns
        # only the new tokens, so no slicing is needed there.
        prompt_len = 0 if enhanced else input_ids.shape[1]
        for i, (output, actual_label) in enumerate(zip(outputs, actual_labels)):
            generated_text = tokenizer.decode(output[prompt_len:], skip_special_tokens=True).strip()

            # Credit the prediction when the gold label appears in the answer.
            if actual_label.lower() in generated_text.lower():
                correct_count += 1

            # Print the first two results of each batch for debugging.
            if i < 2:
                print(f"\n文本: {batch.iloc[i]['text'][:50]}...")
                print(f"生成结果: {generated_text}")
                print(f"实际标签: {actual_label}")
                print(f"判断: {'正确' if actual_label.lower() in generated_text.lower() else '错误'}")

    end_time = time.time()
    print(f"\n推理完成，耗时: {end_time - start_time:.2f}秒")
    return correct_count, total_count


# Main program: load the dataset, map labels to token ids, run inference.
if __name__ == "__main__":
    # Dataset path (alternatives kept for quick switching between sizes).
    dataset_path = "/home/ZJQ/pypro/data/data5000.xlsx"

    # dataset_path = "/home/ZJQ/pypro/data/data100.xlsx"
    # dataset_path = "/home/ZJQ/pypro/data/data.xlsx"

    dataset = load_excel_dataset(dataset_path)

    if dataset is None or len(dataset) == 0:
        print("没有数据可处理，程序退出")
        exit()

    # Map each sentiment label to its first token id (diagnostic output only).
    # BUG FIX: encode() previously included special tokens, so [0] was the
    # BOS token id for every label; disable special tokens to get the label's
    # own first token.
    emotions = dataset['lable'].unique().tolist()
    emotion_ids = {emo: tokenizer.encode(emo, add_special_tokens=False)[0] for emo in emotions}
    print(f"情感类别: {emotions}")
    print(f"情感标签-Token映射: {emotion_ids}")

    # Run inference over the full dataset and report accuracy.
    try:
        correct_count, total_count = batch_inference(
            dataset,
            batch_size=1,  # adjust to available GPU memory
            use_representation=True,  # toggle representation enhancement
            coefficient=0.1  # enhancement strength
        )

        # Guard against division by zero on an empty dataset.
        accuracy = correct_count / total_count if total_count > 0 else 0
        print(f"\n【情感分类测试集准确率】: {accuracy:.4f}")
        print(f"正确预测数: {correct_count}, 测试集总样本数: {total_count}")
    except Exception as e:
        # NOTE(review): this swallows the traceback; consider re-raising or
        # logging.exception for easier debugging.
        print(f"推理过程中发生错误: {e}")
        exit()