from tqdm import tqdm
import torch
import torch_npu  # 如使用 NPU 保留
import torch.nn as nn
from transformers import AutoModelForCausalLM, AutoTokenizer
import numpy as np
from datasets import load_dataset
import json
import os
import warnings
from concurrent.futures import ThreadPoolExecutor
import random

warnings.filterwarnings("ignore")

# ==== Global reproducibility ====
SEED = 42

def set_seed(seed: int):
    """Seed every RNG this script touches: python, numpy, torch, CUDA and NPU."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # Seed the Ascend NPU RNG. The original guard only checked the top-level
    # attribute, which most torch_npu releases do not expose, so NPU seeding
    # silently never ran; fall back to the torch_npu.npu namespace, where the
    # CUDA-style seeding API actually lives.
    if hasattr(torch_npu, 'manual_seed_all'):
        torch_npu.manual_seed_all(seed)
    elif hasattr(torch_npu, 'npu') and hasattr(torch_npu.npu, 'manual_seed_all'):
        torch_npu.npu.manual_seed_all(seed)
    # Trade speed for determinism in cuDNN-backed ops.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# Apply the global seed once, before any model/dataset work below.
set_seed(SEED)

# ==== Basic settings ====
expn = "expn_qwen3-0.6b-gsm8k-opt"  # output root: one folder per sample is created under this
model_name = "/home/ma-user/work/DownLoads/Models/Qwen/Qwen3-0.6B"

# ==== Model loading ====
# fp16 + device_map="auto" lets accelerate place the model on the available device(s).
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    trust_remote_code=True,
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
model.eval()

# ==== Data loading ====
dataset = load_dataset("openai/gsm8k", "main", cache_dir="/home/ma-user/work/DownLoads/Dataset/openai/gsm8k")["train"]

# Randomly sample up to 1000 examples; the seeded shuffle keeps the subset reproducible.
max_samples = 1000
# NOTE(review): the original comment claimed this was the 0-based index of
# "sample 928", but the value is 0, so processing starts at the first sampled
# example. Set start_idx > 0 only to resume an interrupted run.
start_idx = 0
if dataset.num_rows > max_samples:
    dataset = dataset.shuffle(seed=SEED).select(range(max_samples))  # keep the seeded random order
else:
    print(f"数据集样本数 ({dataset.num_rows}) 小于 {max_samples}，使用全部样本")
    max_samples = dataset.num_rows

# Skip the first start_idx samples (a no-op while start_idx == 0).
dataset = dataset.select(range(start_idx, max_samples))

max_new_tokens = 3000

# ==== Hooks: number of decoder layers whose MLPs get wrapped below ====
num_layers = len(model.model.layers)

class CustomQwen3MLP(nn.Module):
    """Drop-in replacement for the Qwen3 MLP that caches its down_proj output.

    The original projection submodules are shared (not copied), so weights,
    dtype and device placement are unchanged; the most recent forward result
    is retained on ``down_proj_output`` for the forward hooks to read.
    """

    def __init__(self, original_mlp):
        super().__init__()
        # Reuse the existing submodules rather than re-creating them.
        self.gate_proj = original_mlp.gate_proj
        self.up_proj = original_mlp.up_proj
        self.down_proj = original_mlp.down_proj
        self.act_fn = original_mlp.act_fn
        self.down_proj_output = None  # tensor from the latest forward pass

    def forward(self, x):
        # Standard gated-MLP: down( act(gate(x)) * up(x) ), with the result cached.
        gated = self.act_fn(self.gate_proj(x))
        projected = gated * self.up_proj(x)
        out = self.down_proj(projected)
        self.down_proj_output = out
        return out

# Replace every decoder layer's MLP with the activation-capturing wrapper.
for layer in model.model.layers:
    layer.mlp = CustomQwen3MLP(layer.mlp).to(model.device)

# ==== Hook state & factory ====
activations = []  # (layer_idx, ndarray) pairs captured during each generate() call
hooks = []        # hook handles, kept so they can be removed at shutdown

def create_hook(layer_idx):
    """Build a forward hook that records this layer's MLP output.

    The hook reads the forward pass's return value (the ``output`` argument)
    directly instead of reaching into the wrapper's ``down_proj_output``
    attribute — it is the same tensor, and this way the hook works with any
    MLP module, not only CustomQwen3MLP.
    """
    def hook_fn(module, input, output):
        activations.append((layer_idx, output.detach().cpu().numpy()))
    return hook_fn

# Install one forward hook per decoder layer; each MLP forward pass during
# generation (one per decode step per layer) appends to `activations`.
for layer_idx in range(num_layers):
    hooks.append(model.model.layers[layer_idx].mlp.register_forward_hook(create_hook(layer_idx)))

# ==== Asynchronous saving ====
save_executor = ThreadPoolExecutor(max_workers=4)

def _report_save_failure(path, future):
    """Done-callback: surface exceptions from fire-and-forget save tasks.

    Without this, a Future whose result is never inspected silently swallows
    any exception raised in the worker thread, so failed saves go unnoticed.
    """
    exc = future.exception()
    if exc is not None:
        print(f"保存失败 {path}: {exc}")

def async_save_numpy(path, array):
    """Write `array` to `path` with np.save on a background thread."""
    future = save_executor.submit(np.save, path, array)
    future.add_done_callback(lambda f, p=path: _report_save_failure(p, f))

def async_save_json(path, obj):
    """Serialize `obj` as pretty-printed UTF-8 JSON to `path` on a background thread."""
    def save_fn():
        with open(path, "w", encoding="utf-8") as f:
            json.dump(obj, f, ensure_ascii=False, indent=2)
    future = save_executor.submit(save_fn)
    future.add_done_callback(lambda f, p=path: _report_save_failure(p, f))

# ==== Analysis function ====
def analyze_sample(sample_idx: int, question: str):
    """Generate an answer for one GSM8K question and persist per-step telemetry.

    Side effects: resets the global `activations` buffer, runs model.generate
    (which fires the per-layer MLP hooks once per forward pass), and schedules
    asynchronous writes of activations / entropies / logits / metadata under
    {expn}/gsm8k_sample_{global_idx}.

    Returns a summary dict for the sample, or None if it was skipped (already
    processed) or failed.
    """
    global activations
    activations = []

    # Global sample index: local position shifted by the resume offset.
    global_sample_idx = sample_idx + start_idx
    sample_dir = f"{expn}/gsm8k_sample_{global_sample_idx}"

    # Skip samples whose output (including metadata.json, written last) already
    # exists, so an interrupted run can resume without recomputation.
    if os.path.exists(sample_dir) and os.path.exists(os.path.join(sample_dir, "metadata.json")):
        print(f"样本 {global_sample_idx} 已存在，跳过处理")
        return None

    try:
        # Build the chat-formatted prompt (thinking mode enabled for Qwen3).
        messages = [{"role": "user", "content": question}]
        text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True, enable_thinking=True)
        inputs = tokenizer([text], return_tensors="pt").to(model.device)
        input_ids = inputs["input_ids"]
        input_str = question

        with torch.inference_mode():
            outputs = model.generate(
                **inputs,
                max_new_tokens=max_new_tokens,
                return_dict_in_generate=True,
                output_scores=True,
                output_attentions=False,
                output_hidden_states=False,
                pad_token_id=tokenizer.eos_token_id
            )

        # Full sequence (prompt + completion), decoded with special tokens kept.
        generated_tokens = outputs.sequences[0]
        generated_token_ids = generated_tokens.tolist()
        generated_text = tokenizer.decode(generated_token_ids, skip_special_tokens=False)
        generated_tokens_str = tokenizer.convert_ids_to_tokens(generated_token_ids)

        # Per-step next-token distribution entropy and raw logits from the scores.
        entropies = []
        logits = []
        for score in outputs.scores:
            probs = torch.softmax(score, dim=-1).cpu().numpy()
            entropy = -np.sum(probs * np.log(probs + 1e-10), axis=-1)  # epsilon guards log(0)
            entropies.append(entropy[0])
            logits.append(score[0].cpu().numpy())

        # Group the hook captures by layer, keeping only the last token's
        # activation from each forward pass (the token generated at that step).
        layer_activations = [[] for _ in range(num_layers)]
        for layer_idx, act in activations:
            layer_activations[layer_idx].append(act[0, -1, :])

        os.makedirs(sample_dir, exist_ok=True)

        activation_files = []
        for layer_idx in range(num_layers):
            if layer_activations[layer_idx]:
                # shape: [num_forward_passes, hidden_dim] — one row per decode
                # step (NOT [1, hidden_dim]; the hook fires once per step).
                acts = np.array(layer_activations[layer_idx])
                fname = f"activations_layer_{layer_idx}.npy"
                async_save_numpy(os.path.join(sample_dir, fname), acts)
                activation_files.append(fname)

        async_save_numpy(os.path.join(sample_dir, "entropies.npy"), np.array(entropies))
        async_save_numpy(os.path.join(sample_dir, "logits.npy"), np.array(logits))

        # metadata.json ties token-level outputs to the saved array files.
        metadata = {
            "dataset": "gsm8k",
            "sample_index": global_sample_idx,
            "input_string": input_str,
            "input_token_ids": input_ids[0].tolist(),
            "output_text": generated_text,
            "output_token_ids": generated_token_ids,
            "output_tokens": generated_tokens_str,
            "activation_files": activation_files,
            "entropies_file": "entropies.npy",
            "logits_file": "logits.npy"
        }
        async_save_json(os.path.join(sample_dir, "metadata.json"), metadata)

        return {
            "sample_idx": global_sample_idx,
            "input_str": input_str,
            "generated_text": generated_text,
            "generated_tokens": generated_tokens_str,
            "entropies": entropies,
            "activation_files": activation_files
        }
    except Exception as e:
        # Best-effort: log and continue so one bad sample doesn't kill the run.
        print(f"样本 {global_sample_idx} 处理失败: {str(e)}")
        return None

# ==== Main loop: process every sampled question in order ====
results = []
for position, example in enumerate(tqdm(dataset)):
    outcome = analyze_sample(position, example["question"])
    if outcome is not None:
        results.append(outcome)

# ==== Cleanup: flush pending writes, then detach the forward hooks ====
save_executor.shutdown(wait=True)
for handle in hooks:
    handle.remove()

# ==== Print a short summary for each processed sample ====
for summary in results:
    print(f"\n样本 {summary['sample_idx']}:")
    print(f"输入: {summary['input_str']}")
    print(f"输出: {summary['generated_text']}")
    print(f"输出 token: {summary['generated_tokens']}")
    print(f"熵: {summary['entropies'][:5]}...")
    print(f"激活值文件: {summary['activation_files'][:3]}...")

print(f"\n所有结果已保存到 {expn}/gsm8k_sample_X 文件夹")