import os
from transformers import AutoModelForCausalLM, AutoTokenizer, LlamaForCausalLM
import torch
import torch.nn as nn

# Model path: the script expects the model weights/tokenizer files to live
# in the current working directory.
model_dir = os.getcwd()

# Load the tokenizer from the local model directory.
tokenizer = AutoTokenizer.from_pretrained(model_dir)

# LLaMA tokenizers ship without a pad token; reuse EOS so batched
# tokenization with padding does not fail.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Load the LLaMA model in fp16 with automatic device placement.
# NOTE(review): offload_folder is a hard-coded Windows path ("E:/offload") —
# confirm it exists on the machine running this, or weights that accelerate
# decides to offload will fail to spill to disk.
model = LlamaForCausalLM.from_pretrained(
    model_dir,
    torch_dtype=torch.float16,
    device_map="auto",
    offload_folder="E:/offload"
)

# Capture buffers for the instrumented FFN, keyed by layer id.
# Every slot starts as None and is overwritten with a detached tensor
# copy when the corresponding point of the forward pass is reached.
_CAPTURE_KEYS = (
    "first_linear",
    "activation",
    "second_linear",
    "second_linear_out",
    "rotate_in",
    "rotate_out",
)
ffn_inputs = {1: {key: None for key in _CAPTURE_KEYS}}

# 重写 FFN (MLP) 模块
class ModifiedMLP(nn.Module):
    """Drop-in replacement for a LLaMA MLP block that records intermediate
    tensors into the module-level ``ffn_inputs[layer_id]`` dict.

    Fixes over the previous version:
    - Restores the ``up_proj`` (W3) branch: LLaMA's MLP computes the SwiGLU
      ``down_proj(act(gate_proj(x)) * up_proj(x))``; dropping ``up_proj``
      silently changed the model's outputs.
    - Reuses the original module's activation when present instead of
      hard-coding SiLU (behavior is the same for stock LLaMA, which uses SiLU).
    - The inverse-rotation step is nested under the rotation step, since
      ``rotated_input`` only exists when ``rotate_matrix`` is set (the old
      code raised NameError when only ``rotate_inverse`` was configured).
    """

    def __init__(self, original_mlp, layer_id):
        """
        Args:
            original_mlp: the LlamaMLP being wrapped; its gate/up/down
                projections are reused directly (weights are shared, not copied).
            layer_id: key into ``ffn_inputs`` under which captures are stored.
        """
        super().__init__()
        self.gate_proj = original_mlp.gate_proj  # first linear transform (W1)
        self.up_proj = original_mlp.up_proj      # parallel linear branch (W3)
        self.down_proj = original_mlp.down_proj  # second linear transform (W2)
        # Prefer the activation the original module was built with.
        self.act_fn = getattr(original_mlp, "act_fn", torch.nn.functional.silu)
        self.layer_id = layer_id  # marks which layer this instance instruments

        # Rotation-matrix hooks (left unset by default; assign externally).
        self.rotate_matrix = None   # rotation matrix
        self.rotate_inverse = None  # inverse of the rotation matrix

    def forward(self, x):
        store = ffn_inputs[self.layer_id]

        # Input to the first linear transform.
        store["first_linear"] = x.detach().clone()

        # First linear transform (W1).
        hidden = self.gate_proj(x)

        # Input to the activation function.
        store["activation"] = hidden.detach().clone()

        # SwiGLU: activation of the gate branch, gated by the up branch.
        activated = self.act_fn(hidden) * self.up_proj(x)

        # Input to the second linear transform.
        store["second_linear"] = activated.detach().clone()

        # Second linear transform (W2).
        output = self.down_proj(activated)

        # Output of the second linear transform.
        store["second_linear_out"] = output.detach().clone()

        # Optional rotation / inverse-rotation experiment hooks.
        if self.rotate_matrix is not None:
            rotated_input = torch.matmul(output, self.rotate_matrix)
            store["rotate_in"] = rotated_input.detach().clone()

            # The inverse only makes sense after the forward rotation,
            # so it is applied (and captured) inside this branch.
            if self.rotate_inverse is not None:
                rotated_output = torch.matmul(rotated_input, self.rotate_inverse)
                store["rotate_out"] = rotated_output.detach().clone()
                output = rotated_output  # feed the de-rotated result onward

        return output

# Replace layer 0's FFN with the instrumented version.
# NOTE: layer_id=1 matches the key pre-registered in ffn_inputs above.
original_mlp = model.model.layers[0].mlp
model.model.layers[0].mlp = ModifiedMLP(original_mlp, layer_id=1)

# Example input
input_text = "Hello, LLaMA 2!"
inputs = tokenizer(input_text, return_tensors="pt")
# Fix: with device_map="auto" the embedding layer may live on a GPU while the
# tokenizer returns CPU tensors; move every input tensor to the model's device.
inputs = {k: v.to(model.device) for k, v in inputs.items()}

# Forward pass — inference only, so skip autograd bookkeeping.
with torch.no_grad():
    outputs = model(**inputs)

# Report the shapes of every tensor captured during the forward pass.
for layer_id, captured in ffn_inputs.items():
    print(f"Layer {layer_id} FFN Inputs:")
    print(f"  First Linear (W1) Input shape: {captured['first_linear'].shape}")
    print(f"  Activation Input shape: {captured['activation'].shape}")
    print(f"  Second Linear (W2) Input shape: {captured['second_linear'].shape}")
    print(f"  Second Linear (W2) Out shape: {captured['second_linear_out'].shape}")
    # Rotation captures are optional — only filled when the hooks were set.
    if captured["rotate_in"] is not None:
        print(f"  Rotate Input shape: {captured['rotate_in'].shape}")
    if captured["rotate_out"] is not None:
        print(f"  Rotate Output shape: {captured['rotate_out'].shape}")
