from transformers import LlamaForCausalLM, LlamaTokenizer
import torch
from utils.model_path_getter import load_yaml

# Resolve model and offload paths from the YAML config.
# Load the config once instead of parsing the YAML file twice.
_config = load_yaml()
model_path = _config["model_path"]
offload_path = _config["offload_path"]

# Load the tokenizer.
tokenizer = LlamaTokenizer.from_pretrained(model_path)

# Load the model in fp16; device_map="auto" places layers across the
# available devices and spills overflow weights to offload_folder on disk.
model = LlamaForCausalLM.from_pretrained(
    model_path,
    device_map="auto",  # automatically assign layers to devices
    offload_folder=offload_path,
    torch_dtype=torch.float16,
)

# Load the rotation matrix as fp16 on CUDA (used below to rotate weights/inputs).
# NOTE(review): path and device are hard-coded; assumes a CUDA device is present.
rotateMatrix = torch.load('../../1-10-1 (1).pth', weights_only=True).to(torch.float16).to(torch.device('cuda'))

# Run the numerical sanity checks on a float32 copy: torch.det and
# torch.linalg.inv do not support Half and would raise on the fp16 tensor.
check_matrix = rotateMatrix.to(torch.float32)
identity_matrix = torch.eye(check_matrix.size(0), dtype=torch.float32, device=check_matrix.device)

# Orthogonality check: R @ R^T should be the identity.  atol=1e-3 allows for
# the fp16 rounding the matrix went through on load (1e-5 is below fp16 precision).
is_orthogonal = torch.allclose(torch.matmul(check_matrix, check_matrix.T), identity_matrix, atol=1e-3)
print(f"Is rotateMatrix orthogonal? {is_orthogonal}")

# Determinant check: a rotation matrix should have determinant ±1.
determinant = torch.det(check_matrix).item()
print(f"Determinant of rotateMatrix: {determinant}")

# For an orthogonal matrix the inverse equals the transpose.
inverse_matrix = torch.linalg.inv(check_matrix)
is_inverse_equal_transpose = torch.allclose(inverse_matrix, check_matrix.T, atol=1e-3)
print(f"Is the inverse of rotateMatrix equal to its transpose? {is_inverse_equal_transpose}")

# Peek at the top-left 5x5 corner of the matrix.
print("First 5 rows of rotateMatrix:")
print(rotateMatrix[:5, :5])

# Rotate every down_proj weight in place: W' = W @ R^T, computed in fp32 and
# stored back as fp16.  Together with the forward pre-hook below (which feeds
# the layer x @ R) this wraps down_proj in a rotation pair.
# NOTE(review): the pair cancels exactly only if the rotation matrix is
# symmetric orthogonal — confirm against how the matrix was generated.
for i, layer in enumerate(model.model.layers):
    print(i, '-')
    down_proj = layer.mlp.down_proj

    # Compute in float32 for accuracy; torch.matmul does not mix Half and
    # Float operands, so the rotation matrix must be cast to float32 too.
    original_weight = down_proj.weight.data.to(torch.float32)

    # Move the rotation to the weight's device: with device_map="auto" (and
    # disk offload) a layer is not guaranteed to live on the same device as
    # rotateMatrix.
    rotation = rotateMatrix.T.to(device=original_weight.device, dtype=torch.float32)

    down_proj.weight.data = torch.matmul(original_weight, rotation).to(torch.float16)


# Forward pre-hook: rotate the activation entering down_proj.
def modify_down_proj_input(module, input):
    """Rotate the incoming activation by the global rotateMatrix.

    Computes x @ R in float32 for accuracy, then casts back to float16.
    Returns a 1-tuple so the hook replaces the layer's positional args.
    """
    original_input = input[0].to(torch.float32)

    # Match device and dtype of the activation: torch.matmul does not mix
    # Half and Float operands, and with device_map="auto" the activation may
    # live on a different device than rotateMatrix.
    rotation = rotateMatrix.to(device=original_input.device, dtype=torch.float32)

    modified_input = torch.matmul(original_input, rotation).to(torch.float16)
    return (modified_input,)


# Attach the input-rotation pre-hook to every layer's down_proj.
for layer_idx, decoder_layer in enumerate(model.model.layers):
    print(layer_idx)
    decoder_layer.mlp.down_proj.register_forward_pre_hook(modify_down_proj_input)

# Build a single-prompt batch and move every tensor onto the model's device.
prompts = [
    "Artificial intelligence is transforming the way we interact with technology."
]
encoded = tokenizer(prompts, return_tensors="pt", truncation=True, max_length=128)
inputs = {name: tensor.to(model.device) for name, tensor in encoded.items()}

# Token ids must stay integer (long); labels mirror the inputs so the
# forward pass reports a language-modeling loss.
inputs["input_ids"] = inputs["input_ids"].long()
inputs["labels"] = inputs["input_ids"]

# Forward pass without gradients to obtain the language-modeling loss.
with torch.no_grad():
    outputs = model(**inputs)

# Perplexity: the Hugging Face causal-LM loss is already the mean
# cross-entropy per (shifted) token, so PPL = exp(loss).  The previous
# version additionally divided the loss by the sequence length, which
# double-normalizes and understates perplexity.
ppl = torch.exp(outputs.loss).item()
print(f"Perplexity: {ppl}")

# Generate a continuation (max_length caps prompt + generated tokens).
with torch.no_grad():
    generated_ids = model.generate(
        inputs["input_ids"],
        # Pass the tokenizer's attention mask explicitly so generate() does
        # not have to infer it (avoids the missing-mask warning and handles
        # padding correctly).
        attention_mask=inputs["attention_mask"],
        max_length=50,
    )

# Decode the generated ids back to text, dropping special tokens.
generated_texts = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)

print("Generated text:", generated_texts)
