from transformers import LlamaForCausalLM, LlamaTokenizer
import torch
from utils.model_path_getter import load_yaml

# Load model/offload paths from the YAML config — read it once, not twice.
_config = load_yaml()
model_path = _config["model_path"]
offload_path = _config["offload_path"]

# Load the tokenizer.
tokenizer = LlamaTokenizer.from_pretrained(model_path)

# Load the model in half precision; accelerate shards it across available
# devices and spills overflow weights to the offload folder.
model = LlamaForCausalLM.from_pretrained(
    model_path,
    device_map="auto",  # automatic device placement
    offload_folder=offload_path,
    torch_dtype=torch.float16,
)

# Optional scaling factors applied with the rotation (1 = no rescaling).
scale_forward = 1
scale_back = 1

# Load the rotation matrix and move it to the GPU.
# NOTE(review): the checkpoint path is hard-coded and relative — confirm it
# resolves from the intended working directory.  The matrix is presumably
# orthogonal (R @ R.T ≈ I); verify offline if in doubt, since the weight/input
# rotations below only cancel for orthogonal R.
rotateMatrix = (torch.load('../../1-10-1 (1).pth', weights_only=True)
                .to(torch.device('cuda')))

# Cast to float16 to match the model weights (done after the device move to
# preserve the original numeric behavior).
rotateMatrix = rotateMatrix.to(torch.float16)

# Fold the rotation into layer 0's down_proj weight.  Combined with the
# forward pre-hook that maps the input x -> x @ R, rotating the weight
# W -> W @ R leaves the layer output unchanged for orthogonal R, because
# (x R) @ (W R).T = x R R.T W.T = x W.T.
# Only layer 0 is modified, so index it directly instead of scanning all layers.
down_proj = model.model.layers[0].mlp.down_proj
original_weight = down_proj.weight.data

# Print the original weight statistics for comparison.
print(f"Original weight mean: {original_weight.mean().item()}, std: {original_weight.std().item()}")

# Insert the rotation into the weight, with optional rescaling.
modified_weight = torch.matmul(original_weight, rotateMatrix)
down_proj.weight.data = modified_weight * scale_back

# Print the modified weight statistics.
print(f"Modified weight mean: {down_proj.weight.data.mean().item()}, std: {down_proj.weight.data.std().item()}")


# Forward pre-hook: rotate down_proj's input before the layer runs.
def modify_down_proj_input(module, input):
    """Rotate the hooked module's input by the module-level ``rotateMatrix``.

    Applies ``x -> (x @ rotateMatrix) * scale_forward``.  Together with the
    rotated ``down_proj`` weight this should leave the layer output
    (numerically) unchanged when ``rotateMatrix`` is orthogonal.

    Args:
        module: the hooked ``nn.Module`` (unused).
        input: tuple of positional inputs; ``input[0]`` is the activation.

    Returns:
        A one-element tuple with the rotated activation, which replaces the
        module's input.
    """
    original_input = input[0]
    modified_input = torch.matmul(original_input, rotateMatrix) * scale_forward
    print('hook_input')
    # Debug: compare input statistics before and after the rotation.
    print(f"Original input mean: {original_input.mean().item()}, std: {original_input.std().item()}")
    print(f"Modified input mean: {modified_input.mean().item()}, std: {modified_input.std().item()}")

    return (modified_input,)




# Attach the pre-hook so down_proj's input is rotated on every forward pass.
model.model.layers[0].mlp.down_proj.register_forward_pre_hook(modify_down_proj_input)

# Build the evaluation prompt batch.
prompts = [
    "Artificial intelligence is transforming the way we interact with technology."
]
inputs = tokenizer(prompts, return_tensors="pt", truncation=True, max_length=50)

# Move every input tensor onto the model's device.
inputs = {name: tensor.to(model.device) for name, tensor in inputs.items()}
# Reuse the input ids as labels so the model reports a language-modeling loss.
inputs["labels"] = inputs["input_ids"]

# Single forward pass with gradient tracking disabled.
with torch.no_grad():
    outputs = model(**inputs)

# outputs.loss is already the mean cross-entropy per (shifted) token, so
# exponentiating it directly yields the perplexity — no further
# normalization by token count is needed here.
loss = outputs.loss
ppl = torch.exp(loss).item()
print(f"Perplexity: {ppl}")
