from transformers import LlamaForCausalLM, LlamaTokenizer
import torch
from utils.model_path_getter import load_yaml

# ✅ Load model/offload paths from the project YAML config.
# Read the config ONCE instead of calling load_yaml() twice (each call
# re-parses the file).
_config = load_yaml()
model_path = _config["model_path"]
offload_path = _config["offload_path"]

# ✅ Load the tokenizer
tokenizer = LlamaTokenizer.from_pretrained(model_path)

# ✅ Load the (quantized) model, sharding across available devices
model = LlamaForCausalLM.from_pretrained(
    model_path,
    device_map="auto",  # automatic device placement
    offload_folder=offload_path,
    torch_dtype=torch.float16,
)

# ✅ Load the rotation matrix onto the GPU as float16.
# The previous chain (.to(fp16).to(fp32).to(cuda).to(fp16)) is equivalent:
# fp16 -> fp32 -> fp16 round-trips exactly, so one conversion suffices.
rotateMatrix = torch.load('../1-10-1 (1).pth', weights_only=True).to(
    device=torch.device('cuda'), dtype=torch.float16
)

# # ✅ (disabled) modify down_proj weights and inputs per layer
# for i, layer in enumerate(model.model.layers):
#     if i == 0:
#         print(i,'-w')



# ✅ Hook function: inspect (and pass through) the input to down_proj
def modify_down_proj_input(module, input):
    """Debug pre-hook for down_proj: log the incoming activation tuple
    (length and shape of the first tensor) and pass it through unchanged."""
    first = input[0]
    for item in ('-i', len(input), first.shape):
        print(item)
    return (first,)


# ✅ Register debug hooks on the down_proj layer (pre-hook kept disabled)
# model.model.layers[0].mlp.down_proj.register_forward_pre_hook(modify_down_proj_input)
c = 0
def second_line_input_hook(module, input, output):
    """Debug forward hook: print a running call counter, the shape of the
    first input tensor, and a 'hook' marker on every invocation."""
    global c
    call_index = c
    c = call_index + 1
    for line in (call_index, input[0].shape, 'hook'):
        print(line)

# ✅ Attach the debug forward hook to the first decoder layer's down_proj.
# The previous version looped over every layer and guarded with an
# always-empty `layer_set` membership test — dead code that could never
# exclude anything — just to select index 0. Register directly instead.
model.model.layers[0].mlp.down_proj.register_forward_hook(second_line_input_hook)
print("hook added")


# ✅ Prepare the input batch
prompts = [
    "Artificial intelligence is transforming the way we interact with technology."
]
inputs = tokenizer(prompts, return_tensors="pt", truncation=True, max_length=128)

# ✅ Move tensors to the model's device and reuse input_ids as LM labels
inputs = {key: value.to(model.device) for key, value in inputs.items()}
inputs["labels"] = inputs["input_ids"]

# ✅ Run inference without gradient tracking
with torch.no_grad():
    outputs = model(**inputs)

# ✅ Perplexity: `outputs.loss` from a HF causal-LM forward is already the
# MEAN per-token cross-entropy, so PPL = exp(loss) directly. The previous
# first computation divided the mean loss by the sequence length again,
# double-normalizing and understating the perplexity — removed.
loss = outputs.loss
ppl = torch.exp(loss).item()
print(f"Perplexity: {ppl}")

# # ✅ (disabled) generate text
# with torch.no_grad():
#     generated_ids = model.generate(
#         inputs["input_ids"],
#         max_length=50
#     )
#
# # ✅ (disabled) decode the generated text
# with torch.no_grad():
#     generated_texts = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
#
# print("prompts:", prompts)
# # ✅ (disabled) print the generated text
# print("Generated text:", generated_texts)
