from transformers import LlamaForCausalLM, LlamaTokenizer
import torch
from transformers import BitsAndBytesConfig
from utils.model_path_getter import load_yaml

# Load the checkpoint directory from the project's YAML config.
# NOTE(review): assumes the YAML contains a "model_path" key — a KeyError
# here means the config file is missing it; verify against load_yaml().
model_path = load_yaml()["model_path"]


# 4-bit NF4 quantization config for bitsandbytes.
# NOTE(review): currently UNUSED — the `quantization_config=quant_config`
# argument in the from_pretrained call below is commented out, so the
# model actually loads unquantized. Either pass it or delete this block.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,  # quantize weights to 4 bits
    bnb_4bit_use_double_quant=True,  # second quantization pass over the quantization constants
    bnb_4bit_quant_type="nf4",  # NormalFloat4 quantization data type
    bnb_4bit_compute_dtype=torch.float16  # do matmuls in fp16
)

# Load the tokenizer from the same checkpoint directory.
tokenizer = LlamaTokenizer.from_pretrained(model_path)

# Load the model with automatic device placement.
# NOTE(review): quantization_config is commented out, so despite the
# quant_config built above the model loads in full precision here.
model = LlamaForCausalLM.from_pretrained(
    model_path,
    # quantization_config=quant_config,
    device_map="auto",  # let accelerate spread layers across available devices

)



# Inputs captured from the hooked down_proj module, one tensor per forward pass.
second_line_input = []


def down_proj_hook(module, input, output):
    """Forward hook: record the tensor fed into down_proj.

    Appends the first positional input, detached and moved to CPU, to the
    module-level ``second_line_input`` list.

    Args:
        module: the hooked nn.Module (unused).
        input: tuple of positional inputs to the module's forward.
        output: the module's forward output (unused).
    """
    second_line_input.append(input[0].detach().cpu())
    # Bug fix: the original printed "Hook added." here, but this function
    # runs on every forward pass — the hook is *registered* elsewhere.
    # The message now describes what actually happened.
    print("down_proj input captured.")

# Dynamically quantize all nn.Linear layers to int8.
# BUG FIX: torch.quantization.quantize_dynamic (default inplace=False)
# returns a *copy* of the model in which every nn.Linear is swapped for a
# dynamically quantized replacement. The original code registered the
# forward hook BEFORE quantizing, so the hook lived on a module that was
# discarded and never fired, making the "Hook did not capture any data."
# check below always raise. Quantize first, then hook the live module.
model = torch.quantization.quantize_dynamic(
    model,               # model to quantize (a quantized copy is returned)
    {torch.nn.Linear},   # restrict quantization to Linear layers
    dtype=torch.qint8,   # 8-bit dynamic quantization
)

# Capture the input of layer 0's mlp.down_proj; no need to loop over all
# layers just to pick index 0.
model.model.layers[0].mlp.down_proj.register_forward_hook(down_proj_hook)

# Run a single prompt through the model; the forward hook fills
# second_line_input as a side effect of the forward pass.
prompts = ["你好"]

inputs = tokenizer(prompts, return_tensors="pt")
device = model.device
inputs = {name: tensor.to(device) for name, tensor in inputs.items()}

with torch.no_grad():
    outputs = model(**inputs)

# The hook must have fired at least once during the forward pass.
if not second_line_input:
    raise ValueError("Hook did not capture any data.")

# Report what was captured: count, plus shape/dtype of the first tensor.
captured = second_line_input[0]
print(len(second_line_input))
print(captured.shape)
print(captured.dtype)

# Decode the model output (currently disabled):
# print(tokenizer.decode(outputs[0], skip_special_tokens=True))
