from transformers import LlamaForCausalLM, LlamaTokenizer
import torch
from utils.model_path_getter import load_yaml

# Load model/offload paths from the project config.
# NOTE: load the YAML once and reuse it — the original called load_yaml()
# twice, parsing the same config file for each key.
config = load_yaml()
model_path = config["model_path"]
offload_path = config["offload_path"]

# Load tokenizer and model. device_map="auto" shards the model across
# available devices; layers that don't fit are offloaded to offload_path.
tokenizer = LlamaTokenizer.from_pretrained(model_path)
model = LlamaForCausalLM.from_pretrained(
    model_path,
    device_map="auto",
    offload_folder=offload_path,
    torch_dtype=torch.float16,
)
model.eval()  # disable dropout etc. for deterministic evaluation

# Prepare input data.
prompts = ["This is a test sentence."]
inputs = tokenizer(prompts, return_tensors="pt", truncation=True)

# Make sure input tensors live on the same device as the model's first shard.
inputs = {key: value.to(model.device) for key, value in inputs.items()}

# Labels for causal-LM loss: clone (avoid aliasing input_ids) and mask
# padding positions with -100 so they are ignored by the cross-entropy.
# With the current single unpadded prompt this mask is a no-op, but it
# makes the computation correct for padded batches too.
labels = inputs["input_ids"].clone()
if "attention_mask" in inputs:
    labels[inputs["attention_mask"] == 0] = -100
inputs["labels"] = labels


# Disable gradient computation and compute perplexity.
with torch.no_grad():
    outputs = model(**inputs)
    loss = outputs.loss  # mean cross-entropy over non-masked tokens
    ppl = torch.exp(loss).item()  # perplexity = exp(mean NLL)

# Print the perplexity.
print(f"Perplexity: {ppl}")