from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from monitor_tools.device_monitor import gpu_monitor


@gpu_monitor(interval=0.1, filename='log/torch_gpu_usage.log')
def main() -> None:
    """Load a local Qwen2.5-3B-Instruct model, generate a story from a fixed
    Chinese prompt, and print the decoded text.

    GPU utilization is sampled every 0.1 s by the ``gpu_monitor`` decorator
    and written to ``log/torch_gpu_usage.log``.
    """
    # Model path (change to your local download location)
    model_path = "/home/kylin/gjl/model/qwen2.5-3b-instruct"

    # Load tokenizer and model
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float16,   # switch to torch.float32 or bfloat16 if VRAM allows/requires
        device_map="auto",
    )

    # Input prompt (asks for a ~2000-character story)
    prompt = "给我讲一个2000字的故事。\n"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Inference. NOTE: temperature/top_p only take effect when sampling is
    # enabled; without do_sample=True, generate() falls back to greedy
    # decoding and silently ignores both parameters.
    outputs = model.generate(
        **inputs,
        max_new_tokens=2000,
        do_sample=True,
        temperature=0.7,
        top_p=0.9,
    )

    # Decode and print the generated text
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))

# Guard the entry point so importing this module does not immediately
# trigger a multi-GB model load and a long generation run.
if __name__ == "__main__":
    main()