import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from monitor_tools.device_monitor import gpu_monitor

@gpu_monitor(interval=0.1, filename='log/torch_compile_gpu_usage.log')
def main() -> None:
    """Load a local Qwen2.5-3B-Instruct model, compile it with torch.compile,
    and run a single text generation, printing the decoded result.

    GPU utilisation is sampled every 0.1 s by the ``gpu_monitor`` decorator
    and written to ``log/torch_compile_gpu_usage.log``.
    """
    # Local model path
    model_path = "/home/kylin/gjl/model/qwen2.5-3b-instruct"

    # Load tokenizer and model
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype=torch.float16,   # half precision to reduce GPU memory usage
        device_map="auto"
    )

    # Speed up inference with torch.compile
    # NOTE: mode can be "default" / "reduce-overhead" / "max-autotune"
    model = torch.compile(model, mode="max-autotune")

    # Input prompt ("Tell me a 2000-character story.")
    prompt = "给我讲一个2000字的故事。\n"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Inference
    with torch.inference_mode():
        outputs = model.generate(
            **inputs,
            max_new_tokens=2000,
            # BUGFIX: without do_sample=True, generation is greedy and
            # temperature/top_p are silently ignored (transformers warns).
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            # Decoder-only models have no pad token by default; setting it
            # suppresses the "pad_token_id not set" warning.
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode and print the output
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))

# Guard the entry point so importing this module does not trigger
# model loading and generation as a side effect.
if __name__ == "__main__":
    main()