from transformers import AutoModelForCausalLM, AutoTokenizer
from monitor_tools.host_monitor import cpu_monitor

@cpu_monitor(interval=0.1, filename='log/torch_eager_q_cpu_usage.log')
def main():
    """Load a GPTQ-Int4 quantized Qwen3-8B model on CPU and generate a story.

    CPU usage during the run is sampled every 0.1 s by the ``cpu_monitor``
    decorator and written to ``log/torch_eager_q_cpu_usage.log``.
    """
    # Local path to the quantized checkpoint.
    model_name = "/home/kylin/gjl/model/Qwen3-8B-GPTQ-Int4"

    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map="cpu",       # force all weights onto the CPU (no GPU offload)
        trust_remote_code=True
    )

    inputs = tokenizer("给我讲一个2000字的故事。\n", return_tensors="pt").to(model.device)
    # do_sample=True is required for temperature/top_p to take effect;
    # without it transformers falls back to greedy decoding and ignores both.
    outputs = model.generate(**inputs, max_new_tokens=2000, do_sample=True,
            temperature=0.7, top_p=0.9)
    print(tokenizer.decode(outputs[0]))

# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()