import torch
from transformers import AutoTokenizer
from modeling_qwen2 import Qwen2ForCausalLM

# Local checkpoint of Qwen2.5-3B-Instruct from the shared model store.
MODEL_PATH = "/share/models/Qwen2.5-3B-Instruct/"

# Load the project's custom Qwen2 causal LM in half precision, pinned to the
# first GPU, using PyTorch's scaled-dot-product-attention backend; switch to
# eval mode immediately since this script only does inference.
model = Qwen2ForCausalLM.from_pretrained(
    MODEL_PATH,
    torch_dtype=torch.float16,
    device_map="cuda:0",
    attn_implementation="sdpa",
).eval()

# Tokenizer ships alongside the weights in the same directory.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)

# Prompt built from the model's raw chat markers.
# NOTE(review): prefer tokenizer.apply_chat_template(...) so the template is
# guaranteed to match the checkpoint's expected format — confirm with the
# model's chat template before changing.
context = "<|User|>写一个才子佳人的故事。<|/User|>\n\n<|Assistant|>"
inputs = tokenizer(context, return_tensors="pt").to(model.device)
output = model.generate(
    inputs.input_ids,
    # Pass the attention mask explicitly: without it transformers warns and,
    # with padded batches, may attend to pad tokens.
    attention_mask=inputs.attention_mask,
    max_new_tokens=512,
    # BUG FIX: temperature/top_k/top_p are ignored unless sampling is enabled;
    # without do_sample=True the call silently falls back to the generation
    # config's default decoding strategy.
    do_sample=True,
    temperature=0.9,
    top_k=50,
    top_p=0.95,
)[0]

# Decode only the newly generated continuation, skipping the prompt tokens
# and any special chat-marker tokens.
output_text = tokenizer.decode(
    output[inputs.input_ids.shape[-1] :], skip_special_tokens=True
)
print(output_text)
