# -*- coding: utf-8 -*-
# time: 2025/4/10 15:29
# file: 直接使用Transformers_test.py
# author: hanson
from transformers import AutoModelForCausalLM, AutoTokenizer

# Path to the fine-tuned Qwen2-0.5B checkpoint (defined once so the model
# and tokenizer are guaranteed to load from the same directory).
MODEL_DIR = "./qwen2-0.5b-finetuned"

# Load the fine-tuned causal LM. device_map="auto" lets accelerate place
# the weights on the best available device (GPU if present, else CPU).
model = AutoModelForCausalLM.from_pretrained(
    MODEL_DIR,
    device_map="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)

# Tokenize the prompt and move the input tensors to the model's device.
# Without .to(model.device) this crashes with a device-mismatch error
# whenever device_map="auto" placed the model on a GPU, because the
# tokenizer always returns CPU tensors.
inputs = tokenizer("Instruction: 保持健康的三个提示。用中文\nInput: 用通俗语言\nOutput:", return_tensors="pt").to(model.device)

# Greedy/default generation, capped at 200 new tokens; decode and print
# the full sequence (prompt + completion) without special tokens.
outputs = model.generate(**inputs, max_new_tokens=200)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))