from peft import PeftModel
from transformers import AutoModelForCausalLM
from transformers import AutoTokenizer
from transformers import pipeline

base_model_name = "Qwen/Qwen2.5-0.5B"
peft_model_path = "./qwen-lora-adapter-small"

# Load the base model. trust_remote_code is needed for some Qwen revisions
# that ship custom modeling code.
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_name,
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(base_model_name)

# Attach the LoRA adapter, then actually fold its weights into the base
# model: the original code only wrapped the model in a PeftModel and never
# performed the merge its comment promised. merge_and_unload() produces a
# plain CausalLM with identical outputs and no adapter overhead at
# inference time.
model = PeftModel.from_pretrained(base_model, peft_model_path)
model = model.merge_and_unload()

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)

# Both prompts ask for an arithmetic expression over the same numbers
# (one in English, one in Chinese).
prompts = [
    "Using the numbers [19, 36, 55, 7], create an equation that equals 65. You can use basic arithmetic operations (+, -, *, /)",
    "用[19, 36, 55, 7]得到 65的数学表达式",
]

for prompt in prompts:
    # Without an explicit token budget the pipeline falls back to the
    # model's default generation config (often max_length=20), which
    # truncates the answer mid-sentence for prompts this long.
    result = pipe(prompt, max_new_tokens=256)
    print(result)