import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel


# Load the base model and tokenizer
model_name_or_path = '/changzheng/PracticalTraining/lyj/lingdongai/models/model/Qwen/Qwen2___5-7B-Instruct'
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    torch_dtype="auto",   # use the checkpoint's native dtype instead of fp32 to save memory
    device_map="auto",    # place the model on available GPUs; requires `accelerate`
)

# Load the fine-tuned LoRA adapter on top of the base model
peft_model = PeftModel.from_pretrained(model, "/path/to/new_model")
peft_model.eval()  # inference mode: disables dropout in the adapter layers

# Run inference or evaluation with the adapted model
# (for chat-style prompts, Qwen2.5-Instruct is normally used with tokenizer.apply_chat_template)
input_text = "Your input here"
inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
with torch.no_grad():  # no gradients needed for generation
    outputs = peft_model.generate(**inputs, max_new_tokens=256)  # adjust max_new_tokens as needed

print(tokenizer.decode(outputs[0], skip_special_tokens=True))
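
# Optional follow-up (a sketch, not part of the original workflow): merge the
# LoRA weights into the base model so it can be saved and served without PEFT.
# "/path/to/merged_model" is a hypothetical output directory.
merged_model = peft_model.merge_and_unload()
merged_model.save_pretrained("/path/to/merged_model")
tokenizer.save_pretrained("/path/to/merged_model")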