# Qwen LoRA inference example (loads a finetuned adapter and runs one chat turn).
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Output directory of the finetuning run: holds both the tokenizer files and
# the LoRA adapter weights (plus a reference to the base model).
ADAPTER_DIR = '/home/Qwen/output_qwen'

tokenizer = AutoTokenizer.from_pretrained(ADAPTER_DIR)

# AutoPeftModelForCausalLM resolves the base model from the adapter config,
# loads it, and attaches the LoRA weights on top.
model = AutoPeftModelForCausalLM.from_pretrained(
    ADAPTER_DIR,
    device_map="auto",       # shard/place layers across available devices
    trust_remote_code=True,  # Qwen ships custom modeling code in the repo
).eval()

# Bug fix: the original set `top_p = 0`, which is outside nucleus sampling's
# valid range (0, 1] and only degraded to greedy decoding because the sampler
# keeps a minimum of one token. Disabling sampling is the documented way to
# get deterministic (greedy) output.
model.generation_config.do_sample = False

prompt = '青岛海边钓鱼需要特别注意什么?'
# Qwen's chat() helper returns (response_text, updated_history).
response, history = model.chat(tokenizer, prompt, history=None)
print(response)