File size: 342 Bytes
a604738
 
2c4e93b
 
a604738
fff417b
1
2
3
4
5
6
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Hugging Face Hub id used for both the model weights and the tokenizer —
# kept in one constant so the two can never drift apart.
MODEL_ID = "Qwen/Qwen1.5-1.8B-Chat"

# Load the Qwen1.5 1.8B chat model and its matching tokenizer.
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

# Text-generation pipeline.
# BUG FIX: `temperature` only takes effect when sampling is enabled.
# Without `do_sample=True`, transformers decodes greedily, ignores the
# temperature, and logs a warning — so temperature=0.4 previously did nothing.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=1024,
    do_sample=True,  # required for temperature to be honored
    repetition_penalty=1.2,
    temperature=0.4,
)