from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the OLMo-1B checkpoint and its tokenizer from the Hugging Face Hub
model = AutoModelForCausalLM.from_pretrained("allenai/OLMo-1B-hf")
tokenizer = AutoTokenizer.from_pretrained("allenai/OLMo-1B-hf")

# Build a text-generation pipeline with the desired decoding settings
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=1024,
    repetition_penalty=1.2,
    temperature=0.4,
    do_sample=True,  # temperature only takes effect when sampling is enabled
)
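A minimal usage sketch, assuming a placeholder prompt: the pipeline returns a list of dicts whose "generated_text" field contains the prompt followed by the model's completion.

# Placeholder prompt for illustration; pipe() returns a list of dicts
# with a "generated_text" key holding the prompt plus the completion.
output = pipe("Language models are")
print(output[0]["generated_text"])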