buffalo / app.py
raoufjat's picture
Update app.py
c4d5c18 verified
raw
history blame
510 Bytes
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hub identifier for the checkpoint; kept in one place so the tokenizer and
# model are guaranteed to come from the same repo.
MODEL_ID = "Omartificial-Intelligence-Space/Arabic-QWQ-32B-Preview"


def main() -> None:
    """Load the Arabic-QWQ model, generate a short continuation, and print it."""
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    # torch_dtype="auto" keeps the checkpoint's native precision instead of
    # upcasting a 32B-parameter model to float32 (roughly double the memory).
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype="auto")

    inputs = tokenizer("Generate text example:", return_tensors="pt")
    # max_new_tokens bounds only the *generated* tokens. The original
    # max_length=50 counted the prompt tokens too, so longer prompts would
    # silently shrink (or eliminate) the generated continuation.
    outputs = model.generate(**inputs, max_new_tokens=50)
    # skip_special_tokens drops EOS/pad markers from the printed text.
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))


if __name__ == "__main__":
    main()