# Scraped Hugging Face Space page header (status: Sleeping) — kept as a comment
# so the file remains valid Python.
import streamlit as st
from transformers import BloomTokenizerFast, BloomForCausalLM, pipeline

MODEL_NAME = 'Naseej/noon-7b'

# Arabic instruction, roughly: "Write an article of several lines about
# artificial intelligence and its developments."
text = "اكتب مقالا من عدة أسطر عن الذكاء الصناعي وتطوراته"
prompt = f'Instruction:\n{text}\n\nResponse:'


@st.cache_resource
def _load_generation_pipeline(model_name: str):
    """Load the noon-7b model and tokenizer once and wrap them in a pipeline.

    `st.cache_resource` keeps the (multi-GB) model in memory across Streamlit
    script reruns instead of reloading it on every user interaction.
    """
    model = BloomForCausalLM.from_pretrained(model_name)
    tokenizer = BloomTokenizerFast.from_pretrained(model_name)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)


generation_pipeline = _load_generation_pipeline(MODEL_NAME)

# We recommend the provided hyperparameters for generation,
# but encourage you to try different values.
# NOTE(review): with do_sample=False generation uses deterministic beam
# search, so top_p/top_k have no effect — kept to match the published recipe.
response = generation_pipeline(
    prompt,
    pad_token_id=generation_pipeline.tokenizer.eos_token_id,
    do_sample=False,
    num_beams=4,
    max_length=500,
    top_p=0.1,
    top_k=20,
    repetition_penalty=3.0,
    no_repeat_ngram_size=3,
)[0]['generated_text']

st.write(response)