# Hugging Face Spaces page header captured by extraction (not code): "Spaces: Sleeping".
import torch
from transformers import AutoTokenizer, AutoModel, BloomTokenizerFast, BloomForCausalLM
import gradio as gr

# Checkpoint used throughout the app: BLOOM 1.7B causal language model.
modelo = 'bigscience/bloom-1b7'
# Module-level shared state; from_pretrained downloads weights on first run.
tokenizer = AutoTokenizer.from_pretrained(modelo)
model = BloomForCausalLM.from_pretrained(modelo)
def generator(prompt, max_length, temp):
    """Generate a sampled continuation of *prompt* with the module-level BLOOM model.

    Args:
        prompt: Text to condition generation on.
        max_length: Cap on the TOTAL sequence length in tokens — the prompt's
            tokens count against it, not just the newly generated ones.
        temp: Sampling temperature (higher = more random).

    Returns:
        The decoded text: prompt plus generated continuation.
    """
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    gen_tokens = model.generate(
        input_ids,
        do_sample=True,
        temperature=temp,
        max_length=max_length,
    )
    # Batch size is always 1 here, so take the single decoded sequence.
    gen_text = tokenizer.batch_decode(gen_tokens)[0]
    return gen_text
def run(prompt, max_len, temp):
    """Gradio submit callback: generate text and return it for the UI.

    Args:
        prompt: User-entered input text.
        max_len: Maximum total token length forwarded to the model.
        temp: Sampling temperature forwarded to the model.

    Returns:
        Tuple of (generated text for the Output textbox, "" for the
        "Log information" Markdown component — empty means no error).
    """
    output = generator(prompt, max_len, temp)
    print(output)  # echo to the server log for debugging
    return (output, "")
if __name__ == "__main__":
    # NOTE(review): original indentation was destroyed by extraction; the
    # nesting below is reconstructed from standard Gradio Blocks layout
    # (one Row holding an input Column and an output Column) — confirm
    # against the deployed Space.
    demo = gr.Blocks()
    with demo:
        gr.Markdown(modelo)
        with gr.Row():
            with gr.Column():
                text = gr.Textbox(
                    label="Input",
                    value=" ",  # should be set to " " when plugged into a real API
                )
                # Slider drives max_length (total tokens incl. prompt), despite the label.
                tokens = gr.Slider(1, 250, value=50, step=1, label="Tokens to generate")
                temp = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Temperature")
                with gr.Row():
                    submit = gr.Button("Submit")
            with gr.Column():
                text_error = gr.Markdown(label="Log information")
                text_out = gr.Textbox(label="Output")
        submit.click(
            run,
            inputs=[text, tokens, temp],
            outputs=[text_out, text_error],
        )
    demo.launch()