|
from transformers import BloomForCausalLM, AutoTokenizer |
|
import gradio as gr |
|
|
|
|
|
# Directory holding the fine-tuned BLOOM checkpoint (weights + tokenizer files).
MODEL_DIR = './model_path_BLOOM_vf'

# Load the causal-LM weights and the matching tokenizer from the same checkpoint,
# so vocabulary and special tokens stay consistent between the two.
model = BloomForCausalLM.from_pretrained(MODEL_DIR)
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
|
|
|
def generate_text(inp):
    """Generate a text continuation for the given prompt with the BLOOM model.

    Args:
        inp: Prompt string typed by the user.

    Returns:
        The decoded generated text (prompt included), with special tokens removed.
    """
    # Tokenize via __call__ rather than .encode() so we also get the
    # attention_mask; generate() warns and may misbehave without it.
    encoded = tokenizer(inp, return_tensors='pt')

    output = model.generate(
        encoded['input_ids'],
        attention_mask=encoded['attention_mask'],
        max_length=100,           # total length cap, prompt tokens included
        do_sample=True,           # sample instead of greedy decoding
        top_k=0,                  # 0 disables top-k filtering
        top_p=0.92,               # nucleus sampling threshold
        # Explicit pad token avoids the "pad_token_id not set" warning;
        # falls back to EOS, the standard choice for causal LMs.
        pad_token_id=tokenizer.pad_token_id
        if tokenizer.pad_token_id is not None
        else tokenizer.eos_token_id,
    )

    # output has shape (1, seq_len); decode the single generated sequence.
    return tokenizer.decode(output[0], skip_special_tokens=True)
|
|
|
# Input widget. NOTE: the original strings were mojibake (UTF-8 text decoded
# as GBK, e.g. "m谩s" for "más"); they are restored to proper Spanish here.
textbox = gr.Textbox(
    label="Introduce una o más palabras para generar el texto:",
    placeholder="Por ejemplo: Los jóvenes",
    lines=1,
)

# Build and launch the demo: one text input -> one text output, with a few
# clickable example prompts (accents fixed from the garbled originals).
gr.Interface(
    fn=generate_text,
    inputs=textbox,
    outputs="text",
    examples=[
        ["Los jóvenes"],
        ["La economía ha"],
        ["Los conservadores"],
        ["Hemos trabajado para"],
        ["El crimen organizado"],
    ],
).launch()