from transformers import BloomForCausalLM, AutoTokenizer
import gradio as gr

# Load the fine-tuned BLOOM causal language model and its tokenizer from a
# local checkpoint directory. (The original comment said "GPT2 Open AI
# Transformer Model", but the code actually loads a BLOOM model — fixed.)
# NOTE: this runs at import time; the checkpoint directory must exist.
model_path = './model_path_BLOOM_vf'
model = BloomForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)


def generate_text(inp):
    """Generate a text continuation for the given prompt.

    Parameters
    ----------
    inp : str
        The starting words of the sentence to continue (e.g. "Los jóvenes").

    Returns
    -------
    str
        The decoded generated text (prompt included), special tokens stripped.
    """
    # Encode the prompt into token ids as a PyTorch tensor.
    input_data = tokenizer.encode(inp, return_tensors='pt')
    # Sample up to 100 tokens with nucleus (top-p) sampling; top_k=0 disables
    # the top-k filter so only the top_p=0.92 cutoff constrains sampling.
    output = model.generate(
        input_data,
        max_length=100,
        do_sample=True,
        top_k=0,
        top_p=0.92,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)


# Build and launch the Gradio demo UI (single textbox in, generated text out).
textbox = gr.Textbox(
    label="Introduce una o más palabras para generar el texto:",
    placeholder="Por ejemplo: Los jóvenes",
    lines=1,
)

gr.Interface(
    fn=generate_text,
    inputs=textbox,
    outputs="text",
    examples=[
        ["Los jóvenes"],
        ["La economía ha"],
        ["Los conservadores"],
        ["Hemos trabajado para"],
        ["El crimen organizado"],
    ],
).launch()