import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Load the tokenizer and model once at startup (BLOOM 560M, a small
# multilingual causal LM) so each request only pays for generation.
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m")


def generate_text(prompt):
    """Generate a continuation of *prompt* with BLOOM-560M.

    Args:
        prompt: The user-supplied text to continue.

    Returns:
        The decoded generation (prompt included), special tokens stripped.
    """
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # Inference only — disable autograd to save memory and time.
    with torch.no_grad():
        output = model.generate(input_ids, max_length=100, num_return_sequences=1)
    return tokenizer.decode(output[0], skip_special_tokens=True)


# Build the Gradio UI. NOTE: the original code passed "text" as the first
# positional argument of gr.Textbox, which is `value` — it pre-filled both
# boxes with the literal string "text" (a leftover from the legacy
# gr.inputs.Textbox API). The positional is dropped here.
# UI labels are intentionally in Portuguese.
interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Digite seu texto aqui:", lines=5),
    outputs=gr.Textbox(label="Texto Gerado:"),
)

# Only launch the web server when run as a script, not on import.
if __name__ == "__main__":
    interface.launch()