File size: 883 Bytes
0622b2f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7e78132
 
0622b2f
 
7e78132
9add10d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Hugging Face Hub identifier of the checkpoint used by this demo.
MODEL_NAME = "bigscience/bloom-560m"

# Load tokenizer and model weights once, at import time.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# Define a function to generate text
def generate_text(prompt: str) -> str:
    """Generate a continuation of *prompt* with the loaded causal LM.

    Args:
        prompt: Input text to continue.

    Returns:
        The decoded output (prompt included), capped at 100 tokens total
        by ``max_length``; special tokens are stripped.
    """
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # Inference only: disable gradient tracking to avoid building an
    # autograd graph (saves memory and time during generation).
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_length=100,  # total-length cap, prompt tokens included
            num_return_sequences=1,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)

# Build the Gradio UI.
# Bug fix: gr.Textbox's first positional argument is the default *value*,
# so the previous `gr.Textbox("text", ...)` pre-filled both boxes with the
# literal string "text". The component type is already Textbox; the stray
# positional argument is removed.
interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Digite seu texto aqui:", lines=5),  # multi-line prompt input
    outputs=gr.Textbox(label="Texto Gerado:"),
)

# Launch the local web server (blocks until the server is stopped).
interface.launch()