| | import os |
| | import gradio as gr |
| | from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline |
| |
|
# Model to serve: generation-0 checkpoint of the model-collapse experiment.
model_name = "melyssa08/model_collapse_generation_0"

# Hugging Face access token; read from the environment so it is never
# hard-coded. A missing HF_TOKEN fails fast here with a KeyError.
hf_token = os.environ["HF_TOKEN"]
| |
|
# Download (or load from cache) the tokenizer and model weights from the Hub,
# authenticating with the token, then wrap both in a text-generation pipeline.
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
| |
|
def gerar_texto(texto):
    """Generate a continuation of *texto* with the loaded pipeline.

    Args:
        texto: Prompt string typed by the user.

    Returns:
        The generated text (prompt plus continuation) of the single
        returned sequence.
    """
    # max_new_tokens bounds only the generated continuation. The previous
    # max_length=50 counted the prompt tokens as well, so prompts close to
    # 50 tokens left little or no room for generation.
    result = generator(texto, max_new_tokens=50, num_return_sequences=1)
    return result[0]["generated_text"]
| |
|
| | |
# Minimal UI: a prompt box, an output box, and a button wiring them
# through gerar_texto.
with gr.Blocks() as demo:
    input_text = gr.Textbox(label="Digite seu texto")
    output_text = gr.Textbox(label="Texto gerado")
    generate_button = gr.Button("Gerar")
    generate_button.click(gerar_texto, input_text, output_text)

# Bind on all interfaces at the conventional Gradio/Spaces port and also
# request a public share link.
demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
| |
|