import gradio as gr
import random
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_name = 'MR1B4RR4/Spanish_lyrics_model'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)


def poema(text, num_lines):
    """Generate a Spanish poem by iteratively extending the seed text.

    Each iteration feeds the poem-so-far back into the model and appends
    the first decoded sample as a new line.

    Args:
        text: seed text the poem starts from.
        num_lines: number of lines to generate. Arrives as a *string*
            from the Gradio Textbox input, so it is coerced to int here.

    Returns:
        The seed text followed by ``num_lines`` newline-separated
        generated lines.
    """
    # BUG FIX: the Textbox widget delivers a string; the original code
    # passed it straight to range() which raises TypeError at runtime.
    num_lines = int(num_lines)
    poem = text
    for _ in range(num_lines):
        inputs = tokenizer(poem, return_tensors="pt")
        outputs = model.generate(
            inputs["input_ids"],
            do_sample=True,
            max_length=30,
            repetition_penalty=20.0,  # strongly discourage repeated lines
            top_k=50,
            top_p=0.92,
        )
        detok_outputs = [
            tokenizer.decode(x, skip_special_tokens=True) for x in outputs
        ]
        # Only the first sampled sequence is used as the next line.
        poem += '\n' + detok_outputs[0]
    return poem


iface = gr.Interface(
    fn=poema,
    title='Generation of Spanish poems',
    description=""" Descripcion........ """,
    theme='huggingface',
    inputs=[
        gr.inputs.Textbox(lines=4, placeholder='texto inicial',
                          label='Texto inicial'),
        gr.inputs.Textbox(lines=4, placeholder='Numero de lineas',
                          label='Numero de lineas'),
    ],
    outputs=[
        gr.outputs.Textbox(label="Texto generado"),
        # gr.outputs.Audio(label="Primeros segundos")
    ],
)

iface.launch(enable_queue=True)