import random
import re

import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline, set_seed

# Spanish -> English translator; MagicPrompt expects English input.
# (Renamed from en_es to es_en: the opus-mt-es-en model translates es -> en.)
tokenizer_es_en = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-es-en")
model_es_en = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-es-en")
es_en_translator = pipeline("translation_es_to_en", model=model_es_en, tokenizer=tokenizer_es_en)

# GPT-2 fine-tuned to expand short texts into Stable Diffusion prompts.
gpt2_pipe = pipeline("text-generation", model="Gustavosta/MagicPrompt-Stable-Diffusion", tokenizer="gpt2")

# Fallback starting ideas, used when the input is empty.
with open("ideas.txt", "r") as f:
    lines = f.readlines()


def generate(inputs):
    # Translate the Spanish input to English before feeding it to MagicPrompt.
    resultado = es_en_translator(inputs)
    starting_text = resultado[0]["translation_text"]

    response_end = ""
    for count in range(4):
        seed = random.randint(100, 1000000)
        set_seed(seed)

        # No input text: pick a random idea from ideas.txt and strip punctuation.
        if starting_text == "":
            starting_text = lines[random.randrange(0, len(lines))].replace("\n", "").lower().capitalize()
            starting_text = re.sub(r"[,:\-–.!;?_]", "", starting_text)
            print(starting_text)

        response = gpt2_pipe(
            starting_text,
            max_length=len(starting_text) + random.randint(60, 90),
            num_return_sequences=4,
        )

        # Keep only generations that actually extend the input and do not end mid-clause.
        response_list = []
        for x in response:
            resp = x["generated_text"].strip()
            if (
                resp != starting_text
                and len(resp) > len(starting_text) + 4
                and not resp.endswith((":", "-", "—"))
            ):
                response_list.append(resp + "\n")

        response_end = "\n".join(response_list)
        # Drop tokens containing an inner dot (file names, URLs) and strip angle brackets.
        response_end = re.sub(r"[^ ]+\.[^ ]+", "", response_end)
        response_end = response_end.replace("<", "").replace(">", "")

        # Return as soon as one attempt yields a non-empty suggestion;
        # otherwise retry with a fresh seed, up to four times.
        if response_end != "":
            return response_end

    # All attempts came back empty (the original `if count == 4` branch was
    # unreachable inside range(4)); return the last, possibly empty, result.
    return response_end


# UI copy is in Spanish, matching the demo's target audience.
txt = gr.Textbox(lines=1, label="Texto inicial", placeholder="Texto en Español")
out = gr.Textbox(lines=4, label="Sugerencia generada")

title = "Generador de sugerencia para Stable Diffusion (SD)"
description = (
    'Esta es una demostración de la serie de modelos "MagicPrompt", '
    "en este caso dirigida a Stable Diffusion. "
    "Para utilizarlo, simplemente envíe su texto."
)
article = ""

gr.Interface(
    fn=generate,
    inputs=txt,
    outputs=out,
    title=title,
    description=description,
    article=article,
    allow_flagging="never",
    cache_examples=False,
    theme="default",
).launch(enable_queue=True, debug=True)