from transformers import GPT2Tokenizer, TFGPT2LMHeadModel, pipeline
import gradio as gr

# Load the fine-tuned fantasy GPT-2 checkpoint and the base GPT-2 tokenizer.
model = TFGPT2LMHeadModel.from_pretrained("egosumkira/gpt2-fantasy")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

story = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0
)

def generate(tags_text, temp=1.0, n_beams=3):
    # Build the prompt in the format the model was fine-tuned on:
    # keywords joined by "^" and wrapped as "~^tag1^tag2~@".
    tags = tags_text.split(", ")
    prefix = f"~^{'^'.join(tags)}~@"
    g_text = story(
        prefix,
        temperature=temp,
        repetition_penalty=7.0,
        num_beams=n_beams,
    )[0]["generated_text"]
    # Strip the prompt: the generated story starts after the "@" marker.
    return g_text[g_text.find("@") + 1:]

title = "GPT-2 fantasy story generator"
description = "This model generates a short fantasy story based on a set of keywords and an (optional) start of the text."

iface = gr.Interface(
    generate,
    inputs=[
        gr.Textbox(label="Keywords (comma separated)"),
        gr.Slider(0, 2, value=1.0, step=0.05, label="Temperature"),
        gr.Slider(1, 10, value=3, step=1, label="Number of beams"),
    ],
    outputs=gr.Textbox(label="Output"),
    title=title,
    description=description,
)
iface.queue()
iface.launch()
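
# A minimal usage sketch (assumptions: the "egosumkira/gpt2-fantasy" checkpoint
# downloads successfully and a GPU is available at device 0). It calls
# generate() directly to sanity-check the pipeline before launching the Gradio
# UI; the keyword string below is purely illustrative.
#
#     print(generate("dragon, ancient castle, lost prince", temp=1.2, n_beams=5))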