from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import gradio as gr

# Load the distilgpt2 tokenizer and model for text generation
tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")

# Wrap the model and tokenizer in a text-generation pipeline
gen_pipeline = pipeline(task="text-generation", model=model, tokenizer=tokenizer)

# Return only the generated text from the pipeline's output
def get_generated_text(prompt):
    return gen_pipeline(prompt)[0]["generated_text"]

# Build a Gradio interface: the prompt textbox feeds the generation function,
# and the generated continuation is shown as text output
demo = gr.Interface(
    fn=get_generated_text,
    inputs=gr.Textbox(label="Enter a series of text and generate more", lines=2),
    outputs="text",
)

demo.launch()