from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import gradio as gr

# Load the DistilGPT-2 tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")

# Wrap the model and tokenizer in a text-generation pipeline
gen_pipeline = pipeline(task="text-generation", model=model, tokenizer=tokenizer)


def get_generated_text(prompt):
    """Generate a continuation of the prompt, capped at 200 tokens."""
    return gen_pipeline(prompt, max_length=200)[0]["generated_text"]


# Build a simple Gradio interface: a textbox in, generated text out
demo = gr.Interface(
    fn=get_generated_text,
    inputs=gr.Textbox(label="Enter a series of text and generate more", lines=2, max_lines=5),
    outputs="text",
)

demo.launch()