from transformers import GPT2LMHeadModel, GPT2Tokenizer
import gradio as gr

# Load the tokenizer and model once at startup instead of on every request.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2-large")
model = GPT2LMHeadModel.from_pretrained("gpt2-large", pad_token_id=tokenizer.eos_token_id)


def generate_text(sentence, max_length):
    # Encode the prompt and generate a continuation with beam search.
    input_ids = tokenizer.encode(sentence, return_tensors="pt")
    output = model.generate(
        input_ids,
        max_length=int(max_length),  # the slider value may arrive as a float
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)


# Example prompt: 'YouTube Title: AI learns to'

# Note: gr.inputs.* / gr.outputs.* were removed in newer Gradio releases;
# use the top-level components (gr.Textbox, gr.Slider) instead.
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter text here..."),
        gr.Slider(minimum=10, maximum=1000, step=10, value=100, label="Max length of output"),
    ],
    outputs=gr.Textbox(),
    title="Blog Post Generation App",
    description="Enter some text and see the generated output.",
)

iface.launch()