"""Minimal Gradio demo: generate a text continuation with GPT-2."""

import torch
import gradio as gr
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Load pre-trained model and tokenizer (downloads weights on first run).
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")
model.eval()  # inference mode: disables dropout


def generate_text(input_text):
    """Return up to 100 tokens of GPT-2 sampled continuation for *input_text*.

    Sampling uses temperature 0.9; bigrams are prevented from repeating
    (no_repeat_ngram_size=2). The prompt is included in the returned text.
    """
    input_ids = tokenizer.encode(input_text, return_tensors="pt")

    # Generate text. Notes:
    # - no_repeat_ngram_size must be an int; the previous value `True`
    #   coerced to 1, which banned *every* token from ever repeating.
    # - GPT-2 defines no pad token, so we reuse EOS to silence the
    #   "Setting pad_token_id" warning from generate().
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_length=100,
            no_repeat_ngram_size=2,
            do_sample=True,
            temperature=0.9,
            pad_token_id=tokenizer.eos_token_id,
        )

    # Decode the generated token ids back into a plain string.
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text


# Create a Gradio interface around the generation function.
iface = gr.Interface(
    generate_text,
    gr.Textbox(lines=5, label="Input Text"),
    "textbox",
    examples=[
        ["Once upon a time, in a land far, far away..."]
    ],
    title="GPT-2 Text Generation",
    description="Enter some text and GPT-2 will generate more!",
    theme="compact",
)

# Only start the web server when run as a script, not on import.
if __name__ == "__main__":
    iface.launch()