"""Gradio demo for the Jonathanmann/GPT2-medium-SADnov21 text-generation model.

Loads the tokenizer and model from the Hugging Face Hub (optionally authenticated
via the HF_TOKEN environment variable), wraps them in a text-generation pipeline,
and exposes prompt + sampling controls through a Gradio interface.
"""

import os

import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer, pipeline

# Hub credentials (may be None for public models) and the model to serve.
HF_TOKEN = os.getenv("HF_TOKEN")
model_name = "Jonathanmann/GPT2-medium-SADnov21"

# Load tokenizer and model from Hugging Face with error handling.
# NOTE: `token=` replaces the deprecated `use_auth_token=` kwarg.
try:
    tokenizer = GPT2Tokenizer.from_pretrained(model_name, token=HF_TOKEN)
    model = GPT2LMHeadModel.from_pretrained(model_name, token=HF_TOKEN)
except Exception as e:
    # Chain the original exception so the root cause stays in the traceback.
    raise RuntimeError(f"Error loading model or tokenizer: {e}") from e

# Define the text generation pipeline with error handling.
try:
    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
except Exception as e:
    raise RuntimeError(f"Error creating text generation pipeline: {e}") from e


def generate_text(prompt, max_length, temperature, top_k, top_p):
    """Generate a single continuation of `prompt` with the given sampling knobs.

    Args:
        prompt: Seed text for generation.
        max_length: Total token budget (prompt + continuation); Gradio sliders
            deliver floats, so it is cast to int before use.
        temperature: Softmax temperature (higher = more random).
        top_k: Keep only the k most likely next tokens; cast to int.
        top_p: Nucleus-sampling cumulative-probability cutoff.

    Returns:
        The generated text, or an error message string if generation fails.
    """
    try:
        response = generator(
            prompt,
            max_length=int(max_length),  # sliders return floats; generate() wants int
            # do_sample=True is required for temperature/top_k/top_p to take
            # effect — GPT-2's default greedy decoding silently ignores them.
            do_sample=True,
            temperature=temperature,
            top_k=int(top_k),
            top_p=top_p,
            num_return_sequences=1,
        )
        return response[0]["generated_text"]
    except Exception as e:
        return f"An error occurred during text generation: {str(e)}"


# Create the Gradio interface wiring the sliders to generate_text's parameters.
demo = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
        gr.Slider(minimum=20, maximum=200, value=50, label="Max Length"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature"),
        gr.Slider(minimum=1, maximum=100, value=50, label="Top-k"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.9, label="Top-p"),
    ],
    outputs="text",
    title="GPT-2 Text Generation",
    description="A demo of Jonathanmann/GPT2-medium-SADnov21 with adjustable generation parameters.",
)

# Launch the app with error handling; guarded so importing this module for
# testing does not start a server.
if __name__ == "__main__":
    try:
        demo.launch()
    except Exception as e:
        print(f"An error occurred while launching the Gradio interface: {str(e)}")