# Hugging Face Space app: ToastBot (text-generation demo)
# Third-party dependencies: the transformers pipeline API and the Gradio UI.
from transformers import pipeline
import gradio as gr

# Load the text-generation model eagerly at startup. If loading fails
# (bad weights, network, out-of-memory), keep the app alive with
# model = None so generate_text() can report the problem to the user
# instead of the whole Space crashing.
try:
    model = pipeline("text-generation", model="ButteredToast/ToastBot")
except Exception as e:
    print(f"Error loading model: {e}")
    model = None  # sentinel checked in generate_text()
def generate_text(input_text):
    """Generate a short continuation of *input_text* with the loaded model.

    Returns the generated text on success, or a human-readable error
    string when the model failed to load or generation raises.
    """
    # Guard clause: the startup load may have left the global model unset.
    if model is None:
        return "Error: Model could not be loaded. Check your Hugging Face space logs."
    try:
        # truncation=True keeps over-long prompts inside the model's window;
        # the pipeline returns a list of {"generated_text": ...} dicts.
        results = model(input_text, max_length=30, truncation=True)
        return results[0]["generated_text"]
    except Exception as exc:
        return f"Error generating text: {exc}"
# Web UI wiring: one text box in, generated text out.
_interface_config = dict(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="ToastBot AI",
    description="Chat with ToastBot, your AI assistant!",
)
demo = gr.Interface(**_interface_config)

# share=True asks Gradio for a public gradio.live link so the demo is
# reachable from outside the Space (also exposes the HTTP API endpoint).
demo.launch(share=True)