# Hugging Face Space app (scraped page header "Spaces: Running" removed).
import os

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

# Hugging Face API token, supplied through the Space's "Secrets" settings.
# os.getenv returns None when the variable is unset; from_pretrained treats
# None as "anonymous access", which works for public model repos.
hf_token = os.getenv("HF_TOKEN")
# Load Stable Diffusion 2 for CPU inference.
# NOTE(review): `use_auth_token` is deprecated in recent diffusers releases
# in favor of `token` -- confirm against the version pinned in requirements.
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2",
    torch_dtype=torch.float32,  # full precision; fp16 is poorly supported on CPU
    use_auth_token=hf_token,
)

# Force execution on CPU -- this Space assumes no GPU is available.
pipe.to("cpu")

# Attention slicing trades a little speed for a much lower peak memory,
# which matters on RAM-constrained CPU hardware.
pipe.enable_attention_slicing()
def generate_image(prompt):
    """Generate one image from a text prompt using the module-level `pipe`.

    Args:
        prompt: Text description of the desired image.

    Returns:
        The first image produced by the pipeline (`pipe(prompt).images[0]`).

    Raises:
        gr.Error: wraps any pipeline failure so Gradio displays it to the
            user. (The original returned the error message as a string,
            but the output component is `gr.Image`, which cannot render a
            plain string -- the error would surface as a broken output
            instead of a readable message.)
    """
    try:
        return pipe(prompt).images[0]
    except Exception as e:
        raise gr.Error(f"Error: {e}") from e
# Gradio UI: one textbox in, one generated image out.
iface = gr.Interface(
    fn=generate_image,
    inputs=gr.Textbox(label="Enter your prompt"),
    outputs=gr.Image(label="Generated Image"),
    title="Stable Diffusion (CPU Optimized)",
    description="Generate AI-generated images using Stable Diffusion on CPU. No GPU required!",
)

if __name__ == "__main__":
    # share=True additionally exposes a temporary public tunnel URL.
    iface.launch(share=True)