# Hugging Face Space (status at scrape time: Sleeping) — app.py
# app.py
import io
import base64

from fastapi import FastAPI
from pydantic import BaseModel
from diffusers import StableDiffusionPipeline
from PIL import Image
import torch

app = FastAPI()

# Load the Stable Diffusion pipeline once, at import time (module-level side
# effect: downloads/loads the weights, so startup is slow and memory-heavy).
# float32 on "cpu" because this Space has no GPU.
pipe = StableDiffusionPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V5.1_noVAE",
    torch_dtype=torch.float32,  # use float32 for CPU
    safety_checker=None,  # NOTE(review): NSFW filter disabled — confirm intentional
    use_safetensors=True,
).to("cpu")  # switched from "cuda" to "cpu"
class PromptRequest(BaseModel):
    """Request body for image generation."""

    # Required positive text prompt.
    prompt: str
    # Text to steer generation away from; empty string means unused.
    negative_prompt: str = ""
    # Output dimensions in pixels (defaults: 512x512).
    width: int = 512
    height: int = 512
def generate_image(data: PromptRequest):
    """Run the diffusion pipeline and return the image as a base64 PNG.

    NOTE(review): no @app.post(...) decorator is visible on this handler —
    it may have been lost during extraction; confirm the route is actually
    registered with the FastAPI app.
    """
    # Run inference with the module-level pipeline.
    result = pipe(
        prompt=data.prompt,
        negative_prompt=data.negative_prompt,
        width=data.width,
        height=data.height,
    )
    # Encode the first generated image as PNG, then base64 for JSON transport.
    png_buffer = io.BytesIO()
    result.images[0].save(png_buffer, format="PNG")
    encoded = base64.b64encode(png_buffer.getvalue()).decode("utf-8")
    return {"image_base64": encoded}
def root():
    """Health-check / landing endpoint.

    NOTE(review): no @app.get("/") decorator is visible — confirm the route
    registration was not lost in extraction.
    """
    payload = {"message": "Image generation API running on CPU!"}
    return payload