import base64
from io import BytesIO

import streamlit as st
import torch
from diffusers import StableDiffusion3Pipeline, StableDiffusionPipeline
from PIL import Image
| |
|
| | |
# Load the pipeline once at module import time so Streamlit script reruns
# reuse the already-loaded weights instead of reloading the model.
model_id = "stabilityai/stable-diffusion-3-medium"
# FIX: "stable-diffusion-3-medium" is an SD3 (MMDiT/transformer) checkpoint.
# The UNet-based StableDiffusionPipeline used originally cannot load SD3
# weights; diffusers requires StableDiffusion3Pipeline for this model family.
pipe = StableDiffusion3Pipeline.from_pretrained(model_id, torch_dtype=torch.float32)
pipe.to("cpu")  # float32 on CPU: slow but runs without a GPU
def generate_image(prompt, negative_prompt=None, temperature=1.0, steps=50, image_size=(512, 512)):
    """Run the module-level Stable Diffusion pipeline and return base64 PNG.

    Args:
        prompt: Text prompt describing the desired image.
        negative_prompt: Optional text describing what to avoid.
        temperature: Forwarded to the pipeline as ``guidance_scale`` (the
            classifier-free-guidance scale, despite the parameter's name).
        steps: Number of denoising inference steps.
        image_size: ``(width, height)`` the generated image is resized to.

    Returns:
        The PNG-encoded image as a UTF-8 base64 string.
    """
    # The pipeline returns a result object whose `.images` list holds PIL images.
    result = pipe(
        prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=steps,
        guidance_scale=temperature,
    )
    picture = result.images[0].resize(image_size)

    # Serialize to PNG in memory, then base64-encode for transport.
    buffer = BytesIO()
    picture.save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode('utf-8')
def main():
    """Streamlit page: read generation parameters from the URL query string,
    generate an image, and display it alongside its base64 encoding.

    Recognized query parameters (all strings):
        prompt, negative_prompt, temperature (float), steps (int),
        image_size ("width,height").
    """
    st.title("Stable Diffusion Image Generation API")
    st.write("Generate images using Stable Diffusion and get them in base64 format.")

    # NOTE(review): st.experimental_get_query_params() is deprecated (removed in
    # recent Streamlit releases) in favor of st.query_params — confirm the pinned
    # Streamlit version before migrating, since the new API returns plain strings
    # rather than one-element lists.
    query_params = st.experimental_get_query_params()
    prompt = query_params.get("prompt", [""])[0]
    negative_prompt = query_params.get("negative_prompt", [None])[0]

    # Query parameters arrive as raw strings; reject malformed numeric values
    # with a visible error instead of crashing the page with a ValueError.
    try:
        temperature = float(query_params.get("temperature", [1.0])[0])
        steps = int(query_params.get("steps", [50])[0])
        image_size = tuple(map(int, query_params.get("image_size", ["512,512"])[0].split(",")))
    except ValueError:
        st.error("Invalid query parameter: temperature/steps must be numeric "
                 "and image_size must be 'width,height'.")
        return
    if len(image_size) != 2:
        st.error("image_size must be exactly two integers: 'width,height'.")
        return

    # Only generate when a prompt was actually supplied in the URL.
    if prompt:
        st.write("Generating image with parameters:")
        st.write(f"Prompt: {prompt}")
        st.write(f"Negative Prompt: {negative_prompt}")
        st.write(f"Temperature: {temperature}")
        st.write(f"Steps: {steps}")
        st.write(f"Image Size: {image_size}")

        img_base64 = generate_image(prompt, negative_prompt, temperature, steps, image_size)

        # Render inline via a data URI, and expose the raw base64 for API-style use.
        st.image(f"data:image/png;base64,{img_base64}", caption="Generated Image")
        st.text_area("Base64 Image String", value=img_base64, height=200)
# Script entry point: run the Streamlit page when executed directly
# (e.g. via `streamlit run`).
if __name__ == "__main__":
    main()