import streamlit as st
from diffusers import AutoPipelineForText2Image
import torch
from PIL import Image
import io
import os
import requests

# --- CONFIG ---
USE_GROQ = False  # Set to True when a Groq image API is available
GROQ_API_URL = "https://your-groq-image-api.com/generate"  # Placeholder

# Force CPU so the pipeline never tries to grab a GPU
os.environ["CUDA_VISIBLE_DEVICES"] = ""


@st.cache_resource
def load_model():
    if USE_GROQ:
        return None  # Skip loading the local model
    pipe = AutoPipelineForText2Image.from_pretrained(
        "stabilityai/sd-turbo", torch_dtype=torch.float32
    )
    pipe.to("cpu")
    return pipe


def generate_image_local(prompt, guidance_scale):
    pipe = load_model()
    # Note: sd-turbo is distilled for few-step sampling; 1-4 steps with
    # guidance_scale=0.0 is the recommended fast setting. 20 steps works,
    # but is slow on CPU.
    result = pipe(prompt, guidance_scale=guidance_scale, num_inference_steps=20)
    return result.images[0]


def generate_image_from_groq(prompt):
    # Assumes the endpoint returns raw image bytes in the response body.
    response = requests.post(GROQ_API_URL, json={"prompt": prompt})
    if response.status_code == 200:
        image_bytes = io.BytesIO(response.content)
        return Image.open(image_bytes)
    else:
        raise Exception(f"GROQ API failed: {response.text}")


# --- UI ---
st.title("🧠 AI Image Generator (Fast with API / Groq-ready)")

prompt = st.text_input("Prompt:", "A glowing alien forest with floating orbs, concept art, 8K")
guidance = st.slider("Guidance scale", 1.0, 10.0, 3.0)

if st.button("Generate"):
    with st.spinner("Generating..."):
        try:
            if USE_GROQ:
                image = generate_image_from_groq(prompt)
            else:
                image = generate_image_local(prompt, guidance)

            # use_container_width replaces the deprecated use_column_width
            st.image(image, caption="Generated Image", use_container_width=True)

            buf = io.BytesIO()
            image.save(buf, format="PNG")
            st.download_button("Download Image", buf.getvalue(), "generated.png", "image/png")
        except Exception as e:
            st.error(f"Error: {e}")
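
# Usage sketch: launch the app with Streamlit's CLI, e.g.
#   streamlit run app.py
# ("app.py" is an assumed file name; substitute whatever this script is saved as.)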