Update app.py
app.py CHANGED
@@ -1,22 +1,13 @@
 import gradio as gr
-import torch
-from diffusers import StableDiffusionPipeline
 import random
 import numpy as np
+import os
+from huggingface_hub import InferenceClient
 
-# 1.
-#
-… (2 removed lines not captured on the page)
-# Use float32 because CPU doesn't support float16 well
-pipe = StableDiffusionPipeline.from_pretrained(
-    model_id,
-    torch_dtype=torch.float32
-)
-pipe = pipe.to("cpu")
-
-# Optimize for CPU speed
-pipe.set_progress_bar_config(disable=True)
+# 1. Initialize the Client
+# If you are running this IN a Hugging Face Space, it will automatically
+# find your token if you add it to 'Settings > Variables and Secrets' as HF_TOKEN
+client = InferenceClient("black-forest-labs/FLUX.1-schnell")
 
 MAX_SEED = np.iinfo(np.int32).max
 
@@ -24,33 +15,43 @@ def infer(prompt, seed, randomize_seed, width, height):
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
-… (13 removed lines not captured on the page: the old in-process generation code)
+    # FLUX.1-schnell is optimized for 4 steps.
+    # The Inference Client handles the generator/torch logic server-side.
+    try:
+        image = client.text_to_image(
+            prompt,
+            width=width,
+            height=height,
+            num_inference_steps=4,  # Schnell specifically needs only 4 steps
+            guidance_scale=0.0,     # Schnell usually ignores guidance or prefers 0.0
+            seed=seed
+        )
+        return image, seed
+    except Exception as e:
+        raise gr.Error(f"Generation failed: {e}")
 
 # Simple UI
-with gr.Blocks() as demo:
-    gr.Markdown("# CodeIgnite
+with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    gr.Markdown("# 🚀 CodeIgnite FLUX Engine")
+    gr.Markdown("Using `FLUX.1-schnell` via Inference API for lightning-fast results.")
+
     with gr.Column():
-… (2 removed lines not captured on the page)
+        with gr.Row():
+            prompt = gr.Textbox(
+                label="Prompt",
+                placeholder="A futuristic cyberpunk city...",
+                scale=4
+            )
+            run_button = gr.Button("Generate", variant="primary")
+
         result = gr.Image(label="Result")
 
-    with gr.Accordion("Settings", open=False):
+    with gr.Accordion("Advanced Settings", open=False):
         seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-… (2 removed lines not captured on the page)
+        # FLUX works best at 1024x1024, but 512-1024 is safe for API
+        width = gr.Slider(label="Width", minimum=256, maximum=1024, step=32, value=1024)
+        height = gr.Slider(label="Height", minimum=256, maximum=1024, step=32, value=1024)
 
     run_button.click(
         fn=infer,
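
The diff is truncated at the `run_button.click(` call on both sides. Based on infer's signature (prompt, seed, randomize_seed, width, height) and its `return image, seed`, the wiring plausibly finishes as sketched below; the actual argument list is not shown on this page, so the inputs/outputs and the trailing `demo.launch()` are assumptions.

# Hypothetical completion of the truncated click() wiring.
run_button.click(
    fn=infer,
    inputs=[prompt, seed, randomize_seed, width, height],  # matches infer's parameters
    outputs=[result, seed]                                 # infer returns (image, seed)
)

demo.launch()  # assumed: a Gradio Space needs a launch() call; not visible in the diff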
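The new version adds `import os`, but no `os` call appears in the visible hunks. A likely use, given the HF_TOKEN comment above the client setup, is reading the Space secret and handing it to the client explicitly. A minimal sketch, assuming the secret is named HF_TOKEN as that comment says:

import os
from huggingface_hub import InferenceClient

# Pass the Space secret explicitly rather than relying on the client's
# ambient credential resolution; if HF_TOKEN is unset this passes None
# and the client falls back to the default token lookup.
client = InferenceClient(
    "black-forest-labs/FLUX.1-schnell",
    token=os.environ.get("HF_TOKEN"),
)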
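To sanity-check the server-side call outside Gradio, `text_to_image` can be run on its own; it returns a `PIL.Image`. The parameters below mirror the ones in infer; the prompt string, seed value, and output filename are arbitrary:

from huggingface_hub import InferenceClient

client = InferenceClient("black-forest-labs/FLUX.1-schnell")

# Same settings the Space uses: schnell wants 4 steps and guidance 0.0,
# per the comments in the diff.
image = client.text_to_image(
    "A futuristic cyberpunk city at dusk",
    width=1024,
    height=1024,
    num_inference_steps=4,
    guidance_scale=0.0,
    seed=42,
)
image.save("cyberpunk.png")  # arbitrary filename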