flux-lightning / app.py
import gradio as gr
import numpy as np
import random
import spaces
import torch
from diffusers import FluxPipeline
# Enable cuDNN benchmarking for potential performance improvement
torch.backends.cudnn.benchmark = True

# Set up device and data type. The model is distributed in bfloat16, and
# casting it to float16 risks overflow, so keep bfloat16 end to end.
device = "cuda" if torch.cuda.is_available() else "cpu"
DTYPE = torch.bfloat16

# Load the model once at startup, directly in the target precision so no
# post-hoc cast is needed.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",
    torch_dtype=DTYPE,
)

# Configure the pipeline for low peak VRAM: stream submodules to the GPU
# on demand and decode the VAE latents in tiles.
pipe.enable_sequential_cpu_offload()
pipe.vae.enable_tiling()
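
# Bounds for the UI controls: the full 32-bit seed range and a hard cap on
# the requested resolution.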
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048

# Request a GPU slot for the duration of each call when running on Spaces.
@spaces.GPU()
def infer(prompt, seed=42, randomize_seed=False, width=1024, height=1024, num_inference_steps=4, progress=gr.Progress(track_tqdm=True)):
    """Generate one image for the prompt and return it together with the seed used."""
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)
    image = pipe(
        prompt,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=1,
        guidance_scale=0.0,  # FLUX.1 [schnell] does not use classifier-free guidance
        height=height,
        width=width,
        generator=generator,
    ).images[0]
    return image, seed

# Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# FLUX.1 [schnell] Image Generator")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button("Generate")
        with gr.Column():
            result = gr.Image(label="Generated Image")
    with gr.Accordion("Advanced Settings", open=False):
        seed = gr.Slider(minimum=0, maximum=MAX_SEED, step=1, label="Seed", randomize=True)
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        width = gr.Slider(minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024, label="Width")
        height = gr.Slider(minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024, label="Height")
        num_inference_steps = gr.Slider(minimum=1, maximum=50, step=1, value=4, label="Number of inference steps")

    run_button.click(
        infer,
        inputs=[prompt, seed, randomize_seed, width, height, num_inference_steps],
        outputs=[result, seed],
    )

demo.launch()