# FLUX.1 Fill [dev] inpainting demo (a Hugging Face Space running on ZeroGPU).
import gradio as gr
import numpy as np
import spaces
import torch
import random
from diffusers import FluxFillPipeline
from PIL import Image
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16).to("cuda")
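
# The pipeline is loaded once at startup in bfloat16. FLUX.1-Fill-dev is a
# gated repo, so the Space needs access to it. On ZeroGPU the `spaces` package
# makes .to("cuda") safe at import time; a GPU is only attached while a
# @spaces.GPU-decorated function runs.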
# pipe.load_lora_weights("alvdansen/flux-koda")
# pipe.enable_sequential_cpu_offload()
# pipe.enable_fp16()
# pipe.enable_lora()
# pipe.vae.enable_slicing()
# pipe.vae.enable_tiling()
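
# The lines above are optional VRAM/speed trade-offs (LoRA loading, CPU
# offload, VAE slicing/tiling) left disabled here.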
def calculate_optimal_dimensions(image: Image.Image):
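    """Pick an output resolution for `image`.

    The longer side is fixed at 1024 px and the shorter side follows the
    input aspect ratio, clamped to the [9:16, 16:9] range and rounded down
    to a multiple of 8.
    """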
    # Extract the original dimensions
    original_width, original_height = image.size

    # Set constants
    MIN_ASPECT_RATIO = 9 / 16
    MAX_ASPECT_RATIO = 16 / 9
    FIXED_DIMENSION = 1024

    # Calculate the aspect ratio of the original image
    original_aspect_ratio = original_width / original_height

    # Fix the longer dimension at FIXED_DIMENSION and scale the other
    if original_aspect_ratio > 1:  # Wider than tall
        width = FIXED_DIMENSION
        height = round(FIXED_DIMENSION / original_aspect_ratio)
    else:  # Taller than wide
        height = FIXED_DIMENSION
        width = round(FIXED_DIMENSION * original_aspect_ratio)

    # Ensure dimensions are multiples of 8
    width = (width // 8) * 8
    height = (height // 8) * 8

    # Enforce aspect ratio limits, keeping integer multiples of 8
    calculated_aspect_ratio = width / height
    if calculated_aspect_ratio > MAX_ASPECT_RATIO:
        width = int(height * MAX_ASPECT_RATIO) // 8 * 8
    elif calculated_aspect_ratio < MIN_ASPECT_RATIO:
        height = int(width / MIN_ASPECT_RATIO) // 8 * 8

    # Keep both dimensions above a minimum (576 is itself a multiple of 8)
    width = max(width, 576)
    height = max(height, 576)

    return width, height
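
# On ZeroGPU, @spaces.GPU attaches a GPU for each call of the decorated
# function; `duration` raises the per-call time limit, in seconds.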
@spaces.GPU(duration=300)
def infer(edit_images, prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
    # pipe.enable_xformers_memory_efficient_attention()
    image = edit_images["background"]
    # The width/height arguments are immediately recomputed from the uploaded
    # image, so the hidden Width/Height sliders in the UI have no effect.
    width, height = calculate_optimal_dimensions(image)
    mask = edit_images["layers"][0]
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
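
    # FluxFillPipeline repaints the regions painted white in the mask layer,
    # conditioned on the prompt and the surrounding unmasked pixels.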
    image = pipe(
        prompt=prompt,
        image=image,
        mask_image=mask,
        height=height,
        width=width,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=torch.Generator(device="cuda").manual_seed(seed),
        # lora_scale=0.75  # not supported in this version
    ).images[0]
    output_image_jpg = image.convert("RGB")
    output_image_jpg.save("output.jpg", "JPEG")
    return output_image_jpg, seed
    # return image, seed
examples = [
"photography of a young woman, accent lighting, (front view:1.4), "
# "a tiny astronaut hatching from an egg on the moon",
# "a cat holding a sign that says hello world",
# "an anime illustration of a wiener schnitzel",
]
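
# NOTE: `examples` is defined but not wired to a gr.Examples component below.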
css="""
#col-container {
margin: 0 auto;
max-width: 1000px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("# FLUX.1 [dev]")
        with gr.Row():
            with gr.Column():
                edit_image = gr.ImageEditor(
                    label='Upload and draw mask for inpainting',
                    type='pil',
                    sources=["upload", "webcam"],
                    image_mode='RGB',
                    layers=False,
                    brush=gr.Brush(colors=["#FFFFFF"]),
                    # height=600
                )
                prompt = gr.Text(
                    label="Prompt",
                    show_label=False,
                    max_lines=2,
                    placeholder="Enter your prompt",
                    container=False,
                )
                run_button = gr.Button("Run")
            result = gr.Image(label="Result", show_label=False)
        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(
                    label="Width",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                    visible=False,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=256,
                    maximum=MAX_IMAGE_SIZE,
                    step=32,
                    value=1024,
                    visible=False,
                )
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance Scale",
                    minimum=1,
                    maximum=30,
                    step=0.5,
                    value=30,  # must lie within [minimum, maximum]
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=28,
                )
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[edit_image, prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result, seed],
    )
demo.launch()