import random
from functools import partial
from typing import Optional, Tuple

import cv2
import gradio as gr
import numpy as np
import requests
import spaces
import torch
from diffusers import FluxInpaintPipeline
from gradio_client import Client, handle_file
from PIL import Image, ImageFilter

MARKDOWN = """
# FLUX.1 Inpainting 🔥

Shoutout to the [Black Forest Labs](https://huggingface.co/black-forest-labs) team for
creating this amazing model, and a big thanks to [Gothos](https://github.com/Gothos)
for taking it to the next level by enabling inpainting with FLUX.
"""

MAX_SEED = np.iinfo(np.int32).max
IMAGE_SIZE = 1024
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
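# Load the FLUX.1 [schnell] inpainting pipeline once at import time; bfloat16 reduces
# GPU memory use compared to float32.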
PIPE = FluxInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16).to(DEVICE)
CLIENT = Client("SkalskiP/florence-sam-masking")
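# Shared fallback client for text-prompted masking; set_client_for_session() below
# builds a per-session client that forwards the caller's x-ip-token header when present.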


EXAMPLES = [
    [
        {
            "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-image.png", stream=True).raw),
            "layers": [Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-mask-2-removebg.png", stream=True).raw)],
            "composite": Image.open(requests.get("https://media.roboflow.com/spaces/doge-2-composite-2.png", stream=True).raw),
        },
        "little lion",
        "",
        5,
        5,
        42,
        False,
        0.85,
        20
    ],
    [
        {
            "background": Image.open(requests.get("https://media.roboflow.com/spaces/doge-5.jpeg", stream=True).raw),
            "layers": None,
            "composite": None
        },
        "big blue eyes",
        "eyes",
        10,
        5,
        42,
        False,
        0.9,
        20
    ]
]


def calculate_image_dimensions_for_flux(
    original_resolution_wh: Tuple[int, int],
    maximum_dimension: int = IMAGE_SIZE
) -> Tuple[int, int]:
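    """
    Scale (width, height) so the longer side is at most `maximum_dimension`, then round
    both sides down to multiples of 32 (a conservative multiple of what the FLUX
    pipeline accepts).

    Example: (1920, 1080) -> scaled by 1024/1920 -> (1024, 576), both multiples of 32.
    """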
    width, height = original_resolution_wh

    if width > height:
        scaling_factor = maximum_dimension / width
    else:
        scaling_factor = maximum_dimension / height

    new_width = int(width * scaling_factor)
    new_height = int(height * scaling_factor)

    new_width = new_width - (new_width % 32)
    new_height = new_height - (new_height % 32)

    return new_width, new_height


def is_mask_empty(image: Image.Image) -> bool:
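    """Return True if every pixel of the grayscale-converted mask is black."""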
    gray_img = image.convert("L")
    pixels = list(gray_img.getdata())
    return all(pixel == 0 for pixel in pixels)


def process_mask(
    mask: Image.Image,
    mask_inflation: Optional[int] = None,
    mask_blur: Optional[int] = None
) -> Image.Image:
    """
    Inflates and blurs the white regions of a mask.

    Args:
        mask (Image.Image): The input mask image.
        mask_inflation (Optional[int]): The number of pixels to inflate the mask by.
        mask_blur (Optional[int]): The radius of the Gaussian blur to apply.

    Returns:
        Image.Image: The processed mask with inflated and/or blurred regions.
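
    Example:
        mask = process_mask(mask, mask_inflation=5, mask_blur=5)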
    """
    if mask_inflation and mask_inflation > 0:
        mask_array = np.array(mask)
        kernel = np.ones((mask_inflation, mask_inflation), np.uint8)
        mask_array = cv2.dilate(mask_array, kernel, iterations=1)
        mask = Image.fromarray(mask_array)

    if mask_blur and mask_blur > 0:
        mask = mask.filter(ImageFilter.GaussianBlur(radius=mask_blur))

    return mask


def set_client_for_session(request: gr.Request):
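    """
    Create a per-session masking client that forwards the caller's x-ip-token header,
    falling back to the shared CLIENT when the header is unavailable.
    """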
    try:
        x_ip_token = request.headers['x-ip-token']
        return Client("SkalskiP/florence-sam-masking", headers={"X-IP-Token": x_ip_token})
    except Exception:
        # Header missing (e.g. local run) or client creation failed; use the shared client.
        return CLIENT


@spaces.GPU(duration=50)
def run_flux(
    image: Image.Image,
    mask: Image.Image,
    prompt: str,
    seed_slicer: int,
    randomize_seed_checkbox: bool,
    strength_slider: float,
    num_inference_steps_slider: int,
    resolution_wh: Tuple[int, int],
) -> Image.Image:
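    """Run FLUX inpainting on an already-resized image/mask pair and return a single image."""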
    print("Running FLUX...")
    width, height = resolution_wh
    if randomize_seed_checkbox:
        seed_slicer = random.randint(0, MAX_SEED)
    generator = torch.Generator().manual_seed(seed_slicer)
    return PIPE(
        prompt=prompt,
        image=image,
        mask_image=mask,
        width=width,
        height=height,
        strength=strength_slider,
        generator=generator,
        num_inference_steps=num_inference_steps_slider
    ).images[0]


def process(
    client,
    input_image_editor: dict,
    inpainting_prompt_text: str,
    masking_prompt_text: str,
    mask_inflation_slider: int,
    mask_blur_slider: int,
    seed_slicer: int,
    randomize_seed_checkbox: bool,
    strength_slider: float,
    num_inference_steps_slider: int
):
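    """
    Validate the inputs, obtain a mask (either drawn in the editor or generated from
    `masking_prompt_text` via the florence-sam-masking client), and run FLUX inpainting.
    Returns (generated image, mask) on success, or (None, None) on invalid input.
    """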
    if not inpainting_prompt_text:
        gr.Info("Please enter inpainting text prompt.")
        return None, None

    image_path = input_image_editor.get('background')
    if not image_path:
        gr.Info("Please upload an image.")
        return None, None
    image = Image.open(image_path)

    layers = input_image_editor.get('layers') or []
    if layers:
        mask = Image.open(layers[0])
    else:
        # No layer was drawn; start from an all-black (empty) mask.
        mask = Image.new("L", image.size, 0)

    if is_mask_empty(mask) and not masking_prompt_text:
        gr.Info("Please draw a mask or enter a masking prompt.")
        return None, None

    if not is_mask_empty(mask) and masking_prompt_text:
        gr.Info("Both mask and masking prompt are provided. Please provide only one.")
        return None, None

    if is_mask_empty(mask):
        print("Generating mask...")
        mask = client.predict(
            image_input=handle_file(image_path),
            text_input=masking_prompt_text,
            api_name="/process_image")
        mask = Image.open(mask)
        print("Mask generated.")

    width, height = calculate_image_dimensions_for_flux(original_resolution_wh=image.size)
    image = image.resize((width, height), Image.LANCZOS)
    mask = mask.resize((width, height), Image.LANCZOS)
    mask = process_mask(mask, mask_inflation=mask_inflation_slider, mask_blur=mask_blur_slider)
    image = run_flux(
        image=image,
        mask=mask,
        prompt=inpainting_prompt_text,
        seed_slicer=seed_slicer,
        randomize_seed_checkbox=randomize_seed_checkbox,
        strength_slider=strength_slider,
        num_inference_steps_slider=num_inference_steps_slider,
        resolution_wh=(width, height)
    )
    return image, mask


# Bind the shared client positionally so the examples callback can pass the remaining
# arguments positionally without clashing with the `client` parameter.
process_example = partial(process, CLIENT)


with gr.Blocks() as demo:
    client_component = gr.State()
    gr.Markdown(MARKDOWN)
    with gr.Row():
        with gr.Column():
            input_image_editor_component = gr.ImageEditor(
                label='Image',
                type='filepath',
                sources=["upload", "webcam"],
                image_mode='RGB',
                layers=False,
                brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"))

            with gr.Row():
                inpainting_prompt_text_component = gr.Text(
                    label="Inpainting prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter text to generate inpainting",
                    container=False,
                )
                submit_button_component = gr.Button(
                    value='Submit', variant='primary', scale=0)

            with gr.Accordion("Advanced Settings", open=False):
                masking_prompt_text_component = gr.Text(
                    label="Masking prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter text to generate masking",
                    container=False,
                )

                with gr.Row():
                    mask_inflation_slider_component = gr.Slider(
                        label="Mask inflation",
                        info="Adjusts the amount of mask edge expansion before "
                             "inpainting.",
                        minimum=0,
                        maximum=20,
                        step=1,
                        value=5,
                    )

                    mask_blur_slider_component = gr.Slider(
                        label="Mask blur",
                        info="Controls the intensity of the Gaussian blur applied to "
                             "the mask edges.",
                        minimum=0,
                        maximum=20,
                        step=1,
                        value=5,
                    )

                seed_slicer_component = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=MAX_SEED,
                    step=1,
                    value=42,
                )

                randomize_seed_checkbox_component = gr.Checkbox(
                    label="Randomize seed", value=True)

                with gr.Row():
                    strength_slider_component = gr.Slider(
                        label="Strength",
                        info="Indicates extent to transform the reference `image`. "
                             "Must be between 0 and 1. `image` is used as a starting "
                             "point and more noise is added the higher the `strength`.",
                        minimum=0,
                        maximum=1,
                        step=0.01,
                        value=0.85,
                    )

                    num_inference_steps_slider_component = gr.Slider(
                        label="Number of inference steps",
                        info="The number of denoising steps. More denoising steps "
                             "usually lead to a higher quality image at the",
                        minimum=1,
                        maximum=50,
                        step=1,
                        value=20,
                    )
        with gr.Column():
            output_image_component = gr.Image(
                type='pil', image_mode='RGB', label='Generated image', format="png")
            with gr.Accordion("Debug", open=False):
                output_mask_component = gr.Image(
                    type='pil', image_mode='RGB', label='Input mask', format="png")
    gr.Examples(
        fn=process_example,
        examples=EXAMPLES,
        inputs=[
            input_image_editor_component,
            inpainting_prompt_text_component,
            masking_prompt_text_component,
            mask_inflation_slider_component,
            mask_blur_slider_component,
            seed_slicer_component,
            randomize_seed_checkbox_component,
            strength_slider_component,
            num_inference_steps_slider_component
        ],
        outputs=[
            output_image_component,
            output_mask_component
        ],
        run_on_click=False
    )

    submit_button_component.click(
        fn=process,
        inputs=[
            client_component,
            input_image_editor_component,
            inpainting_prompt_text_component,
            masking_prompt_text_component,
            mask_inflation_slider_component,
            mask_blur_slider_component,
            seed_slicer_component,
            randomize_seed_checkbox_component,
            strength_slider_component,
            num_inference_steps_slider_component
        ],
        outputs=[
            output_image_component,
            output_mask_component
        ]
    )
    demo.load(set_client_for_session, None, client_component)

demo.launch(debug=False, show_error=True)