chaojiemao committed (verified)
Commit 0a44614 · 1 Parent(s): 83266af

Create app.py

Files changed (1): app.py (+513 -0)
app.py ADDED
@@ -0,0 +1,513 @@
# -*- coding: utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
import spaces  # required for the @spaces.GPU decorators used below
import threading
import time
import gradio as gr
import numpy as np
import torch
from PIL import Image
import glob
import os, csv, sys
import shlex
import subprocess

subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)
subprocess.run(shlex.split('pip install scepter'))
from scepter.modules.transform.io import pillow_convert
from scepter.modules.utils.config import Config
from scepter.modules.utils.distribute import we
from scepter.modules.utils.file_system import FS

from inference.ace_plus_diffusers import ACEPlusDiffuserInference
from inference.utils import edit_preprocess
from examples.examples import all_examples

inference_dict = {
    "ACE_DIFFUSER_PLUS": ACEPlusDiffuserInference
}
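
# Register one scepter FS client per URI scheme (hf://, ms://, http(s)://,
# and plain local paths); all of them cache downloads under ./cache.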
fs_list = [
    Config(cfg_dict={"NAME": "HuggingfaceFs", "TEMP_DIR": "./cache"}, load=False),
    Config(cfg_dict={"NAME": "ModelscopeFs", "TEMP_DIR": "./cache"}, load=False),
    Config(cfg_dict={"NAME": "HttpFs", "TEMP_DIR": "./cache"}, load=False),
    Config(cfg_dict={"NAME": "LocalFs", "TEMP_DIR": "./cache"}, load=False),
]

for one_fs in fs_list:
    FS.init_fs_client(one_fs)
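
# Fetch the FLUX.1-Fill-dev base weights plus one task-specific ACE++ LoRA
# each for portrait, subject and local editing, so GPU calls later read from
# the local cache instead of the network.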
os.environ["FLUX_FILL_PATH"] = "hf://black-forest-labs/FLUX.1-Fill-dev"
os.environ["PORTRAIT_MODEL_PATH"] = "hf://ali-vilab/ACE_Plus@portrait/comfyui_portrait_lora64.safetensors"
os.environ["SUBJECT_MODEL_PATH"] = "hf://ali-vilab/ACE_Plus@subject/comfyui_subject_lora16.safetensors"
os.environ["LOCAL_MODEL_PATH"] = "hf://ali-vilab/ACE_Plus@local_editing/comfyui_local_lora16.safetensors"

FS.get_dir_to_local_dir(os.environ["FLUX_FILL_PATH"])
FS.get_from(os.environ["PORTRAIT_MODEL_PATH"])
FS.get_from(os.environ["SUBJECT_MODEL_PATH"])
FS.get_from(os.environ["LOCAL_MODEL_PATH"])

csv.field_size_limit(sys.maxsize)
refresh_sty = '\U0001f504'  # 🔄
clear_sty = '\U0001f5d1'    # 🗑️
upload_sty = '\U0001f5bc'   # 🖼️
sync_sty = '\U0001f4be'     # 💾
chat_sty = '\U0001F4AC'     # 💬
video_sty = '\U0001f3a5'    # 🎥

lock = threading.Lock()
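

# DemoUI bundles the whole demo: __init__ loads pipeline and task configs
# from YAML, create_ui declares the Gradio components, and set_callbacks
# wires the event handlers.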
class DemoUI(object):
    def __init__(self,
                 infer_dir="./config",
                 model_list='./models/model_zoo.yaml'):
        self.model_yamls = glob.glob(os.path.join(infer_dir, '*.yaml'))
        self.model_choices = dict()
        self.default_model_name = ''
        for i in self.model_yamls:
            model_cfg = Config(load=True, cfg_file=i)
            model_name = model_cfg.NAME
            if model_cfg.IS_DEFAULT:
                self.default_model_name = model_name
            self.model_choices[model_name] = model_cfg
        print('Models: ', self.model_choices.keys())
        assert len(self.model_choices) > 0
        if self.default_model_name == "":
            self.default_model_name = list(self.model_choices.keys())[0]
        self.model_name = self.default_model_name
        pipe_cfg = self.model_choices[self.default_model_name]
        infer_name = pipe_cfg.get("INFERENCE_TYPE", "ACE")
        self.pipe = inference_dict[infer_name]()
        self.pipe.init_from_cfg(pipe_cfg)

        # choose different model
        self.task_model_cfg = Config(load=True, cfg_file=model_list)
        self.task_model = {}
        self.task_model_list = []
        self.edit_type_dict = {"repainting": None}
        self.edit_type_list = ["repainting"]
        for task_name, task_model in self.task_model_cfg.MODEL.items():
            self.task_model[task_name.lower()] = task_model
            self.task_model_list.append(task_name.lower())
            for preprocessor in task_model.get("PREPROCESSOR", []):
                if preprocessor["TYPE"] in self.edit_type_dict:
                    continue
                preprocessor["REPAINTING_SCALE"] = task_model.get("REPAINTING_SCALE", 1.0)
                self.edit_type_dict[preprocessor["TYPE"]] = preprocessor
        self.max_msgs = 20
        # reformat examples
        self.all_examples = [
            [
                one_example["task_type"], one_example["edit_type"], one_example["instruction"],
                one_example["input_reference_image"], one_example["input_image"],
                one_example["input_mask"], one_example["output_h"],
                one_example["output_w"], one_example["seed"]
            ]
            for one_example in all_examples
        ]
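
    # gr.ImageMask produces a dict with "background", "layers" and "composite"
    # keys; this helper rebuilds that dict from a plain image/mask pair (the
    # mask goes into the alpha channel of the single layer), so the static
    # examples can reuse the same code path as interactive edits.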
    def construct_edit_image(self, edit_image, edit_mask):
        if edit_image is not None and edit_mask is not None:
            edit_image_rgb = pillow_convert(edit_image, "RGB")
            edit_image_rgba = pillow_convert(edit_image, "RGBA")
            edit_mask = pillow_convert(edit_mask, "L")

            arr1 = np.array(edit_image_rgb)
            arr2 = np.array(edit_mask)[:, :, np.newaxis]
            result_array = np.concatenate((arr1, arr2), axis=2)
            layer = Image.fromarray(result_array)

            ret_data = {
                "background": edit_image_rgba,
                "composite": edit_image_rgba,
                "layers": [layer]
            }
            return ret_data
        else:
            return None
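
    # Layout only; no event handlers here. Components are stored on self so
    # that set_callbacks can reference them later.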
    def create_ui(self):
        with gr.Row(equal_height=True, visible=True):
            with gr.Column(scale=2):
                self.gallery_image = gr.Image(
                    height=600,
                    interactive=False,
                    type='pil',
                    elem_id='Reference_image')
            with gr.Column(scale=1, visible=True) as self.edit_preprocess_panel:
                with gr.Row():
                    with gr.Accordion(label='Related Input Image', open=False):
                        self.edit_preprocess_preview = gr.Image(
                            height=600,
                            interactive=False,
                            type='pil',
                            elem_id='preprocess_image')

                        self.edit_preprocess_mask_preview = gr.Image(
                            height=600,
                            interactive=False,
                            type='pil',
                            elem_id='preprocess_image_mask')
        with gr.Row():
            instruction = """
            **Instruction**:
            1. Choose the Task Type that matches your generation scenario. Three capabilities are provided: Portrait ID Preservation Generation (portrait),
            Object ID Preservation Generation (subject), and Local Controlled Generation (local editing), selectable from the task dropdown menu.
            2. An image uploaded to the Reference Image section supplies the ID information that the generated image will follow; make sure the ID information is clear.
            An image uploaded to the Edit Image section keeps its structural and content information, and you must draw a mask over it to mark the region to be regenerated.
            3. For the local editing task there are several edit types to choose from. Each preserves a different kind of information, such as edge
            information or color information. The pre-processed inputs can be inspected in the 'Related Input Image' tab.
            """
            self.instruction = gr.Markdown(value=instruction)
        with gr.Row():
            self.model_name_dd = gr.Dropdown(
                choices=self.model_choices,
                value=self.default_model_name,
                label='Model Version')
            self.task_type = gr.Dropdown(choices=self.task_model_list,
                                         interactive=True,
                                         value=self.task_model_list[0],
                                         label='Task Type')
            self.edit_type = gr.Dropdown(choices=self.edit_type_list,
                                         interactive=True,
                                         value=self.edit_type_list[0],
                                         label='Edit Type')
        with gr.Row():
            self.generation_info_preview = gr.Markdown(
                label='System Log.',
                show_label=True)
        with gr.Row(variant='panel',
                    equal_height=True,
                    show_progress=False):
            with gr.Column(scale=10, min_width=500):
                self.text = gr.Textbox(
                    placeholder='Input "@" to find history of image',
                    label='Instruction',
                    container=False,
                    lines=1)
            with gr.Column(scale=2, min_width=100):
                with gr.Row():
                    with gr.Column(scale=1, min_width=100):
                        self.chat_btn = gr.Button(value='Generate', variant="primary")

        with gr.Accordion(label='Advanced', open=True):
            with gr.Row(visible=True):
                with gr.Column():
                    self.reference_image = gr.Image(
                        height=1000,
                        interactive=True,
                        image_mode='RGB',
                        type='pil',
                        label='Reference Image',
                        elem_id='reference_image')
                with gr.Column():
                    self.edit_image = gr.ImageMask(
                        height=1000,
                        interactive=True,
                        value=None,
                        sources=['upload'],
                        type='pil',
                        layers=False,
                        label='Edit Image',
                        elem_id='image_editor',
                        show_fullscreen_button=True,
                        format="png")

            with gr.Row():
                self.step = gr.Slider(minimum=1,
                                      maximum=1000,
                                      value=self.pipe.input.get("sample_steps", 20),
                                      visible=self.pipe.input.get("sample_steps", None) is not None,
                                      label='Sample Step')
                self.cfg_scale = gr.Slider(
                    minimum=1.0,
                    maximum=100.0,
                    value=self.pipe.input.get("guide_scale", 4.5),
                    visible=self.pipe.input.get("guide_scale", None) is not None,
                    label='Guidance Scale')
                self.seed = gr.Slider(minimum=-1,
                                      maximum=10000000,
                                      value=-1,
                                      label='Seed')
                self.output_height = gr.Slider(
                    minimum=256,
                    maximum=1440,
                    value=self.pipe.input.get("output_height", 1024),
                    visible=self.pipe.input.get("output_height", None) is not None,
                    label='Output Height')
                self.output_width = gr.Slider(
                    minimum=256,
                    maximum=1440,
                    value=self.pipe.input.get("output_width", 1024),
                    visible=self.pipe.input.get("output_width", None) is not None,
                    label='Output Width')

            self.repainting_scale = gr.Slider(
                minimum=0.0,
                maximum=1.0,
                value=self.pipe.input.get("repainting_scale", 1.0),
                visible=True,
                label='Repainting Scale')
        with gr.Row():
            self.eg = gr.Column(visible=True)
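
    # Handlers are defined as closures inside set_callbacks so they can read
    # and mutate the shared state (self.pipe, self.task_model, ...) directly.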
    def set_callbacks(self, *args, **kwargs):
        ########################################
        def change_model(model_name):
            if model_name not in self.model_choices:
                gr.Info('The provided model name is not a valid choice!')
                # Must return one update per output component (7 in total).
                return (model_name, gr.update(), gr.update(), gr.update(),
                        gr.update(), gr.update(), gr.update())

            if model_name != self.model_name:
                lock.acquire()
                del self.pipe
                torch.cuda.empty_cache()
                torch.cuda.ipc_collect()
                pipe_cfg = self.model_choices[model_name]
                infer_name = pipe_cfg.get("INFERENCE_TYPE", "ACE")
                self.pipe = inference_dict[infer_name]()
                self.pipe.init_from_cfg(pipe_cfg)
                self.model_name = model_name
                lock.release()

            return (model_name, gr.update(),
                    gr.Slider(
                        value=self.pipe.input.get("sample_steps", 20),
                        visible=self.pipe.input.get("sample_steps", None) is not None),
                    gr.Slider(
                        value=self.pipe.input.get("guide_scale", 4.5),
                        visible=self.pipe.input.get("guide_scale", None) is not None),
                    gr.Slider(
                        value=self.pipe.input.get("output_height", 1024),
                        visible=self.pipe.input.get("output_height", None) is not None),
                    gr.Slider(
                        value=self.pipe.input.get("output_width", 1024),
                        visible=self.pipe.input.get("output_width", None) is not None),
                    gr.Slider(value=self.pipe.input.get("repainting_scale", 1.0)))

        self.model_name_dd.change(
            change_model,
            inputs=[self.model_name_dd],
            outputs=[
                self.model_name_dd, self.text,
                self.step,
                self.cfg_scale,
                self.output_height,
                self.output_width,
                self.repainting_scale])

        def change_task_type(task_type):
            task_info = self.task_model[task_type]
            edit_type_list = [self.edit_type_list[0]]
            for preprocessor in task_info.get("PREPROCESSOR", []):
                preprocessor["REPAINTING_SCALE"] = task_info.get("REPAINTING_SCALE", 1.0)
                self.edit_type_dict[preprocessor["TYPE"]] = preprocessor
                edit_type_list.append(preprocessor["TYPE"])

            return gr.update(choices=edit_type_list, value=edit_type_list[0])

        self.task_type.change(change_task_type, inputs=[self.task_type], outputs=[self.edit_type])

        def change_edit_type(edit_type):
            edit_info = self.edit_type_dict[edit_type]
            edit_info = edit_info or {}
            repainting_scale = edit_info.get("REPAINTING_SCALE", 1.0)
            if edit_type == self.edit_type_list[0]:
                return gr.Slider(value=1.0)
            else:
                return gr.Slider(value=repainting_scale)

        self.edit_type.change(change_edit_type, inputs=[self.edit_type], outputs=[self.repainting_scale])
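
        # gr.ImageMask delivers {"background": ..., "layers": [...]}; the
        # user-drawn mask is the alpha channel of the first layer. Returns
        # (edit_image, edit_mask, ref_image, success_flag, error_message).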
        def preprocess_input(ref_image, edit_image_dict, preprocess=None):
            err_msg = ""
            is_suc = True
            if ref_image is not None:
                ref_image = pillow_convert(ref_image, "RGB")

            if edit_image_dict is None:
                edit_image = None
                edit_mask = None
            else:
                edit_image = edit_image_dict["background"]
                edit_mask = np.array(edit_image_dict["layers"][0])[:, :, 3]
                if np.sum(np.array(edit_image)) < 1:
                    edit_image = None
                    edit_mask = None
                elif np.sum(np.array(edit_mask)) < 1:
                    err_msg = "You must draw the repainting area for the edited image."
                    return None, None, None, False, err_msg
                else:
                    edit_image = pillow_convert(edit_image, "RGB")
                    edit_mask = Image.fromarray(edit_mask).convert('L')
            if ref_image is None and edit_image is None:
                err_msg = "Please provide the reference image or edited image."
                return None, None, None, False, err_msg
            return edit_image, edit_mask, ref_image, is_suc, err_msg
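
        # Runs on a ZeroGPU worker: @spaces.GPU allocates a GPU for this call
        # (up to 60 s here), so all heavy inference stays inside the function.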
        @spaces.GPU(duration=60)
        def run_chat(
                prompt,
                ref_image,
                edit_image,
                task_type,
                edit_type,
                cfg_scale,
                step,
                seed,
                output_h,
                output_w,
                repainting_scale):
            model_path = self.task_model[task_type]["MODEL_PATH"]
            edit_info = self.edit_type_dict[edit_type]

            if task_type in ["portrait", "subject"] and ref_image is None:
                err_msg = "<mark>Please provide the reference image.</mark>"
                return (gr.Image(), gr.Column(visible=True),
                        gr.Image(),
                        gr.Image(),
                        gr.Text(value=err_msg))

            pre_edit_image, pre_edit_mask, pre_ref_image, is_suc, err_msg = preprocess_input(ref_image, edit_image)
            if not is_suc:
                err_msg = f"<mark>{err_msg}</mark>"
                return (gr.Image(), gr.Column(visible=True),
                        gr.Image(),
                        gr.Image(),
                        gr.Text(value=err_msg))
            pre_edit_image = edit_preprocess(edit_info, we.device_id, pre_edit_image, pre_edit_mask)
            # edit_image["background"] = pre_edit_image
            st = time.time()
            image, seed = self.pipe(
                reference_image=pre_ref_image,
                edit_image=pre_edit_image,
                edit_mask=pre_edit_mask,
                prompt=prompt,
                output_height=output_h,
                output_width=output_w,
                sampler='flow_euler',
                sample_steps=step,
                guide_scale=cfg_scale,
                seed=seed,
                repainting_scale=repainting_scale,
                lora_path=model_path)
            et = time.time()
            msg = f"prompt: {prompt}; seed: {seed}; cost time: {et - st}s; repainting scale: {repainting_scale}"

            return (gr.Image(value=image), gr.Column(visible=True),
                    gr.Image(value=pre_edit_image if pre_edit_image is not None else pre_ref_image),
                    gr.Image(value=pre_edit_mask if pre_edit_mask is not None else None),
                    gr.Text(value=msg))

        chat_inputs = [
            self.reference_image,
            self.edit_image,
            self.task_type,
            self.edit_type,
            self.cfg_scale,
            self.step,
            self.seed,
            self.output_height,
            self.output_width,
            self.repainting_scale
        ]

        chat_outputs = [
            self.gallery_image, self.edit_preprocess_panel, self.edit_preprocess_preview,
            self.edit_preprocess_mask_preview, self.generation_info_preview
        ]

        self.chat_btn.click(run_chat,
                            inputs=[self.text] + chat_inputs,
                            outputs=chat_outputs,
                            queue=True)

        self.text.submit(run_chat,
                         inputs=[self.text] + chat_inputs,
                         outputs=chat_outputs,
                         queue=True)
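
        # Same flow as run_chat, but driven by the static examples below; the
        # ImageMask payload is rebuilt from the example image/mask assets.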
        @spaces.GPU(duration=60)
        def run_example(task_type, edit_type, prompt, ref_image, edit_image, edit_mask,
                        output_h, output_w, seed):
            model_path = self.task_model[task_type]["MODEL_PATH"]

            step = self.pipe.input.get("sample_steps", 20)
            cfg_scale = self.pipe.input.get("guide_scale", 4.5)

            edit_info = self.edit_type_dict[edit_type]

            edit_image = self.construct_edit_image(edit_image, edit_mask)

            # preprocess_input returns (image, mask, ref, is_suc, err_msg).
            pre_edit_image, pre_edit_mask, pre_ref_image, is_suc, err_msg = preprocess_input(ref_image, edit_image)
            pre_edit_image = edit_preprocess(edit_info, we.device_id, pre_edit_image, pre_edit_mask)
            edit_info = edit_info or {}
            repainting_scale = edit_info.get("REPAINTING_SCALE", 1.0)
            st = time.time()
            image, seed = self.pipe(
                reference_image=pre_ref_image,
                edit_image=pre_edit_image,
                edit_mask=pre_edit_mask,
                prompt=prompt,
                output_height=output_h,
                output_width=output_w,
                sampler='flow_euler',
                sample_steps=step,
                guide_scale=cfg_scale,
                seed=seed,
                repainting_scale=repainting_scale,
                lora_path=model_path)
            et = time.time()
            msg = f"prompt: {prompt}; seed: {seed}; cost time: {et - st}s; repainting scale: {repainting_scale}"
            if pre_edit_image is not None:
                ret_image = Image.composite(pre_edit_image, Image.new("RGB", pre_edit_image.size, (0, 0, 0)), pre_edit_mask)
            else:
                ret_image = None
            return (gr.Image(value=image), gr.Column(visible=True),
                    gr.Image(value=pre_edit_image if pre_edit_image is not None else pre_ref_image),
                    gr.Image(value=pre_edit_mask if pre_edit_mask is not None else None),
                    gr.Text(value=msg),
                    gr.update(value=ret_image))

        with self.eg:
            self.example_edit_image = gr.Image(label='Edit Image',
                                               type='pil',
                                               image_mode='RGB',
                                               visible=False)
            self.example_edit_mask = gr.Image(label='Edit Image Mask',
                                              type='pil',
                                              image_mode='L',
                                              visible=False)

            self.examples = gr.Examples(
                fn=run_example,
                examples=self.all_examples,
                inputs=[
                    self.task_type, self.edit_type, self.text, self.reference_image, self.example_edit_image,
                    self.example_edit_mask, self.output_height, self.output_width, self.seed
                ],
                outputs=[self.gallery_image, self.edit_preprocess_panel, self.edit_preprocess_preview,
                         self.edit_preprocess_mask_preview, self.generation_info_preview, self.edit_image],
                examples_per_page=6,
                cache_examples=False,
                run_on_click=True)


if __name__ == '__main__':
    with gr.Blocks() as demo:
        chatbot = DemoUI()
        chatbot.create_ui()
        chatbot.set_callbacks()
    demo.launch()
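
For reference, a minimal headless sketch of the pipeline call the handlers above make, stripped of the Gradio layer. The config file name and reference image path are hypothetical placeholders, and FS.get_from is assumed to return the local cached path of the downloaded file:

# Headless sketch of the self.pipe(...) call from run_chat/run_example.
# Assumptions: run from this Space's repo root; './config/ace_plus_portrait.yaml'
# and 'assets/ref.png' are hypothetical placeholders; FS.get_from is assumed to
# return the local path of the cached download.
from PIL import Image
from scepter.modules.utils.config import Config
from scepter.modules.utils.file_system import FS
from inference.ace_plus_diffusers import ACEPlusDiffuserInference

FS.init_fs_client(Config(cfg_dict={"NAME": "HuggingfaceFs", "TEMP_DIR": "./cache"}, load=False))
lora_path = FS.get_from("hf://ali-vilab/ACE_Plus@portrait/comfyui_portrait_lora64.safetensors")

pipe = ACEPlusDiffuserInference()
pipe.init_from_cfg(Config(load=True, cfg_file='./config/ace_plus_portrait.yaml'))

# Reference-only generation (portrait task): no edit image or mask.
image, used_seed = pipe(
    reference_image=Image.open('assets/ref.png').convert('RGB'),
    edit_image=None,
    edit_mask=None,
    prompt='the same person wearing a spacesuit',
    output_height=1024,
    output_width=1024,
    sampler='flow_euler',
    sample_steps=20,
    guide_scale=4.5,
    seed=-1,          # -1 lets the pipeline pick a random seed
    repainting_scale=1.0,
    lora_path=lora_path)
image.save('output.png')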