forplaytvplus committed
Commit 5b8ee78
1 Parent(s): 49c7172

Update app.py

Files changed (1)
  1. app.py +83 -195
app.py CHANGED
@@ -1,21 +1,26 @@
1
- #!/usr/bin/env python
2
 
3
  from __future__ import annotations
4
 
5
  import requests
6
  import os
7
  import random
 
 
8
 
9
  import gradio as gr
10
  import numpy as np
11
  import spaces
12
  import torch
 
13
  import cv2
14
  from PIL import Image
 
15
  from io import BytesIO
16
  from diffusers.utils import load_image
17
  from diffusers import StableDiffusionXLControlNetPipeline, StableDiffusionXLControlNetInpaintPipeline, ControlNetModel, AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, AutoPipelineForInpainting, UNet2DConditionModel
18
  from controlnet_aux import HEDdetector
 
19
 
20
  DESCRIPTION = "# Run any LoRA or SD Model"
21
  if not torch.cuda.is_available():
@@ -27,10 +32,7 @@ USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
27
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
28
  ENABLE_USE_LORA = os.getenv("ENABLE_USE_LORA", "1") == "1"
29
  ENABLE_USE_LORA2 = os.getenv("ENABLE_USE_LORA2", "1") == "1"
30
- ENABLE_USE_VAE = os.getenv("ENABLE_USE_VAE", "1") == "1"
31
  ENABLE_USE_IMG2IMG = os.getenv("ENABLE_USE_IMG2IMG", "1") == "1"
32
- ENABLE_USE_CONTROLNET = os.getenv("ENABLE_USE_CONTROLNET", "1") == "1"
33
- ENABLE_USE_CONTROLNETINPAINT = os.getenv("ENABLE_USE_CONTROLNETINPAINT", "1") == "1"
34
 
35
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
36
 
@@ -39,6 +41,11 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
39
  seed = random.randint(0, MAX_SEED)
40
  return seed
41
42
  @spaces.GPU
43
  def generate(
44
  prompt: str = "",
@@ -53,88 +60,62 @@ def generate(
53
  height: int = 1024,
54
  guidance_scale_base: float = 5.0,
55
  num_inference_steps_base: int = 25,
56
- controlnet_conditioning_scale: float = 1,
57
- control_guidance_start: float = 0,
58
- control_guidance_end: float = 1,
59
  strength_img2img: float = 0.7,
60
- use_vae: bool = False,
61
  use_lora: bool = False,
62
  use_lora2: bool = False,
63
  model = 'stabilityai/stable-diffusion-xl-base-1.0',
64
- vaecall = 'madebyollin/sdxl-vae-fp16-fix',
65
  lora = '',
66
  lora2 = '',
67
- controlnet_model = 'diffusers/controlnet-canny-sdxl-1.0',
68
  lora_scale: float = 0.7,
69
  lora_scale2: float = 0.7,
70
  use_img2img: bool = False,
71
- use_controlnet: bool = False,
72
- use_controlnetinpaint: bool = False,
73
  url = '',
74
- controlnet_img = '',
75
- controlnet_inpaint = '',
76
- ):
77
  if torch.cuda.is_available():
 
 
78
 
79
- if not use_img2img:
80
- pipe = DiffusionPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
81
-
82
- if use_vae:
83
- vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
84
- pipe = DiffusionPipeline.from_pretrained(model, vae=vae, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
85
-
86
- if use_img2img:
87
- pipe = AutoPipelineForImage2Image.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
88
 
 
89
  init_image = load_image(url)
90
-
91
- if use_vae:
92
- vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
93
- pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
94
-
95
- if use_controlnet:
96
- controlnet = ControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
97
- pipe = StableDiffusionXLControlNetPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
98
-
99
- image = load_image(controlnet_img)
100
-
101
- image = np.array(image)
102
- image = cv2.Canny(image, 250, 255)
103
- image = image[:, :, None]
104
- image = np.concatenate([image, image, image], axis=2)
105
- image = Image.fromarray(image)
106
-
107
- if use_vae:
108
- vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
109
- pipe = StableDiffusionXLControlNetPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, vae=vae, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
110
-
111
- if use_controlnetinpaint:
112
- controlnet = ControlNetModel.from_pretrained(controlnet_model, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
113
- pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
114
-
115
- image_start = load_image(controlnet_img)
116
- image = load_image(controlnet_img)
117
- image_mask = load_image(controlnet_img2img)
118
-
119
- image = np.array(image)
120
- image = cv2.Canny(image, 100, 200)
121
- image = image[:, :, None]
122
- image = np.concatenate([image, image, image], axis=2)
123
- image = Image.fromarray(image)
124
-
125
- if use_vae:
126
- vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
127
- pipe = StableDiffusionXLControlNetInpaintPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, controlnet=controlnet, vae=vae, torch_dtype=torch.float16, device_map="balanced", low_cpu_mem_usage=True)
128
 
129
  if use_lora:
130
- pipe.load_lora_weights(lora, adapter_name="1")
131
- pipe.set_adapters("1", adapter_weights=[lora_scale])
132
 
133
  if use_lora2:
134
- pipe.load_lora_weights(lora, adapter_name="1")
135
- pipe.load_lora_weights(lora2, adapter_name="2")
136
- pipe.set_adapters(["1", "2"], adapter_weights=[lora_scale, lora_scale2])
137
-
138
  generator = torch.Generator().manual_seed(seed)
139
 
140
  if not use_negative_prompt:
@@ -144,67 +125,39 @@ def generate(
144
  if not use_negative_prompt_2:
145
  negative_prompt_2 = None # type: ignore
146
 
147
- if use_controlnetinpaint:
148
- image = pipe(
149
- prompt=prompt,
150
- strength=strength_img2img,
151
- controlnet_conditioning_scale=controlnet_conditioning_scale,
152
- eta=0.0,
153
- mask_image=image_mask,
154
- image=image_start,
155
- control_image=image,
156
- negative_prompt=negative_prompt,
157
- width=width,
158
- height=height,
159
- guidance_scale=guidance_scale_base,
160
- num_inference_steps=num_inference_steps_base,
161
- generator=generator,
162
- ).images[0]
163
- return image
164
- if use_controlnet:
165
- image = pipe(
166
- prompt=prompt,
167
- controlnet_conditioning_scale=controlnet_conditioning_scale,
168
- control_guidance_start=control_guidance_start,
169
- control_guidance_end=control_guidance_end,
170
- image=image,
171
- negative_prompt=negative_prompt,
172
- prompt_2=prompt_2,
173
- width=width,
174
- height=height,
175
- negative_prompt_2=negative_prompt_2,
176
- guidance_scale=guidance_scale_base,
177
- num_inference_steps=num_inference_steps_base,
178
- generator=generator,
179
- ).images[0]
180
- return image
181
- elif use_img2img:
182
- images = pipe(
183
- prompt=prompt,
184
- image=init_image,
185
- strength=strength_img2img,
186
- negative_prompt=negative_prompt,
187
- prompt_2=prompt_2,
188
- negative_prompt_2=negative_prompt_2,
189
- width=width,
190
- height=height,
191
- guidance_scale=guidance_scale_base,
192
- num_inference_steps=num_inference_steps_base,
193
- generator=generator,
194
- ).images[0]
195
- return images
196
- else:
197
- return pipe(
198
- prompt=prompt,
199
- negative_prompt=negative_prompt,
200
- prompt_2=prompt_2,
201
- negative_prompt_2=negative_prompt_2,
202
- width=width,
203
- height=height,
204
- guidance_scale=guidance_scale_base,
205
- num_inference_steps=num_inference_steps_base,
206
- generator=generator,
207
- ).images[0]
208
 
209
  with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
210
  gr.HTML(
@@ -213,10 +166,8 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
213
  gr.Markdown(DESCRIPTION, elem_id="description")
214
  with gr.Group():
215
  model = gr.Text(label='Model', placeholder='e.g. stabilityai/stable-diffusion-xl-base-1.0')
216
- vaecall = gr.Text(label='VAE', placeholder='e.g. madebyollin/sdxl-vae-fp16-fix')
217
  lora = gr.Text(label='LoRA 1', placeholder='e.g. nerijs/pixel-art-xl')
218
  lora2 = gr.Text(label='LoRA 2', placeholder='e.g. nerijs/pixel-art-xl')
219
- controlnet_model = gr.Text(label='Controlnet', placeholder='e.g diffusers/controlnet-canny-sdxl-1.0')
220
  lora_scale = gr.Slider(
221
  info="The closer to 1, the more it will resemble LoRA, but errors may be visible.",
222
  label="Lora Scale 1",
@@ -234,8 +185,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
234
  value=0.7,
235
  )
236
  url = gr.Text(label='URL (Img2Img)')
237
- controlnet_img = gr.Text(label='URL (Controlnet)', placeholder='e.g https://example.com/image.png')
238
- controlnet_inpaint = gr.Text(label='URL (Controlnet - IMG2IMG)', placeholder='e.g https://example.com/image.png')
239
  with gr.Row():
240
  prompt = gr.Text(
241
  placeholder="Input prompt",
@@ -248,10 +197,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
248
  result = gr.Image(label="Result", show_label=False)
249
  with gr.Accordion("Advanced options", open=False):
250
  with gr.Row():
251
- use_controlnet = gr.Checkbox(label='Use Controlnet', value=False, visible=ENABLE_USE_CONTROLNET)
252
- use_controlnetinpaint = gr.Checkbox(label='Use Controlnet Img2Img', value=False, visible=ENABLE_USE_CONTROLNETINPAINT)
253
  use_img2img = gr.Checkbox(label='Use Img2Img', value=False, visible=ENABLE_USE_IMG2IMG)
254
- use_vae = gr.Checkbox(label='Use VAE', value=False, visible=ENABLE_USE_VAE)
255
  use_lora = gr.Checkbox(label='Use Lora 1', value=False, visible=ENABLE_USE_LORA)
256
  use_lora2 = gr.Checkbox(label='Use Lora 2', value=False, visible=ENABLE_USE_LORA2)
257
  use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
@@ -318,33 +264,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
318
  step=1,
319
  value=25,
320
  )
321
- with gr.Row():
322
- controlnet_conditioning_scale = gr.Slider(
323
- info="controlnet_conditioning_scale",
324
- label="controlnet_conditioning_scale",
325
- minimum=0.01,
326
- maximum=2,
327
- step=0.01,
328
- value=1,
329
- )
330
- with gr.Row():
331
- control_guidance_start = gr.Slider(
332
- info="control_guidance_start",
333
- label="control_guidance_start",
334
- minimum=0.01,
335
- maximum=1,
336
- step=0.01,
337
- value=0,
338
- )
339
- with gr.Row():
340
- control_guidance_end = gr.Slider(
341
- info="control_guidance_end",
342
- label="control_guidance_end",
343
- minimum=0.01,
344
- maximum=1,
345
- step=0.01,
346
- value=1,
347
- )
348
  with gr.Row():
349
  strength_img2img = gr.Slider(
350
  info="Strength for Img2Img",
@@ -376,13 +295,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
376
  queue=False,
377
  api_name=False,
378
  )
379
- use_vae.change(
380
- fn=lambda x: gr.update(visible=x),
381
- inputs=use_vae,
382
- outputs=vaecall,
383
- queue=False,
384
- api_name=False,
385
- )
386
  use_lora.change(
387
  fn=lambda x: gr.update(visible=x),
388
  inputs=use_lora,
@@ -404,20 +316,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
404
  queue=False,
405
  api_name=False,
406
  )
407
- use_controlnet.change(
408
- fn=lambda x: gr.update(visible=x),
409
- inputs=use_controlnet,
410
- outputs=controlnet_img,
411
- queue=False,
412
- api_name=False,
413
- )
414
- use_controlnetinpaint.change(
415
- fn=lambda x: gr.update(visible=x),
416
- inputs=use_controlnetinpaint,
417
- outputs=controlnet_inpaint,
418
- queue=False,
419
- api_name=False,
420
- )
421
 
422
  gr.on(
423
  triggers=[
@@ -447,30 +345,20 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
447
  height,
448
  guidance_scale_base,
449
  num_inference_steps_base,
450
- controlnet_conditioning_scale,
451
- control_guidance_start,
452
- control_guidance_end,
453
  strength_img2img,
454
- use_vae,
455
  use_lora,
456
  use_lora2,
457
  model,
458
- vaecall,
459
  lora,
460
  lora2,
461
- controlnet_model,
462
  lora_scale,
463
  lora_scale2,
464
  use_img2img,
465
- use_controlnet,
466
- use_controlnetinpaint,
467
  url,
468
- controlnet_img,
469
- controlnet_inpaint,
470
  ],
471
  outputs=result,
472
  api_name="run",
473
  )
474
 
475
  if __name__ == "__main__":
476
- demo.queue(max_size=20, default_concurrency_limit=2).launch()
 
1
+ #!/usr/bin/env python
2
 
3
  from __future__ import annotations
4
 
5
  import requests
6
  import os
7
  import random
9
+ import string
10
 
11
  import gradio as gr
12
  import numpy as np
13
  import spaces
14
  import torch
15
+ import gc
16
  import cv2
17
  from PIL import Image
18
+ from accelerate import init_empty_weights
19
  from io import BytesIO
20
  from diffusers.utils import load_image
21
  from diffusers import StableDiffusionXLControlNetPipeline, StableDiffusionXLControlNetInpaintPipeline, ControlNetModel, AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, AutoPipelineForInpainting, UNet2DConditionModel
22
  from controlnet_aux import HEDdetector
23
+ import threading
24
 
25
  DESCRIPTION = "# Run any LoRA or SD Model"
26
  if not torch.cuda.is_available():
 
32
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
33
  ENABLE_USE_LORA = os.getenv("ENABLE_USE_LORA", "1") == "1"
34
  ENABLE_USE_LORA2 = os.getenv("ENABLE_USE_LORA2", "1") == "1"
 
35
  ENABLE_USE_IMG2IMG = os.getenv("ENABLE_USE_IMG2IMG", "1") == "1"
 
 
36
 
37
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
38
 
 
41
  seed = random.randint(0, MAX_SEED)
42
  return seed
43
 
44
+ cached_pipelines = {} # dictionary that caches loaded pipelines
45
+ cached_loras = {} # maps (lora, scale) to the adapter name registered on the pipeline
46
+ # Create a Lock object
47
+ pipeline_lock = threading.Lock()
48
+
49
  @spaces.GPU
50
  def generate(
51
  prompt: str = "",
 
60
  height: int = 1024,
61
  guidance_scale_base: float = 5.0,
62
  num_inference_steps_base: int = 25,
63
  strength_img2img: float = 0.7,
 
64
  use_lora: bool = False,
65
  use_lora2: bool = False,
66
  model = 'stabilityai/stable-diffusion-xl-base-1.0',
 
67
  lora = '',
68
  lora2 = '',
 
69
  lora_scale: float = 0.7,
70
  lora_scale2: float = 0.7,
71
  use_img2img: bool = False,
 
 
72
  url = '',
73
+ ):
74
+ global cached_pipelines, cached_loras
75
+
76
  if torch.cuda.is_available():
77
+ # Build the dictionary key from the model and the pipeline type
78
+ pipeline_key = (model, use_img2img)
79
 
80
+ if pipeline_key not in cached_pipelines:
81
+ if not use_img2img:
82
+ cached_pipelines[pipeline_key] = DiffusionPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, low_cpu_mem_usage=True)
83
+ elif use_img2img:
84
+ cached_pipelines[pipeline_key] = AutoPipelineForImage2Image.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, low_cpu_mem_usage=True)
85
+
86
+ pipe = cached_pipelines[pipeline_key] # use the pipeline already loaded in memory
 
 
87
 
88
+ if use_img2img:
89
  init_image = load_image(url)
90
 
91
  if use_lora:
92
+ lora_key = (lora, lora_scale)
93
+ if lora_key not in cached_loras:
94
+ adapter_name = ''.join(random.choice(string.ascii_letters) for _ in range(5))
95
+ pipe.load_lora_weights(lora, adapter_name=adapter_name)
96
+ cached_loras[lora_key] = adapter_name
97
+ else:
98
+ adapter_name = cached_loras[lora_key]
99
+ pipe.set_adapters(adapter_name, adapter_weights=[lora_scale])
100
 
101
  if use_lora2:
102
+ lora_key1 = (lora, lora_scale)
103
+ lora_key2 = (lora2, lora_scale2)
104
+ if lora_key1 not in cached_loras:
105
+ adapter_name1 = ''.join(random.choice(string.ascii_letters) for _ in range(5))
106
+ pipe.load_lora_weights(lora, adapter_name=adapter_name1)
107
+ cached_loras[lora_key1] = adapter_name1
108
+ else:
109
+ adapter_name1 = cached_loras[lora_key1]
110
+ if lora_key2 not in cached_loras:
111
+ adapter_name2 = ''.join(random.choice(string.ascii_letters) for _ in range(5))
112
+ pipe.load_lora_weights(lora2, adapter_name=adapter_name2)
113
+ cached_loras[lora_key2] = adapter_name2
114
+ else:
115
+ adapter_name2 = cached_loras[lora_key2]
116
+ pipe.set_adapters([adapter_name1, adapter_name2], adapter_weights=[lora_scale, lora_scale2])
117
+
118
+ pipe.to("cuda")
119
  generator = torch.Generator().manual_seed(seed)
120
 
121
  if not use_negative_prompt:
 
125
  if not use_negative_prompt_2:
126
  negative_prompt_2 = None # type: ignore
127
 
128
+ with pipeline_lock:
129
+ if use_img2img:
130
+ result = pipe(
131
+ prompt=prompt,
132
+ image=init_image,
133
+ strength=strength_img2img,
134
+ negative_prompt=negative_prompt,
135
+ prompt_2=prompt_2,
136
+ negative_prompt_2=negative_prompt_2,
137
+ width=width,
138
+ height=height,
139
+ guidance_scale=guidance_scale_base,
140
+ num_inference_steps=num_inference_steps_base,
141
+ generator=generator,
142
+ ).images[0]
143
+ else:
144
+ result = pipe(
145
+ prompt=prompt,
146
+ negative_prompt=negative_prompt,
147
+ prompt_2=prompt_2,
148
+ negative_prompt_2=negative_prompt_2,
149
+ width=width,
150
+ height=height,
151
+ guidance_scale=guidance_scale_base,
152
+ num_inference_steps=num_inference_steps_base,
153
+ generator=generator,
154
+ ).images[0]
155
+
156
+ # Memory cleanup
157
+ del pipe
158
+ torch.cuda.empty_cache()
159
+ gc.collect()
160
+ return result
161
 
162
  with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
163
  gr.HTML(
 
166
  gr.Markdown(DESCRIPTION, elem_id="description")
167
  with gr.Group():
168
  model = gr.Text(label='Model', placeholder='e.g. stabilityai/stable-diffusion-xl-base-1.0')
 
169
  lora = gr.Text(label='LoRA 1', placeholder='e.g. nerijs/pixel-art-xl')
170
  lora2 = gr.Text(label='LoRA 2', placeholder='e.g. nerijs/pixel-art-xl')
 
171
  lora_scale = gr.Slider(
172
  info="The closer to 1, the more it will resemble LoRA, but errors may be visible.",
173
  label="Lora Scale 1",
 
185
  value=0.7,
186
  )
187
  url = gr.Text(label='URL (Img2Img)')
 
 
188
  with gr.Row():
189
  prompt = gr.Text(
190
  placeholder="Input prompt",
 
197
  result = gr.Image(label="Result", show_label=False)
198
  with gr.Accordion("Advanced options", open=False):
199
  with gr.Row():
 
 
200
  use_img2img = gr.Checkbox(label='Use Img2Img', value=False, visible=ENABLE_USE_IMG2IMG)
 
201
  use_lora = gr.Checkbox(label='Use Lora 1', value=False, visible=ENABLE_USE_LORA)
202
  use_lora2 = gr.Checkbox(label='Use Lora 2', value=False, visible=ENABLE_USE_LORA2)
203
  use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
 
264
  step=1,
265
  value=25,
266
  )
267
  with gr.Row():
268
  strength_img2img = gr.Slider(
269
  info="Strength for Img2Img",
 
295
  queue=False,
296
  api_name=False,
297
  )
298
  use_lora.change(
299
  fn=lambda x: gr.update(visible=x),
300
  inputs=use_lora,
 
316
  queue=False,
317
  api_name=False,
318
  )
319
 
320
  gr.on(
321
  triggers=[
 
345
  height,
346
  guidance_scale_base,
347
  num_inference_steps_base,
 
 
 
348
  strength_img2img,
 
349
  use_lora,
350
  use_lora2,
351
  model,
 
352
  lora,
353
  lora2,
 
354
  lora_scale,
355
  lora_scale2,
356
  use_img2img,
 
 
357
  url,
 
 
358
  ],
359
  outputs=result,
360
  api_name="run",
361
  )
362
 
363
  if __name__ == "__main__":
364
+ demo.queue(max_size=4, default_concurrency_limit=4).launch()
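
For reference, a minimal standalone sketch of the caching pattern the new app.py uses: a pipeline is loaded once per (model, img2img) key and reused on later requests, and a threading.Lock serializes inference on the GPU. The helper name get_pipeline, the run() wrapper, and the default model id are illustrative assumptions, not code from this commit.

    import threading

    import torch
    from diffusers import AutoPipelineForImage2Image, DiffusionPipeline

    _pipelines: dict = {}      # (model_id, use_img2img) -> loaded pipeline
    _lock = threading.Lock()   # one generation on the GPU at a time

    def get_pipeline(model_id: str, use_img2img: bool):
        # Load the pipeline only on the first request for this key, then reuse it.
        key = (model_id, use_img2img)
        if key not in _pipelines:
            cls = AutoPipelineForImage2Image if use_img2img else DiffusionPipeline
            _pipelines[key] = cls.from_pretrained(
                model_id,
                torch_dtype=torch.float16,
                low_cpu_mem_usage=True,
            )
        return _pipelines[key]

    def run(prompt: str, model_id: str = "stabilityai/stable-diffusion-xl-base-1.0"):
        pipe = get_pipeline(model_id, use_img2img=False)
        pipe.to("cuda")               # move (or keep) the cached pipeline on the GPU
        with _lock:                   # serialize concurrent requests
            image = pipe(prompt=prompt, num_inference_steps=25).images[0]
        torch.cuda.empty_cache()      # free transient allocations, keep cached weights
        return image

Caching by key trades memory for latency: repeated requests against the same model skip from_pretrained entirely. Even though the Gradio queue allows up to 4 concurrent requests (default_concurrency_limit=4), the lock keeps only one generation running on the GPU at a time.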