forplaytvplus committed
Commit 4dc3375
1 Parent(s): 76340d8

Update app.py

Files changed (1): app.py +99 -80
app.py CHANGED
@@ -20,6 +20,7 @@ from io import BytesIO
 from diffusers.utils import load_image
 from diffusers import StableDiffusionXLControlNetPipeline, StableDiffusionXLControlNetInpaintPipeline, ControlNetModel, AutoencoderKL, DiffusionPipeline, AutoPipelineForImage2Image, AutoPipelineForInpainting, UNet2DConditionModel
 from controlnet_aux import HEDdetector
+from compel import Compel, ReturnedEmbeddingsType
 import threading
 
 DESCRIPTION = "# Run any LoRA or SD Model"
@@ -27,6 +28,7 @@ if not torch.cuda.is_available():
 DESCRIPTION += "\n<p>⚠️ This space is running on the CPU. This demo doesn't work on CPU 😞! Run on a GPU by duplicating this space or test our website for free and unlimited by <a href='https://squaadai.com'>clicking here</a>, which provides these and more options.</p>"
 
 MAX_SEED = np.iinfo(np.int32).max
+CUDA_LAUNCH_BLOCKING=1
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1824"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
@@ -50,17 +52,14 @@ pipeline_lock = threading.Lock()
 def generate(
     prompt: str = "",
     negative_prompt: str = "",
-    prompt_2: str = "",
-    negative_prompt_2: str = "",
     use_negative_prompt: bool = False,
-    use_prompt_2: bool = False,
-    use_negative_prompt_2: bool = False,
     seed: int = 0,
     width: int = 1024,
     height: int = 1024,
     guidance_scale_base: float = 5.0,
     num_inference_steps_base: int = 25,
     strength_img2img: float = 0.7,
+    is_sdxl: bool = False,
     use_lora: bool = False,
     use_lora2: bool = False,
     model = 'stabilityai/stable-diffusion-xl-base-1.0',
@@ -84,7 +83,7 @@ def generate(
             cached_pipelines[pipeline_key] = AutoPipelineForImage2Image.from_pretrained(model, safety_checker=None, requires_safety_checker=False, torch_dtype=torch.float16, low_cpu_mem_usage=True)
 
     pipe = cached_pipelines[pipeline_key]  # use the pipeline already loaded in memory
-
+
     if use_img2img:
         init_image = load_image(url)
@@ -115,49 +114,95 @@ def generate(
         adapter_name2 = cached_loras[lora_key2]
         pipe.set_adapters([adapter_name1, adapter_name2], adapter_weights=[lora_scale, lora_scale2])
 
-    pipe.enable_model_cpu_offload()
-    generator = torch.Generator().manual_seed(seed)
-
-    if not use_negative_prompt:
-        negative_prompt = None  # type: ignore
-    if not use_prompt_2:
-        prompt_2 = None  # type: ignore
-    if not use_negative_prompt_2:
-        negative_prompt_2 = None  # type: ignore
-
-    with pipeline_lock:
-        if use_img2img:
-            result = pipe(
-                prompt=prompt,
-                image=init_image,
-                strength=strength_img2img,
-                negative_prompt=negative_prompt,
-                prompt_2=prompt_2,
-                negative_prompt_2=negative_prompt_2,
-                width=width,
-                height=height,
-                guidance_scale=guidance_scale_base,
-                num_inference_steps=num_inference_steps_base,
-                generator=generator,
-            ).images[0]
-        else:
-            result = pipe(
-                prompt=prompt,
-                negative_prompt=negative_prompt,
-                prompt_2=prompt_2,
-                negative_prompt_2=negative_prompt_2,
-                width=width,
-                height=height,
-                guidance_scale=guidance_scale_base,
-                num_inference_steps=num_inference_steps_base,
-                generator=generator,
-            ).images[0]
+    # It is SDXL 1.0 (NOT SD 1.5)
+    if is_sdxl:
+        pipe.enable_model_cpu_offload()
+        compel = Compel(
+            tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
+            text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
+            returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
+            requires_pooled=[False, True],
+            truncate_long_prompts=False
+        )
+        conditioning, pooled = compel(prompt)
+        generator = torch.Generator().manual_seed(seed)
+
+        if not use_negative_prompt:
+            negative_prompt = None  # type: ignore
+
+        with pipeline_lock:
+            if use_img2img:
+                result = pipe(
+                    prompt_embeds=conditioning,
+                    pooled_prompt_embeds=pooled,
+                    image=init_image,
+                    strength=strength_img2img,
+                    negative_prompt=negative_prompt,
+                    width=width,
+                    height=height,
+                    guidance_scale=guidance_scale_base,
+                    num_inference_steps=num_inference_steps_base,
+                    generator=generator,
+                ).images[0]
+            else:
+                result = pipe(
+                    prompt_embeds=conditioning,
+                    pooled_prompt_embeds=pooled,
+                    negative_prompt=negative_prompt,
+                    width=width,
+                    height=height,
+                    guidance_scale=guidance_scale_base,
+                    num_inference_steps=num_inference_steps_base,
+                    generator=generator,
+                ).images[0]
+
+        # Memory cleanup
+        del pipe, conditioning, pooled
+        torch.cuda.empty_cache()
+        gc.collect()
         return result
 
-    # Memory cleanup
-    del pipe
-    torch.cuda.empty_cache()
-    gc.collect()
+    # It is NOT SDXL (it is SD 1.5)
+    if not is_sdxl:
+        pipe.enable_model_cpu_offload()
+        compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder, truncate_long_prompts=False)
+        conditioning = compel.build_conditioning_tensor(prompt)
+        negative_conditioning = compel.build_conditioning_tensor(negative_prompt)
+        [conditioning, negative_conditioning] = compel.pad_conditioning_tensors_to_same_length([conditioning, negative_conditioning])
+        generator = torch.Generator().manual_seed(seed)
+
+        if not use_negative_prompt:
+            negative_prompt = None  # type: ignore
+
+        with pipeline_lock:
+            if use_img2img:
+                result = pipe(
+                    prompt_embeds=conditioning,
+                    image=init_image,
+                    strength=strength_img2img,
+                    negative_prompt_embeds=negative_conditioning,
+                    width=width,
+                    height=height,
+                    guidance_scale=guidance_scale_base,
+                    num_inference_steps=num_inference_steps_base,
+                    generator=generator,
+                ).images[0]
+            else:
+                result = pipe(
+                    prompt_embeds=conditioning,
+                    negative_prompt_embeds=negative_conditioning,
+                    width=width,
+                    height=height,
+                    guidance_scale=guidance_scale_base,
+                    num_inference_steps=num_inference_steps_base,
+                    generator=generator,
+                ).images[0]
+
+        # Memory cleanup
+        del pipe, conditioning, negative_conditioning
+        torch.cuda.empty_cache()
+        gc.collect()
+        return result
 
 with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
     gr.HTML(
@@ -198,30 +243,16 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
         with gr.Accordion("Advanced options", open=False):
            with gr.Row():
                use_img2img = gr.Checkbox(label='Use Img2Img', value=False, visible=ENABLE_USE_IMG2IMG)
+               is_sdxl = gr.Checkbox(label='Is SDXL?', value=False)
                use_lora = gr.Checkbox(label='Use Lora 1', value=False, visible=ENABLE_USE_LORA)
                use_lora2 = gr.Checkbox(label='Use Lora 2', value=False, visible=ENABLE_USE_LORA2)
                use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
-               use_prompt_2 = gr.Checkbox(label="Use prompt 2", value=False)
-               use_negative_prompt_2 = gr.Checkbox(label="Use negative prompt 2", value=False)
            negative_prompt = gr.Text(
                placeholder="Input Negative Prompt",
                label="Negative prompt",
                max_lines=1,
                visible=False,
            )
-           prompt_2 = gr.Text(
-               placeholder="Input Prompt 2",
-               label="Prompt 2",
-               max_lines=1,
-               visible=False,
-           )
-           negative_prompt_2 = gr.Text(
-               placeholder="Input Negative Prompt 2",
-               label="Negative prompt 2",
-               max_lines=1,
-               visible=False,
-           )
-
            seed = gr.Slider(
                label="Seed",
                minimum=0,
@@ -281,20 +312,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
        queue=False,
        api_name=False,
    )
-   use_prompt_2.change(
-       fn=lambda x: gr.update(visible=x),
-       inputs=use_prompt_2,
-       outputs=prompt_2,
-       queue=False,
-       api_name=False,
-   )
-   use_negative_prompt_2.change(
-       fn=lambda x: gr.update(visible=x),
-       inputs=use_negative_prompt_2,
-       outputs=negative_prompt_2,
-       queue=False,
-       api_name=False,
-   )
    use_lora.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_lora,
@@ -309,6 +326,13 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
        queue=False,
        api_name=False,
    )
+   is_sdxl.change(
+       fn=lambda x: gr.update(visible=x),
+       inputs=is_sdxl,
+       outputs=is_sdxl,
+       queue=False,
+       api_name=False,
+   )
    use_img2img.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_img2img,
@@ -321,8 +345,6 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
        triggers=[
            prompt.submit,
            negative_prompt.submit,
-           prompt_2.submit,
-           negative_prompt_2.submit,
            run_button.click,
        ],
        fn=randomize_seed_fn,
@@ -335,17 +357,14 @@ with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
        inputs=[
            prompt,
            negative_prompt,
-           prompt_2,
-           negative_prompt_2,
            use_negative_prompt,
            seed,
            width,
            height,
            guidance_scale_base,
            num_inference_steps_base,
            strength_img2img,
+           is_sdxl,
            use_lora,
            use_lora2,
            model,
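Note on the new SDXL branch: with truncate_long_prompts=False, compel can return positive embeddings longer than the 77-token encoder window, but the branch still passes the raw negative_prompt string alongside prompt_embeds, so the pipeline encodes the negative side at the default length and the two tensors can mismatch in shape on long prompts. The SD 1.5 branch already pads with pad_conditioning_tensors_to_same_length; the same idea applied to SDXL would look roughly like the sketch below. It reuses pipe, prompt, negative_prompt, and generator from the diff and is not the committed code:

    from compel import Compel, ReturnedEmbeddingsType

    # Sketch only: same compel SDXL setup as in the diff above.
    compel = Compel(
        tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
        text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
        returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
        requires_pooled=[False, True],
        truncate_long_prompts=False,
    )
    conditioning, pooled = compel(prompt)
    # Embed the negative prompt too instead of passing the raw string.
    negative_conditioning, negative_pooled = compel(negative_prompt or "")
    # Pad so both sides of classifier-free guidance share one sequence length.
    [conditioning, negative_conditioning] = compel.pad_conditioning_tensors_to_same_length(
        [conditioning, negative_conditioning]
    )

    result = pipe(
        prompt_embeds=conditioning,
        pooled_prompt_embeds=pooled,
        negative_prompt_embeds=negative_conditioning,
        negative_pooled_prompt_embeds=negative_pooled,
        guidance_scale=5.0,
        num_inference_steps=25,
        generator=generator,
    ).images[0]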
 
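Two smaller notes on the added lines. CUDA_LAUNCH_BLOCKING=1 at module level only binds an ordinary Python variable; CUDA reads this flag from the process environment, so if synchronous kernel launches are actually wanted it would have to be set before torch initializes CUDA:

    import os

    # Must run before the first CUDA call in the process.
    os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

Separately, the new is_sdxl.change handler uses the checkbox as its own output, so unchecking it sets visible=False on the control itself and it cannot be re-enabled from the UI; since is_sdxl is already passed straight into generate's inputs, the handler appears to be removable.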