multimodalart HF staff committed on
Commit
15183c0
1 Parent(s): b7ca772

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -31
app.py CHANGED
@@ -30,7 +30,7 @@ from pipeline_stable_diffusion_xl_instantid_img2img import StableDiffusionXLInst
30
  from controlnet_aux import ZoeDetector
31
 
32
  from compel import Compel, ReturnedEmbeddingsType
33
- #import spaces
34
 
35
  #from gradio_imageslider import ImageSlider
36
 
@@ -123,8 +123,6 @@ pipe.load_ip_adapter_instantid(face_adapter)
123
  pipe.set_ip_adapter_scale(0.8)
124
  zoe = ZoeDetector.from_pretrained("lllyasviel/Annotators")
125
  zoe.to(device)
126
-
127
- original_pipe = copy.deepcopy(pipe)
128
  pipe.to(device)
129
 
130
  last_lora = ""
@@ -204,32 +202,7 @@ def merge_incompatible_lora(full_path_lora, lora_scale):
204
  )
205
  del weights_sd
206
  del lora_model
207
- #@spaces.GPU
208
- def generate_image(prompt, negative, face_emb, face_image, image_strength, images, guidance_scale, face_strength, depth_control_scale):
209
- print("Processing prompt...")
210
- conditioning, pooled = compel(prompt)
211
- if(negative):
212
- negative_conditioning, negative_pooled = compel(negative)
213
- else:
214
- negative_conditioning, negative_pooled = None, None
215
- print("Processing image...")
216
- image = pipe(
217
- prompt_embeds=conditioning,
218
- pooled_prompt_embeds=pooled,
219
- negative_prompt_embeds=negative_conditioning,
220
- negative_pooled_prompt_embeds=negative_pooled,
221
- width=1024,
222
- height=1024,
223
- image_embeds=face_emb,
224
- image=face_image,
225
- strength=1-image_strength,
226
- control_image=images,
227
- num_inference_steps=20,
228
- guidance_scale = guidance_scale,
229
- controlnet_conditioning_scale=[face_strength, depth_control_scale],
230
- ).images[0]
231
- return image
232
-
233
  def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, sdxl_loras, progress=gr.Progress(track_tqdm=True)):
234
  global last_lora, last_merged, last_fused, pipe
235
 
@@ -304,8 +277,31 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
304
  pipe.unload_textual_inversion()
305
  pipe.load_textual_inversion(state_dict_embedding["text_encoders_0"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
306
  pipe.load_textual_inversion(state_dict_embedding["text_encoders_1"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
307
-
308
- image = generate_image(prompt, negative, face_emb, face_image, image_strength, images, guidance_scale, face_strength, depth_control_scale)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
309
  last_lora = repo_name
310
  return image, gr.update(visible=True)
311
 
 
30
  from controlnet_aux import ZoeDetector
31
 
32
  from compel import Compel, ReturnedEmbeddingsType
33
+ import spaces
34
 
35
  #from gradio_imageslider import ImageSlider
36
 
 
123
  pipe.set_ip_adapter_scale(0.8)
124
  zoe = ZoeDetector.from_pretrained("lllyasviel/Annotators")
125
  zoe.to(device)
 
 
126
  pipe.to(device)
127
 
128
  last_lora = ""
 
202
  )
203
  del weights_sd
204
  del lora_model
205
+ @spaces.GPU
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
206
  def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, sdxl_loras, progress=gr.Progress(track_tqdm=True)):
207
  global last_lora, last_merged, last_fused, pipe
208
 
 
277
  pipe.unload_textual_inversion()
278
  pipe.load_textual_inversion(state_dict_embedding["text_encoders_0"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
279
  pipe.load_textual_inversion(state_dict_embedding["text_encoders_1"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
280
+
281
+ print("Processing prompt...")
282
+ conditioning, pooled = compel(prompt)
283
+ if(negative):
284
+ negative_conditioning, negative_pooled = compel(negative)
285
+ else:
286
+ negative_conditioning, negative_pooled = None, None
287
+ print("Processing image...")
288
+
289
+ image = pipe(
290
+ prompt_embeds=conditioning,
291
+ pooled_prompt_embeds=pooled,
292
+ negative_prompt_embeds=negative_conditioning,
293
+ negative_pooled_prompt_embeds=negative_pooled,
294
+ width=1024,
295
+ height=1024,
296
+ image_embeds=face_emb,
297
+ image=face_image,
298
+ strength=1-image_strength,
299
+ control_image=images,
300
+ num_inference_steps=20,
301
+ guidance_scale = guidance_scale,
302
+ controlnet_conditioning_scale=[face_strength, depth_control_scale],
303
+ ).images[0]
304
+
305
  last_lora = repo_name
306
  return image, gr.update(visible=True)
307