Commit dc9311b by multimodalart
1 Parent(s): 38be318

Update app.py

Files changed (1):
  1. app.py +10 -8
app.py CHANGED
@@ -249,11 +249,15 @@ def generate_image(prompt, negative, face_emb, face_image, face_kps, image_stren
     pipe.load_textual_inversion(state_dict_embedding["text_encoders_1"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
 
     print("Processing prompt...")
+    st = time.time()
     conditioning, pooled = compel(prompt)
     if(negative):
         negative_conditioning, negative_pooled = compel(negative)
     else:
         negative_conditioning, negative_pooled = None, None
+    et = time.time()
+    elapsed_time = et - st
+    print('Prompt processing took: ', elapsed_time, 'seconds')
     print("Processing image...")
     image = pipe(
         prompt_embeds=conditioning,
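Note: the `conditioning, pooled` pair timed above comes from the compel prompt-weighting library. The Compel construction itself is not part of this diff, so the sketch below follows compel's documented SDXL setup rather than the exact code in app.py; `pipe` is assumed to be an already-loaded SDXL pipeline.

from compel import Compel, ReturnedEmbeddingsType

compel = Compel(
    tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
    text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
    returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
    requires_pooled=[False, True],  # only SDXL's second text encoder returns pooled embeds
)
conditioning, pooled = compel("a photo of a person")
image = pipe(prompt_embeds=conditioning, pooled_prompt_embeds=pooled).images[0]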
@@ -275,8 +279,8 @@ def generate_image(prompt, negative, face_emb, face_image, face_kps, image_stren
 
 def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_strength, image_strength, guidance_scale, depth_control_scale, sdxl_loras, progress=gr.Progress(track_tqdm=True)):
     selected_state_index = selected_state.index
-    face_image = center_crop_image_as_square(face_image)
     st = time.time()
+    face_image = center_crop_image_as_square(face_image)
     try:
         face_info = app.get(cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR))
         face_info = sorted(face_info, key=lambda x:(x['bbox'][2]-x['bbox'][0])*x['bbox'][3]-x['bbox'][1])[-1] # only use the maximum face
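Note on the face-selection line above: the comment says "only use the maximum face", but the sort key is missing parentheses around the height term, so it computes width * y2 - y1 rather than width * height. A sketch of the presumably intended largest-area selection; `bbox_area` is a hypothetical helper, not part of app.py:

def bbox_area(face):
    # InsightFace returns bbox as [x1, y1, x2, y2]
    x1, y1, x2, y2 = face['bbox']
    return (x2 - x1) * (y2 - y1)

face_info = max(face_info, key=bbox_area)  # largest detected face by area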
@@ -286,8 +290,9 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
         raise gr.Error("No face found in your image. Only face images work here. Try again")
     et = time.time()
     elapsed_time = et - st
-    print('Calculating face embeds took: ', elapsed_time, 'seconds')
+    print('Cropping and calculating face embeds took: ', elapsed_time, 'seconds')
 
+    st = time.time()
     for lora_list in lora_defaults:
         if lora_list["model"] == sdxl_loras[selected_state_index]["repo"]:
             prompt_full = lora_list.get("prompt", None)
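Note: the loop above does a linear scan of lora_defaults on every call. If this lookup ever showed up in the timings this commit adds, a dict keyed by repo would make it constant-time; `lora_defaults_by_repo` below is a hypothetical index, not part of app.py:

lora_defaults_by_repo = {d["model"]: d for d in lora_defaults}  # build once at startup

lora_list = lora_defaults_by_repo.get(sdxl_loras[selected_state_index]["repo"])
prompt_full = lora_list.get("prompt", None) if lora_list else None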
@@ -299,12 +304,6 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
     if(prompt == ""):
         prompt = "a person"
 
-    #if(selected_state.index < 0):
-    #    if(selected_state.index == -9999):
-    #        selected_state.index = 0
-    #    else:
-    #        selected_state.index *= -1
-    #sdxl_loras = sdxl_loras_new
     print("Selected State: ", selected_state_index)
     print(sdxl_loras[selected_state_index]["repo"])
     if negative == "":
@@ -318,6 +317,9 @@ def run_lora(face_image, prompt, negative, lora_scale, selected_state, face_stre
     full_path_lora = state_dicts[repo_name]["saved_name"]
     loaded_state_dict = copy.deepcopy(state_dicts[repo_name]["state_dict"])
     cross_attention_kwargs = None
+    et = time.time()
+    elapsed_time = et - st
+    print('Small content processing took: ', elapsed_time, 'seconds')
 
     image = generate_image(prompt, negative, face_emb, face_image, face_kps, image_strength, guidance_scale, face_strength, depth_control_scale, repo_name, loaded_state_dict, lora_scale, sdxl_loras, selected_state_index)
     return image, gr.update(visible=True)
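The commit's net effect is timing instrumentation: the same st/et/print pattern now appears in three places (prompt processing, cropping plus face embeds, and the LoRA/prompt bookkeeping before generation). A small context manager, sketched here as a hypothetical refactor rather than anything in app.py, would express the pattern once:

import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    # Prints the wall-clock duration of the enclosed block, matching the diff's format.
    st = time.time()
    try:
        yield
    finally:
        print(f'{label} took: {time.time() - st} seconds')

# Usage mirroring the diff:
# with timed('Prompt processing'):
#     conditioning, pooled = compel(prompt)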
 