multimodalart HF staff committed on
Commit
3d5a08b
1 Parent(s): cacbc2e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -2
app.py CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
2
  import torch
3
  torch.jit.script = lambda f: f
4
  import timm
 
5
  from huggingface_hub import hf_hub_download
6
  from safetensors.torch import load_file
7
  from share_btn import community_icon_html, loading_icon_html, share_js
@@ -124,7 +125,6 @@ pipe.set_ip_adapter_scale(0.8)
124
  zoe = ZoeDetector.from_pretrained("lllyasviel/Annotators")
125
  zoe.to(device)
126
 
127
- original_pipe = copy.deepcopy(pipe)
128
  pipe.to(device)
129
 
130
  last_lora = ""
@@ -209,10 +209,18 @@ def generate_image(prompt, negative, face_emb, face_image, image_strength, image
209
  global last_fused
210
  if last_lora != repo_name:
211
  if(last_fused):
 
212
  pipe.unfuse_lora()
213
  pipe.unload_lora_weights()
 
 
 
 
214
  pipe.load_lora_weights(loaded_state_dict)
215
  pipe.fuse_lora(lora_scale)
 
 
 
216
  last_fused = True
217
  is_pivotal = sdxl_loras[selected_state_index]["is_pivotal"]
218
  if(is_pivotal):
@@ -220,7 +228,6 @@ def generate_image(prompt, negative, face_emb, face_image, image_strength, image
220
  text_embedding_name = sdxl_loras[selected_state_index]["text_embedding_weights"]
221
  embedding_path = hf_hub_download(repo_id=repo_name, filename=text_embedding_name, repo_type="model")
222
  state_dict_embedding = load_file(embedding_path)
223
- print(state_dict_embedding)
224
  try:
225
  pipe.unload_textual_inversion()
226
  pipe.load_textual_inversion(state_dict_embedding["clip_l"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
 
2
  import torch
3
  torch.jit.script = lambda f: f
4
  import timm
5
+ import time
6
  from huggingface_hub import hf_hub_download
7
  from safetensors.torch import load_file
8
  from share_btn import community_icon_html, loading_icon_html, share_js
 
125
  zoe = ZoeDetector.from_pretrained("lllyasviel/Annotators")
126
  zoe.to(device)
127
 
 
128
  pipe.to(device)
129
 
130
  last_lora = ""
 
209
  global last_fused
210
  if last_lora != repo_name:
211
  if(last_fused):
212
+ st = time.time()
213
  pipe.unfuse_lora()
214
  pipe.unload_lora_weights()
215
+ et = time.time()
216
+ elapsed_time = et - st
217
+ print('Unfuse and unload took: ', elapsed_time, 'seconds')
218
+ st = time.time()
219
  pipe.load_lora_weights(loaded_state_dict)
220
  pipe.fuse_lora(lora_scale)
221
+ et = time.time()
222
+ elapsed_time = et - st
223
+ print('Fuse and load took: ', elapsed_time, 'seconds')
224
  last_fused = True
225
  is_pivotal = sdxl_loras[selected_state_index]["is_pivotal"]
226
  if(is_pivotal):
 
228
  text_embedding_name = sdxl_loras[selected_state_index]["text_embedding_weights"]
229
  embedding_path = hf_hub_download(repo_id=repo_name, filename=text_embedding_name, repo_type="model")
230
  state_dict_embedding = load_file(embedding_path)
 
231
  try:
232
  pipe.unload_textual_inversion()
233
  pipe.load_textual_inversion(state_dict_embedding["clip_l"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)