Your Name committed on
Commit 8016317
1 Parent(s): 127e696

try fix the preprocess OOM issue

Files changed (29)
  1. .gitignore +17 -0
  2. app.py +5 -5
  3. lib/__pycache__/__init__.cpython-310.pyc +0 -0
  4. lib/__pycache__/cfg_helper.cpython-310.pyc +0 -0
  5. lib/__pycache__/cfg_holder.cpython-310.pyc +0 -0
  6. lib/__pycache__/log_service.cpython-310.pyc +0 -0
  7. lib/__pycache__/sync.cpython-310.pyc +0 -0
  8. lib/model_zoo/__pycache__/__init__.cpython-310.pyc +0 -0
  9. lib/model_zoo/__pycache__/attention.cpython-310.pyc +0 -0
  10. lib/model_zoo/__pycache__/autokl.cpython-310.pyc +0 -0
  11. lib/model_zoo/__pycache__/autokl_modules.cpython-310.pyc +0 -0
  12. lib/model_zoo/__pycache__/autokl_utils.cpython-310.pyc +0 -0
  13. lib/model_zoo/__pycache__/controlnet.cpython-310.pyc +0 -0
  14. lib/model_zoo/__pycache__/ddim.cpython-310.pyc +0 -0
  15. lib/model_zoo/__pycache__/diffusion_utils.cpython-310.pyc +0 -0
  16. lib/model_zoo/__pycache__/distributions.cpython-310.pyc +0 -0
  17. lib/model_zoo/__pycache__/ema.cpython-310.pyc +0 -0
  18. lib/model_zoo/__pycache__/openaimodel.cpython-310.pyc +0 -0
  19. lib/model_zoo/__pycache__/pfd.cpython-310.pyc +0 -0
  20. lib/model_zoo/__pycache__/seecoder.cpython-310.pyc +0 -0
  21. lib/model_zoo/__pycache__/seecoder_utils.cpython-310.pyc +0 -0
  22. lib/model_zoo/__pycache__/swin.cpython-310.pyc +0 -0
  23. lib/model_zoo/common/__pycache__/get_model.cpython-310.pyc +0 -0
  24. lib/model_zoo/common/__pycache__/get_optimizer.cpython-310.pyc +0 -0
  25. lib/model_zoo/common/__pycache__/get_scheduler.cpython-310.pyc +0 -0
  26. lib/model_zoo/common/__pycache__/utils.cpython-310.pyc +0 -0
  27. lib/model_zoo/controlnet.py +9 -9
  28. lib/model_zoo/controlnet_annotator/midas/__init__.py +1 -2
  29. lib/model_zoo/controlnet_annotator/openpose/__init__.py +23 -6
.gitignore ADDED
@@ -0,0 +1,17 @@
+__pycache__
+.vscode/
+data/
+data
+log/
+log
+pretrained/
+pretrained
+assets/nosync/
+assets/demo/temp/temp_*
+*.out
+gradio_cached_examples/
+src/*/build
+src/*/dist
+src/*/*.egg-info/
+extensions/
+extensions
app.py CHANGED
@@ -29,13 +29,13 @@ n_sample_image = 1
     # ['canny_v11p' , ('canny' , 'pretrained/controlnet/control_v11p_sd15_canny_slimmed.safetensors')],
     # ['depth' , ('depth' , 'pretrained/controlnet/control_sd15_depth_slimmed.safetensors')],
     # ['hed' , ('hed' , 'pretrained/controlnet/control_sd15_hed_slimmed.safetensors')],
+    # ['softedge_v11p' , ('hed' , 'pretrained/controlnet/control_v11p_sd15_softedge_slimmed.safetensors')],
     # ['mlsd' , ('mlsd' , 'pretrained/controlnet/control_sd15_mlsd_slimmed.safetensors')],
-    # ['mlsd_v11p' , ('mlsd' , 'pretrained/controlnet/control_v11p_sd15_mlsd_slimmed.safetensors')],
-    # ['normal' , ('normal' , 'pretrained/controlnet/control_sd15_normal_slimmed.safetensors')],
+    # # ['mlsd_v11p' , ('mlsd' , 'pretrained/controlnet/control_v11p_sd15_mlsd_slimmed.safetensors')],
+    # # ['normal' , ('normal' , 'pretrained/controlnet/control_sd15_normal_slimmed.safetensors')],
     # ['openpose' , ('openpose', 'pretrained/controlnet/control_sd15_openpose_slimmed.safetensors')],
     # ['openpose_v11p' , ('openpose', 'pretrained/controlnet/control_v11p_sd15_openpose_slimmed.safetensors')],
     # ['scribble' , ('scribble', 'pretrained/controlnet/control_sd15_scribble_slimmed.safetensors')],
-    # ['softedge_v11p' , ('scribble', 'pretrained/controlnet/control_v11p_sd15_softedge_slimmed.safetensors')],
     # ['seg' , ('none' , 'pretrained/controlnet/control_sd15_seg_slimmed.safetensors')],
     # ['lineart_v11p' , ('none' , 'pretrained/controlnet/control_v11p_sd15_lineart_slimmed.safetensors')],
     # ['lineart_anime_v11p', ('none' , 'pretrained/controlnet/control_v11p_sd15s2_lineart_anime_slimmed.safetensors')],
@@ -45,7 +45,7 @@ controlnet_path = OrderedDict([
     ['canny' , ('canny' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_canny_slimmed.safetensors'))],
     # ['canny_v11p' , ('canny' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_v11p_sd15_canny_slimmed.safetensors'))],
     ['depth' , ('depth' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_depth_slimmed.safetensors'))],
-    # ['hed' , ('hed' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_hed_slimmed.safetensors'))],
+    ['hed' , ('hed' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_hed_slimmed.safetensors'))],
     ['mlsd' , ('mlsd' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_mlsd_slimmed.safetensors'))],
     # ['mlsd_v11p' , ('mlsd' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_v11p_sd15_mlsd_slimmed.safetensors'))],
     # ['normal' , ('normal' , hf_hub_download('shi-labs/prompt-free-diffusion', 'pretrained/controlnet/control_sd15_normal_slimmed.safetensors'))],
@@ -61,7 +61,7 @@ controlnet_path = OrderedDict([
 preprocess_method = [
     'canny' ,
     'depth' ,
-    # 'hed' ,
+    'hed' ,
     'mlsd' ,
     # 'normal' ,
     'openpose' ,
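
Note on the app.py change: it re-enables the 'hed' ControlNet both in controlnet_path (the slimmed weight is now fetched with hf_hub_download) and in preprocess_method, and repoints the still-commented 'softedge_v11p' entry at the 'hed' annotator. A minimal sketch of how such an entry resolves, assuming only the structure visible above (the lookup code itself is illustrative, not copied from app.py):

from collections import OrderedDict
from huggingface_hub import hf_hub_download

# key -> (preprocessor type, local path of the slimmed ControlNet weights)
controlnet_path = OrderedDict([
    ['hed', ('hed', hf_hub_download('shi-labs/prompt-free-diffusion',
                                    'pretrained/controlnet/control_sd15_hed_slimmed.safetensors'))],
])

ptype, weight_file = controlnet_path['hed']
print(ptype, weight_file)  # 'hed' and the cached safetensors path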
lib/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (150 Bytes)
 
lib/__pycache__/cfg_helper.cpython-310.pyc DELETED
Binary file (13.2 kB)
 
lib/__pycache__/cfg_holder.cpython-310.pyc DELETED
Binary file (1.22 kB)
 
lib/__pycache__/log_service.cpython-310.pyc DELETED
Binary file (5.01 kB)
 
lib/__pycache__/sync.cpython-310.pyc DELETED
Binary file (7.51 kB)
 
lib/model_zoo/__pycache__/__init__.cpython-310.pyc DELETED
Binary file (371 Bytes)
 
lib/model_zoo/__pycache__/attention.cpython-310.pyc DELETED
Binary file (15.8 kB)
 
lib/model_zoo/__pycache__/autokl.cpython-310.pyc DELETED
Binary file (6.08 kB)
 
lib/model_zoo/__pycache__/autokl_modules.cpython-310.pyc DELETED
Binary file (20.3 kB)
 
lib/model_zoo/__pycache__/autokl_utils.cpython-310.pyc DELETED
Binary file (13 kB)
 
lib/model_zoo/__pycache__/controlnet.cpython-310.pyc DELETED
Binary file (13.1 kB)
 
lib/model_zoo/__pycache__/ddim.cpython-310.pyc DELETED
Binary file (7.89 kB)
 
lib/model_zoo/__pycache__/diffusion_utils.cpython-310.pyc DELETED
Binary file (9.53 kB)
 
lib/model_zoo/__pycache__/distributions.cpython-310.pyc DELETED
Binary file (3.76 kB)
 
lib/model_zoo/__pycache__/ema.cpython-310.pyc DELETED
Binary file (3.01 kB)
 
lib/model_zoo/__pycache__/openaimodel.cpython-310.pyc DELETED
Binary file (51.1 kB)
 
lib/model_zoo/__pycache__/pfd.cpython-310.pyc DELETED
Binary file (15.9 kB)
 
lib/model_zoo/__pycache__/seecoder.cpython-310.pyc DELETED
Binary file (16.6 kB)
 
lib/model_zoo/__pycache__/seecoder_utils.cpython-310.pyc DELETED
Binary file (4.7 kB)
 
lib/model_zoo/__pycache__/swin.cpython-310.pyc DELETED
Binary file (21.2 kB)
 
lib/model_zoo/common/__pycache__/get_model.cpython-310.pyc DELETED
Binary file (3.32 kB)
 
lib/model_zoo/common/__pycache__/get_optimizer.cpython-310.pyc DELETED
Binary file (1.95 kB)
 
lib/model_zoo/common/__pycache__/get_scheduler.cpython-310.pyc DELETED
Binary file (9.44 kB)
 
lib/model_zoo/common/__pycache__/utils.cpython-310.pyc DELETED
Binary file (9.72 kB)
 
lib/model_zoo/controlnet.py CHANGED
@@ -296,12 +296,12 @@ class ControlNet(nn.Module):
         if type == 'none' or type is None:
             return None

-        elif type in ['input', 'shuffle_v11e']:
+        elif type in ['input']:
             y_torch = torch.stack([tvtrans.ToTensor()(xi) for xi in x_list])
             y_torch = y_torch.to(device).to(torch.float32)
             return y_torch

-        elif type in ['canny', 'canny_v11p']:
+        elif type in ['canny']:
             low_threshold = kwargs.pop('low_threshold', 100)
             high_threshold = kwargs.pop('high_threshold', 200)
             from .controlnet_annotator.canny import apply_canny
@@ -320,7 +320,7 @@
             unload_midas_model()
             return y_torch

-        elif type in ['hed', 'softedge_v11p']:
+        elif type in ['hed']:
             from .controlnet_annotator.hed import apply_hed, unload_hed_model
             y_list = [apply_hed(np.array(xi), device=device) for xi in x_list]
             y_torch = torch.stack([tvtrans.ToTensor()(yi) for yi in y_list])
@@ -349,7 +349,7 @@
             unload_midas_model()
             return y_torch

-        elif type in ['openpose', 'openpose_v11p']:
+        elif type in ['openpose']:
             from .controlnet_annotator.openpose import OpenposeModel
             from functools import partial
             wrapper = OpenposeModel()
@@ -359,10 +359,10 @@
             y_list = [apply_openpose(np.array(xi)) for xi in x_list]
             y_torch = torch.stack([tvtrans.ToTensor()(yi.copy()) for yi in y_list])
             y_torch = y_torch.to(device).to(torch.float32)
-            OpenposeModel.unload()
+            wrapper.unload()
             return y_torch

-        elif type in ['openpose_withface', 'openpose_withface_v11p']:
+        elif type in ['openpose_withface']:
             from .controlnet_annotator.openpose import OpenposeModel
             from functools import partial
             wrapper = OpenposeModel()
@@ -372,10 +372,10 @@
             y_list = [apply_openpose(np.array(xi)) for xi in x_list]
             y_torch = torch.stack([tvtrans.ToTensor()(yi.copy()) for yi in y_list])
             y_torch = y_torch.to(device).to(torch.float32)
-            OpenposeModel.unload()
+            wrapper.unload()
             return y_torch

-        elif type in ['openpose_withfacehand', 'openpose_withfacehand_v11p']:
+        elif type in ['openpose_withfacehand']:
             from .controlnet_annotator.openpose import OpenposeModel
             from functools import partial
             wrapper = OpenposeModel()
@@ -385,7 +385,7 @@
             y_list = [apply_openpose(np.array(xi)) for xi in x_list]
             y_torch = torch.stack([tvtrans.ToTensor()(yi.copy()) for yi in y_list])
             y_torch = y_torch.to(device).to(torch.float32)
-            OpenposeModel.unload()
+            wrapper.unload()
             return y_torch

         elif type == 'scribble':
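
The substantive change in controlnet.py is memory hygiene for the openpose branches: the old OpenposeModel.unload() called an instance method on the class (no self, so it could not succeed), and it is now wrapper.unload() on the instance that actually ran; the accepted type aliases are also trimmed. The pattern these branches now follow is roughly the sketch below; AnnotatorWrapper and its placeholder network are hypothetical stand-ins, only the torch calls are real:

import torch

class AnnotatorWrapper:
    # Stand-in for the OpenposeModel-style wrappers: build lazily, unload eagerly.
    def __init__(self):
        self.net = None

    def run(self, image, device='cpu'):
        if self.net is None:
            self.net = torch.nn.Conv2d(3, 3, 3, padding=1)  # placeholder annotator
        self.net.to(device)
        with torch.no_grad():
            return self.net(image.to(device))

    def unload(self):
        # OOM mitigation: park the weights on the CPU once the control map exists.
        if self.net is not None:
            self.net.to('cpu')
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

wrapper = AnnotatorWrapper()
control = wrapper.run(torch.rand(1, 3, 64, 64))
wrapper.unload()  # mirrors the wrapper.unload() calls added after each openpose pass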
lib/model_zoo/controlnet_annotator/midas/__init__.py CHANGED
@@ -16,8 +16,7 @@ def apply_midas(input_image, a=np.pi * 2.0, bg_th=0.1, device='cpu'):
     global model
     if model is None:
         model = MiDaSInference(model_type="dpt_hybrid")
-        model = model.to(device)
-
+    model = model.to(device)
     assert input_image.ndim == 3
     image_depth = input_image
     with torch.no_grad():
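
The midas change reads as hoisting model = model.to(device) out of the if model is None: branch, so a detector that was previously parked on the CPU is moved back to the requested device on every call, not only when it is first constructed. A minimal sketch of that lazy-load/re-place pattern, with a placeholder network standing in for MiDaSInference:

import torch

model = None  # module-level cache, as in the annotator

def apply_depth(image, device='cpu'):
    global model
    if model is None:
        model = torch.nn.Conv2d(3, 1, 3, padding=1)  # placeholder for MiDaSInference
    model = model.to(device)  # runs every call, so an unloaded model returns to the GPU
    with torch.no_grad():
        return model(image.to(device))

depth = apply_depth(torch.rand(1, 3, 64, 64))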
lib/model_zoo/controlnet_annotator/openpose/__init__.py CHANGED
@@ -18,6 +18,8 @@ from .body import Body, BodyResult, Keypoint
 from .hand import Hand
 from .face import Face

+openposemodel = None
+
 models_path = "pretrained/controlnet/preprocess"

 from typing import NamedTuple, Tuple, List, Callable, Union
@@ -170,11 +172,21 @@ class OpenposeDetector:
         """
         Unload the Openpose models by moving them to the CPU.
         """
+        self.device = "cpu"
         if self.body_estimation is not None:
             self.body_estimation.model.to("cpu")
             self.hand_estimation.model.to("cpu")
             self.face_estimation.model.to("cpu")

+    def set_device(self, device):
+        self.device = device
+        if self.body_estimation is not None:
+            self.body_estimation.model.to(device)
+        if self.hand_estimation is not None:
+            self.hand_estimation.model.to(device)
+        if self.face_estimation is not None:
+            self.face_estimation.model.to(device)
+
     def detect_hands(self, body: BodyResult, oriImg) -> Tuple[Union[HandResult, None], Union[HandResult, None]]:
         left_hand = None
         right_hand = None
@@ -291,7 +303,7 @@ class OpenposeDetector:

 class OpenposeModel(object):
     def __init__(self) -> None:
-        self.model_openpose = None
+        pass

     def run_model(
         self,
@@ -302,13 +314,17 @@ class OpenposeModel(object):
         json_pose_callback: Callable[[str], None] = None,
         device = 'cpu', ):

+        global openposemodel
+
         if json_pose_callback is None:
             json_pose_callback = lambda x: None

-        if self.model_openpose is None:
-            self.model_openpose = OpenposeDetector(device=device)
+        if openposemodel is None:
+            openposemodel = OpenposeDetector(device=device)
+        else:
+            openposemodel.set_device(device)

-        return self.model_openpose(
+        return openposemodel(
             img,
             include_body=include_body,
             include_hand=include_hand,
@@ -316,5 +332,6 @@ class OpenposeModel(object):
             json_pose_callback=json_pose_callback)

     def unload(self):
-        if self.model_openpose is not None:
-            self.model_openpose.unload_model()
+        global openposemodel
+        if openposemodel is not None:
+            openposemodel.unload_model()
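
Taken together, the openpose changes swap the per-wrapper self.model_openpose for a single module-level openposemodel: run_model builds the detector once, set_device shuttles the body/hand/face networks onto whichever device the current request needs, and unload/unload_model push them back to the CPU between requests. A rough sketch of that lifecycle; PoseDetector, run_model, and unload below are illustrative stand-ins, not the repo's API:

import torch

_detector = None  # module-level singleton, like `openposemodel` above

class PoseDetector:
    def __init__(self, device='cpu'):
        self.body = torch.nn.Conv2d(3, 3, 3, padding=1)  # placeholder estimator
        self.set_device(device)

    def set_device(self, device):
        self.device = device
        self.body.to(device)

    def unload_model(self):
        self.device = 'cpu'
        self.body.to('cpu')

    def __call__(self, img):
        with torch.no_grad():
            return self.body(img.to(self.device))

def run_model(img, device='cpu'):
    global _detector
    if _detector is None:
        _detector = PoseDetector(device)  # build once
    else:
        _detector.set_device(device)      # reuse, just move the weights
    return _detector(img)

def unload():
    global _detector
    if _detector is not None:
        _detector.unload_model()          # back to the CPU until the next request

pose = run_model(torch.rand(1, 3, 64, 64))
unload()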