cleardusk and multimodalart committed
Commit add5fb2
1 Parent(s): 0e9358c

Run insightface on CPU and adapt the code to make it work with ZeroGPU (#1)


- Run insightface on CPU and adapt the code to make it work with ZeroGPU (20afb24ea6e0e9425221805f14242a6fb4fdfa6f)
- change it in cropper.py too (dfb1c4ae7bc5f65ec2369cad65a205dbd0ea5c55)
- Update app.py (f4ffec51a38d4a3fc5d70fc671b69d0e00e02742)


Co-authored-by: Apolinário from multimodal AI art <multimodalart@users.noreply.huggingface.co>
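For context: a ZeroGPU Space only attaches a GPU while a function decorated with @spaces.GPU is executing, so anything that initializes at import time (the insightface detector, the landmark ONNX session) has to run on CPU, while the GPU-bound pipeline calls are exposed through module-level decorated wrappers. A minimal sketch of the decorator half of that pattern; only spaces.GPU is real ZeroGPU API, heavy_inference is an illustrative stand-in for the Space's execute_* entry points:

import spaces
import torch

@spaces.GPU(duration=240)  # a GPU is attached only for the duration of this call
def heavy_inference(x: torch.Tensor) -> torch.Tensor:
    # CUDA is available here; outside decorated calls (e.g. at import time) it is not,
    # which is why the ONNX/insightface models below are switched to CPU providers.
    return (x.to("cuda") * 2).cpu()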

Files changed (3)
  1. app.py +12 -4
  2. src/utils/cropper.py +2 -2
  3. src/utils/landmark_runner.py +1 -1
app.py CHANGED
@@ -35,6 +35,14 @@ gradio_pipeline = GradioPipeline(
     args=args
 )
 
+@spaces.GPU(duration=240)
+def gpu_wrapped_execute_video(*args, **kwargs):
+    return gradio_pipeline.execute_video(*args, **kwargs)
+
+@spaces.GPU(duration=240)
+def gpu_wrapped_execute_image(*args, **kwargs):
+    return gradio_pipeline.execute_image(*args, **kwargs)
+
 # assets
 title_md = "assets/gradio_title.md"
 example_portrait_dir = "assets/examples/source"
@@ -111,7 +119,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     with gr.Row():
         gr.Examples(
             examples=data_examples,
-            fn=lambda *args: spaces.GPU()(gradio_pipeline.execute_video)(*args),
+            fn=gpu_wrapped_execute_video,
             inputs=[
                 image_input,
                 video_input,
@@ -121,7 +129,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             ],
             outputs=[output_image, output_image_paste_back],
             examples_per_page=5,
-            cache_examples="lazy",
+            cache_examples=False,
         )
     gr.Markdown(load_description("assets/gradio_description_retargeting.md"))
     with gr.Row():
@@ -152,13 +160,13 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     # binding functions for buttons
     process_button_retargeting.click(
         # fn=gradio_pipeline.execute_image,
-        fn=lambda *args: spaces.GPU()(gradio_pipeline.execute_image)(*args),
+        fn=gpu_wrapped_execute_image,
         inputs=[eye_retargeting_slider, lip_retargeting_slider],
         outputs=[output_image, output_image_paste_back],
         show_progress=True
     )
     process_button_animation.click(
-        fn=lambda *args: spaces.GPU()(gradio_pipeline.execute_video)(*args),
+        fn=gpu_wrapped_execute_video,
         inputs=[
             image_input,
             video_input,
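A hedged note on the app.py change: the old bindings applied spaces.GPU() inside a lambda at call time, whereas ZeroGPU expects the GPU entry points to be decorated module-level functions, which is what the new gpu_wrapped_execute_* wrappers provide. For running app.py outside HF Spaces, a fallback like the following (not part of the commit; gradio_pipeline is the object constructed earlier in app.py) keeps the same wrapper shape:

try:
    import spaces
    gpu_decorator = spaces.GPU(duration=240)
except ImportError:
    # Local / non-ZeroGPU environment: degrade to a no-op decorator.
    def gpu_decorator(fn):
        return fn

@gpu_decorator
def gpu_wrapped_execute_video(*args, **kwargs):
    return gradio_pipeline.execute_video(*args, **kwargs)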
src/utils/cropper.py CHANGED
@@ -36,7 +36,7 @@ class Cropper(object):
         device_id = kwargs.get('device_id', 0)
         self.landmark_runner = LandmarkRunner(
             ckpt_path=make_abs_path('../../pretrained_weights/liveportrait/landmark.onnx'),
-            onnx_provider='cuda',
+            onnx_provider='cpu',
             device_id=device_id
         )
         self.landmark_runner.warmup()
@@ -44,7 +44,7 @@ class Cropper(object):
         self.face_analysis_wrapper = FaceAnalysisDIY(
             name='buffalo_l',
             root=make_abs_path('../../pretrained_weights/insightface'),
-            providers=["CUDAExecutionProvider"]
+            providers=["CPUExecutionProvider"]
         )
         self.face_analysis_wrapper.prepare(ctx_id=device_id, det_size=(512, 512))
         self.face_analysis_wrapper.warmup()
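To sanity-check the CPU detection path outside the Space, stock insightface can be configured the same way FaceAnalysisDIY is above (model name and det_size taken from the diff; the image path is illustrative). This sketch needs no GPU:

import cv2
from insightface.app import FaceAnalysis

app = FaceAnalysis(name='buffalo_l', providers=['CPUExecutionProvider'])
app.prepare(ctx_id=0, det_size=(512, 512))  # ctx_id is effectively ignored on the CPU provider

img = cv2.imread('path/to/portrait.jpg')  # illustrative path
faces = app.get(img)
print(f'detected {len(faces)} face(s)')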
src/utils/landmark_runner.py CHANGED
@@ -27,7 +27,7 @@ class LandmarkRunner(object):
     """landmark runner"""
     def __init__(self, **kwargs):
         ckpt_path = kwargs.get('ckpt_path')
-        onnx_provider = kwargs.get('onnx_provider', 'cuda')  # defaults to cuda
+        onnx_provider = 'cpu'  # run on cpu for it to work with ZeroGPU // kwargs.get('onnx_provider', 'cuda')  # defaults to cuda
         device_id = kwargs.get('device_id', 0)
         self.dsize = kwargs.get('dsize', 224)
         self.timer = Timer()
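For reference, the hard-coded onnx_provider = 'cpu' presumably ends up selecting onnxruntime execution providers along these lines; the mapping is an assumption about LandmarkRunner's internals, but the onnxruntime calls themselves are standard:

import onnxruntime as ort

def build_session(ckpt_path: str, onnx_provider: str = 'cpu', device_id: int = 0):
    # Assumed provider mapping: 'cuda' requests the GPU provider with a CPU fallback,
    # anything else stays CPU-only (what the ZeroGPU Space needs at import time).
    if onnx_provider == 'cuda':
        providers = [('CUDAExecutionProvider', {'device_id': device_id}), 'CPUExecutionProvider']
    else:
        providers = ['CPUExecutionProvider']
    return ort.InferenceSession(ckpt_path, providers=providers)

# e.g. build_session('pretrained_weights/liveportrait/landmark.onnx', 'cpu')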