leeway.zlw committed
Commit 23a0ba6
1 Parent(s): 69c71b8
Files changed (1):
  1. app.py +8 -11
app.py CHANGED
@@ -7,12 +7,13 @@ from scripts.inference import inference_process
 import argparse
 import uuid
 
-is_shared_ui = True if "fudan-generative-ai/hallo" in os.environ['SPACE_ID'] else False
+# is_shared_ui = True if "fudan-generative-ai/hallo" in os.environ['SPACE_ID'] else False
+is_shared_ui = False
 
 if(not is_shared_ui):
     hallo_dir = snapshot_download(repo_id="fudan-generative-ai/hallo", local_dir="pretrained_models")
 
-def run_inference(source_image, driving_audio, pose_weight, face_weight, lip_weight, face_expand_ratio, progress=gr.Progress(track_tqdm=True)):
+def run_inference(source_image, driving_audio, progress=gr.Progress(track_tqdm=True)):
     if is_shared_ui:
         raise gr.Error("This Space only works in duplicated instances")
 
@@ -23,10 +24,10 @@ def run_inference(source_image, driving_audio, pose_weight, face_weight, lip_wei
         source_image=source_image,
         driving_audio=driving_audio,
         output=f'output-{unique_id}.mp4',
-        pose_weight=pose_weight,
-        face_weight=face_weight,
-        lip_weight=lip_weight,
-        face_expand_ratio=face_expand_ratio,
+        pose_weight=1.0,
+        face_weight=1.0,
+        lip_weight=1.0,
+        face_expand_ratio=1.2,
         checkpoint=None
     )
 
@@ -112,17 +113,13 @@ We have provided some [samples](https://huggingface.co/datasets/fudan-generative
         with gr.Column():
             avatar_face = gr.Image(type="filepath", label="Face")
             driving_audio = gr.Audio(type="filepath", label="Driving audio")
-            pose_weight = gr.Number(label="pose weight", value=1.0),
-            face_weight = gr.Number(label="face weight", value=1.0),
-            lip_weight = gr.Number(label="lip weight", value=1.0),
-            face_expand_ratio = gr.Number(label="face expand ratio", value=1.2),
             generate = gr.Button("Generate")
         with gr.Column():
             output_video = gr.Video(label="Your talking head")
 
     generate.click(
         fn=run_inference,
-        inputs=[avatar_face, driving_audio, pose_weight, face_weight, lip_weight, face_expand_ratio],
+        inputs=[avatar_face, driving_audio],
         outputs=output_video
     )
 
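For orientation, below is a minimal sketch of how the touched parts of app.py read once this commit is applied, assembled from the hunks above. The simplified run_inference signature, the inputs=[avatar_face, driving_audio] wiring, and the hard-coded values (pose_weight=1.0, face_weight=1.0, lip_weight=1.0, face_expand_ratio=1.2) come straight from the diff; the gr.Blocks()/gr.Row() layout, the unique_id creation, the return value, and the exact shape of the call that receives the hard-coded keyword arguments are not visible in the hunks and are assumptions for illustration only.

# Sketch of app.py after this commit, assembled from the hunks above.
# Anything marked "assumed" is not shown in the diff and is illustrative only.
import uuid

import gradio as gr
from huggingface_hub import snapshot_download
from scripts.inference import inference_process

# Shared-UI detection is disabled so the app also runs outside the original Space.
# is_shared_ui = True if "fudan-generative-ai/hallo" in os.environ['SPACE_ID'] else False
is_shared_ui = False

if not is_shared_ui:
    # Download the Hallo checkpoints into pretrained_models/ at startup.
    hallo_dir = snapshot_download(repo_id="fudan-generative-ai/hallo",
                                  local_dir="pretrained_models")


def run_inference(source_image, driving_audio, progress=gr.Progress(track_tqdm=True)):
    if is_shared_ui:
        raise gr.Error("This Space only works in duplicated instances")

    unique_id = uuid.uuid4()  # assumed: gives each output file a unique name
    # Assumed call shape: keyword arguments forwarded to inference_process.
    # The blending weights and face expand ratio are now hard-coded instead of
    # being read from UI controls.
    inference_process(
        source_image=source_image,
        driving_audio=driving_audio,
        output=f'output-{unique_id}.mp4',
        pose_weight=1.0,
        face_weight=1.0,
        lip_weight=1.0,
        face_expand_ratio=1.2,
        checkpoint=None,
    )
    return f'output-{unique_id}.mp4'  # assumed: path shown in the Video component


with gr.Blocks() as demo:  # assumed layout around the visible Column blocks
    with gr.Row():
        with gr.Column():
            avatar_face = gr.Image(type="filepath", label="Face")
            driving_audio = gr.Audio(type="filepath", label="Driving audio")
            generate = gr.Button("Generate")
        with gr.Column():
            output_video = gr.Video(label="Your talking head")

    # Only the face image and driving audio are wired in; the weight inputs are gone.
    generate.click(
        fn=run_inference,
        inputs=[avatar_face, driving_audio],
        outputs=output_video
    )

demo.launch()  # assumed

The net effect of the commit is that the Space no longer exposes pose, face, and lip weights or the face expand ratio in the UI; every run uses the fixed defaults above, and the shared-UI check is bypassed so the app also starts outside the original fudan-generative-ai/hallo Space.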