sky24h committed
Commit 9e9df55 · 1 Parent(s): eca9dba

update gradio version

Files changed (3)
  1. README.md +1 -1
  2. app.py +2 -4
  3. requirements.txt +1 -1
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🤖
 colorFrom: red
 colorTo: yellow
 sdk: gradio
-sdk_version: 3.41.0
+sdk_version: 4.38.1
 app_file: app.py
 pinned: false
 license: cc-by-nc-4.0
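
The sdk_version field in the Space's README front matter and the gradio pin in requirements.txt now both point at 4.38.1. A small, hypothetical helper (not part of this commit) could keep the two in sync; it only assumes the standard file names at the repo root:

# Hypothetical consistency check, not part of this commit: verify that the
# sdk_version in the README front matter matches the gradio pin in requirements.txt.
import re
from pathlib import Path

def readme_sdk_version(readme_path: str = "README.md") -> str:
    # The Space config lives in the YAML front matter at the top of README.md.
    text = Path(readme_path).read_text(encoding="utf-8")
    match = re.search(r"^sdk_version:\s*([\w.]+)\s*$", text, flags=re.MULTILINE)
    if match is None:
        raise ValueError("sdk_version not found in README front matter")
    return match.group(1)

def pinned_gradio_version(req_path: str = "requirements.txt") -> str:
    text = Path(req_path).read_text(encoding="utf-8")
    match = re.search(r"^gradio==([\w.]+)\s*$", text, flags=re.MULTILINE)
    if match is None:
        raise ValueError("gradio is not pinned in requirements.txt")
    return match.group(1)

if __name__ == "__main__":
    sdk, pinned = readme_sdk_version(), pinned_gradio_version()
    assert sdk == pinned, f"README sdk_version {sdk} != requirements.txt gradio {pinned}"
    print(f"OK: both pin gradio {sdk}")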
app.py CHANGED
@@ -57,9 +57,9 @@ with gr.Blocks() as demo:
 with gr.Row():
 with gr.Column():
 # select and preview video from a list of examples
-video_preview = gr.Video(label="Video Preview", elem_id="video-preview", height=360, value="./assets/videos/sample1.mp4")
+video_preview = gr.Video(label="Video Preview", elem_id="video-preview", value="./assets/videos/sample1.mp4")
 video_input = gr.Dropdown(available_videos, label="Input Video", value="sample1.mp4")
-audio_preview = gr.Audio(label="Audio Preview", elem_id="audio-preview", height=360, value="./assets/audios/sample2.wav")
+audio_preview = gr.Audio(label="Audio Preview", elem_id="audio-preview", value="./assets/audios/sample2.wav")
 audio_input = gr.Dropdown(available_audios, label="Input Audio", value="sample2.wav")
 pose_select = gr.Radio(["front", "left_right_shaking"], label="Pose", value="front")
 emotion_select = gr.Radio(["neutral", "happy", "angry", "surprised"], label="Emotion", value="neutral")
@@ -67,8 +67,6 @@ with gr.Blocks() as demo:
 # with gr.Row():
 with gr.Column():
 video_out = gr.Video(label="Video Output", elem_id="video-output", height=360)
-# titile: Free-View Expressive Talking Head Video Editing
-
 submit_btn = gr.Button("Generate video")
 
 inputs = [video_input, audio_input, pose_select, emotion_select, blink_select]
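
The functional change in app.py is dropping the height=360 argument from the two preview components while keeping it on the output video. Below is a minimal sketch of the resulting layout, assuming Gradio 4.38.1; the file lists, the generate() stub, and the click wiring are placeholders rather than the Space's own code, it assumes the ./assets files exist, and blink_select (defined outside the hunk above) is omitted.

# Minimal sketch of the updated layout, assuming Gradio 4.38.1 and that the
# ./assets files referenced below exist. available_videos, available_audios,
# and generate() are placeholders, not the Space's actual code.
import gradio as gr

available_videos = ["sample1.mp4"]   # placeholder list; the Space scans ./assets/videos
available_audios = ["sample2.wav"]   # placeholder list; the Space scans ./assets/audios

def generate(video_name, audio_name, pose, emotion):
    # Placeholder for the Space's inference call: echo the selected input video.
    return "./assets/videos/" + video_name

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            # Previews: the commit drops height=360 here and keeps it only on the output video.
            video_preview = gr.Video(label="Video Preview", elem_id="video-preview", value="./assets/videos/sample1.mp4")
            video_input = gr.Dropdown(available_videos, label="Input Video", value="sample1.mp4")
            audio_preview = gr.Audio(label="Audio Preview", elem_id="audio-preview", value="./assets/audios/sample2.wav")
            audio_input = gr.Dropdown(available_audios, label="Input Audio", value="sample2.wav")
            pose_select = gr.Radio(["front", "left_right_shaking"], label="Pose", value="front")
            emotion_select = gr.Radio(["neutral", "happy", "angry", "surprised"], label="Emotion", value="neutral")
        with gr.Column():
            video_out = gr.Video(label="Video Output", elem_id="video-output", height=360)
            submit_btn = gr.Button("Generate video")

    # Illustrative wiring; the Space builds its own inputs list including blink_select.
    submit_btn.click(fn=generate, inputs=[video_input, audio_input, pose_select, emotion_select], outputs=video_out)

if __name__ == "__main__":
    demo.launch()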
requirements.txt CHANGED
@@ -5,6 +5,6 @@ moviepy==1.0.3
 numpy==1.23.5
 safetensors==0.3.2
 torchvision==0.15.2
-gradio==3.41.0
+gradio==4.38.1
 natsort
 tqdm
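
The gradio pin here mirrors the sdk_version bump in README.md. A hypothetical startup check (not in the Space) could warn when the importable gradio drifts from the pin:

# Hypothetical startup check, not part of this commit: warn if the gradio
# that is actually importable differs from the 4.38.1 pin above.
from importlib.metadata import version

PINNED_GRADIO = "4.38.1"
installed = version("gradio")
if installed != PINNED_GRADIO:
    print(f"Warning: gradio {installed} is installed, but requirements.txt pins {PINNED_GRADIO}")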