update gradio version
Browse files- README.md +1 -1
- app.py +2 -4
- requirements.txt +1 -1
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 🤖
|
|
4 |
colorFrom: red
|
5 |
colorTo: yellow
|
6 |
sdk: gradio
|
7 |
- sdk_version:
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
license: cc-by-nc-4.0
|
|
|
4 |
colorFrom: red
|
5 |
colorTo: yellow
|
6 |
sdk: gradio
|
7 |
+ sdk_version: 4.38.1
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
license: cc-by-nc-4.0
|
app.py
CHANGED
@@ -57,9 +57,9 @@ with gr.Blocks() as demo:
|
|
57 |
with gr.Row():
|
58 |
with gr.Column():
|
59 |
# select and preview video from a list of examples
|
60 |
- video_preview = gr.Video(label="Video Preview", elem_id="video-preview",
|
61 |
video_input = gr.Dropdown(available_videos, label="Input Video", value="sample1.mp4")
|
62 |
- audio_preview = gr.Audio(label="Audio Preview", elem_id="audio-preview",
|
63 |
audio_input = gr.Dropdown(available_audios, label="Input Audio", value="sample2.wav")
|
64 |
pose_select = gr.Radio(["front", "left_right_shaking"], label="Pose", value="front")
|
65 |
emotion_select = gr.Radio(["neutral", "happy", "angry", "surprised"], label="Emotion", value="neutral")
|
@@ -67,8 +67,6 @@ with gr.Blocks() as demo:
|
|
67 |
# with gr.Row():
|
68 |
with gr.Column():
|
69 |
video_out = gr.Video(label="Video Output", elem_id="video-output", height=360)
|
70 |
- # title: Free-View Expressive Talking Head Video Editing
|
71 |
-
|
72 |
submit_btn = gr.Button("Generate video")
|
73 |
|
74 |
inputs = [video_input, audio_input, pose_select, emotion_select, blink_select]
|
|
|
57 |
with gr.Row():
|
58 |
with gr.Column():
|
59 |
# select and preview video from a list of examples
|
60 |
+ video_preview = gr.Video(label="Video Preview", elem_id="video-preview", value="./assets/videos/sample1.mp4")
|
61 |
video_input = gr.Dropdown(available_videos, label="Input Video", value="sample1.mp4")
|
62 |
+ audio_preview = gr.Audio(label="Audio Preview", elem_id="audio-preview", value="./assets/audios/sample2.wav")
|
63 |
audio_input = gr.Dropdown(available_audios, label="Input Audio", value="sample2.wav")
|
64 |
pose_select = gr.Radio(["front", "left_right_shaking"], label="Pose", value="front")
|
65 |
emotion_select = gr.Radio(["neutral", "happy", "angry", "surprised"], label="Emotion", value="neutral")
|
|
|
67 |
# with gr.Row():
|
68 |
with gr.Column():
|
69 |
video_out = gr.Video(label="Video Output", elem_id="video-output", height=360)
|
|
|
|
|
70 |
submit_btn = gr.Button("Generate video")
|
71 |
|
72 |
inputs = [video_input, audio_input, pose_select, emotion_select, blink_select]
|
requirements.txt
CHANGED
@@ -5,6 +5,6 @@ moviepy==1.0.3
|
|
5 |
numpy==1.23.5
|
6 |
safetensors==0.3.2
|
7 |
torchvision==0.15.2
|
8 |
- gradio==
|
9 |
natsort
|
10 |
tqdm
|
|
|
5 |
numpy==1.23.5
|
6 |
safetensors==0.3.2
|
7 |
torchvision==0.15.2
|
8 |
+ gradio==4.38.1
|
9 |
natsort
|
10 |
tqdm
|