# SadTalker/app.py
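# Gradio demo: given a single portrait image and a driving signal (audio, an
# idle-animation timer, or a reference video), SadTalker renders a talking-head video.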
import os
import platform
import shutil
import uuid

import gradio as gr
import spaces
import torch
from huggingface_hub import snapshot_download
from pydub import AudioSegment

from examples.get_examples import get_examples
from src.facerender.animate import AnimateFromCoeff
from src.facerender.pirender_animate import AnimateFromCoeff_PIRender
from src.generate_batch import get_data
from src.generate_facerender_batch import get_facerender_data
from src.test_audio2coeff import Audio2Coeff
from src.utils.init_path import init_path
from src.utils.preprocess import CropAndExtract
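
# Locations of the pretrained weights and the YAML configs.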
checkpoint_path = 'checkpoints'
config_path = 'src/config'
device = "cuda" if torch.cuda.is_available(
) else "mps" if platform.system() == 'Darwin' else "cpu"
os.environ['TORCH_HOME'] = checkpoint_path
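# Fetch the pretrained SadTalker-V002rc checkpoints from the Hugging Face Hub
# (cached after the first run).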
snapshot_download(repo_id='vinthony/SadTalker-V002rc',
local_dir=checkpoint_path, local_dir_use_symlinks=True)
def mp3_to_wav(mp3_filename, wav_filename, frame_rate):
    """Convert an MP3 (or any pydub-readable file) to WAV at the given sample rate."""
    AudioSegment.from_file(file=mp3_filename).set_frame_rate(
        frame_rate).export(wav_filename, format="wav")
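
# On Hugging Face Spaces, @spaces.GPU reserves ZeroGPU hardware for each call,
# here for up to 120 seconds.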
@spaces.GPU(duration=120)
def generate_video(source_image, driven_audio, preprocess='crop', still_mode=False, use_enhancer=False,
batch_size=1, size=256, pose_style=0, facerender='facevid2vid', exp_scale=1.0,
use_ref_video=False, ref_video=None, ref_info=None, use_idle_mode=False,
length_of_audio=0, use_blink=True, result_dir='./results/'):
# Initialize models and paths
sadtalker_paths = init_path(
checkpoint_path, config_path, size, False, preprocess)
audio_to_coeff = Audio2Coeff(sadtalker_paths, device)
preprocess_model = CropAndExtract(sadtalker_paths, device)
    # Choose the face renderer: facevid2vid by default, PIRender when selected or when running on MPS.
    animate_from_coeff = AnimateFromCoeff(sadtalker_paths, device) if facerender == 'facevid2vid' and device != 'mps' \
        else AnimateFromCoeff_PIRender(sadtalker_paths, device)
# Create directories for saving results
time_tag = str(uuid.uuid4())
save_dir = os.path.join(result_dir, time_tag)
os.makedirs(save_dir, exist_ok=True)
input_dir = os.path.join(save_dir, 'input')
os.makedirs(input_dir, exist_ok=True)
# Process source image
pic_path = os.path.join(input_dir, os.path.basename(source_image))
shutil.move(source_image, input_dir)
# Process driven audio
    if driven_audio and os.path.isfile(driven_audio):
        audio_path = os.path.join(input_dir, os.path.basename(driven_audio))
        if audio_path.endswith('.mp3'):
            # Convert MP3 input to a 16 kHz WAV.
            mp3_to_wav(driven_audio, audio_path.replace('.mp3', '.wav'), 16000)
            audio_path = audio_path.replace('.mp3', '.wav')
        else:
            shutil.move(driven_audio, input_dir)
elif use_idle_mode:
audio_path = os.path.join(
input_dir, 'idlemode_'+str(length_of_audio)+'.wav')
AudioSegment.silent(
duration=1000*length_of_audio).export(audio_path, format="wav")
    else:
        # Neither audio nor idle mode: only valid when fully driving from a reference video.
        assert use_ref_video and ref_info == 'all', \
            "Provide driving audio, enable idle mode, or use a reference video with ref_info='all'"
# Process reference video
if use_ref_video and ref_info == 'all':
ref_video_videoname = os.path.splitext(os.path.split(ref_video)[-1])[0]
audio_path = os.path.join(save_dir, ref_video_videoname+'.wav')
        # Extract the reference video's audio track with ffmpeg; quote paths in case they contain spaces.
        os.system(
            f'ffmpeg -y -hide_banner -loglevel error -i "{ref_video}" "{audio_path}"')
ref_video_frame_dir = os.path.join(save_dir, ref_video_videoname)
os.makedirs(ref_video_frame_dir, exist_ok=True)
ref_video_coeff_path, _, _ = preprocess_model.generate(
ref_video, ref_video_frame_dir, preprocess, source_image_flag=False)
else:
ref_video_coeff_path = None
# Preprocess source image
first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
os.makedirs(first_frame_dir, exist_ok=True)
first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(
pic_path, first_frame_dir, preprocess, True, size)
    if first_coeff_path is None:
        raise AttributeError("No face was detected in the source image")
# Determine reference coefficients
ref_pose_coeff_path, ref_eyeblink_coeff_path = None, None
if use_ref_video:
if ref_info == 'pose':
ref_pose_coeff_path = ref_video_coeff_path
elif ref_info == 'blink':
ref_eyeblink_coeff_path = ref_video_coeff_path
elif ref_info == 'pose+blink':
ref_pose_coeff_path = ref_eyeblink_coeff_path = ref_video_coeff_path
else:
ref_pose_coeff_path = ref_eyeblink_coeff_path = None
# Generate coefficients from audio or reference video
    if use_ref_video and ref_info == 'all':
        # Fully video-driven: reuse the coefficients extracted from the reference video.
        coeff_path = ref_video_coeff_path
else:
batch = get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path=ref_eyeblink_coeff_path,
still=still_mode, idlemode=use_idle_mode, length_of_audio=length_of_audio, use_blink=use_blink)
coeff_path = audio_to_coeff.generate(
batch, save_dir, pose_style, ref_pose_coeff_path)
# Generate video from coefficients
data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path, batch_size, still_mode=still_mode,
preprocess=preprocess, size=size, expression_scale=exp_scale, facemodel=facerender)
return_path = animate_from_coeff.generate(data, save_dir, pic_path, crop_info, enhancer='gfpgan' if use_enhancer else None,
preprocess=preprocess, img_size=size)
video_name = data['video_name']
    print(f'The generated video {video_name} is saved in {save_dir}')
return return_path
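
# Example of invoking the pipeline directly, bypassing the UI (paths are hypothetical):
#   video_path = generate_video('examples/source.png', 'examples/speech.wav',
#                               preprocess='crop', size=256)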
# Gradio UI
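# Left panel: source image and driving inputs; right panel: settings and the generated video.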
with gr.Blocks(analytics_enabled=False) as demo:
with gr.Row():
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="sadtalker_source_image"):
with gr.TabItem('Source image'):
with gr.Row():
source_image = gr.Image(
label="Source image", sources="upload", type="filepath", elem_id="img2img_image")
with gr.Tabs(elem_id="sadtalker_driven_audio"):
with gr.TabItem('Driving Methods'):
                    gr.Markdown(
                        "Possible driving combinations: <br> 1. Audio only 2. Audio/IDLE mode + Ref Video (pose, blink, pose+blink) 3. IDLE mode only 4. Ref Video only ('all')")
with gr.Row():
driven_audio = gr.Audio(
label="Input audio", sources="upload", type="filepath")
driven_audio_no = gr.Audio(
label="Use IDLE mode, no audio is required", sources="upload", type="filepath", visible=False)
with gr.Column():
use_idle_mode = gr.Checkbox(
label="Use Idle Animation")
                            length_of_audio = gr.Number(
                                value=5, label="Length (in seconds) of the generated video.")
use_idle_mode.change(lambda choice: (gr.update(visible=not choice), gr.update(visible=choice)),
inputs=use_idle_mode, outputs=[driven_audio, driven_audio_no])
with gr.Row():
ref_video = gr.Video(
label="Reference Video", sources="upload", elem_id="vidref")
with gr.Column():
use_ref_video = gr.Checkbox(
label="Use Reference Video")
                        ref_info = gr.Radio(['pose', 'blink', 'pose+blink', 'all'], value='pose', label='Reference Video',
                                            info="What to borrow from the reference video? ('all' fully transfers it, i.e., video-driving mode)")
ref_video.change(lambda path: gr.update(
value=path is not None), inputs=ref_video, outputs=use_ref_video)
with gr.Column(variant='panel'):
with gr.Tabs(elem_id="sadtalker_checkbox"):
with gr.TabItem('Settings'):
with gr.Column(variant='panel'):
with gr.Row():
pose_style = gr.Slider(
minimum=0, maximum=45, step=1, label="Pose style", value=0)
exp_weight = gr.Slider(
minimum=0, maximum=3, step=0.1, label="expression scale", value=1)
blink_every = gr.Checkbox(
label="use eye blink", value=True)
with gr.Row():
size_of_image = gr.Radio(
[256, 512], value=256, label='face model resolution', info="use 256/512 model?")
preprocess_type = gr.Radio(
['crop', 'resize', 'full', 'extcrop', 'extfull'], value='crop', label='preprocess', info="How to handle input image?")
with gr.Row():
                            is_still_mode = gr.Checkbox(
                                label="Still Mode (less head motion; works with preprocess `full`)")
                            facerender = gr.Radio(
                                ['facevid2vid', 'pirender'], value='facevid2vid', label='facerender', info="Which face renderer?")
with gr.Row():
                            batch_size = gr.Slider(
                                label="batch size in generation", step=1, minimum=1, maximum=10, value=1)
enhancer = gr.Checkbox(
label="GFPGAN as Face enhancer", value=True)
submit = gr.Button(
'Generate', elem_id="sadtalker_generate", variant='primary')
with gr.Tabs(elem_id="sadtalker_generated"):
gen_video = gr.Video(label="Generated video")
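
    # Wire the Generate button; the input order must match generate_video's parameters.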
submit.click(
fn=generate_video,
inputs=[source_image, driven_audio, preprocess_type, is_still_mode, enhancer, batch_size, size_of_image,
pose_style, facerender, exp_weight, use_ref_video, ref_video, ref_info, use_idle_mode, length_of_audio, blink_every],
outputs=[gen_video],
)
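
    # Prefilled examples: (source image, audio, preprocess, still mode, enhancer).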
with gr.Row():
gr.Examples(examples=get_examples(), inputs=[source_image, driven_audio, preprocess_type, is_still_mode, enhancer],
outputs=[gen_video], fn=generate_video)
demo.launch(debug=True)