# dreamtalk / app.py
import gradio as gr
import subprocess
import cv2
import ffmpeg

def convert_video(input_file, output_file, codec='libx264'):
    """Re-encode a video to H.264 video and AAC audio for broad playback support."""
    try:
        # Define input and output files
        input_path = input_file
        output_path = output_file

        # Read input video file
        cap = cv2.VideoCapture(input_path)

        # Get frame dimensions and frame rate; VideoWriter_fourcc needs a
        # 4-character code, so 'mp4v' is used here while `codec` is passed
        # to ffmpeg below
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS)

        # Create output video writer
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

        # Process and write video frames
        while cap.isOpened():
            ret, frame = cap.read()
            if ret:
                out.write(frame)
            else:
                break

        # Release video objects
        cap.release()
        out.release()

        # Re-encode video and audio with ffmpeg; this pass reads the original
        # input again and overwrites the OpenCV output above, so
        # overwrite_output=True is needed to avoid ffmpeg's interactive prompt
        stream = ffmpeg.input(input_path)
        stream = ffmpeg.output(stream, output_path, acodec='aac', vcodec=codec, strict='experimental', loglevel='error')
        ffmpeg.run(stream, overwrite_output=True)

        print(f"Video converted successfully: {output_path}")
    except Exception as e:
        print(f"Error converting video: {e}")

def execute_command(command: list) -> None:
    subprocess.run(command, check=True)

def infer():
    output_name = "acknowledgement_english@M030_front_neutral_level1_001@male_face"
    command = [
        "python",
        "inference_for_demo_video.py",
        "--wav_path=data/audio/acknowledgement_english.m4a",
        "--style_clip_path=data/style_clip/3DMM/M030_front_neutral_level1_001.mat",
        "--pose_path=data/pose/RichardShelby_front_neutral_level1_001.mat",
        "--image_path=data/src_img/uncropped/male_face.png",
        "--cfg_scale=1.0",
        "--max_gen_len=30",
        f"--output_name={output_name}"
    ]
    execute_command(command)

    # Convert video to compatible codecs
    input_file_path = f"output_video/{output_name}.mp4"
    output_file_path = f"{output_name}.mp4"
    convert_video(input_file_path, output_file_path)

    return output_file_path

with gr.Blocks() as demo:
    with gr.Column():
        with gr.Row():
            with gr.Column():
                run_btn = gr.Button("Run")
            with gr.Column():
                output_video = gr.Video()

        run_btn.click(
            fn=infer,
            inputs=[],
            outputs=[output_video]
        )
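
# A long-running job like this is usually placed behind Gradio's request
# queue so the connection does not time out mid-inference; a hedged sketch
# (assumes a Gradio version exposing Blocks.queue()):
#
#     demo.queue().launch()
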
demo.launch()