import argparse
import os
import random
import shlex
import shutil
import subprocess

import librosa
import numpy as np
import torch
from scipy.signal import savgol_filter

# from model import EmoTalk

def render_video(args):
    """Render the per-frame images with Blender and mux them into an mp4.

    Pipeline: (1) run Blender headless with ./render.py on ./render.blend to
    write numbered PNG frames under ``<result_path>/<wav_name>/``, streaming
    its output to stdout; (2) combine the frames with the source audio via
    ffmpeg into ``<result_path>/<wav_name>.mp4``; (3) delete the frame dir.

    Args:
        args: argparse.Namespace with ``wav_path``, ``result_path`` and
            ``blender_path`` attributes (see ``main``).
    """
    # Derive the clip name portably (the old `split('/')` broke on
    # OS-specific separators and extension-less paths).
    wav_name = os.path.splitext(os.path.basename(args.wav_path))[0]
    image_path = os.path.join(args.result_path, wav_name)
    os.makedirs(image_path, exist_ok=True)
    image_temp = os.path.join(image_path, "%d.png")  # ffmpeg frame pattern
    output_path = os.path.join(args.result_path, wav_name + ".mp4")
    blender_path = args.blender_path
    python_path = "./render.py"
    blend_path = "./render.blend"
    cmd = '{} -t 64 -b {} -P {} -- "{}" "{}" '.format(blender_path, blend_path, python_path, args.result_path, wav_name)
    p = subprocess.Popen(shlex.split(cmd), shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # Iterate stdout to EOF rather than poll()+readline(): the poll loop could
    # exit with buffered output unread, and the raw bytes printed as b'...'.
    for raw_line in p.stdout:
        line = raw_line.decode("utf-8", errors="replace").strip()
        if line:
            print('[{}]'.format(line))
    p.wait()  # ensure returncode is populated after the stream closes
    if p.returncode == 0:
        print('Subprogram success')
    else:
        print('Subprogram failed')

    # Argument list with shell=False: paths with spaces/metacharacters no
    # longer break the command or allow shell injection.
    ffmpeg_cmd = ['ffmpeg', '-r', '30', '-i', image_temp, '-i', args.wav_path,
                  '-pix_fmt', 'yuv420p', '-s', '512x768', output_path, '-y']
    subprocess.call(ffmpeg_cmd, shell=False)

    # Remove the intermediate frames; replaces the former `rm -rf` shell call
    # (also works on non-POSIX systems).
    shutil.rmtree(image_path, ignore_errors=True)

def main():
    """Build the EmoTalk rendering CLI, parse argv, and render the video."""
    parser = argparse.ArgumentParser(
        description='EmoTalk: Speech-driven Emotional Disentanglement for 3D Face Animation')
    # (flag, default, help) triples — every option is a plain string.
    string_options = [
        ("--wav_path", "./audio/angry1.wav", 'path of the test data'),
        ("--device", "cuda", 'device'),
        ("--result_path", "./data/", 'path of the result'),
        ("--blender_path", "../blender/blender", 'path of blender'),
    ]
    for flag, default, help_text in string_options:
        parser.add_argument(flag, type=str, default=default, help=help_text)

    render_video(parser.parse_args())


if __name__ == "__main__":
    main()