# --- Environment bootstrap: must run before the heavy imports below. ---
import os
# Machine-specific absolute paths (E:\...) — adjust when deploying elsewhere.
import site
ffmpeg_path = 'E:\\AI-Starter\\EZ-AI-Starter-1.0\\envs\\ffmpeg\\bin'
if not os.path.exists(ffmpeg_path):
    raise Exception("ffmpeg not found")
# Prepend so the bundled ffmpeg wins over any ffmpeg already on PATH
# (";" separator — Windows only).
os.environ["PATH"] = ffmpeg_path + ";" + os.environ["PATH"]
# Make the PDM-style __pypackages__ directory importable.
site.addsitedir('E:\\python_test\\MyAIs\\__pypackages__\\3.10\\lib')
current_path = os.path.dirname(os.path.abspath(__file__))
# Keep downloaded transformer models next to the script instead of ~/.cache.
os.environ["TRANSFORMERS_CACHE"] = os.path.join(current_path, "transformers-cache")
import gradio as gr
import os
import sys
from scipy.io.wavfile import write
from os import path
import subprocess

# Expose the vendored Wav2Lip and GFPGAN packages as importable modules.
basePath = os.path.dirname(os.path.abspath(__file__))
wav2lipFolderName = 'modules/Wav2Lip'
gfpganFolderName = 'modules/GFPGAN'
wav2lipPath = os.path.join(basePath, wav2lipFolderName)
gfpganPath = os.path.join(basePath, gfpganFolderName)
sys.path.append(wav2lipPath)
sys.path.append(gfpganPath)
# All generated videos are written below this directory (see open_dir()).
outputPath = basePath + '/outputs'
if not os.path.exists(outputPath):
    os.makedirs(outputPath)
# These imports require the sys.path entries appended above.
from modules.Wav2Lip.gradio_inference import main as wav2lip
from video_restore import restore


def not_exist_mkdir(filepath):
    """Ensure the parent directory of *filepath* exists.

    Args:
        filepath: Path to a file; its containing directory is created
            (including intermediate directories) when missing.
    """
    dir_ = path.dirname(filepath)
    # exist_ok avoids the TOCTOU race between an exists() check and makedirs();
    # a bare filename has an empty dirname and needs no directory at all.
    if dir_:
        os.makedirs(dir_, exist_ok=True)


def syn_img(image, audio, fix_face):
    """Lip-sync a still image to an audio clip via Wav2Lip.

    Args:
        image: Source face image (Gradio Image value).
        audio: Gradio Audio value, a ``(sample_rate, ndarray)`` pair.
        fix_face: When True, run face restoration during synchronisation.

    Returns:
        Path of the generated video file.
    """
    wav_path = path.join(wav2lipPath, 'temp/temp.wav')
    not_exist_mkdir(wav_path)
    # scipy's write() converts the ndarray to 16-bit signed integers,
    # the sample format WAV files use.
    sample_rate, samples = audio
    write(wav_path, sample_rate, samples)
    return wav2lip(image, wav_path, True, fix_face)


def syn_video(video_path, audio, fix_face):
    """Lip-sync an existing video to an audio clip via Wav2Lip.

    Args:
        video_path: Path of the source video file.
        audio: Gradio Audio value, a ``(sample_rate, ndarray)`` pair.
        fix_face: When True, run face restoration during synchronisation.

    Returns:
        Path of the generated video file.
    """
    wav_path = path.join(wav2lipPath, 'temp/temp.wav')
    not_exist_mkdir(wav_path)
    # Persist the in-memory audio as a 16-bit WAV for the Wav2Lip pipeline.
    sample_rate, samples = audio
    write(wav_path, sample_rate, samples)
    return wav2lip(video_path, wav_path, False, fix_face)


def open_dir(dir_name):
    """Open an output sub-directory in Windows Explorer.

    Args:
        dir_name: Name of a sub-folder under the module-level ``outputPath``.
    """
    # Local renamed from "path" — the original shadowed the os.path module
    # imported at file top.
    target = os.path.join(outputPath, dir_name)
    if not os.path.exists(target):
        # Fall back to the parent (outputPath itself), creating it if needed,
        # so Explorer always has something to show before any output exists.
        target = os.path.dirname(target)
        if not os.path.exists(target):
            os.makedirs(target)
    # Explorer wants backslashes; pass argv as a list so a path containing
    # spaces is delivered as a single argument instead of being split.
    target = target.replace("/", "\\")
    subprocess.Popen(['explorer', target])


def fix_video(video_path, only_restore_center, upscale):
    """Run GFPGAN face restoration on a video.

    Args:
        video_path: Path of the video to restore.
        only_restore_center: Restore only the centre-most face (faster).
        upscale: Resolution upscale factor; 1 means no upscaling.

    Returns:
        Path of the restored video produced by ``restore()``.
    """
    # Thin wrapper so the Gradio click handler maps 1:1 onto restore().
    return restore(video_path, only_restore_center, upscale)


def demo1():
    """Build the lip-sync UI: one tab for image input, one for video input.

    Returns:
        gr.Blocks: the assembled (not yet launched) interface.
    """
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                # Tab: image lip-sync (图像嘴型同步)
                with gr.Tab("图像嘴型同步"):
                    input_img = gr.Image().style(height=256)
                    input_audio_img = gr.Audio()
                    input_fix_face_img = gr.Checkbox(label="restore_face",
                                                     info="嘴型同步的过程中进行面部修复，比单独进行修复要快")
                    btn_submit_img = gr.Button(value="开始同步", variant='primary')

                # Tab: video lip-sync (视频嘴型同步)
                with gr.Tab("视频嘴型同步"):
                    input_video = gr.Video(type="filepath").style(height=256)
                    input_audio_video = gr.Audio()
                    input_fix_face_video = gr.Checkbox(label="restore_face",
                                                       info="嘴型同步的过程中进行面部修复，比单独进行修复要快")
                    btn_submit_video = gr.Button(value="开始同步", variant='primary')
            with gr.Column():
                # Shared output column: both tabs render into the same player.
                output_video = gr.Video(type="filepath").style(width=512)
                btn_open = gr.Button(value="打开输出目录")
        btn_submit_img.click(fn=syn_img, inputs=[input_img, input_audio_img, input_fix_face_img],
                             outputs=[output_video])
        btn_submit_video.click(fn=syn_video, inputs=[input_video, input_audio_video, input_fix_face_video],
                               outputs=[output_video])
        # Wav2Lip results land in outputs/wav2lip — open it in Explorer.
        btn_open.click(fn=lambda: open_dir("wav2lip"))
    return demo


def demo2():
    """Build the face-restoration UI (GFPGAN over a whole video).

    Returns:
        gr.Blocks: the assembled (not yet launched) interface.
    """
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                input_video = gr.Video(type="filepath").style(height=256)
                input_only_center = gr.Checkbox(value=True, label="only_restore_center",
                                                info="仅修复视频中间位置的人脸（速度更快）")
                # Upscale factor; 1 disables upscaling (label/info are Chinese UI text).
                input_number = gr.Number(value=1, label='分辨率放大倍数', info='比较消耗时间！为1时不放大')
                with gr.Row():
                    with gr.Column():
                        btn_submit = gr.Button(value="开始修复", variant='primary')
            with gr.Column():
                output_video = gr.Video(type="filepath").style(width=512)
                btn_open = gr.Button(value="打开输出目录")
        btn_submit.click(fn=fix_video, inputs=[input_video, input_only_center, input_number], outputs=[output_video])
        # Restoration results land in outputs/restore — open it in Explorer.
        btn_open.click(fn=lambda: open_dir("restore"))
    return demo



app = gr.TabbedInterface([demo1(), demo2()], ["wav2lip同步器", '人脸修复'])

app.launch()
if __name__ == "__main__":
    # demo.launch()
    pass
