import json
import os
import uuid
import wave
from datetime import datetime

import paramiko
from scp import SCPClient
from flasgger import Swagger, swag_from
from flask import Flask, request, Response
from sqlalchemy import create_engine
import moviepy.editor as mp
import threading

import config.database_config as db
import config.url_manager as UrlConfig
from api.dao import facepicture_dao, pptfile_dao, speechtrainfile_dao, speechfile_dao, video_dao, speechsynthesis_dao, \
    facevideogeneration_dao, videosynthesis_dao
from tts import audio_set
from sadtalker.src.gradio_demo import SadTalker

from flask_cors import CORS

app = Flask(__name__)
CORS(app)
swagger = Swagger(app)
engine = create_engine(db.DB_URL)

# Temporary hard-coded identities until real authentication is wired in.
admin_id = 1
user_id = 1

# Video type tags stored alongside rows in the video table.
TTS = "tts"
SADTALKER = "sadtalker"
FINAL = "final"

# Remote (HPC) account name used to build server-side paths.
# TODO: move this into configuration instead of hard-coding it.
USER = "u200111629"

def is_image(filename):
    """Return True if *filename* has a recognised image extension.

    The comparison is case-insensitive; only the extension is inspected.
    """
    return os.path.splitext(filename)[1].lower() in ('.jpg', '.jpeg', '.png', '.gif', '.bmp')


def is_video(filename):
    """Return True if *filename* has a recognised video extension.

    The comparison is case-insensitive; only the extension is inspected.
    """
    return os.path.splitext(filename)[1].lower() in ('.mp4', '.avi', '.mov', '.wmv', '.flv')


def is_ppt(filename):
    """Return True if *filename* has a PowerPoint extension (.ppt/.pptx).

    The comparison is case-insensitive; only the extension is inspected.
    """
    return os.path.splitext(filename)[1].lower() in ('.ppt', '.pptx')


def is_zip(filename):
    """Return True if *filename* has a .zip extension (case-insensitive)."""
    return os.path.splitext(filename)[1].lower() == '.zip'


def error_response(data):
    """Wrap *data* in the standard error envelope and return it as JSON."""
    payload = json.dumps({'status': 'error', 'data': data})
    return Response(payload, mimetype='application/json')


def success_response(data):
    """Wrap *data* in the standard success envelope and return it as JSON."""
    payload = json.dumps({'status': 'success', 'data': data})
    return Response(payload, mimetype='application/json')


def save_file(request):
    """Persist the single uploaded file from *request* under data/.

    Returns a ``(path, filename)`` tuple; both elements are None when the
    request carries no usable file or the type is not supported.

    Fixes in this revision:
    - The original mixed return types (bare string, flask Response, None,
      tuple), which made the caller's ``path, filename = save_file(...)``
      unpacking raise TypeError on every error path. Errors now uniformly
      return ``(None, None)``.
    - The client-supplied filename is reduced to its basename so a crafted
      name like ``../../x`` cannot escape the target directory.
    """
    if 'file' not in request.files:
        return None, None
    files = request.files.getlist('file')
    if len(files) != 1:
        return None, None
    file = files[0]
    # Untrusted input: strip any directory components from the name.
    filename = os.path.basename(file.filename)
    if filename == '':
        return None, None
    # Route the file into a per-type directory.
    if is_ppt(filename):
        save_dir = os.path.join(os.getcwd(), 'data', 'ppt')
    elif is_image(filename):
        save_dir = os.path.join(os.getcwd(), 'data', 'images')
    elif is_zip(filename):
        save_dir = os.path.join(os.getcwd(), 'data', 'train-files')
    else:
        return None, None
    os.makedirs(save_dir, exist_ok=True)
    path = os.path.join(save_dir, filename)
    file.save(path)
    return path, filename


# Upload a local file to the remote HPC host over SFTP.
def upload_file_ssh(local_file, remote_file):
    """Copy *local_file* to *remote_file* on the HPC server via SFTP.

    Fix: the original leaked the SSH/SFTP connections whenever connect()
    or put() raised; both are now released in ``finally`` blocks.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(UrlConfig.HPC_HOST, port=UrlConfig.HPC_PORT,
                    username=UrlConfig.HPC_USER, password=UrlConfig.HPC_PASS)
        sftp_client = ssh.open_sftp()
        try:
            sftp_client.put(local_file, remote_file)
        finally:
            sftp_client.close()
    finally:
        ssh.close()

# Download a generated video from the remote HPC host via SCP.
def download_video_ssh(local_file, remote_file):
    """Fetch *remote_file* from the HPC server into *local_file*.

    Fixes: the original called ``scp_client.close()`` on a client the
    ``with`` block had already closed, and leaked the SSH connection when
    connect() or the transfer raised.
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(UrlConfig.HPC_HOST, port=UrlConfig.HPC_PORT,
                    username=UrlConfig.HPC_USER, password=UrlConfig.HPC_PASS)
        with SCPClient(ssh.get_transport()) as scp_client:
            scp_client.get(remote_file, local_file)
    finally:
        ssh.close()


# Upload endpoint for PPT files, face images, and voice-training ZIPs.
@app.route(UrlConfig.UPLOAD_FILES_API, methods=['POST'])
@swag_from()
def upload_file():
    """Accept one uploaded file, store it on disk and register it in the DB.

    Fix: ``save_file`` historically returned a non-tuple sentinel (error
    string, Response, or None) on failure, so the unconditional tuple
    unpacking here raised TypeError. Failures are now detected before
    unpacking, and a proper error envelope is returned.
    """
    if request.method != 'POST':
        return error_response('Invalid request method')
    result = save_file(request)
    # Guard against both the legacy sentinel returns and a (None, None) pair.
    if not isinstance(result, tuple):
        return error_response('File type error! Upload only PPT, image, or ZIP files.')
    path, filename = result
    if path is None:
        return error_response('File type error! Upload only PPT, image, or ZIP files.')

    # Register the stored file in the table matching its type.
    if is_ppt(filename):
        file = pptfile_dao.PptFileDAO(engine).create(uuid.uuid4(), user_id, path, datetime.now())
        data = {'FileId': str(file.ppt_file_id), 'FilePath': file.ppt_file_path}
    elif is_image(filename):
        file = facepicture_dao.FacePictureDAO(engine).create(uuid.uuid4(), user_id, path, datetime.now())
        data = {'FileId': str(file.face_picture_id), 'FilePath': file.face_picture_path}
    elif is_zip(filename):
        file = speechtrainfile_dao.SpeechTrainFileDAO(engine).create(uuid.uuid4(), user_id, path, datetime.now())
        data = {'FileId': str(file.speech_train_file_id), 'FilePath': file.speech_train_file_path}
    else:
        return error_response('File type error! Upload only PPT, image, or ZIP files.')
    return success_response(data)


# Voice training endpoint: train a TTS model from an uploaded archive.
@app.route(UrlConfig.TRAIN_API, methods=['POST'])
@swag_from()
def tts_train():
    """Start TTS training for the training archive identified by FileId."""
    if request.method != 'POST':
        return error_response('Invalid request method')
    body = request.get_json()
    if not body:
        return error_response('Failed to parse request body')
    model_name = body.get("ModelName")
    if model_name is None or model_name == "":
        return error_response('Missing required parameter: ModelName')
    print("model_name:", model_name)
    file_id = body.get("FileId")
    if file_id is None or file_id == "":
        return error_response('Missing required parameter: FileId')
    print("file_id: ", file_id)

    train_file = speechtrainfile_dao.SpeechTrainFileDAO(engine).read(file_id)
    audio_set.train(model_name, train_file.speech_train_file_path)
    return success_response(None)


# Training progress endpoint.
@app.route(UrlConfig.TRAININFO_API, methods=['GET'])
@swag_from()
def tts_traininfo():
    """Report the current training progress string for a voice model."""
    if request.method != 'GET':
        return error_response('Invalid request method')
    model_name = request.args.get('modelname')
    print("model_name: ", model_name)
    if model_name is None or model_name == "":
        return error_response('Missing required parameter: modelname')
    return success_response({'TrainInfo': audio_set.train_info(model_name)})

# Model listing endpoint.
@app.route(UrlConfig.MODELS_API, methods=['GET'])
@swag_from()
def tts_models():
    """Return the names of all available trained voice models."""
    if request.method != 'GET':
        return error_response('Invalid request method')
    return success_response({'ModelNames': audio_set.get_speakers()})


@app.route(UrlConfig.DELETE_API, methods=['DELETE'])
@swag_from()
def tts_delete():
    """Delete the trained voice model named by the `modelname` query arg."""
    if request.method != 'DELETE':
        return error_response('Invalid request method')
    model_name = request.args.get('modelname')
    if model_name is None or model_name == "":
        return error_response('Missing required parameter: modelname')
    audio_set.delete_speaker(model_name)
    return success_response(None)


# Text-check endpoint: convert PPT speaker notes to pinyin for review.
@app.route(UrlConfig.CHECKTEXT, methods=['POST'])
@swag_from()
def tts_pretext():
    """Extract speaker notes from a PPT and return parallel text/pinyin lists."""
    if request.method != 'POST':
        return error_response('Invalid request method')
    body = request.get_json()
    if not body:
        return error_response('Failed to parse request body')
    file_id = body.get("FileId")
    if file_id is None or file_id == "":
        return error_response('Missing required parameter: FileId')
    print("file_id: ", file_id)

    ppt_file = pptfile_dao.PptFileDAO(engine).read(file_id)
    if ppt_file is None:
        return error_response("PPT not Found!")

    texts = []
    pinyins = []
    # One note per slide; skip notes that produce no usable text.
    for note in audio_set.GetTextByPPT(ppt_file.ppt_file_path):
        raw_text, raw_pinyin = audio_set.pretxt(note)
        joined_text = ''.join(raw_text)
        joined_pinyin = ' '.join(raw_pinyin)
        if joined_text != '' and joined_pinyin != '':
            texts.append(joined_text)
            pinyins.append(joined_pinyin)
    return success_response({"Texts": texts, "Pinyins": pinyins})


# Speech synthesis: render the PPT plus synthesized narration into a video.
@app.route(UrlConfig.TTS_SYSTHESIS_API, methods=['POST'])
@swag_from()
def tts_synthesis():
    """Synthesize narration from pinyin and build the per-slide TTS video.

    Request body:
        FileId (required): id of a previously uploaded PPT.
        Pinyins (required): per-slide pinyin strings (see tts_pretext).
        ModelName (optional): voice model, defaults to "DoctorJiang".
        Time (optional): float, defaults to 0.1 — presumably the pause
            between segments in seconds; TODO confirm against audio_set.

    Returns the new video's id and its web-visible path.
    """
    if request.method != 'POST':
        return error_response('Invalid request method')
    data = request.get_json()
    # Defaults used when the optional parameters are omitted.
    time = 0.1
    model_name = "DoctorJiang"
    if data:
        file_id = data.get("FileId")
        if file_id is None or file_id == "":
            return error_response('Missing required parameter: FileId')
        print("file_id: ", file_id)

        pinyins = data.get("Pinyins")
        if pinyins is None or pinyins == "":
            return error_response('Missing required parameter: Pinyins')
        print("pinyins: ", pinyins)

        if "ModelName" in data:
            model_name = data.get("ModelName")
            print("model_name:", model_name)

        if "Time" in data:
            time = float(data.get("Time"))
    else:
        return error_response('Failed to parse request body')

    ppt_file = pptfile_dao.PptFileDAO(engine).read(file_id)
    if ppt_file is None:
        return error_response("PPT not Found!")
    ppt_file_path = ppt_file.ppt_file_path

    # The video is written straight into the Vue frontend's asset tree so
    # the browser can fetch it under /video/tts_video/.
    tts_video_path = os.path.join('../aigc_vue/src/views/chat/video', 'tts_video')
    if not os.path.exists(tts_video_path):
        os.makedirs(tts_video_path)
    merged_audio_path = os.path.join(os.getcwd(), 'data', 'tts_audio')
    if not os.path.exists(merged_audio_path):
        os.makedirs(merged_audio_path)

    # Synthesize each segment, then merge slides + audio into one video.
    durations, audio_folder = audio_set.ObtainPptNoteAndDuration(pinyins, "pinyin", model_name, time)
    merged_audio = os.path.join(merged_audio_path, f'audio_{datetime.now().strftime("%Y%m%d%H%M%S")}.wav')

    tts_video_name = f'video_{datetime.now().strftime("%Y%m%d%H%M%S")}.mp4'
    tts_video = os.path.join(tts_video_path, tts_video_name)
    audio_set.video(ppt_file_path, tts_video, pinyins, durations, audio_folder, merged_audio)

    # Record the video, the merged audio, and the synthesis linking row.
    video = video_dao.VideoDAO(engine).create(uuid.uuid4(), user_id, TTS, '/video/tts_video/' + tts_video_name,
                                              datetime.now())
    speech_file = speechfile_dao.SpeechFileDAO(engine).create(uuid.uuid4(), user_id, merged_audio, datetime.now())
    speechsynthesis_dao.SpeechSynthesisDAO(engine).create(file_id, speech_file.speech_file_id, video.video_id,
                                                          datetime.now(),
                                                          model_name)
    data = {'FileId': str(video.video_id), 'FilePath': video.video_file_path}
    return success_response(data)


# Speech-speed adjustment endpoint.
@app.route(UrlConfig.SPEED_API, methods=['POST'])
@swag_from()
def tts_speed():
    """Re-render an existing TTS video at a different speech speed."""
    if request.method != 'POST':
        return error_response('Invalid request method')
    body = request.get_json()
    if not body:
        return error_response('Failed to parse request body')
    file_id = body.get("FileId")
    if file_id is None or file_id == "":
        return error_response('Missing required parameter: FileId')
    print("file_id: ", file_id)
    speed = body.get("Speed")
    if speed is None or speed == "":
        return error_response('Missing required parameter: Speed')
    print("speed: ", speed)

    video = video_dao.VideoDAO(engine).read(file_id)
    new_video_path, new_audio_path, new_file_name = audio_set.speed_adjust(speed, video.video_file_path)
    return success_response({'FilePath': '/video/tts_video/' + new_file_name + '.mp4'})


# Face-video generation endpoint (runs SadTalker asynchronously).
@app.route(UrlConfig.GEN_API, methods=['POST'])
@swag_from()
def sadtalker_gen():
    """Start talking-face video generation in a background thread.

    Responds immediately with a pre-allocated video id; the client polls
    the check-face-video endpoint until the video row appears.

    Fix: the RefInfo value was logged under a duplicate "ref_video" label.
    """
    if request.method != 'POST':
        return error_response('Invalid request method')
    data = request.get_json()
    if not data:
        return error_response('Failed to parse request body')
    face_picture_id = data.get("FacePictureId")
    print("face_picture_id: ", face_picture_id)
    ppt_file_id = data.get("PPTFileId")
    print("ppt_file_id: ", ppt_file_id)
    speed = data.get("Speed")
    print("speed: ", speed)
    preprocess = data.get("Preprocess")
    print("preprocess_type: ", preprocess)
    still_mode = data.get("StillMode")
    print("still_mode: ", still_mode)
    use_enhancer = data.get("UseEnhancer")
    print("use_enhancer: ", use_enhancer)
    batch_size = data.get("BatchSize")
    print("batch_size: ", batch_size)
    size_of_image = data.get("SizeOfImage")
    print("size_of_image: ", size_of_image)
    pose_style = data.get("PoseStyle")
    print("pose_style: ", pose_style)
    exp_scale = data.get("ExpScale")
    print("exp_scale: ", exp_scale)
    use_ref_video = data.get("UseRefVideo")
    print("use_ref_video: ", use_ref_video)
    ref_video = data.get("RefVideo")
    print("ref_video: ", ref_video)
    ref_info = data.get("RefInfo")
    print("ref_info: ", ref_info)  # was mislabeled "ref_video" in the log

    # Allocate the id up front so the client can poll for the result row.
    face_video_id = uuid.uuid4()

    threading.Thread(target=sadtalker_test, args=(
        ppt_file_id, speed, face_picture_id, face_video_id, preprocess,
        still_mode, use_enhancer, batch_size, size_of_image,
        pose_style, exp_scale,
        use_ref_video, ref_video, ref_info
    )).start()

    data = {'FileId': str(face_video_id)}
    print("data: ", data)
    return success_response(data)


def sadtalker_test(ppt_file_id, speed, face_picture_id, face_video_id, preprocess,
                   still_mode, use_enhancer, batch_size, size_of_image,
                   pose_style, exp_scale,
                   use_ref_video, ref_video, ref_info):
    """Generate a talking-face video by running SadTalker on the HPC host.

    Runs in a background thread started by ``sadtalker_gen``. Steps:
    speed-adjust the narration, upload the face picture and audio chunks
    over SSH, run SadTalker remotely per chunk, download each result,
    concatenate them locally with ffmpeg, then record the final video so
    ``check_face_video`` polling can find it.

    NOTE(review): ``preprocess`` is accepted but never forwarded — the
    ``--preprocess`` flag is commented out below; confirm intent.
    """
    face_picture = facepicture_dao.FacePictureDAO(engine).read(face_picture_id)
    speech_synthesis = speechsynthesis_dao.SpeechSynthesisDAO(engine).readbyPPT(ppt_file_id)
    audio_video = video_dao.VideoDAO(engine).read(speech_synthesis.video_id)
    print("audio_video.video_file_path: ", audio_video.video_file_path)
    # Re-render the narration at the requested speed before lip-syncing.
    new_video_path, new_audio_path, new_file_name = audio_set.speed_adjust(speed, audio_video.video_file_path)
    print("new_audio_path: ", new_audio_path)
    result_dir = f'/home/{USER}/jupyterlab/aigc_server/data/sadtalker'
    print("result_dir: ", result_dir)
    # NOTE(review): this creates the path on the *local* filesystem, while
    # result_dir is passed to the remote command below — confirm intended.
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)

    # Upload the source face picture to the remote server.
    # NOTE(review): the remote name is derived from the *audio* file name and
    # given a ".mp4" suffix even though the payload is an image — looks like
    # a bug; confirm what sadtalker_test.py expects on the remote side.
    remote_face_picture_path = f'/home/{USER}/jupyterlab/aigc_server/data/images/' + new_file_name + ".mp4"
    upload_file_ssh(face_picture.face_picture_path, remote_face_picture_path)

    # Split the narration into 60-second chunks so each remote run is bounded.
    cnt = split_audio_by_time(new_file_name, 60)
    print("cnt: ", cnt)
    face_video_list = []
    for i in range(cnt):
        print("i: ", i)
        new_audio_path = os.path.join(os.getcwd(), 'data', 'tts_audio', f'{new_file_name}_{i}.wav')
        remote_audio_path = f'/home/{USER}/jupyterlab/aigc_server/data/audio/' + f'{new_file_name}_{i}.wav'
        upload_file_ssh(new_audio_path, remote_audio_path)

        # Run SadTalker remotely over SSH.
        # NOTE(review): a fresh SSHClient is opened on every iteration but only
        # the client from the last iteration is closed after the loop — the
        # earlier connections leak until garbage collection.
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(UrlConfig.HPC_HOST, port=UrlConfig.HPC_PORT, username=UrlConfig.HPC_USER, password=UrlConfig.HPC_PASS)
        # Build the command-line arguments for the remote sadtalker_test.py.
        args = remote_face_picture_path + " " + remote_audio_path
                # + ' --preprocess ' + preprocess
        if still_mode:
            args = args + " --still_mode"
        if use_enhancer:
            args = args + " --use_enhancer"
        args = args + " --batch_size " + str(batch_size) + " --size " + str(size_of_image) + " --pose_style " + str(pose_style) + " --exp_scale " + str(exp_scale)
        if use_ref_video:
            args = args + " --use_ref_video" + " --ref_video " + ref_video + " --ref_info " + ref_info
        args = args + " --result_dir " + result_dir
        print("args: ", args)
        command = "cd jupyterlab/aigc_server; conda activate sadtalker; python sadtalker_test.py " + args
        stdin, stdout, stderr = ssh.exec_command(command)
        output = stdout.read().decode("utf-8")
        print("output: ", output)
        # Assumes the remote script prints the result path as its last
        # non-empty stdout line — TODO confirm against sadtalker_test.py.
        last_line = [line for line in output.splitlines() if line][-1]
        print("last_line: ", last_line)

        # Download the chunk's face video into the frontend asset tree.
        face_video_name = f'face_{datetime.now().strftime("%Y%m%d%H%M%S")}_{i}.mp4'
        face_video_list.append(face_video_name)
        download_video_ssh("../aigc_vue/src/views/chat/video/sadtalker/" + face_video_name, last_line)

    # Concatenate the per-chunk face videos into one clip.
    print(face_video_list)
    face_video_name = merge_face_videos(face_video_list, cnt)

    # Record the result; check_face_video polls for this row.
    face_video = video_dao.VideoDAO(engine).create(face_video_id, user_id, "sadtalker", "/video/sadtalker/" + face_video_name, datetime.now())
    facevideogeneration_dao.FaceVideoGenerationDAO(engine).create(face_picture.face_picture_id, face_video.video_id,
                                                                  ppt_file_id,
                                                                  datetime.now())
    data = {'FileId': str(face_video_id), 'FilePath': face_video.video_file_path}
    print("data: ", data)

    ssh.close()
    return data

def split_audio_by_time(audio_file_name, chunk_length):
    """Split data/tts_audio/<name>.wav into fixed-length chunks.

    Each chunk is written next to the source as ``<name>_<i>.wav`` with the
    same channel count, sample width, and sample rate; the final chunk may
    be shorter. Returns the number of chunks written.

    Args:
        audio_file_name: base name of the WAV file (without ``.wav``).
        chunk_length: target chunk duration in seconds.

    Fix: files are now opened via context managers, so handles are closed
    even when a read or write raises (the original leaked the output file
    on any mid-chunk error).
    """
    audio_dir = os.path.join(os.getcwd(), 'data', 'tts_audio')
    audio_file_path = os.path.join(audio_dir, f'{audio_file_name}.wav')
    cnt = 0
    with wave.open(audio_file_path, 'rb') as wav_file:
        sample_rate = wav_file.getframerate()
        num_channels = wav_file.getnchannels()
        sample_width = wav_file.getsampwidth()
        num_frames = wav_file.getnframes()

        # Frames per chunk; int() guards against a float chunk_length.
        chunk_frames = int(chunk_length * sample_rate)
        for start_frame in range(0, num_frames, chunk_frames):
            end_frame = min(start_frame + chunk_frames, num_frames)

            # Read this chunk's frames from the source file.
            wav_file.setpos(start_frame)
            frames = wav_file.readframes(end_frame - start_frame)

            # Write the chunk with the same audio parameters.
            new_audio_file_path = os.path.join(audio_dir, f'{audio_file_name}_{cnt}.wav')
            with wave.open(new_audio_file_path, 'wb') as new_wav_file:
                new_wav_file.setparams((num_channels, sample_width, sample_rate,
                                        end_frame - start_frame, 'NONE', 'not compressed'))
                new_wav_file.writeframes(frames)
            cnt += 1
    return cnt

def merge_face_videos(face_video_list, cnt):
    """Concatenate the per-chunk face videos into one clip via ffmpeg.

    Inputs are read from, and the merged file written to, the frontend's
    video/sadtalker directory. Returns the merged file's name.
    """
    # Timestamped output name inside the frontend asset tree.
    face_file_name = f'face_{datetime.now().strftime("%Y%m%d%H%M%S")}.mp4'
    video_dir = '../aigc_vue/src/views/chat/video/sadtalker/'
    output_file = video_dir + face_file_name

    # One -i flag per input chunk, in order.
    input_args = ''.join(f' -i {video_dir}{name}' for name in face_video_list)

    # Concat filter over all inputs, overwriting any existing output.
    output_args = f' -filter_complex "concat=n={cnt}:v=1:a=1" -y {output_file}'

    os.system(f'ffmpeg {input_args} {output_args}')
    return face_file_name


# Poll endpoint: has the asynchronous face-video generation finished?
@app.route(UrlConfig.CHECK_FACE_VIDEO_API, methods=["GET"])
@swag_from()
def check_face_video():
    """Return 'done' with the file path once the face video row exists, else 'wait'."""
    if request.method != 'GET':
        return error_response('Invalid request method')
    face_video_id = request.args.get('facevideoid')
    if face_video_id is None or face_video_id == "":
        return error_response('Missing required parameter: facevideoid')

    face_video = video_dao.VideoDAO(engine).read(face_video_id)
    print("face_video: ", face_video)
    if face_video:
        return success_response({'Info': 'done', 'FileId': str(face_video_id), 'FilePath': face_video.video_file_path})
    return success_response({'Info': 'wait', 'FileId': str(face_video_id), 'FilePath': None})


# Final video endpoint: composite face video onto the TTS video, asynchronously.
@app.route(UrlConfig.MERGE_VIDEO_API, methods=['POST'])
@swag_from()
def final_merge():
    """Start background composition of the final lecture video; return its id."""
    if request.method != 'POST':
        return error_response('Invalid request method')
    body = request.get_json()
    if not body:
        return error_response('Failed to parse request body')
    face_video_id = body.get("FaceVideoId")
    if face_video_id is None or face_video_id == "":
        return error_response('Missing required parameter: FaceVideoId')
    print("face_video_id: ", face_video_id)
    ppt_file_id = body.get("PPTFileId")
    if ppt_file_id is None or ppt_file_id == "":
        return error_response('Missing required parameter: PPTFileId')
    print("ppt_file_id: ", ppt_file_id)
    speed = body.get("Speed")
    print("speed: ", speed)
    position = body.get("Position")
    print("position: ", position)

    # Allocate the id up front so the client can poll check_final_video.
    final_video_id = uuid.uuid4()
    worker_args = (final_video_id, face_video_id, ppt_file_id, speed, position)
    threading.Thread(target=merge_videos, args=worker_args).start()

    payload = {'FileId': str(final_video_id)}
    print("data: ", payload)
    return success_response(payload)


def merge_videos(final_video_id, face_video_id, ppt_file_id, speed=1, position=1):
    """Composite the face video over the speed-adjusted TTS/PPT video.

    Runs in a background thread (started by ``final_merge``); records the
    result in the video and video-synthesis tables so ``check_final_video``
    polling can find it.

    Args:
        final_video_id: pre-allocated id for the resulting video row.
        face_video_id: id of the SadTalker face video to overlay.
        ppt_file_id: id of the source PPT (used to look up the narration).
        speed: speech-speed factor passed to audio_set.speed_adjust.
        position: overlay corner index — 0=top-left, 1=top-right,
            2=bottom-right, 3=bottom-left (see x/y tables below).
    """
    face_video = video_dao.VideoDAO(engine).read(face_video_id)
    print("face_video_path： ", face_video.video_file_path)

    # Re-render the narration at the requested speed and register the audio.
    speech_synthesis = speechsynthesis_dao.SpeechSynthesisDAO(engine).readbyPPT(ppt_file_id)
    speech_video = video_dao.VideoDAO(engine).read(speech_synthesis.video_id)
    new_video_path, new_audio_path, new_file_name = audio_set.speed_adjust(speed, speech_video.video_file_path)
    speech_file = speechfile_dao.SpeechFileDAO(engine).create(uuid.uuid4(), user_id, new_audio_path, datetime.now())

    # Output goes into the Vue frontend's asset tree (served as /video/...).
    final_video_dir = '../aigc_vue/src/views/chat/video/final_video'
    if not os.path.exists(final_video_dir):
        os.makedirs(final_video_dir)
    final_video_name = f'video_{datetime.now().strftime("%Y%m%d%H%M%S")}.mp4'
    final_video_path = os.path.join(final_video_dir, final_video_name)

    clip1 = mp.VideoFileClip(new_video_path)
    # Overlay size derived from the base clip — roughly 1/5 width, 1/3 height.
    size = (int(clip1.size[0] / 50.0) * 10, int(clip1.size[1] / 30.0) * 10)
    # Corner lookup tables indexed by `position` (see docstring).
    x = ['left', 'right', 'right', 'left']
    y = ['top', 'top', 'bottom', 'bottom']
    clip2 = mp.VideoFileClip('../aigc_vue/src/views/chat' + face_video.video_file_path).resize(size).set_position(pos=(x[position], y[position]))
    audio = clip1.audio
    # NOTE(review): mask_color keys out pure-black pixels (thr=0); it does not
    # actually crop a circle as the original comment claimed — confirm intent.
    clip2 = clip2.fx(mp.vfx.mask_color, color=(0, 0, 0), thr=0)

    mp.CompositeVideoClip([clip1.set_audio(audio), clip2.set_audio(audio)]).write_videofile(final_video_path)

    # Record the final video and the synthesis linking row.
    final_video = video_dao.VideoDAO(engine).create(final_video_id, user_id, "final",
                                                    '/video/final_video/' + final_video_name, datetime.now())
    videosynthesis_dao.VideoSynthesisDAO(engine).create(ppt_file_id, final_video.video_id, speech_file.speech_file_id,
                                                        datetime.now())


# Poll endpoint: has the asynchronous final-video composition finished?
@app.route(UrlConfig.CHECK_FINAL_VIDEO_API, methods=["GET"])
@swag_from()
def check_final_video():
    """Return 'done' with the file path once the final video row exists, else 'wait'."""
    if request.method != 'GET':
        return error_response('Invalid request method')
    final_video_id = request.args.get('finalvideoid')
    if final_video_id is None or final_video_id == "":
        return error_response('Missing required parameter: finalvideoid')

    final_video = video_dao.VideoDAO(engine).read(final_video_id)
    print("final_video: ", final_video)
    if final_video:
        return success_response(
            {'Info': 'done', 'FileId': str(final_video_id), 'FilePath': final_video.video_file_path})
    return success_response({'Info': 'wait', 'FileId': str(final_video_id), 'FilePath': None})


@app.route('/')
@swag_from('./config/hello.yaml')
def hello_world():
    """Landing endpoint; returns the (Chinese) welcome message."""
    return '欢迎访问教学视频自动制作系统！'


if __name__ == '__main__':
    # Development entry point; use a production WSGI server for deployment.
    app.run()