import argparse
import os
import time

import numpy as np
import ffmpeg

from src.utils.utils import add_audio_to_video



def composite_videos(person_video_path, mask_video_path, background_video_path, output_path,
                     bg_scale_factor=2, person_scale_factor=0.5):
    """
    Composite a person video onto a background video using an alpha-mask video,
    and write the result (with the person video's audio track) to output_path.

    Parameters:
    - person_video_path: path to the original person (foreground) video
    - mask_video_path: path to the matted person alpha-mask video; it must
      cover the same frames as the person video so alphamerge lines up
    - background_video_path: path to the background video
    - output_path: path of the final composited video
    - bg_scale_factor: uniform scale applied to the background video
      (default 2, matching the previously hard-coded behavior)
    - person_scale_factor: uniform scale applied to the person/mask videos
      (default 0.5, matching the previously hard-coded behavior)
    """
    person_video = ffmpeg.input(person_video_path)
    mask_video = ffmpeg.input(mask_video_path)
    background_video = ffmpeg.input(background_video_path)

    def _video_size(path):
        # Probe a file and return (width, height) of its first video stream.
        probe = ffmpeg.probe(path)
        info = next(stream for stream in probe['streams'] if stream['codec_type'] == 'video')
        return int(info['width']), int(info['height'])

    bg_width, bg_height = _video_size(background_video_path)
    person_width, person_height = _video_size(person_video_path)

    # Target resolutions after scaling.
    new_bg_width = int(bg_width * bg_scale_factor)
    new_bg_height = int(bg_height * bg_scale_factor)
    new_person_width = int(person_width * person_scale_factor)
    new_person_height = int(person_height * person_scale_factor)

    # Scale person and mask to the exact same size so alphamerge matches pixel-for-pixel.
    person_video = ffmpeg.filter(person_video, 'scale', new_person_width, new_person_height, flags='lanczos')
    mask_video = ffmpeg.filter(mask_video, 'scale', new_person_width, new_person_height, flags='lanczos')

    # Scale the background to its target resolution.
    background_video = ffmpeg.filter(background_video, 'scale', new_bg_width, new_bg_height, flags='lanczos')

    # Attach the mask video as the person's alpha channel.
    merged_video = ffmpeg.filter([person_video, mask_video], 'alphamerge')

    # Anchor the person at the bottom-right corner of the background.
    x_pos = new_bg_width - new_person_width
    y_pos = new_bg_height - new_person_height
    output_video = ffmpeg.overlay(background_video, merged_video, x=x_pos, y=y_pos)

    # Reuse the person video's audio track in the final output.
    person_audio = ffmpeg.input(person_video_path).audio

    # Mux composited video + original audio into the target file.
    ffmpeg.output(output_video, person_audio, output_path).run(overwrite_output=True)




def video_human_segmentation(video_path, out_path, mask_video_path, back_pic="data/background/mask_backaround.png", back_video_path=None,
                             blur_radius=0):
    """
    Replace the background of a human video by compositing it over
    back_video_path, and report how long the composition took.

    NOTE(review): back_pic and blur_radius are currently unused by this
    implementation; they are kept for interface compatibility.
    """
    started_at = time.time()
    composite_videos(video_path, mask_video_path, back_video_path, out_path)
    elapsed = time.time() - started_at
    print(f"compose video spend time {elapsed}s")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Fix: --mask_video_path previously had default='matte', choices=['matte']
    # (copy-pasted from another script), which rejected any real file path;
    # --background_video_path's help said 'model path'. All three are file paths.
    parser.add_argument('--person_video_path', type=str, required=True, help='input person (foreground) video file')
    parser.add_argument('--mask_video_path', type=str, required=True, help='alpha-matte video matching the person video')
    parser.add_argument('--background_video_path', type=str, required=True, help='background video file')

    print('Get CMD Arguments...')
    args = parser.parse_args()

    # Output goes to data/output/<person-stem>_<background-stem>.mp4;
    # make sure the directory exists before ffmpeg tries to write into it.
    out_dir = os.path.join("data", "output")
    os.makedirs(out_dir, exist_ok=True)
    person_stem = os.path.splitext(os.path.basename(args.person_video_path))[0]
    background_stem = os.path.splitext(os.path.basename(args.background_video_path))[0]
    output_path = os.path.join(out_dir, '{0}_{1}.mp4'.format(person_stem, background_stem))

    composite_videos(args.person_video_path, args.mask_video_path, args.background_video_path, output_path)
    print("合成完成，输出文件: ", output_path)