import time

import cv2
import numpy as np

from src.utils.utils import add_audio_to_video


def process_frame(frame, mask, back_ori, w, h, blur_radius=0):
    """Composite a foreground frame over a background using a soft mask.

    The frame is scaled (aspect ratio preserved) to fit inside the w x h
    output canvas, centered horizontally and aligned to the bottom edge,
    then alpha-blended onto the (resized) background.

    Args:
        frame: BGR uint8 foreground frame.
        mask: 3-channel mask aligned with `frame`, values in 0-255
            (255 = foreground).
        back_ori: BGR background image; resized to (w, h) when needed.
        w: output canvas width in pixels.
        h: output canvas height in pixels.
        blur_radius: Gaussian kernel size used to feather the mask edges;
            0 disables blurring. Even values are rounded up to the next
            odd number because cv2.GaussianBlur requires an odd kernel.

    Returns:
        The composited h x w x 3 uint8 image.
    """
    if blur_radius > 0:
        # cv2.GaussianBlur raises an error on positive even kernel sizes —
        # force the kernel to be odd.
        ksize = blur_radius if blur_radius % 2 == 1 else blur_radius + 1
        mask = cv2.GaussianBlur(mask, (ksize, ksize), 0)

    # Scale the foreground to fit the canvas while keeping its aspect ratio.
    height, width = frame.shape[:2]
    scale = min(w / width, h / height)
    new_width = int(width * scale)
    new_height = int(height * scale)

    frame_fg = cv2.resize(frame.astype(np.float32), (new_width, new_height), interpolation=cv2.INTER_LINEAR)
    mask_resized = cv2.resize(mask, (new_width, new_height), interpolation=cv2.INTER_LINEAR)
    mask_resized = mask_resized.astype(np.float32) / 255.0

    # Blank canvases the size of the output background.
    frame_fg_padded = np.zeros((h, w, 3), dtype=np.float32)
    mask_padded = np.zeros((h, w, 3), dtype=np.float32)

    # Place the foreground horizontally centered and bottom-aligned.
    x_offset = (w - new_width) // 2
    y_offset = h - new_height

    frame_fg_padded[y_offset:y_offset + new_height, x_offset:x_offset + new_width] = frame_fg
    mask_padded[y_offset:y_offset + new_height, x_offset:x_offset + new_width] = mask_resized

    # Apply the alpha mask to the foreground.
    frame_fg_padded *= mask_padded

    back_h, back_w = back_ori.shape[:2]
    if back_h != h or back_w != w:
        back = cv2.resize(back_ori, (w, h), interpolation=cv2.INTER_CUBIC).astype(np.float32)
    else:
        back = back_ori.astype(np.float32)  # astype copies, so back_ori is never mutated
    back *= (1 - mask_padded)

    return cv2.add(frame_fg_padded, back).astype(np.uint8)


def get_output_w_h(back_h, back_w, min_width=1920, min_height=1080):
    """Return the (height, width) of the output canvas.

    The background's dimensions are scaled up, preserving their aspect
    ratio, until both meet the given minimums. A background already at
    or above both minimums is returned unchanged.

    Args:
        back_h: background height in pixels.
        back_w: background width in pixels.
        min_width: smallest acceptable output width.
        min_height: smallest acceptable output height.

    Returns:
        Tuple of (output_height, output_width).
    """
    ratio = back_w / back_h

    # Already large enough on both axes — keep the original size.
    if back_w >= min_width and back_h >= min_height:
        return back_h, back_w

    # Grow the width first, then derive the height from the aspect ratio.
    width = max(back_w, min_width)
    derived_height = width / ratio
    if derived_height < min_height:
        # Width-driven sizing leaves the image too short: drive the size
        # from the minimum height instead.
        return min_height, int(min_height * ratio)
    return int(derived_height), width


def video_human_segmentation(video_path, out_path, mask_video_path, back_pic="data/background/mask_backaround.png", back_video_path=None,
                             blur_radius=0):
    """Composite the person in `video_path` onto a new background using a
    pre-computed mask video, then mux the source audio back in.

    Side outputs written next to `out_path`: `*_mask.mp4` (the mask as
    read), `*_fg.mp4` (masked foreground preview), and
    `*_with_audio.mp4` (the final composite with audio).

    Args:
        video_path: source video containing the person.
        out_path: path for the composited (silent) output video (.mp4).
        mask_video_path: per-frame mask video (white = foreground),
            frame-aligned with `video_path`.
        back_pic: background image, used when `back_video_path` is None.
        back_video_path: optional background video; when it is shorter
            than the source, the last background frame is reused.
        blur_radius: mask-feathering radius forwarded to process_frame.

    Raises:
        ValueError: if the source (or background) video yields no frames.
        FileNotFoundError: if the background image cannot be read.
    """
    start = time.time()
    video_input = cv2.VideoCapture(video_path)
    mask_cap = cv2.VideoCapture(mask_video_path)
    fps = video_input.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    success, frame = video_input.read()
    if not success:
        # Without this guard, frame is None and frame.shape raises
        # an opaque AttributeError below.
        raise ValueError(f"could not read any frame from {video_path}")

    h, w = frame.shape[:2]

    if back_video_path:
        back_video_input = cv2.VideoCapture(back_video_path)
        success_back, frame_back = back_video_input.read()
        if not success_back:
            raise ValueError(f"could not read any frame from {back_video_path}")
        back_h, back_w = frame_back.shape[:2]
    else:
        back_video_input = None
        back_ori = cv2.imread(back_pic)
        if back_ori is None:
            # cv2.imread returns None (no exception) on a missing/bad file.
            raise FileNotFoundError(f"background image not found: {back_pic}")
        back_h, back_w = back_ori.shape[:2]
        frame_back = back_ori

    output_height, output_width = get_output_w_h(back_h, back_w)
    print(f"{output_height, output_width}")
    video_save = cv2.VideoWriter(out_path, fourcc, fps, (output_width, output_height))

    end = time.time()
    print(f"prepare spend time {end - start}s")
    start = time.time()

    # Debug side-outputs: the raw mask and the masked foreground.
    mask_video_save = cv2.VideoWriter(out_path.replace('.mp4', '_mask.mp4'), fourcc, fps, (w, h))
    frame_fg_video_save = cv2.VideoWriter(out_path.replace('.mp4', '_fg.mp4'), fourcc, fps, (w, h))

    while success:
        mask_ret, mask = mask_cap.read()
        if not mask_ret:
            # Mask stream exhausted (shorter than the source) — stop
            # cleanly instead of crashing on a None mask.
            break

        mask_video_save.write(mask.astype(np.uint8))
        # Normalize the 0-255 mask to [0, 1] before multiplying; the raw
        # product would wrap around when cast back to uint8.
        fg_preview = frame.astype(np.float32) * (mask.astype(np.float32) / 255.0)
        frame_fg_video_save.write(np.clip(fg_preview, 0, 255).astype(np.uint8))

        com = process_frame(frame, mask, frame_back, output_width, output_height, blur_radius)
        video_save.write(com)
        success, frame = video_input.read()
        if back_video_input:
            success_back, frame_back_1 = back_video_input.read()
            if success_back:
                # Keep reusing the last background frame once the
                # background video runs out.
                frame_back = frame_back_1

    video_input.release()
    video_save.release()
    mask_cap.release()
    if back_video_input:
        back_video_input.release()
    mask_video_save.release()
    frame_fg_video_save.release()
    end = time.time()
    print(f"compose video spend time {end - start}s")
    add_audio_to_video(video_path, out_path, out_path.replace('.mp4', '_with_audio.mp4'))
    end = time.time()
    print(f"compose video and add audio spend time {end - start}s")


if __name__ == '__main__':
    # Entry-point placeholder: call video_human_segmentation(...) here
    # with concrete paths when running this module as a script.
    pass