import os

import cv2
import numpy as np
import paddlehub as hub
from PIL import Image


def CutVideo2Image(video_path, img_path):
    """Split a video into frames saved as sequentially numbered JPEGs.

    video_path: path of the input video file.
    img_path: output directory; frames are written as 0.jpg, 1.jpg, ...

    Prints the total frame count when finished.
    """
    # NOTE: the file previously contained two identical copies of this
    # function; the redundant duplicate has been removed.
    cap = cv2.VideoCapture(video_path)
    index = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # os.path.join is correct whether or not img_path ends with a
        # separator (the old img_path + '%d.jpg' required a trailing '/').
        cv2.imwrite(os.path.join(img_path, '%d.jpg' % index), frame)
        index += 1
    cap.release()
    print('Video cut finish, all %d frame' % index)

def deleteBg(path, path_output):
    """Run human segmentation on every .jpg/.png image under `path`.

    path: directory tree containing the input frames (walked recursively).
    path_output: directory where segmented (background-removed) images go;
        created if it does not exist.
    """
    # Load the pretrained human-segmentation model once, outside the loop.
    humanseg = hub.Module(name='deeplabv3p_xception65_humanseg')
    if not os.path.exists(path_output):
        os.makedirs(path_output)
        # Bug fix: the original printed the undefined name `output_dir`,
        # raising NameError whenever the directory had to be created.
        print('make output_dir:' + path_output)
    for dirpath, dirnames, filenames in os.walk(path):
        for filename in filenames:
            # Match on the extension rather than substring containment
            # (the old __contains__(".jpg") also matched e.g. "x.jpg.bak").
            if not filename.endswith(('.jpg', '.png')):
                continue
            imgpath = os.path.join(dirpath, filename)
            try:
                results = humanseg.segmentation(data={'image': [imgpath]},
                                                output_dir=path_output)
                for result in results:
                    print(result)
            except Exception as e:
                # Best-effort: log the failure and keep processing frames.
                print(e)
                continue

def init_canvas(width, height, color=(255, 255, 255)):
    """Return a `width` x `height` 3-channel uint8 image filled with `color`.

    color: per-channel fill value (default white); broadcast across pixels.
    """
    return np.full((height, width, 3), color, dtype="uint8")

# Generate the backdrop ("green screen") image
def GetGreenScreen(size, out_path, color=(255, 255, 255)):
    """Write a solid-color backdrop image of the given size to disk.

    size: (width, height) of the backdrop.
    out_path: file path the image is written to.
    color: backdrop color in BGR (default keeps the original white).

    NOTE(review): despite the name, the original hard-coded WHITE
    (255, 255, 255). Pass color=(0, 255, 0) for an actual green screen —
    confirm which backdrop the downstream composite expects.
    """
    canvas = init_canvas(size[0], size[1], color=color)
    cv2.imwrite(out_path, canvas)

def BlendImg(fore_image, base_image, output_path):
    """Composite the cut-out person (foreground) over a new background.

    fore_image: path to the cut-out person image (alpha marks the person).
    base_image: path to the replacement background image.
    output_path: file path where the blended result is saved.
    """
    # Load images; the foreground is resized to match the background.
    bg = Image.open(base_image).convert('RGB')
    fg = Image.open(fore_image).resize(bg.size)

    fg_arr = np.array(fg)
    # Alpha channel scaled to [0, 1] and replicated over the 3 color planes.
    # NOTE(review): assumes the foreground's last channel is alpha (RGBA
    # output of the segmentation step) — confirm against deleteBg's output.
    alpha = np.repeat((fg_arr[:, :, -1] / 255)[:, :, np.newaxis], 3, axis=2)

    # Per-pixel weighted blend of foreground color over the background.
    blended = alpha * fg_arr[:, :, :3] + (1 - alpha) * np.array(bg)

    Image.fromarray(np.uint8(blended)).save(output_path)

def BlendHumanImg(in_path, screen_path, out_path):
    """Blend every cut-out frame in `in_path` onto the backdrop image.

    in_path: directory of segmented (cut-out) frames.
    screen_path: path of the backdrop image used as the new background.
    out_path: directory where blended frames are written (same file names).
    """
    for name in os.listdir(in_path):
        print(name)
        BlendImg(os.path.join(in_path, name),
                 screen_path,
                 os.path.join(out_path, name))

def CompVideo(in_path, out_path, size):
    """Assemble the frame images in `in_path` into an mp4 video at 15 fps.

    in_path: directory of frames named by frame index (0.jpg, 1.jpg, ...).
    out_path: output video file path.
    size: (width, height) of the video; must match the frame size.
    """
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(out_path, fourcc, 15.0, size)

    def _frame_key(name):
        # Bug fix: os.listdir order is arbitrary, and plain lexicographic
        # sorting would put '10.jpg' before '2.jpg'. Sort numerically by
        # frame index; non-numeric names sort after, by name.
        stem = os.path.splitext(name)[0]
        return (0, int(stem), name) if stem.isdigit() else (1, 0, name)

    for name in sorted(os.listdir(in_path), key=_frame_key):
        img = cv2.imread(os.path.join(in_path, name))
        if img is None:
            # Skip unreadable / non-image files instead of writing None.
            continue
        out.write(img)  # write the frame
    out.release()



if __name__ == "__main__":
    # Pipeline configuration — hard-coded Windows paths; adjust before running.
    Video_Path = 'D:/11/839Tina.mp4'
    Video_Size = (3840,2160)
    FrameCut_Path = 'D:/22/'
    FrameSeg_Path = 'D:/33/22-w/'
    FrameCom_Path = 'D:/33/22-ww/'
    GreenScreen_Path = 'D:/33/green.jpg'
    ComOut_Path = 'D:/33/output.mp4'
# # Step 1: video -> frame images (currently commented out)
#     if not os.path.exists(FrameCut_Path):
#         os.mkdir(FrameCut_Path)
#     CutVideo2Image(Video_Path, FrameCut_Path)
#
    # Step 2: cut out the person (remove the background)
    if not os.path.exists(FrameSeg_Path):
        os.mkdir(FrameSeg_Path)
    deleteBg(FrameCut_Path, FrameSeg_Path)

    # # Step 3: generate the green-screen backdrop and composite
    # GetGreenScreen(Video_Size, GreenScreen_Path)
    #
    # if not os.path.exists(FrameCom_Path):
    #     os.mkdir(FrameCom_Path)
    # BlendHumanImg(FrameSeg_Path, GreenScreen_Path, FrameCom_Path)
    #
    # # Step 4: assemble the frames into the output video
    # CompVideo(FrameCom_Path, ComOut_Path, Video_Size)
