import os
import re
from configparser import ConfigParser
from argparse import ArgumentParser
from moviepy.editor import VideoFileClip, concatenate_videoclips
from utils import *


print('启动中...')
# Absolute, symlink-resolved path of this script.
script_path = os.path.realpath(__file__)
print('正在运行:',script_path)
# Directory containing the script (used to locate the assets folder).
# os.path.dirname replaces the original regex match on 'kzkt-cut.py',
# which broke as soon as the file was renamed or moved off Windows paths.
script_path = os.path.dirname(script_path)

print('正在载入配置...')
cp = ConfigParser()
# Config file lives next to the script; explicit UTF-8 so non-ASCII
# values (Chinese paths/labels) parse correctly.
with open(f'{script_path}\\assets\\config.ini', 'r', encoding='utf-8') as f:
    cp.read_file(f)

# Values from the [Args] section; fallback= applies when the key is absent.
interlude = cp.get('Args', 'interlude',
                   fallback=f'{script_path}\\assets\\interlude.mp4')
interlude_duration = cp.get('Args', 'interlude_duration')
threshold = float(cp.get('Args', 'threshold'))
samplings = float(cp.get('Args', 'samplings'))
# BUG FIX: the original re-read the 'interlude' key here; face_min_size
# must come from its own 'face_min_size' option.
face_min_size = cp.get('Args', 'face_min_size',
                       fallback=.01)

# Command-line overrides; config-file values act as the defaults.
parser = ArgumentParser(description='空中课堂自动剪辑脚本')
# --video has no usable default: without it the script crashed later with
# a TypeError while parsing the path. required=True gives a clear error.
parser.add_argument('--video', required=True)
parser.add_argument('--interlude', default=interlude)
parser.add_argument('--output', default=None)
parser.add_argument('--threshold', default=threshold)
parser.add_argument('--interlude_duration', default=interlude_duration)
parser.add_argument('--samplings', default=samplings)
parser.add_argument('--face_min_size', default=face_min_size)

args = parser.parse_args()

video_file_path = args.video
# Split on the LAST dot via os.path.splitext. The original regex
# r'\.(.+)' matched from the FIRST dot of the whole path, so a dotted
# directory name produced a garbage "extension"; splitext also tolerates
# paths with no extension instead of raising IndexError.
video_path, video_ext = os.path.splitext(video_file_path)
video_ext = video_ext.lstrip('.')  # drop the leading '.' to match the old value
print('脚本路径', script_path)
print('视频路径', video_path)

# Fall back to the bundled interlude clip / a '<name>-cut.<ext>' output
# next to the source video when not given on the command line.
interlude_file_path = args.interlude or f'{script_path}\\assets\\interlude.mp4'
output_file_path = args.output or f'{video_path}-cut.{video_ext}'

print('过场视频路径', interlude_file_path)
print('输出视频路径', output_file_path)

# Normalise the CLI/config values: argparse hands config-file defaults
# through unchanged, so everything may still be a string at this point.
threshold = float(args.threshold)
interlude_duration = float(args.interlude_duration)
samplings = int(args.samplings)
face_min_size = float(args.face_min_size)

print('场景分析阈值',args.threshold)
print('过场片段时长',interlude_duration)
print('人脸检测采样',samplings)
print('最小人脸面积',face_min_size)

print('正在进行场景分析...')
# Detect scene boundaries in the source video (helper from utils;
# `threshold` tunes the scene-change sensitivity).
scenes = find_scenes(video_file_path, threshold=threshold)
print('场景分析完成.')

# Load the full video once, then slice it into one subclip per scene.
clip = VideoFileClip(video_file_path)
subclips = sub_scene_clips(clip, scenes)

print('正在检测全脸场景...')
# One boolean per scene: does the scene show a full face (helper from utils)?
is_full_faces = [
    clip_fullface_detect(sub, samplings=samplings, face_min_size=face_min_size)
    for sub in subclips
]
print('全脸场景检测完成.')

# Trim leading and trailing non-full-face scenes so the kept range starts
# and ends on a full-face scene. The original `while True` loops raised
# IndexError when no full-face scene existed (the lists were emptied and
# then indexed) — the emptiness guard fixes that.
while is_full_faces and not is_full_faces[0]:
    is_full_faces.pop(0)
    subclips.pop(0)

while is_full_faces and not is_full_faces[-1]:
    is_full_faces.pop()
    subclips.pop()

print('正在进行视频拼接...')

# Interlude clip cut to the configured duration and scaled to the source
# video's frame size so concatenation keeps a uniform resolution.
interlude = (
    VideoFileClip(interlude_file_path)
    .set_end(interlude_duration)
    .resize(tuple(clip.size))
)

# Full-face scenes are replaced by the interlude; all other scenes are
# kept unchanged, preserving the original ordering.
keep_clips = [
    interlude if full_face else sub
    for full_face, sub in zip(is_full_faces, subclips)
]

finalclip = concatenate_videoclips(keep_clips)
print('视频拼接完成.')

print('正在输出最终视频文件...')
# Render the assembled timeline to disk (re-encodes via ffmpeg).
finalclip.write_videofile(output_file_path)
print('最终视频文件输出完成.')

# Release the ffmpeg reader processes held by the source clips now that
# the render is finished (the original leaked them until interpreter exit).
clip.close()
interlude.close()

# os.startfile exists on Windows only — guard it so the script degrades
# gracefully elsewhere instead of raising AttributeError after a
# successful render.
if hasattr(os, 'startfile'):
    os.startfile(output_file_path)

print('最终视频文件位置：', output_file_path)
