import sys, os
from concurrent.futures import ThreadPoolExecutor, as_completed, ProcessPoolExecutor
import numpy as np
import pandas as pd
import cv2, traceback, subprocess
from tqdm import tqdm
from glob import glob
import random
import click
import dlib
from ezds.ezdlearn.utils.load import load_video_by_frame
from moviepy.editor import *


# Model files for dlib's CNN face detector and 68-point landmark predictor.
# Paths are relative to the working directory the script is launched from.
DETECTOR_PATH = './dlib/mmod_human_face_detector.dat'
PREDICTOR_PATH = './dlib/shape_predictor_68_face_landmarks.dat'

# ffmpeg re-encode command template: template.format(input_path, output_path).
# NOTE(review): appears unused in this file — confirm no external caller before removing.
template = 'ffmpeg -loglevel panic -y -i {} -strict -2 {}'
# template2 = 'ffmpeg -hide_banner -loglevel panic -threads 1 -y -i {} -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 {}'

def compute_crop_radius(video_size, landmark_data_clip, random_scale=None):
    """Compute a shared face-crop radius for a clip of landmark frames.

    Args:
        video_size: (width, height) of the source video frames.
        landmark_data_clip: array of shape (num_frames, 68, 2) holding
            (x, y) coordinates of the 68 dlib landmarks per frame.
        random_scale: optional fixed scale factor; when None, a random
            factor in [1.05, 1.15) is drawn to jitter the crop size.

    Returns:
        (True, radius_max) when every frame's crop box fits inside the
        frame and the per-frame radii are stable; (False, None) when any
        crop would leave the frame or the radii vary by more than 1.5x
        across the clip.
    """
    video_w, video_h = video_size[0], video_size[1]
    # Per-frame maximum x and y over all 68 landmarks.
    landmark_max_clip = np.max(landmark_data_clip, axis=1)
    if random_scale is None:
        random_scale = random.random() / 10 + 1.05
    # Vertical extent: lowest landmark down to the nose bridge (index 29);
    # horizontal extent: mouth corners (indices 48 and 54).
    radius_h = (landmark_max_clip[:, 1] - landmark_data_clip[:, 29, 1]) * random_scale
    radius_w = (landmark_data_clip[:, 54, 0] - landmark_data_clip[:, 48, 0]) * random_scale
    radius_clip = np.max(np.stack([radius_h, radius_w], 1), 1) // 2
    # Round the largest per-frame radius up to the next multiple of 4.
    # NOTE: the original used np.int, which was removed in NumPy 1.24;
    # the builtin int is the drop-in replacement.
    radius_max = (int(np.max(radius_clip) / 4) + 1) * 4
    radius_max_1_4 = radius_max // 4
    # Crop box anchored at landmark 29 vertically and landmark 33 horizontally,
    # mirroring the geometry used by crop_face().
    clip_min_h = landmark_data_clip[:, 29, 1] - radius_max
    clip_max_h = landmark_data_clip[:, 29, 1] + radius_max * 2 + radius_max_1_4
    clip_min_w = landmark_data_clip[:, 33, 0] - radius_max - radius_max_1_4
    clip_max_w = landmark_data_clip[:, 33, 0] + radius_max + radius_max_1_4
    # Reject the clip if any crop box would leave the frame on any side.
    if min(clip_min_h.tolist() + clip_min_w.tolist()) < 0:
        return False, None
    if max(clip_max_h.tolist()) > video_h:
        return False, None
    if max(clip_max_w.tolist()) > video_w:
        return False, None
    # Reject clips whose face size fluctuates too much between frames.
    if max(radius_clip) > min(radius_clip) * 1.5:
        return False, None
    return True, radius_max

def crop_face(img, landmark, crop_radius=128, crop_radius_1_4=32):
    """Crop the face region from an image using its 68-point landmarks.

    Args:
        img: HxWxC image array.
        landmark: (68, 2) array of (x, y) landmark coordinates, or None
            to return the image unchanged.
        crop_radius: base half-size of the crop (as produced by
            compute_crop_radius).
        crop_radius_1_4: quarter-radius margin added around the box.

    Returns:
        The cropped sub-image (or img itself when landmark is None).
    """
    if landmark is None:
        return img
    # Anchor the box at landmark 29 vertically and landmark 33 horizontally,
    # matching the geometry of compute_crop_radius. Cast the anchors to int
    # so float landmark arrays don't raise TypeError when used as slice
    # indices (backward-compatible for integer inputs).
    cy = int(landmark[29, 1])
    cx = int(landmark[33, 0])
    y0 = cy - crop_radius
    y1 = cy + crop_radius * 2 + crop_radius_1_4
    x0 = cx - crop_radius - crop_radius_1_4
    x1 = cx + crop_radius + crop_radius_1_4
    # Clamp only the top/left edges; numpy slicing already clamps the
    # bottom/right edges to the image bounds.
    return img[max(y0, 0):y1, max(x0, 0):x1, :]

def process_mp4_file(vfile, preprocessed_root, detector=None, predictor=None, fps=25, max_frames=750):
	"""Split one mp4 into <= max_frames segments of cropped face jpgs.

	For each segment, writes <preprocessed_root>/<dirname>/<vidname>_<k>/
	containing per-frame face crops (<frame_idx_in_segment>.jpg), a
	landmarks.csv with flattened 68-point landmarks, and an audio.wav
	extracted from the source clip.

	Args:
		vfile: path to the source mp4 (must be at least one directory
			deep, since dirname is taken from the parent folder name).
		preprocessed_root: output root directory.
		detector/predictor: optional pre-loaded dlib models; loaded from
			DETECTOR_PATH / PREDICTOR_PATH when None.
		fps: frame rate the video is resampled to for frame/time mapping.
		max_frames: number of frames per output segment.
	"""
	print('start processing', vfile)
	if detector is None:
		detector = dlib.cnn_face_detection_model_v1(DETECTOR_PATH)
	if predictor is None:
		predictor = dlib.shape_predictor(PREDICTOR_PATH)
	vid = load_video_by_frame(vfile, refps=fps)
	aud = AudioFileClip(vfile)
	max_duration = aud.duration
	vidname = os.path.basename(vfile).split('.')[0]
	dirname = vfile.split('/')[-2]
	# ptss maps frame-index-within-segment -> 136 flattened landmark values.
	ptss = {}
	# `now` is the last segment index whose audio was written (-1 = none yet).
	now = -1
	h, w = None, None
	for idx, frame in enumerate(vid):
		# Lazily capture frame dimensions from the first frame.
		h = h or frame.shape[0]
		w = w or frame.shape[1]
		current = idx // max_frames
		fulldir = os.path.join(preprocessed_root, dirname, vidname+f'_{current}')
		if not os.path.exists(fulldir):
			os.makedirs(fulldir, exist_ok=True)
		jdx = idx % max_frames
		# CNN detector with 0 upsampling; frames with no face are skipped.
		rects = detector(frame, 0)
		rect = rects[0].rect if len(rects) > 0 else None
		if rect is None:
			continue
		pts = np.array([[p.x, p.y] for p in predictor(frame, rect).parts()])
		# Single-frame crop check: skip frames whose crop box would not fit.
		_, radius = compute_crop_radius((w, h), pts[None, ...])
		if radius is None:
			continue
		face = crop_face(frame, pts, radius, radius//4)
		# Store landmarks as [x_0..x_67, y_0..y_67] (transpose then flatten).
		ptss[jdx] = pts.T.reshape(-1).tolist()
		try:
			cv2.imwrite(os.path.join(fulldir, '{}.jpg'.format(jdx)), face)
		except:
			# Best-effort: drop this frame on any imwrite failure.
			continue
		if now != idx // max_frames:
			if now >=0:
				# Flush the previous segment's landmarks to its directory.
				# NOTE(review): ptss already contains the CURRENT frame's
				# landmarks at this point, so the first detected frame of
				# each new segment is written into the PREVIOUS segment's
				# CSV and dropped from its own — looks like a bug, confirm.
				predir = os.path.join(preprocessed_root, dirname, vidname+f'_{now}')
				pd.DataFrame(ptss, index=[f'x_{i}' for i in range(68)]+[f'y_{i}' for i in range(68)]).T.to_csv(os.path.join(predir, 'landmarks.csv'), index=True)
				ptss = {}
			# NOTE(review): the audio clip starts at idx/fps, where idx is
			# the first face-detected frame of the segment, not the segment
			# boundary current*max_frames/fps — confirm this offset is intended.
			aud.subclip(idx/fps, min((idx+max_frames)/fps, max_duration)).write_audiofile(os.path.join(fulldir, 'audio.wav'))
			now = idx // max_frames
	# Flush landmarks of the final (partial) segment; fulldir still points
	# at the directory of the last processed frame.
	if len(ptss) > 0:
		pd.DataFrame(ptss, index=[f'x_{i}' for i in range(68)]+[f'y_{i}' for i in range(68)]).T.to_csv(os.path.join(fulldir, 'landmarks.csv'), index=True)

	
def mp_handler(args):
	"""Worker wrapper: unpack one job tuple and process a single video.

	Args:
		args: (vfile, preprocessed_root, detector, predictor) tuple as
			built by main().

	Any per-video failure is logged and swallowed so one bad file does
	not abort the whole batch; Ctrl-C still stops the run.
	"""
	vfile, preprocessed_root, detector, predictor = args
	try:
		process_mp4_file(vfile, preprocessed_root, detector, predictor)
	except KeyboardInterrupt:
		# Prefer sys.exit over the site-injected exit() helper.
		sys.exit(0)
	except Exception:
		# Narrowed from a bare `except:` so SystemExit/GeneratorExit are
		# no longer swallowed; log and keep going.
		traceback.print_exc()

@click.command()
@click.option("-d", "--data_root", type=str, required=True, help="raw data root path")
@click.option("-p", "--preprocessed_root", type=str, required=True, help="output root path")
@click.option("-t", "--nthread", type=int, default=1, help="num threads")
def main(data_root, preprocessed_root, nthread):
	"""Run face preprocessing over every <data_root>/*/*.mp4 with a thread pool.

	Builds nthread (detector, predictor) model pairs and assigns pair
	i % nthread to job i.

	NOTE(review): pairs are bound to job indices, not worker threads, so
	two threads may run the same dlib model pair concurrently — confirm
	the models tolerate that.
	"""
	filelist = glob(os.path.join(data_root, '*/*.mp4'))
	models = [[dlib.cnn_face_detection_model_v1(DETECTOR_PATH), dlib.shape_predictor(PREDICTOR_PATH)] for _ in range(nthread)]
	jobs = [(vfile, preprocessed_root, *models[i % nthread]) for i, vfile in enumerate(filelist)]
	# Use the executor as a context manager so the pool is always shut
	# down (the original leaked it).
	with ThreadPoolExecutor(nthread) as pool:
		futures = [pool.submit(mp_handler, job) for job in jobs]
		for fut in tqdm(as_completed(futures), total=len(futures)):
			fut.result()

if __name__ == '__main__':
	# Single-process debug entry point: process every clip sequentially
	# with one shared detector/predictor pair. The click-based main()
	# above provides the threaded CLI variant.
	# (Removed: redundant `from tqdm import tqdm` — already imported at
	# the top of the file — and a dead commented-out ProcessPoolExecutor
	# variant.)
	preprocessed_root = r'preprocess_data'
	data_root = r'raw_data'
	filelist = glob(os.path.join(data_root, '*/*.mp4'))
	detector = dlib.cnn_face_detection_model_v1(DETECTOR_PATH)
	predictor = dlib.shape_predictor(PREDICTOR_PATH)
	for vfile in tqdm(filelist):
		process_mp4_file(vfile, preprocessed_root, detector, predictor, 25, 750)