import cv2
import os, sys
import glob
import dlib
import numpy as np
import time
import pdb
import argparse
from pathlib import Path

# Module-level dlib models, shared by every detection call in this script.
detector = dlib.get_frontal_face_detector()
# 68-point landmark model; path is relative to the process working directory —
# NOTE(review): assumes the script is launched from its own directory, confirm at runtime.
predictor = dlib.shape_predictor('../data/face/shape_predictor_68_face_landmarks.dat')

def shape_to_np(shape, dtype="int"):
    """Convert a dlib landmark result into an (N, 2) NumPy array.

    Each row holds the (x, y) pixel coordinates of one of the
    ``shape.num_parts`` landmark points, in part order.
    """
    coords = np.zeros((shape.num_parts, 2), dtype=dtype)
    for idx in range(shape.num_parts):
        point = shape.part(idx)
        coords[idx, 0] = point.x
        coords[idx, 1] = point.y
    return coords

def detect_image(path_image, image=None, is_save=True):
    """Detect one or more faces in an image and mark 68 facial landmarks.

    Draws a red dot on every landmark (mutating ``image`` in place) and,
    when ``is_save`` is True, writes 5 key points (eye centers, nose tip,
    mouth corners) as "x y" lines to ``<stem>.txt`` next to the image.
    Note: with multiple faces the file is rewritten per face, so only the
    last face's key points persist — preserved from the original logic.

    :param path_image: path to the image; also determines the .txt location.
    :param image: optional pre-loaded BGR image; loaded from disk if None.
    :param is_save: whether to write the key-point file.
    :return: True if at least one face was found, False otherwise.
    :raises FileNotFoundError: if ``path_image`` does not exist.
    """
    path_image = Path(path_image)
    path_save = path_image.parent.joinpath(path_image.stem + ".txt")

    if not path_image.exists():
        # BUG FIX: the original did `raise(f"...")`, which raises a plain
        # string and therefore fails with "exceptions must derive from
        # BaseException"; the `return False` after it was dead code.
        raise FileNotFoundError(f"ERROR: {path_image} not exist")

    if image is None:
        image = cv2.imread(str(path_image))

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 1)  # 1 = upsample once for small faces

    if len(rects) == 0:
        print(f"ERROR: not find face! {str(path_image)}")
        return False

    for rect in rects:  # enumerate index was unused — dropped
        shape = predictor(gray, rect)
        shape = shape_to_np(shape)
        for (x, y) in shape:
            cv2.circle(image, (x, y), 1, (0, 0, 255), -1)

        # 68-point model indices: 36-41 left eye, 42-47 right eye,
        # 33 nose tip, 48/54 left/right mouth corner.
        eyel = np.round(np.mean(shape[36:42, :], axis=0)).astype("int")
        eyer = np.round(np.mean(shape[42:48, :], axis=0)).astype("int")
        nose = shape[33]
        mouthl = shape[48]
        mouthr = shape[54]

        if is_save:
            message = f"{eyel[0]} {eyel[1]}\n{eyer[0]} {eyer[1]}\n" \
                      f"{nose[0]} {nose[1]}\n{mouthl[0]} {mouthl[1]}\n" \
                      f"{mouthr[0]} {mouthr[1]}\n"

            with open(str(path_save), 'w') as f:
                f.write(message)
    return True


def detect_images(dir_path):
    """Run landmark detection on every top-level ``*.jpg`` in ``dir_path``.

    Files are processed in sorted (deterministic) order; each path is
    printed before being handed to :func:`detect_image`.
    """
    for image_file in sorted(Path(dir_path).glob("*.jpg")):
        print(image_file)
        detect_image(image_file)


def is_finished(path_video, nframe):
    """Return True if ``path_video`` already holds exactly ``nframe`` .jpg frames.

    Used to skip videos whose frame extraction completed on a previous run.

    :param path_video: Path to the per-video frame directory.
    :param nframe: expected frame count (from the video header).
    """
    # The original sorted the glob just to take len() — an O(n log n) sort
    # for a pure count; count the matches directly instead.
    nsize = sum(1 for _ in path_video.glob("*.jpg"))
    return nsize == nframe


if __name__ == "__main__":

    # LRW dataset layout: <word>/<split>/<name>.mp4
    path_src = Path("/mnt/data/DATA/LRW/lipread_mp4")
    path_tar = Path("../data/video/LRW")
    v_path_videos = sorted(path_src.glob("*/*/*.mp4"))

    print(f"Have {len(v_path_videos)} videos")

    for i, ipath in enumerate(v_path_videos):
        word, train, name = ipath.parent.parent.name, ipath.parent.name, ipath.stem

        if i % 100 == 0:
            print(f"NOW processing the {i:06d}-th/{len(v_path_videos)} videos: {name}")

        # create data path mirroring the source layout
        ipath_save = path_tar.joinpath(word, train, name)
        ipath_save.mkdir(parents=True, exist_ok=True)

        # STEP1: extract frames
        count = 0
        postfix = ".jpg"
        cap = cv2.VideoCapture(str(ipath))
        # BUG FIX: the capture handle was never released, leaking OS resources
        # across thousands of videos (and on the early `continue` path).
        try:
            length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

            # if have finished, then skip
            if is_finished(ipath_save, length):
                continue

            # read video frames, save each one, and detect landmarks on it
            while True:
                success, image = cap.read()
                if not success:
                    break

                ipath_frame = ipath_save.joinpath(f"frame-{count:04d}{postfix}")
                cv2.imwrite(str(ipath_frame), image)

                # detect image (writes frame-XXXX.txt next to the frame)
                detect_image(ipath_frame, image=image, is_save=True)
                count += 1

                if count % 50 == 0:
                    print(f"extract and save the {count}-th frame")
        finally:
            cap.release()