import json
import os

import cv2
import numpy as np
import torch
from PIL import Image
from tqdm import tqdm

from RetinaFace_Pytorch import detect

def get_boundingbox(face, width, height, scale=1.3, minsize=None):
    """
    Compute a square crop region around a detected face box.

    :param face: flat box coordinates [x1, y1, x2, y2] (e.g. a flattened
                 detector output), not a dlib face object
    :param width: frame width in pixels
    :param height: frame height in pixels
    :param scale: multiplier applied to the larger box side to enlarge the crop
    :param minsize: optional lower bound on the crop side length
    :return: (x, y, size) — top-left corner and side length of the square
             crop, clipped to the frame, in OpenCV (column, row) form
    """
    x1, y1, x2, y2 = face[0], face[1], face[2], face[3]
    # Side of the square: the larger box dimension, scaled up.
    size_bb = int(max(x2 - x1, y2 - y1) * scale)
    if minsize:
        size_bb = max(size_bb, minsize)
    center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2

    # Clamp the top-left corner so it stays inside the frame.
    x1 = max(int(center_x - size_bb // 2), 0)
    y1 = max(int(center_y - size_bb // 2), 0)
    # Shrink the side so the crop does not extend past the right/bottom edge.
    size_bb = min(width - x1, size_bb)
    size_bb = min(height - y1, size_bb)

    return x1, y1, size_bb


def get_face(videoPath, save_root, select_nums=10):
    """
    Sample frames evenly from a video, crop the detected face in each
    sampled frame, and save the crops as numbered PNGs under save_root.

    :param videoPath: path of the input video file
    :param save_root: directory where cropped face images are written
    :param select_nums: maximum number of frames to sample from the video
    """
    numFrame = 0
    v_cap = cv2.VideoCapture(videoPath)
    v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # BUGFIX: honor select_nums — the sample count was hard-coded to 10.
    sample_count = select_nums if v_len > select_nums else v_len
    samples = np.linspace(0, v_len - 1, sample_count).round().astype(int)
    for j in range(v_len):
        success, vframe = v_cap.read()
        if not success:
            # Decoder produced fewer frames than CAP_PROP_FRAME_COUNT promised.
            break
        if j in samples:
            height, width = vframe.shape[:2]
            image = cv2.cvtColor(vframe, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(image)

            try:
                # NOTE(review): relies on a module-level `mtcnn` detector,
                # which is commented out in __main__ — confirm it is defined
                # before this function is called.
                boxes, _ = mtcnn.detect(image)
                x, y, size = get_boundingbox(boxes.flatten(), width, height)
                cropped_face = vframe[y:y + size, x:x + size]

                if not os.path.exists(save_root):
                    os.makedirs(save_root)
                cv2.imwrite(os.path.join(save_root, "%s.png" % numFrame), cropped_face)
                numFrame += 1

            except Exception:
                # Best-effort: report the failing video and continue.
                print(videoPath)
    v_cap.release()


if __name__ == '__main__':

    root_dir = './data/cvpr/test1/'

    data_dir = root_dir + 'image/'
    save_data_root = './data/cvpr/test1/crop/'

    ann_dir = root_dir + '/seg/'
    save_ann_root = './data/cvpr/test1/crop_seg/'

    # Face detector
    #mtcnn = MTCNN(device='cuda:0').eval()
    model = detect.get_model("./RetinaFace_Pytorch/model.pt")
    model.eval()
    # Record every processed image path alongside its predicted crop box;
    # dumped to JSON after each identity directory is finished.
    boxes_dict = {'images_name': [], 'predict_boxes': []}
    scale = 1.3

    for subdir in tqdm(os.listdir(data_dir)):
        for img in os.listdir(data_dir + subdir):
            im = Image.open(data_dir + subdir + '/' + img)
            # BUGFIX: keep the filename in `img`; it was previously clobbered
            # with this tensor, so img.replace('jpg', 'png') below crashed.
            img_tensor = torch.from_numpy(np.array(im)).permute(2, 0, 1)
            if not scale == 1.0:
                # Downscale the detector input by 1/scale.
                size1 = int(img_tensor.shape[1] / scale)
                size2 = int(img_tensor.shape[2] / scale)
                # NOTE(review): `resize` is never imported in this file
                # (presumably torchvision.transforms.functional.resize) —
                # confirm the intended import.
                img_tensor = resize(img_tensor.float(), (size1, size2))

            input_img = img_tensor.unsqueeze(0).float().cuda()
            anno = cv2.imread(ann_dir + subdir + '/' + img.replace('jpg', 'png'), cv2.IMREAD_GRAYSCALE)
            try:
                # NOTE(review): `RetinaFace` is undefined in this file —
                # confirm it should come from the RetinaFace_Pytorch package.
                boxes, landmarks, scores = model.get_detections(input_img, RetinaFace, score_threshold=0.5, iou_threshold=0.3)
                boxes = boxes[0].cpu().numpy()
                height, width = anno.shape[:2]
                x, y, size = get_boundingbox(boxes.flatten(), width, height)
                im = np.array(im)
                cropped_face = im[y:y + size, x:x + size]
                cropped_anno = anno[y:y + size, x:x + size]
                boxes_dict['images_name'].append(data_dir + subdir + '/' + img)
                boxes_dict['predict_boxes'].append({'x': x, 'y': y, 'size': size})
                save_root = os.path.join(save_data_root, subdir)
                save_root_anno = os.path.join(save_ann_root, subdir)
                # PIL delivered RGB; OpenCV writes BGR.
                cropped_face = cv2.cvtColor(cropped_face, cv2.COLOR_RGB2BGR)
                if not os.path.exists(save_root):
                    os.makedirs(save_root)
                if not os.path.exists(save_root_anno):
                    os.makedirs(save_root_anno)
                cv2.imwrite(os.path.join(save_root, img), cropped_face)
                cv2.imwrite(os.path.join(save_root_anno, img.replace('jpg', 'png')), cropped_anno)
            except Exception:
                # Skip images where detection, annotation loading, or
                # cropping fails; the JSON simply omits them.
                pass
        with open("./crop_face1.json", 'w', encoding='utf-8') as json_file:
            json.dump(boxes_dict, json_file, ensure_ascii=False)
