#!/home/robot/anaconda3/envs/yolograsp/bin/python3
import cv2
import os
import numpy as np
import rospy
import torchvision.transforms as transforms
from copy import deepcopy
import sys
from vision_messages.srv import FaceDetect, FaceDetectResponse

sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../'))
from utilities.tools.image_converter import convert_msg_to_nparray, convert_cv2_to_Image
sys.path.pop(0)
############### Import models ##################
from peppa.face_onnx.detector import Detector as FaceDetector
from peppa.detector import Detector
# from mnn_detector import Detector
# from onnx_detector import Detector
###############################################

from argparse import ArgumentParser
parser = ArgumentParser()
config = parser.parse_args()
############### Other params ##################
# Camera intrinsic matrix [[fx, 0, cx], [0, fy, cy], [0, 0, 1]].
# NOTE(review): hard-coded for one specific RGB-D camera — confirm these
# match the calibration of the deployed sensor.
config.intrinsics = np.array([[603.1332397460938, 0, 323.1732177734375],
                              [0, 601.6819458007812, 241.5373077392578],
                              [0, 0, 1]])
# Per-pixel coordinate maps for a 640x480 image:
#   xmap[r, c] == c (column index), ymap[r, c] == r (row index).
# np.meshgrid produces the same arrays as the original nested Python list
# comprehensions, but in native code.
config.xmap, config.ymap = np.meshgrid(np.arange(640), np.arange(480))
# Depth values are assumed to be millimetres; divide by this to get metres.
config.norm_scale = 1000.0
# Standard ImageNet normalization for the color stream.
# NOTE(review): norm_color appears unused in this file — verify other callers.
config.norm_color = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
###############################################

def choose_closet_bbox(bboxes, raw_depth):
    """Pick the face bounding box closest to the camera.

    Input:
        bboxes: [n, 4] array; each row is [x1, y1, x2, y2] (col/row corners)
        raw_depth: [h, w] depth map used to rank faces by mean distance
    Output:
        the single bbox row whose crop has the smallest mean depth
    """
    assert bboxes.shape[0] != 0
    if bboxes.shape[0] == 1:
        rospy.loginfo("Only 1 People detected.")
        return bboxes[0]
    idx_depth_min = 0
    depth_min = np.inf
    for i, bbox in enumerate(bboxes):
        cmin, rmin, cmax, rmax = bbox
        mask_depths = raw_depth[rmin:rmax, cmin:cmax]
        # Guard degenerate (empty) crops: np.mean of an empty slice emits a
        # RuntimeWarning and yields NaN, which could never win anyway.
        if mask_depths.size == 0:
            continue
        mean_depth = np.mean(mask_depths)
        if mean_depth < depth_min:
            depth_min = mean_depth
            idx_depth_min = i
    # Log message typos fixed ("closet" -> "closest", "choosen" -> "chosen").
    rospy.loginfo("{} People detected, but the closest one is chosen for the next step.".format(bboxes.shape[0]))
    return bboxes[idx_depth_min]


def get_lip_mask(lip_points, img):
    """Overlay a green lip tint on a copy of *img* and return the lip center.

    Input:
        lip_points: [k, 2] int array of lip landmarks; the first 13 rows are
                    treated as the outer lip contour
        img: BGR uint8 image
    Output:
        center_point: [x, y] pixel center of the lip bounding box
        frame: copy of img with the outer-lip polygon tinted green
    """
    frame = deepcopy(img)
    lip_x1 = np.min(lip_points[:, 0])
    lip_y1 = np.min(lip_points[:, 1])
    lip_x2 = np.max(lip_points[:, 0])
    lip_y2 = np.max(lip_points[:, 1])

    out_points = lip_points[:13]
    # Fill the outer lip contour with a +60 green tint.
    overlay = np.zeros(img.shape, dtype=np.uint8)
    mask = cv2.fillPoly(overlay, [out_points], color=(0, 60, 0))
    # cv2.add saturates at 255; the original uint8 "frame += mask" wrapped
    # around and corrupted pixels whose green channel exceeded 195.
    frame = cv2.add(frame, mask)
    center_point = [int((lip_x1 + lip_x2) / 2), int((lip_y1 + lip_y2) / 2)]
    return center_point, frame

def get_lip_bbox(frame, raw_depth, face_detector, lmk_detector):
    """Detect the closest face and return its lip bbox, head pose, debug frame.

    Input:
        frame: BGR color image
        raw_depth: depth map used to choose the closest face
        face_detector: detector whose .detect(frame) returns (bboxes, _)
        lmk_detector: detector whose .detect(frame, bbox) returns
                      (68 landmarks, 3D pose)
    Output:
        ([lip_x1, lip_x2, lip_y1, lip_y2], PRY_3d, annotated frame),
        or None when no face / no full 68-landmark set is found.
    """
    bboxes, _ = face_detector.detect(frame)
    if bboxes.shape[0] == 0:
        return None  # no face found; caller treats None as "nothing detected"
    bbox = choose_closet_bbox(bboxes, raw_depth)
    # np.int was removed in NumPy 1.20+; the builtin int is the exact alias.
    bbox = bbox.astype(int)
    lmks, PRY_3d = lmk_detector.detect(frame, bbox)
    if lmks.shape[0] != 68:
        return None  # landmark fit incomplete
    lmks = lmks.astype(int)
    # Here, we only remain lip(48~67); the rest are drawn for debugging.
    lip_points = lmks[48:]
    lmks = lmks[:48]
    frame = cv2.rectangle(frame, tuple(bbox[0:2]), tuple(bbox[2:4]), (0, 0, 255), 1, 1)
    for point in lmks:
        frame = cv2.circle(frame, tuple(point), 2, (0, 255, 0), -1, 1)
    for point in lip_points:
        frame = cv2.circle(frame, tuple(point), 2, (255, 255, 255), -1, 1)
    center_point, frame = get_lip_mask(lip_points, frame)
    lip_x1 = np.min(lip_points[:, 0])
    lip_y1 = np.min(lip_points[:, 1])
    lip_x2 = np.max(lip_points[:, 0])
    lip_y2 = np.max(lip_points[:, 1])
    return [lip_x1, lip_x2, lip_y1, lip_y2], PRY_3d, frame

def get_lip_point(bbox, raw_depth, args=None):
    """Back-project the lip-bbox center pixel into camera coordinates.

    Input:
        bbox: [cmin, cmax, rmin, rmax] lip bounding box
        raw_depth: depth map (same resolution as args.xmap / args.ymap)
        args: config carrying intrinsics, xmap, ymap, norm_scale
    Output:
        center_point: [x, y, z] camera-frame coordinates of the lip center
        (metres when depth is millimetres and norm_scale == 1000)
    """
    fx, fy = args.intrinsics[0, 0], args.intrinsics[1, 1]
    cx, cy = args.intrinsics[0, 2], args.intrinsics[1, 2]

    cmin, cmax, rmin, rmax = bbox
    row = int((rmin + rmax) / 2)
    col = int((cmin + cmax) / 2)

    # Standard pinhole back-projection of the single center pixel.
    z = raw_depth[row, col] / args.norm_scale
    x = (args.xmap[row, col] - cx) * z / fx
    y = (args.ymap[row, col] - cy) * z / fy
    return [x, y, z]

class Face_Server(object):
    """ROS service node that detects the closest face's lip in RGB-D images."""

    def __init__(self, args=None):
        rospy.init_node('face_server', anonymous=True)
        self.args = args

        # One model finds faces, the other fits 68 landmarks inside a face box.
        self.face_detector = FaceDetector()
        self.lmk_detector = Detector()

        rospy.loginfo("Face Detector has initiated! Ready to get image!")

        rospy.Service("/perception/face_detector", FaceDetect, self.detector_callback)
        rospy.wait_for_service("/perception/face_detector")

    def detector_callback(self, req):
        """Serve one FaceDetect request: return the 3D lip point, head pose
        and an annotated debug image (or empty results when nothing is found)."""
        rospy.loginfo("Get image! Processing ...")
        color_img = convert_msg_to_nparray(req.color)
        depth_img = convert_msg_to_nparray(req.depth)

        detection = get_lip_bbox(color_img, depth_img, self.face_detector, self.lmk_detector)
        if detection is None:
            rospy.loginfo('WARNING! No Object detected! Please check again!')
            return FaceDetectResponse([], [], req.color)

        lip_bbox, PRY_3d, annotated = detection
        lip_point = get_lip_point(lip_bbox, depth_img, args=self.args)
        rospy.loginfo("Process Done! Return Results!")
        return FaceDetectResponse(lip_point, PRY_3d, convert_cv2_to_Image(annotated))


if __name__ == '__main__':
    # Stand up the face-detection service node and block until ROS shutdown.
    perception = Face_Server(args=config)
    rospy.spin()
