#!/usr/bin/python
#!coding:utf-8
# import the necessary packages
import time

import cv2
try:
    from pyee import ExecutorEventEmitter
except Exception as e:
    from pyee.executor import ExecutorEventEmitter
import rospy
from sensor_msgs.msg import CompressedImage
# from sensor_msgs.srv import SetCameraInfo, SetCameraInfoResponse
from pi_cam.msg import FaceDetection
from pi_cam.srv import GetFaceDetections, GetFaceDetectionsResponse
from pi_driver.srv import GetString, GetStrings, GetStringResponse, GetStringsResponse, SetString, SetStringResponse
from pi_driver.srv import SetInt32, SetInt32Response

from camera_utils import toImageMsg
from pi_driver.srv import GetLandmarks, GetLandmarksResponse
from pi_ai.face_recognizer import FaceRecognizer


# OpenCV (top-left origin, 480x360) -> Scratch (centered origin, y up):
# (0,10) => (-240,170)
# (240,180) => (0,0)


def toScratchAxes(cv_x, cv_y):  # 480x360 in Scratch
    """Convert OpenCV pixel coordinates (origin at the top-left, y down)
    to Scratch stage coordinates (origin at the center, y up)."""
    return cv_x - 240, 180 - cv_y


class FaceRecognizerNode(ExecutorEventEmitter):
    """ROS node exposing face detection / recognition / mesh / analysis services.

    Frames are read from the 'cv_image' shared-memory segment written by the
    camera node; annotated result images are published on ~image_face.  Heavy
    models (mesher, analyzer) are loaded lazily on first use; image publishing
    runs on the emitter's executor thread so service callbacks return quickly.
    """

    def __init__(self):
        super().__init__()
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing......" % (self.node_name))
        # When True every detection service also publishes an annotated image.
        self.visualization = True
        # Reused response object; its detections list is rebuilt on each call.
        self.msg = GetFaceDetectionsResponse()
        self.getShm()
        self.pub_detections = rospy.Publisher(
            "~image_face", CompressedImage, queue_size=1)
        # Publish annotated images on the executor thread (see pubImage).
        self.on('pub_image', self.pubImage)
        self.on('pub_mesh_image', self.pubMeshImage)
        # Model load states: 0 = not loaded, 1 = loading, 2 = ready.
        # The recognizer is constructed eagerly below, hence state 2.
        self.recognizer_model_state = 2
        self.mesher_model_state = 0
        self.recognizer = FaceRecognizer(scale=1.5)
        # self.mesher = FaceMesher()
        try:
            # Optional emotion/age/gender models; the node still serves
            # detection/recognition if these fail to load.
            from pi_ai.face_analyzer import FaceAnalyzer
            self.analyzer = FaceAnalyzer()
        except Exception as e:
            print(e)

        rospy.Service('~set_threshold', SetInt32, self.cbSetThreshold)
        rospy.Service('~set_resize', SetInt32, self.srvSetResize)
        rospy.Service('~detect_face_locations',
                      GetFaceDetections, self.cbDetectFaceLocations)
        rospy.Service('~detect_face_labels', GetFaceDetections,
                      self.cbDetectFaceLabels)
        rospy.Service('~list_face_labels', GetStrings, self.cbListFaceLabels)
        rospy.Service('~add_face_label', SetString, self.cbAddFaceLabel)
        rospy.Service('~remove_face_label', SetString, self.cbRemoveFaceLabel)
        rospy.Service('~detect_face_mesh', GetLandmarks, self.cbFaceMesh)
        rospy.Service('~detect_face_emotion', GetFaceDetections,
                      self.cbFaceEmotion)
        rospy.Service('~detect_face_age', GetFaceDetections,
                      self.cbFaceAge)
        rospy.Service('~detect_face_gender', GetFaceDetections,
                      self.cbFaceGender)
        rospy.Service('~analyze_face', GetFaceDetections, self.cbFaceAnalyze)
        rospy.loginfo("[%s] Initialized." % (self.node_name))

    def getShm(self):
        """Block until the camera's 'cv_image' shared memory exists, then map
        it as a 480x640 BGR uint8 frame (no copy)."""
        # Imported lazily so the module loads even before the driver is up.
        from pi_driver import SharedMemory
        import numpy as np
        while True:
            try:
                self.shm = SharedMemory('cv_image')
                self.image_frame = np.ndarray(
                    (480, 640, 3), dtype=np.uint8, buffer=self.shm.buf)
                break
            except Exception:
                # Camera node not ready yet; retry once per second.
                print(self.node_name, 'wait for SharedMemory cv_image')
                time.sleep(1)

    def getImage(self):
        """Return a private copy of the current frame resized to 480x360
        (the Scratch stage size used throughout this node)."""
        rect_image = self.image_frame.copy()
        return cv2.resize(rect_image, (480, 360))

    def cbSetThreshold(self, params):
        """Service: set the detector confidence threshold (valid range 1-99)."""
        if 0 < params.value < 100:
            self.recognizer.detector.setThreshold(params.value)
        return SetInt32Response(params.port, params.value)

    def srvSetResize(self, params):
        """Service: set the detector input size; width (port) in [10, 480],
        height (value) in [10, 360]. Out-of-range requests are ignored."""
        if 10 <= params.port <= 480 and 10 <= params.value <= 360:
            self.recognizer.detector.setResize(params.port, params.value)
        return SetInt32Response(params.port, params.value)

    def _ensureRecognizer(self):
        """Load the FaceRecognizer on first use; if another thread is already
        loading it, block until it is ready."""
        if self.recognizer_model_state == 0:
            self.recognizer_model_state = 1
            try:
                from pi_ai.face_recognizer import FaceRecognizer
            except Exception as e:
                print(e)
                from face_recognizer import FaceRecognizer
            self.recognizer = FaceRecognizer(scale=1.5)
            self.recognizer_model_state = 2
        elif self.recognizer_model_state == 1:
            while self.recognizer_model_state == 1:
                time.sleep(0.5)

    def _ensureMesher(self):
        """Load the FaceMesher on first use; if another thread is already
        loading it, block until it is ready."""
        if self.mesher_model_state == 0:
            self.mesher_model_state = 1
            from pi_ai.face_mesher import FaceMesher
            self.mesher = FaceMesher()
            self.mesher_model_state = 2
        elif self.mesher_model_state == 1:
            while self.mesher_model_state == 1:
                time.sleep(0.5)

    def cbDetectFaceLocations(self, params):
        """Service: detect face bounding boxes in the current frame."""
        self._ensureRecognizer()
        self.rect_image = self.getImage()
        try:
            self.recognizer.detect(self.rect_image)
        except Exception as e:
            print(e)
        if self.visualization:
            # Publish asynchronously; pubImage reads self.rect_image.
            self.emit('pub_image')
        return self.toFaceDetectionMsg(self.recognizer.face_locations)

    def cbDetectFaceLabels(self, params):
        """Service: detect faces and recognize their labels in the frame."""
        self._ensureRecognizer()
        self.rect_image = self.getImage()
        try:
            self.recognizer.recognize(self.rect_image)
        except Exception as e:
            print(e)
        if self.visualization:
            self.emit('pub_image')
        return self.toFaceDetectionMsg(self.recognizer.face_locations, self.recognizer.face_names)

    def toFaceDetectionMsg(self, face_locations=None, face_names=None):
        """Build the shared GetFaceDetectionsResponse from (left, top, right,
        bottom) boxes and optional per-face names. Names are paired with boxes
        only when both lists have the same non-zero length; otherwise every
        detection gets an empty name."""
        face_locations = [] if face_locations is None else face_locations
        face_names = [] if face_names is None else face_names
        msg = self.msg
        msg.detections = []
        if len(face_names) > 0 and len(face_names) == len(face_locations):
            for (left, top, right, bottom), name in zip(face_locations, face_names):
                msg.detections.append(
                    FaceDetection(name, [left, top, right, bottom]))
        else:
            for (left, top, right, bottom) in face_locations:
                msg.detections.append(
                    FaceDetection("", [left, top, right, bottom]))
        return msg

    def cbListFaceLabels(self, params):
        """Service: return the known face labels.

        The keys view is materialized to a list — ROS message string[] fields
        require a real sequence, not a dict_keys view."""
        return GetStringsResponse(list(self.recognizer.known_faces.keys()))

    def cbAddFaceLabel(self, params):
        """Service: learn the face in the current frame under the given label."""
        if len(params.data) == 0:
            return SetStringResponse("添加失败,名称长度为0")
        try:
            cv_image = self.getImage()
            res = self.recognizer.add_face_label(
                cv_image, params.data, save=True)
            return SetStringResponse(res)
        except Exception as e:
            print(e)
            return SetStringResponse("添加失败")

    def cbRemoveFaceLabel(self, params):
        """Service: forget a previously learned face label."""
        if len(params.data) == 0:
            return SetStringResponse("未提供要删除的标签")
        try:
            res = self.recognizer.remove_face_label(params.data)
            return SetStringResponse(res)
        except Exception as e:
            print(e)
            return SetStringResponse("删除失败")

    def pubImage(self):
        """Executor-thread handler: draw the last detection results on the
        last grabbed frame and publish it on ~image_face."""
        rect_image = self.rect_image
        face_locations = self.recognizer.face_locations
        face_names = self.recognizer.face_names
        image = self.recognizer.label_faces(
            rect_image, face_locations, face_names)
        msg = toImageMsg(image)
        self.pub_detections.publish(msg)

    def cbFaceMesh(self, param):
        """Service: detect face-mesh landmarks in the current frame."""
        self._ensureMesher()
        self.rect_image = self.getImage()
        array, self.results = self.mesher.detect(self.rect_image)
        if self.visualization:
            self.emit('pub_mesh_image')
        return array

    def pubMeshImage(self):
        """Executor-thread handler: draw the last mesh results and publish."""
        rect_image = self.rect_image
        results = self.results
        self.mesher.draw_results(rect_image, results)
        msg = toImageMsg(rect_image)
        self.pub_detections.publish(msg)

    def drawAndPubAnalyzeMsg(self, rect_image, detections):
        """Convert analyzer (face, label) pairs into a detection response,
        optionally publishing an annotated image first."""
        face_locations = []
        labels = []
        for face, label in detections:
            face_locations.append(
                [face.left, face.top, face.right, face.bottom])
            labels.append(str(label))
        if self.visualization:
            image = self.recognizer.label_faces(
                rect_image, face_locations, labels)
            msg = toImageMsg(image)
            self.pub_detections.publish(msg)
        return self.toFaceDetectionMsg(face_locations, labels)

    def cbFaceEmotion(self, param):
        """Service: classify the emotion of each detected face."""
        rect_image = self.getImage()
        inferences = self.analyzer.detect_emotion(rect_image.copy())
        # e.g. [Box2D(182, 191, 329, 338, 0.405, 'neutral')]
        boxes = inferences['boxes2D']
        face_locations = []
        labels = []
        for box in boxes:
            x_min, y_min, x_max, y_max = box.coordinates
            face_locations.append([x_min, y_min, x_max, y_max])
            # Map the raw class name to its display label.
            labels.append(self.analyzer.emotion_map[box.class_name])
        if self.visualization:
            image = self.recognizer.label_faces(
                rect_image, face_locations, labels)
            msg = toImageMsg(image)
            self.pub_detections.publish(msg)
        return self.toFaceDetectionMsg(face_locations, labels)

    def cbFaceAge(self, param):
        """Service: estimate the age of each detected face."""
        rect_image = self.getImage()
        detections = self.analyzer.detect_age(rect_image)
        return self.drawAndPubAnalyzeMsg(rect_image, detections)

    def cbFaceGender(self, param):
        """Service: estimate the gender of each detected face."""
        rect_image = self.getImage()
        detections = self.analyzer.detect_gender(rect_image)
        return self.drawAndPubAnalyzeMsg(rect_image, detections)

    def cbFaceAnalyze(self, param):
        """Service: run the combined face analysis pipeline."""
        rect_image = self.getImage()
        detections = self.analyzer.analyze_face(rect_image.copy())
        return self.drawAndPubAnalyzeMsg(rect_image, detections)

    def onShutdown(self):
        """Release model resources and the shared-memory mapping on shutdown."""
        self.recognizer.aligner.release()
        self.recognizer.identifier.release()
        self.analyzer.age_predictor.release()
        self.analyzer.gender_predictor.release()
        self.analyzer.face_aligner.release()
        self.shm.close()
        rospy.loginfo("[%s] Shutdown." % (self.node_name))


if __name__ == '__main__':
    # Register the node with the ROS master, hook shutdown cleanup,
    # and hand control to the ROS event loop.
    rospy.init_node('face_recognizer_node', anonymous=False)
    recognizer_node = FaceRecognizerNode()
    rospy.on_shutdown(recognizer_node.onShutdown)
    rospy.spin()
