import time
import cv2
import numpy as np
import os
from paz.pipelines import DetectMiniXceptionFER
from pyseeta6 import FaceAligner
from pyseeta6.age_predictor import AgePredictor
from pyseeta6.gender_predictor import GenderPredictor
from .ultra_face_inference import UltraFaceInference
from pyseeta6.common import _Face

class FaceAnalyzer:
    """Detect faces and predict emotion, age and gender for each one.

    Combines an UltraFace detector for localization, a SeetaFace 5-point
    landmark aligner, and SeetaFace age/gender predictors; emotion comes
    from the paz MiniXception FER pipeline.
    """

    def __init__(self):
        self.emotion_detect = DetectMiniXceptionFER()
        # Map paz FER class names to Chinese display labels.
        self.emotion_map = {"angry": "生气", "disgust": "沮丧", "fear": "害怕", "happy": "高兴",
                        "sad": "悲伤", "surprise": "惊讶", "neutral": "中性"}
        # SeetaFace gender codes: 0 -> male, 1 -> female labels.
        self.gender_map = {0: "男", 1: "女"}
        self.model_dir = os.path.expanduser(
            '~') + "/Lepi_Data/ros/face_recognizer/models"
        self.age_predictor = AgePredictor(os.path.join(self.model_dir, "age_predictor.csta"))
        self.gender_predictor = GenderPredictor(os.path.join(self.model_dir, "gender_predictor.csta"))
        self.detector = UltraFaceInference()
        # Down-scale the detector input for speed on the target hardware
        # (240x180; 480x360 was the earlier, slower setting).
        self.detector.setResize(240, 180)
        self.face_aligner = FaceAligner(os.path.join(self.model_dir, "face_landmarker_pts5.csta"))

    @staticmethod
    def _to_gray(frame):
        """Return a single-channel version of *frame*.

        3-channel input is assumed to be BGR (OpenCV convention) and is
        converted; anything else is returned unchanged.
        """
        if len(frame.shape) == 3:
            return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        return frame

    def detect_face(self, frame):
        """Run the UltraFace detector and wrap each box in a _Face record.

        Returns a list of _Face objects with left/top/right/bottom set;
        detector labels and probabilities are discarded.
        """
        gray = self._to_gray(frame)
        boxes, labels, probs = self.detector.detect(gray)
        faces = []
        for box in boxes:
            face = _Face()
            face.left, face.top, face.right, face.bottom = box
            faces.append(face)
        return faces

    def _iter_aligned(self, frame):
        """Yield (face, landmarks) for every face detected in *frame*.

        Shared by detect_age/detect_gender, which previously duplicated
        this detect + align loop. Uses _to_gray so grayscale input no
        longer crashes the unconditional cvtColor the originals had.
        """
        image_gray = self._to_gray(frame)
        for face in self.detect_face(frame):
            yield face, self.face_aligner.align(image_gray, face)

    def detect_emotion(self, frame):
        """Return the raw paz FER inference dict for *frame*.

        inferences['boxes2D'] holds entries like
        Box2D(182, 191, 329, 338, 0.4051913022994995, 'neutral');
        inferences['image'] is the annotated frame.
        """
        # Original computed an unused `boxes` local here; dropped.
        return self.emotion_detect(frame)

    def detect_age(self, frame):
        """Return [[face, age_string], ...] for every detected face."""
        return [[face, str(self.age_predictor.predict_age_with_crop(frame, landmarks))]
                for face, landmarks in self._iter_aligned(frame)]

    def detect_gender(self, frame):
        """Return [[face, gender_label], ...] for every detected face.

        gender_label is the Chinese label from self.gender_map.
        """
        return [[face, self.gender_map[self.gender_predictor.predict_gender_with_crop(frame, landmarks)]]
                for face, landmarks in self._iter_aligned(frame)]

    def analyze_face(self, frame):
        """Return [[face, "gender age emotion"], ...] for *frame*.

        Uses the FER pipeline's boxes (not UltraFace) as the face source,
        so the emotion class name is available per box; age and gender are
        then predicted from landmarks aligned on those boxes.
        """
        image_gray = self._to_gray(frame)
        inferences = self.emotion_detect(frame)
        detections = []
        for box in inferences['boxes2D']:
            face = _Face()
            face.left, face.top, face.right, face.bottom = box.coordinates
            landmarks = self.face_aligner.align(image_gray, face)
            emotion = self.emotion_map[box.class_name]
            age = self.age_predictor.predict_age_with_crop(frame, landmarks)
            gender = self.gender_predictor.predict_gender_with_crop(frame, landmarks)
            detections.append([face, f"{self.gender_map[gender]} {age} {emotion}"])
        return detections