import json
import sys

import cv2
import numpy as np
import tflite_runtime.interpreter as tflite

# Face-detector backend selection (value of FaceRecognitionLite.detect_mode).
MODE_CASCADE = "CASCADE"
MODE_MODEL = "MODEL"

# Recognition tuning constants.
DETECT_THRESHOLD = 0.5  # minimum DNN confidence to accept a face box
RKG_PEAKS = 3  # nearest-neighbour features averaged per match; presumably the feature DB stores this many vectors per person (see __match's index math) — TODO confirm against the enrolment code
RKG_DIST_THRE = 22  # mean embedding distance above which a face is treated as a stranger
GENDER_LIST = ['Male', 'Female']
AGE_LIST = ['Juvenile', 'Youth', 'Adult', 'Middle-aged', 'Elder']

# Database files, resolved relative to the script directory (sys.path[0]).
FEATURE_DB = f'{sys.path[0]}/db/feature.npy'
NAME_DB = f'{sys.path[0]}/db/name.npy'
INFO_DB = f'{sys.path[0]}/db/info.json'

# Model files: face detection (Haar cascade + TensorFlow DNN), FaceNet
# embedding (TFLite), and Caffe gender / age classifiers.
DETECT_CASCADE = f'{sys.path[0]}/model/face/haarcascade_frontalface_default.xml'
DETECT_MODEL = f'{sys.path[0]}/model/face/opencv_face_detector_uint8.pb'
DETECT_MODEL_TXT = f'{sys.path[0]}/model/face/opencv_face_detector.pbtxt'
FACE_MODEL = f'{sys.path[0]}/model/face/facenet.tflite'
GENDER_MODEL = f'{sys.path[0]}/model/gender/gender_net.caffemodel'
GENDER_MODEL_TXT = f'{sys.path[0]}/model/gender/gender_net.prototxt'
AGE_MODEL = f'{sys.path[0]}/model/age/age_net.caffemodel'
AGE_MODEL_TXT = f'{sys.path[0]}/model/age/age_net.prototxt'

class FaceRecognitionLite:
    """Lightweight face detection / identification / attribute pipeline.

    Detects faces with either a Haar cascade or an OpenCV DNN detector,
    embeds each face with a TFLite FaceNet model, matches embeddings
    against an on-disk feature database, and estimates gender and age
    with Caffe models.
    """

    def __init__(self, img=None, detect_mode=MODE_MODEL,
                 do_option=False, debug_mode=False):
        """Load all models and the feature/info database.

        img:         optional initial BGR frame (numpy array).
        detect_mode: MODE_CASCADE or MODE_MODEL; selects the detector.
        do_option:   when True, lite() also reports the traffic count.
        debug_mode:  when True, lite() returns an annotated debug image.
        """
        # base
        self.img = img
        self.detect_mode = detect_mode
        self.do_option = do_option
        self.debug_mode = debug_mode
        # face embedding model (TFLite FaceNet, 160x160 input)
        interpreter = tflite.Interpreter(model_path=FACE_MODEL)
        interpreter.allocate_tensors()
        self.__face_model = interpreter
        self.__face_input = interpreter.get_input_details()
        self.__face_output = interpreter.get_output_details()
        # detect model — only the selected backend is loaded
        if self.detect_mode == MODE_CASCADE:
            self.__detect_cascade = cv2.CascadeClassifier(DETECT_CASCADE)
        elif self.detect_mode == MODE_MODEL:
            self.__detect_model = cv2.dnn.readNet(
                DETECT_MODEL, DETECT_MODEL_TXT)
        # attr models (gender, age)
        self.__gender_model = cv2.dnn.readNet(GENDER_MODEL, GENDER_MODEL_TXT)
        self.__age_model = cv2.dnn.readNet(AGE_MODEL, AGE_MODEL_TXT)
        # db
        self.__load_db()

    def __load_db(self):
        """(Re)load the feature matrix and person info from disk."""
        self.__feature_db = np.load(FEATURE_DB)
        # FIX: the original used json.load(open(...)) and never closed
        # the file handle; use a context manager instead.
        with open(INFO_DB, 'r', encoding='utf8') as fp:
            self.__info_db = json.load(fp)
        self.__name_db = list(self.__info_db)

    def __detect_by_cascade(self):
        """Detect faces with the Haar cascade; returns (x, y, w, h) boxes."""
        gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        return self.__detect_cascade.detectMultiScale(gray, 1.3, 5)

    def __detect_by_model(self):
        """Detect faces with the DNN detector; returns [x, y, w, h] boxes."""
        h = self.img.shape[0]
        w = self.img.shape[1]
        blob = cv2.dnn.blobFromImage(
            self.img, 1.0, (300, 300), [104, 117, 123], True, False)
        self.__detect_model.setInput(blob)
        det = self.__detect_model.forward()
        boxes = []
        for i in range(det.shape[2]):
            confidence = det[0, 0, i, 2]
            if confidence > DETECT_THRESHOLD:
                # detections are normalised to [0, 1]; scale to pixels
                x1 = int(det[0, 0, i, 3] * w)
                y1 = int(det[0, 0, i, 4] * h)
                x2 = int(det[0, 0, i, 5] * w)
                y2 = int(det[0, 0, i, 6] * h)
                boxes.append([x1, y1, x2 - x1, y2 - y1])
        return boxes

    def __rkg(self, boxes):
        """Recognise every detected face box.

        Returns (persons, genders, ages, traffic, debug_img); debug_img
        is None unless debug_mode is set.
        """
        img_cp = self.img.copy()
        persons = []
        # Keep counter keys in sync with the label constants.
        genders = {g: 0 for g in GENDER_LIST}
        ages = {a: 0 for a in AGE_LIST}
        traffic = 0
        aligned = [self.__align(box, img_cp.shape[1], img_cp.shape[0])
                   for box in boxes]
        # BUG FIX: the original called boxes.remove(box) while iterating
        # the same list, which skips the element after each removal and
        # desynchronises the boxes[idx] / persons[idx] pairing used for
        # debug drawing. Collect the kept boxes in a separate list.
        kept = []
        for box in aligned:
            face = img_cp[box[1]:box[3], box[0]:box[2]]
            h, w = face.shape[:2]
            # reject empty or implausibly proportioned crops
            if h <= 0 or w <= 0 or w / h > 3 or w / h < 1 / 3:
                continue
            # face attr recognition
            gender, age = self.__attr(face)
            genders[gender] += 1
            ages[age] += 1
            # face id recognition
            name, kind = self.__person(face)
            persons.append({
                'name': name,
                'type': kind,
                'rectangle': [box[0], box[1], box[2] - box[0], box[3] - box[1]],
                'gender': gender,
                'age': age
            })
            kept.append(box)
        if self.do_option:
            traffic = len(persons)
        if self.debug_mode:
            # Each person is paired with exactly the box it came from.
            for box, p in zip(kept, persons):
                txt = "{}({}, {})".format(
                    str(p['name']), str(p['gender']), str(p['age']))
                cv2.rectangle(
                    img_cp, (box[0], box[1]), (box[2], box[3]),
                    (0, 255, 0), 2, 8)
                cv2.putText(
                    img_cp, txt, (box[0], box[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            return persons, genders, ages, traffic, img_cp
        return persons, genders, ages, traffic, None

    @staticmethod
    def __align(box, w0, h0):
        """Expand an (x, y, w, h) box into a square [x1, y1, x2, y2],
        clamped to the image bounds (w0, h0)."""
        x, y, w, h = box
        if h > w:
            # taller than wide: widen symmetrically around the centre
            pad = (h - w) / 2
            return [max(0, int(x - pad)), y,
                    min(w0, int(x + w + pad)), min(h0, y + h)]
        # wider than tall (or square): heighten symmetrically
        pad = (w - h) / 2
        return [x, max(0, int(y - pad)),
                min(w0, x + w), min(h0, int(y + h + pad))]

    def __person(self, face):
        """Embed a face crop and match it against the database.

        Returns (name, type); ('Unknown', 'Stranger') when no match.
        """
        face_id = cv2.resize(face, (160, 160))
        # scale uint8 pixels into [-1, ~1) as the embedding model expects
        normalized = (face_id.astype(np.float32) / 127.0) - 1
        self.__face_model.set_tensor(
            self.__face_input[0]['index'], [normalized])
        self.__face_model.invoke()
        feature = self.__face_model.get_tensor(self.__face_output[0]['index'])
        return self.__match(feature)

    def __match(self, feature):
        """k-nearest-neighbour match of an embedding against the DB."""
        distances = np.asarray(
            [np.linalg.norm(feature - row) for row in self.__feature_db])
        # FIX: np.argpartition(d, RKG_PEAKS) raised when the DB held
        # RKG_PEAKS or fewer entries; clamp k and guard the empty DB.
        if distances.size == 0:
            return 'Unknown', 'Stranger'
        k = min(RKG_PEAKS, distances.size)
        peak_idx = np.argsort(distances)[:k]
        if np.mean(distances[peak_idx]) > RKG_DIST_THRE:
            return 'Unknown', 'Stranger'
        # The index math implies the feature DB stores RKG_PEAKS vectors
        # per enrolled person — TODO confirm against the enrolment code.
        names = [self.__name_db[int(peak) // RKG_PEAKS] for peak in peak_idx]
        name = max(names, key=names.count)
        return name, self.__info_db[name]

    def __attr(self, face):
        """Predict (gender, age) labels for a face crop."""
        face_attr = cv2.dnn.blobFromImage(
            face, 1.0, (227, 227),
            (78.4263377603, 87.7689143744, 114.895847746), swapRB=False)
        # gender
        self.__gender_model.setInput(face_attr)
        gender_pred = self.__gender_model.forward()
        gender = GENDER_LIST[gender_pred[0].argmax()]
        # age
        self.__age_model.setInput(face_attr)
        age_pred = self.__age_model.forward()
        # the age net emits more buckets than AGE_LIST; shift and clamp
        age = AGE_LIST[min(4, max(0, age_pred.argmax() - 2))]
        return gender, age

    def new_frame(self, img):
        """Set the frame to analyse; returns self for chaining."""
        self.img = img
        return self

    def renew_db(self):
        """Reload the feature/name/info databases from disk."""
        self.__load_db()

    def lite(self):
        """Detect and recognise every face in the current frame.

        Returns (persons, genders, ages, traffic, debug_img).
        Raises RuntimeError when no image is set or the mode is unknown.
        """
        if not isinstance(self.img, np.ndarray):
            raise RuntimeError("invalid image input")
        if self.detect_mode == MODE_CASCADE:
            boxes = self.__detect_by_cascade()
        elif self.detect_mode == MODE_MODEL:
            boxes = self.__detect_by_model()
        else:
            raise RuntimeError("invalid detect mode")
        return self.__rkg(boxes)
