# coding=utf-8
import numpy as np
import ctypes
import cv2
import numpy.ctypeslib as npct
from skimage import transform as trans
from ctypes import *


class FaceNative():
    """ctypes wrapper around a native face detection / recognition library.

    Loads the shared module at ``modulePath``, opens a native session for the
    model files at ``modelPath``, and exposes:

    * :meth:`MtcnnFaceDetctorNative`   — MTCNN face detection,
    * :meth:`ExtractorFeaturesNative` / :meth:`ExtractorFeaturesNative_direct`
      — 512-d L2-normalised face embeddings,
    * :meth:`transformed` / :meth:`transformed_single` /
      :meth:`transformed_single_M` — 5-point similarity alignment to
      112x112 (or 224x224) crops.
    """

    # Canonical 5-point reference landmarks (both eyes, nose tip, both mouth
    # corners) for a 112x112 aligned crop, as used by ArcFace-style pipelines.
    _REF_LANDMARKS = np.array([
        [30.2946, 51.6963],
        [65.5318, 51.5014],
        [48.0252, 71.7366],
        [33.5493, 92.3655],
        [62.7299, 92.2041]], dtype=np.float32)

    def __init__(self, modulePath, modelPath):
        """Load the native module and create a model session.

        :param modulePath: path to the shared library (.so/.dll).
        :param modelPath:  path passed to the native ``InitFaceSession``.
        """
        path = ctypes.c_char_p(modelPath.encode())
        self.model = ctypes.CDLL(modulePath.encode())
        # NOTE(review): the session handle is declared c_long; on 64-bit
        # Windows c_long is 32 bits, so a pointer-valued handle would be
        # truncated there — confirm the native return type (c_void_p is safer).
        self.model.InitFaceSession.restype = c_long
        self.session = self.model.InitFaceSession(path)
        self.model.MtcnnFaceDetector.argtypes = [ctypes.c_long, ctypes.c_char_p, ctypes.c_int, ctypes.c_int,
                                                 ctypes.c_int, POINTER(POINTER(c_float)), ctypes.POINTER(ctypes.c_int)]
        self.model.ExtractorFeatures.argtypes = [ctypes.c_long, ctypes.c_char_p, ctypes.c_int, ctypes.c_int]
        self.model.ExtractorFeatures.restype = ctypes.POINTER(ctypes.c_float)
        self.model.ExtractorFeatures_direct.argtypes = [ctypes.c_long, ctypes.c_char_p, ctypes.c_int, ctypes.c_int]
        self.model.ExtractorFeatures_direct.restype = ctypes.POINTER(ctypes.c_float)
        self.model.ReleaseFaceSession.argtypes = [ctypes.c_long]
        self.model.FreeFloat.argtypes = [POINTER(POINTER(c_float))]

    def MtcnnFaceDetctorNative(self, imageFace, minSize):
        """Detect faces with the native MTCNN detector.

        :param imageFace: image as a numpy array; shape[1]/shape[0] are passed
                          as width/height, raw pixel bytes are passed directly.
        :param minSize:   minimum face size forwarded to the detector.
        :returns: float32 array of shape (num_faces, 15); columns 5: are
                  consumed as the 5 (x, y) landmarks by :meth:`transformed`.
        """
        mem = ctypes.POINTER(ctypes.c_float)()
        size = c_int(0)
        # tobytes() replaces the long-deprecated tostring() (removed in
        # NumPy 1.23); the produced byte buffer is identical.
        self.model.MtcnnFaceDetector(self.session, imageFace.tobytes(), imageFace.shape[1], imageFace.shape[0],
                                     minSize, ctypes.byref(mem), ctypes.byref(size))
        # Guard: with zero detections the result pointer may be NULL, and
        # dereferencing mem.contents would raise "NULL pointer access".
        if size.value == 0 or not mem:
            if mem:
                self.model.FreeFloat(ctypes.byref(mem))
            return np.empty((0, 15), dtype=np.float32)
        res = np.ctypeslib.as_array((ctypes.c_float * 15 * size.value).from_address(ctypes.addressof(mem.contents)))
        # Copy out of the native buffer before releasing it.
        detections = res.reshape(size.value, 15).copy()
        self.model.FreeFloat(ctypes.byref(mem))
        return detections

    def _extract(self, native_fn, image):
        """Shared body of the two feature-extraction entry points.

        Calls ``native_fn`` (one of the two native extractors), views the
        returned 512-float buffer and returns an L2-normalised copy.
        """
        mem = native_fn(self.session, image.tobytes(), image.shape[1], image.shape[0])
        res = np.ctypeslib.as_array((ctypes.c_float * 512).from_address(ctypes.addressof(mem.contents)))
        # NOTE(review): this buffer is never passed to FreeFloat — if the
        # native side heap-allocates it (as it does for the detector), this
        # leaks 2 KiB per call; confirm buffer ownership with the C API.
        # The division allocates a fresh array, so the result does not alias
        # native memory.
        return res / np.sqrt(np.dot(res, res))

    def ExtractorFeaturesNative_direct(self, image):
        """Extract an L2-normalised 512-d embedding via the *direct* path."""
        return self._extract(self.model.ExtractorFeatures_direct, image)

    def ExtractorFeaturesNative(self, image):
        """Extract an L2-normalised 512-d embedding (standard path)."""
        return self._extract(self.model.ExtractorFeatures, image)

    def _warp(self, image, pts, scale):
        """Similarity-align one face to a (112*scale)-square crop.

        :param pts:   detection row; pts[5:] are the 5 (x, y) landmarks.
        :param scale: 1 for a 112x112 crop, 2 for 224x224.
        :returns: (warped_image, M) where M is the 2x3 affine matrix.
        """
        src = self._REF_LANDMARKS * scale          # multiplication copies, the class constant is untouched
        src[:, 0] += 8.0 * scale                   # horizontal offset used by the original pipeline
        dst = pts[5:].astype(np.float32).reshape((5, 2))
        tform = trans.SimilarityTransform()
        tform.estimate(dst, src)
        M = tform.params[0:2, :]
        side = int(112 * scale)
        warped = cv2.warpAffine(image, M, (side, side), borderValue=0.0)
        return warped, M

    def transformed(self, image, l_pts):
        """Align every detected face to a 112x112 crop.

        :param l_pts: iterable of detection rows (landmarks in columns 5:).
        :returns: list of 112x112 aligned crops, one per row.
        """
        return [self._warp(image, pts, 1)[0] for pts in l_pts]

    def transformed_single(self, image, l_pts):
        """Align a single face (one detection row) to a 224x224 crop."""
        warped, _ = self._warp(image, l_pts, 2)
        return warped

    def transformed_single_M(self, image, l_pts):
        """Align a single face to 224x224 and also return the 2x3 affine M."""
        return self._warp(image, l_pts, 2)

    def __del__(self):
        # Guard: __init__ may have raised before self.session was assigned
        # (e.g. CDLL load failure), and __del__ still runs on the partial object.
        if getattr(self, "model", None) is not None and hasattr(self, "session"):
            self.model.ReleaseFaceSession(self.session)
