import argparse
import json
import os
import sys
import ipe_param_face_recongnition
from ctypes import *
import struct

sys.path.append("../../../")

from common.python.common import *
from common.python.callback_data_struct import *
from common.python.infer_process import *
from common.python import image_decoder
from common.python import image_encoder

class FaceDetectBox(ctypes.Structure):
    # One detected face as written by the detection post-process plugin
    # (lynFacePostProcess).  This mirrors a C-side struct layout, so field
    # order and ctypes types must not change.
    _fields_ = [
        ("xmin", ctypes.c_float),   # left edge of the face box, pixels
        ("ymin", ctypes.c_float),   # top edge
        ("xmax", ctypes.c_float),   # right edge
        ("ymax", ctypes.c_float),   # bottom edge
        ("area", ctypes.c_float),   # box area
        ("score", ctypes.c_float),  # detection confidence
        ("id", ctypes.c_int),
        ("landmark", ctypes.ARRAY(ctypes.c_float, 10)),  # 5 (x, y) landmark pairs
    ]


class FaceDetectInfo(ctypes.Structure):
    # Detection result block produced by the post-process plugin: up to 256
    # boxes, of which only the first `boxesNum` are valid.  C ABI layout —
    # do not reorder fields.
    _fields_ = [
        ("boxesNum", ctypes.c_uint16),               # count of valid entries in `boxes`
        ("boxes", ctypes.ARRAY(FaceDetectBox, 256)),
    ]


class bbox(ctypes.Structure):
    # One box in the layout consumed by the OSD plugin (lynDrawBoxAndText):
    # integer pixel coordinates plus a text label.  C ABI layout — do not
    # reorder fields.
    _fields_ = [
        ("xmin", ctypes.c_uint32),
        ("ymin", ctypes.c_uint32),
        ("xmax", ctypes.c_uint32),
        ("ymax", ctypes.c_uint32),
        ("score", ctypes.c_float),
        ("id", ctypes.c_int),
        ("label", ctypes.c_char * 64),  # NUL-terminated name drawn next to the box
    ]


class Box(ctypes.Structure):
    # Container handed to the OSD plugin: `boxesnum` valid entries in `boxes`.
    # C ABI layout — do not reorder fields.
    _fields_ = [("boxesnum", ctypes.c_uint32), ("boxes", ctypes.ARRAY(bbox, 256))]


class FaceRect:
    """Mutable integer rectangle (top-left corner plus size) for a face crop."""

    def __init__(self, x, y, w, h):
        self.x = x
        self.y = y
        self.w = w
        self.h = h

    def ensure_even(self, n):
        """Return `n` unchanged when it is even, otherwise the next value up."""
        return n if n % 2 == 0 else n + 1

    def make_even(self):
        """Round every coordinate and dimension up to an even value in place."""
        for attr in ("x", "y", "w", "h"):
            setattr(self, attr, self.ensure_even(getattr(self, attr)))


def extend_face_box(face_box, image_pixel_w, image_pixel_h):
    """Clamp a detected face box to the image and return an even-sized FaceRect.

    face_box must expose float xmin/ymin/xmax/ymax attributes.  Coordinates
    are truncated to ints, clamped into [0, image_pixel_w/h - 1], and the
    resulting rect is rounded up to even values — presumably because the
    hardware crop stage requires even geometry (TODO confirm against the IPE
    crop documentation).

    Fix: removed the unused `w`/`h` locals the original computed and ignored.
    """
    xmin = max(int(face_box.xmin), 0)
    ymin = max(int(face_box.ymin), 0)
    xmax = min(int(face_box.xmax), int(image_pixel_w - 1))
    ymax = min(int(face_box.ymax), int(image_pixel_h - 1))

    rect = FaceRect(xmin, ymin, xmax - xmin, ymax - ymin)
    rect.make_even()
    return rect


def check_face_box(face_box):
    """Return True when the detected box is big enough to be worth cropping.

    Both sides must exceed 20 pixels; tiny detections are skipped by callers.
    """
    width = face_box.xmax - face_box.xmin
    height = face_box.ymax - face_box.ymin
    return width > 20 and height > 20


class FeatureData:
    """Face-alignment helpers.

    Computes the 2x3 affine matrix that maps a face's 5 detected landmarks
    onto a canonical face template, for use as a warp-affine parameter before
    the feature-extraction model.
    """

    def __init__(self, num_feature=0):
        # Feature count and storage; `features` is populated elsewhere
        # (not in this file).
        self.numFeature = num_feature
        self.features = None

    @staticmethod
    def get_affine_transform(points_from, points_to, num_point):
        """Least-squares fit of a similarity transform from `points_from`
        to `points_to`.

        Both point lists are flat [x0, y0, x1, y1, ...] sequences with at
        least `num_point` (x, y) pairs.  The transform has 4 unknowns
        mm = [a, b, tx, ty] (rotation+scale coefficients a, b and
        translation tx, ty).  Returns (tm, tmatrix) where `tmatrix` is the
        2x3 matrix [[a, -b, tx], [b, a, ty]] and `tm` is a 6-element
        coefficient list (unused by callers in this file).

        NOTE(review): assumes the 4x4 normal-equation matrix `ma` is
        invertible, i.e. the landmarks are not degenerate (all coincident).
        """
        ma = np.zeros((4, 4))  # normal-equation matrix
        mb = np.zeros(4)       # right-hand side
        mm = np.zeros(4)       # solution [a, b, tx, ty]

        for i in range(num_point):
            # Accumulate sums over the current (x, y) pair.
            ma[0][0] += (
                points_from[0] * points_from[0] + points_from[1] * points_from[1]
            )
            ma[0][2] += points_from[0]
            ma[0][3] += points_from[1]

            mb[0] += points_from[0] * points_to[0] + points_from[1] * points_to[1]
            mb[1] += points_from[0] * points_to[1] - points_from[1] * points_to[0]
            mb[2] += points_to[0]
            mb[3] += points_to[1]

            # Advance to the next (x, y) pair by slicing two values off the
            # front; the originals are rebound locally, not mutated.
            points_from = points_from[2:]
            points_to = points_to[2:]

        # Fill in the remaining (anti)symmetric entries of the normal matrix.
        ma[1][1] = ma[0][0]
        ma[2][1] = ma[1][2] = -ma[0][3]
        ma[3][1] = ma[1][3] = ma[2][0] = ma[0][2]
        ma[2][2] = ma[3][3] = float(num_point)
        ma[3][0] = ma[0][3]

        # Solve ma @ mm = mb via the explicit inverse.
        mai = np.linalg.inv(ma)
        mm[0] = (
            mai[0][0] * mb[0]
            + mai[0][1] * mb[1]
            + mai[0][2] * mb[2]
            + mai[0][3] * mb[3]
        )
        mm[1] = (
            mai[1][0] * mb[0]
            + mai[1][1] * mb[1]
            + mai[1][2] * mb[2]
            + mai[1][3] * mb[3]
        )
        mm[2] = (
            mai[2][0] * mb[0]
            + mai[2][1] * mb[1]
            + mai[2][2] * mb[2]
            + mai[2][3] * mb[3]
        )
        mm[3] = (
            mai[3][0] * mb[0]
            + mai[3][1] * mb[1]
            + mai[3][2] * mb[2]
            + mai[3][3] * mb[3]
        )

        tm = [mm[0], -mm[1], mm[1], mm[2], mm[3], mm[0]]
        tmatrix = np.array([[mm[0], -mm[1], mm[2]], [mm[1], mm[0], mm[3]]])

        return tm, tmatrix

    @staticmethod
    def get_transform_mat(landmark, rect):
        """Build the 2x3 affine matrix aligning a face crop to the template.

        landmark: flat sequence of 5 (x, y) pairs in full-image coordinates.
        rect: FaceRect of the cropped face region; landmarks are shifted into
        crop-local coordinates (and clamped at 0) before the fit.
        """
        # Canonical 5-point destination template (eyes, nose tip, mouth
        # corners).  These look like the standard 112x112 ArcFace reference
        # landmarks with an 8px horizontal offset — TODO confirm against the
        # feature model's alignment spec.
        point_landmark = np.array(
            [
                30.2946 + 8.0,
                51.6963,
                65.5318 + 8.0,
                51.5014,
                48.0252 + 8.0,
                71.7366,
                33.5493 + 8.0,
                92.3655,
                62.729904 + 8.0,
                92.2041,
            ]
        )
        num_landmarks = 5

        # Translate landmarks into crop-local coordinates, clamping negatives
        # (a landmark slightly outside the crop) to the crop edge.
        landmarks = np.zeros(num_landmarks * 2)
        for i in range(num_landmarks):
            landmarks[2 * i] = landmark[2 * i] - rect.x
            landmarks[2 * i + 1] = landmark[2 * i + 1] - rect.y

            landmarks[2 * i] = max(0, landmarks[2 * i])
            landmarks[2 * i + 1] = max(0, landmarks[2 * i + 1])

        tm, matrix = FeatureData.get_affine_transform(
            landmarks, point_landmark, num_landmarks
        )

        return matrix


def lyn_free_callback(args):
    """Stream callback: release every device buffer handle listed in `args`."""
    for handle in args:
        error_check(sdk.lyn_free(handle))
    return 0


def return_buffers(callback_data) -> int:
    """Stream callback: hand each buffer in callback_data[1] back to the
    pool object callback_data[0] via its push() method."""
    pool, buffers = callback_data[0], callback_data[1]
    for item in buffers:
        pool.push(item)
    return 0


def face_recog_callback(args):
    """Stream callback: read cosine-match results and label the matched faces.

    `args` unpacks to:
      pDevCosineBuf    - device-side result buffer, freed here once consumed
      pHostCosineBuf_c - host address of the copied result; read below as
                         faceCount fp16 similarity scores followed by
                         faceCount float32 best-match indices (layout assumed
                         from the read pattern — confirm against the
                         lyn_execute_cosine documentation)
      faceIndexVec     - detection indices of the faces that were matched
      pHostBoxesInfo   - Box structure whose `label` fields are filled here
      face_lib_labels  - names of the enrolled face-library entries

    Fixes: corrected the "consine" typo in the diagnostic message and
    error-checked the device free, consistent with lyn_free_callback.
    """
    pDevCosineBuf, pHostCosineBuf_c, faceIndexVec,\
        pHostBoxesInfo, face_lib_labels = args
    faceCount = len(faceIndexVec)
    # The first faceCount int16 slots hold fp16 scores; the float32 indices
    # follow immediately after them in the same host buffer.
    int16_array = (ctypes.c_int16 * faceCount).from_address(pHostCosineBuf_c)
    float_array = (ctypes.c_float * faceCount).from_address(
        pHostCosineBuf_c + ctypes.sizeof(ctypes.c_int16) * faceCount
    )
    for i in range(faceCount):
        face_index = faceIndexVec[i]
        idx = int(float_array[i])            # best-matching library entry
        score = half2float(int16_array[i])   # fp16 similarity decoded to float
        if idx >= 0 and idx < len(face_lib_labels):
            name = "unknown"
            if score > 5:  # empirical match threshold — TODO confirm scale
                name = face_lib_labels[idx]
                print(f"{name} is matched")
            pHostBoxesInfo.boxes[face_index].label = name.encode("utf-8")
        else:
            print(f"invalid cosine execute result idx: {idx}")

    error_check(sdk.lyn_free(pDevCosineBuf))
    return 0

def main():
    """Run the full face detection + recognition pipeline on one JPEG image.

    Steps: parse CLI args -> create device context/stream -> decode the input
    image on the device -> run the SCRFD detector plus its post-process
    plugin -> crop/align each face and run the feature model -> cosine-match
    features against the enrolled library -> draw boxes and labels via the
    OSD plugin -> encode and save the output JPEG -> release all resources.

    Fixes applied (behavior of the original crashed on these paths):
      * `raise ("...")` raises a plain str, a TypeError in Python 3 — real
        exceptions are raised instead (messages unchanged).
      * pHostBoxesInfo / ipeCrop / ipeAffine were only created when faces
        were found, causing NameError on the no-face paths — they are now
        initialized unconditionally and teardown is guarded.
      * a couple of unchecked sdk return codes are now error_check'ed.
    """
    parser = argparse.ArgumentParser(description="Face Detection and Recognition")

    # NOTE(review): `default=` on a required positional argument is inert
    # (argparse only applies it with nargs="?"); kept as-is to avoid changing
    # the command-line interface.
    parser.add_argument(
        "input_image",
        metavar="input_image",
        type=str,
        default="../../../data/face_recognition_with_image_sample/test.jpeg",
        help="input jpeg file path",
    )
    parser.add_argument(
        "output_image",
        metavar="output_image",
        type=str,
        default="../../../output.jpeg",
        help="output jpeg file path",
    )
    parser.add_argument(
        "device_id", metavar="device_id", type=int, default=0, help="lynxi device id"
    )
    parser.add_argument(
        "--face_lib_config",
        metavar="face_lib_config",
        type=str,
        default="../../../data/face_recognition_with_image_sample/face_config_py.json",
        help="input face lib config file",
    )
    parser.add_argument(
        "--face_detect_model",
        metavar="face_detect_model",
        type=str,
        default="../../../model/scrfd_500m_b1_h640_w640_c3_iuint8_ofloat16_0000/Net_0",
        help="face detect model path",
    )
    parser.add_argument(
        "--face_detect_post_plugin",
        metavar="face_detect_post_plugin",
        type=str,
        default="../../../common/plugin/postprocess/cross_compiled_lib/libScrfdPostPlugin.so",
        help="face detect post process plugin path",
    )
    parser.add_argument(
        "--face_feature_model",
        metavar="face_feature_model",
        type=str,
        default="../../../model/w600k_mbf_b1_h112_w112_c3_iuint8_ofloat16_0000/Net_0",
        help="face detect model path",
    )
    parser.add_argument(
        "--osd_plugin",
        metavar="osd_plugin",
        type=str,
        default="../../../common/plugin/osd/lib/libOsdPlugin.so",
        help="osd plugin path",
    )

    args = parser.parse_args()

    input_file = args.input_image
    output_file = args.output_image
    device_id = args.device_id
    face_lib_config = args.face_lib_config
    face_detect_model = args.face_detect_model
    face_detect_post_plugin_path = args.face_detect_post_plugin
    face_feature_model = args.face_feature_model
    osd_plugin_path = args.osd_plugin

    # 1. Validate every input path up front (fix: raise real exceptions,
    # not bare strings).
    if not os.path.exists(input_file):
        raise RuntimeError("error: input jpeg file does not exist.")

    if not is_valid_device(device_id):
        raise RuntimeError("error: device id is invalid.")

    if not os.path.exists(face_lib_config):
        raise RuntimeError("error: face lib config file does not exist.")

    if not os.path.exists(face_detect_model):
        raise RuntimeError("error: face detect model does not exist.")

    if not os.path.exists(face_detect_post_plugin_path):
        raise RuntimeError("error: face detect post process plugin does not exist.")

    if not os.path.exists(face_feature_model):
        raise RuntimeError("error: face feature model does not exist.")

    if not os.path.exists(osd_plugin_path):
        raise RuntimeError("error: osd plugin does not exist.")

    # Read the face library config: entry names plus the packed feature file.
    with open(face_lib_config) as f:
        faces_config = json.load(f)
        face_lib_labels = faces_config["names"]
        features_path = faces_config["features"]

    # 2. Create the device context and stream.
    ctx, ret = sdk.lyn_create_context(device_id)
    error_check(ret, "create context")
    ret = sdk.lyn_register_error_handler(default_stream_error_handle)
    error_check(ret, "lyn_register_error_handler")
    stream, ret = sdk.lyn_create_stream()
    error_check(ret, "lyn_create_stream")
    error_check(sdk.lyn_cosine_init(), "lyn_cosine_init")


    # 3. Initialize the image decoder.
    print("image decode")

    image_dec = image_decoder.ImageDecoder()
    image_dec.init(input_file, False)
    imgInfo = image_dec.get_image_dec_info()

    # 4. Decode the image into a device-side buffer.
    decodeImg = sdk.lyn_codec_buf_t()
    decodeImg.size = imgInfo.output.predict_buf_size
    p_decode_buf_out, ret = sdk.lyn_malloc(decodeImg.size)
    error_check(ret)
    decodeImg.data = p_decode_buf_out
    image_dec.decode_image_to_device(stream, decodeImg.data)

    # 5. Load the detection and feature models.
    faceDetectModelInfo = ModelInfo(face_detect_model)
    faceFeatureModelInfo = ModelInfo(face_feature_model)

    # Register the post-process and OSD plugins (fix: use the star-imported
    # error_check, consistent with the rest of this function).
    faceDetectPlugin, ret = sdk.lyn_plugin_register(face_detect_post_plugin_path)
    error_check(ret, "lyn_plugin_register")
    osdPlugin, ret = sdk.lyn_plugin_register(osd_plugin_path)
    error_check(ret, "lyn_plugin_register")

    # Upload the enrolled face-library features to the device.
    face_lib_features_host_np = np.fromfile(features_path, dtype=np.uint8)
    face_lib_features_host = sdk.lyn_numpy_to_ptr(face_lib_features_host_np)
    face_lib_features, ret = sdk.lyn_malloc(len(face_lib_features_host_np))
    error_check(ret)
    error_check(
        sdk.lyn_memcpy(
            face_lib_features,
            face_lib_features_host,
            len(face_lib_features_host_np),
            sdk.lyn_memcpy_dir_t.ClientToServer,
        )
    )

    # 6. IPE pre-processing for detection (scale NV12 to model input size).
    print("detect faces")
    pDetectIpeBuf, ret = sdk.lyn_malloc(faceDetectModelInfo.input_size)
    error_check(ret)
    ipeScrfd = ipe_param_face_recongnition.IpeParamScrfd(
        faceDetectModelInfo.width, faceDetectModelInfo.height
    )
    ipeScrfd.set_img_info(
        imgInfo.output.width,
        imgInfo.output.height,
        sdk.lyn_pixel_format_t.LYN_PIX_FMT_NV12,
    )
    ipeScrfd.calc_param(stream, decodeImg.data, pDetectIpeBuf)

    # 7. Run the detection model on the APU.
    pDevDetectApuBuf, ret = sdk.lyn_malloc(faceDetectModelInfo.output_size)
    error_check(ret)
    error_check(
        sdk.lyn_execute_model_async(
            stream,
            faceDetectModelInfo.model,
            pDetectIpeBuf,
            pDevDetectApuBuf,
            faceDetectModelInfo.batch_size,
        )
    )

    # 8. Run the detection post-process plugin on the device.
    pythonapi.PyCapsule_GetPointer.restype = c_void_p
    pythonapi.PyCapsule_GetPointer.argtypes = [py_object, c_char_p]

    pDevDetectApuBuf_ptr = pythonapi.PyCapsule_GetPointer(pDevDetectApuBuf, None)

    pDevDetectInfo, ret = sdk.lyn_malloc(ctypes.sizeof(FaceDetectInfo))
    error_check(ret)
    pDevDetectInfo_ptr = pythonapi.PyCapsule_GetPointer(pDevDetectInfo, None)

    # Plugin parameter block: model h/w, image h/w, two float thresholds
    # (presumably score and NMS — confirm against the plugin source), then
    # the input and output device pointers.
    post_info = struct.pack(
        "4i2f2P",
        faceDetectModelInfo.height,
        faceDetectModelInfo.width,
        imgInfo.output.height,
        imgInfo.output.width,
        0.25,
        0.45,
        pDevDetectApuBuf_ptr,
        pDevDetectInfo_ptr,
    )

    error_check(
        sdk.lyn_plugin_run_async(
            stream,
            faceDetectPlugin,
            "lynFacePostProcess",
            post_info,
            len(post_info),
        )
    )

    # Copy the detection result back to the host and wait for the stream.
    face_detect_size = ctypes.sizeof(FaceDetectInfo)
    pHostDetectInfo_np = np.zeros(face_detect_size, dtype=np.uint8)
    pHostDetectInfo = sdk.lyn_numpy_to_ptr(pHostDetectInfo_np)
    error_check(
        sdk.lyn_memcpy_async(
            stream,
            pHostDetectInfo,
            pDevDetectInfo,
            face_detect_size,
            sdk.lyn_memcpy_dir_t.ServerToClient,
        )
    )
    error_check(sdk.lyn_synchronize_stream(stream))

    # Reinterpret the host bytes as a FaceDetectInfo structure.
    pHostDetectInfo_c = pythonapi.PyCapsule_GetPointer(pHostDetectInfo, None)
    c_face_info = ctypes.cast(
        pHostDetectInfo_c, ctypes.POINTER(FaceDetectInfo)
    ).contents

    # fix: create the OSD container and IPE handles unconditionally — the
    # original only created them when faces were found, so the no-face and
    # no-valid-face paths crashed with NameError further down.
    pHostBoxesInfo = Box()
    pHostBoxesInfo.boxesnum = c_face_info.boxesNum
    ipeCrop = None
    ipeAffine = None

    faceIndexVec = []
    if c_face_info.boxesNum > 0:
        print("get face features")

        # One feature-output slot per detected face.
        pDevFeatureApuBuf, ret = sdk.lyn_malloc(
            faceFeatureModelInfo.output_size * c_face_info.boxesNum
        )
        error_check(ret)
    else:
        print("no face detected")

    for faceIndex in range(c_face_info.boxesNum):
        box = c_face_info.boxes[faceIndex]
        faceRect = extend_face_box(box, imgInfo.output.width, imgInfo.output.height)
        if check_face_box(box):
            # Crop the face region out of the decoded NV12 image.
            ipeCrop = ipe_param_face_recongnition.IpeParamCrop(
                faceRect.x, faceRect.y, faceRect.w, faceRect.h
            )
            pFaceCropIpeBufOut, ret = sdk.lyn_malloc(faceRect.w * faceRect.h * 3)
            error_check(ret)
            ipeCrop.set_img_info(
                imgInfo.output.width,
                imgInfo.output.height,
                sdk.lyn_pixel_format_t.LYN_PIX_FMT_NV12,
            )
            ipeCrop.calc_param(stream, decodeImg.data, pFaceCropIpeBufOut)

            # Landmark-align the crop for the feature model (fix: dropped the
            # dead `np.zeros((2, 3))` pre-assignment the original overwrote).
            transformMat = FeatureData.get_transform_mat(box.landmark, faceRect)
            ipeAffine = ipe_param_face_recongnition.IpeParamAffine(
                faceFeatureModelInfo.width,
                faceFeatureModelInfo.height,
                transformMat,
                0,
                0,
                0,
            )
            pFeatureIpeBuf, ret = sdk.lyn_malloc(faceFeatureModelInfo.input_size)
            error_check(ret)
            ipeAffine.set_img_info(
                faceRect.w, faceRect.h, sdk.lyn_pixel_format_t.LYN_PIX_FMT_RGB24
            )
            ipeAffine.calc_param(stream, pFaceCropIpeBufOut, pFeatureIpeBuf)

            # Write this face's feature vector at its slot in the shared buffer.
            pDevFeatureApuBuf_seek = sdk.lyn_addr_seek(
                pDevFeatureApuBuf, faceFeatureModelInfo.output_size * len(faceIndexVec)
            )

            error_check(
                sdk.lyn_execute_model_async(
                    stream,
                    faceFeatureModelInfo.model,
                    pFeatureIpeBuf,
                    pDevFeatureApuBuf_seek,
                    faceFeatureModelInfo.batch_size,
                )
            )
            faceIndexVec.append(faceIndex)
            # Release the per-face scratch buffers once the stream gets here.
            error_check(
                sdk.lyn_stream_add_callback(
                    stream,
                    lyn_free_callback,
                    [pFaceCropIpeBufOut, pFeatureIpeBuf],
                )
            )

        # Record the (clamped, even-sized) rect for OSD regardless of whether
        # the face was large enough to run features on.
        pHostBoxesInfo.boxes[faceIndex].xmin = faceRect.x
        pHostBoxesInfo.boxes[faceIndex].ymin = faceRect.y
        pHostBoxesInfo.boxes[faceIndex].xmax = faceRect.x + faceRect.w
        pHostBoxesInfo.boxes[faceIndex].ymax = faceRect.y + faceRect.h

    # 9. Cosine-match the extracted features against the face library.
    if len(faceIndexVec) > 0:
        print("match faces")
        pDevCosineBuf, ret = sdk.lyn_malloc(
            len(faceIndexVec)
            * (ctypes.sizeof(ctypes.c_int16) + ctypes.sizeof(ctypes.c_float))
        )

        error_check(ret)

        ret = sdk.lyn_execute_cosine_async(
            stream,
            sdk.lyn_trans_type_t.LYN_TRANS,
            len(faceIndexVec),
            len(face_lib_labels),
            1,
            pDevFeatureApuBuf,
            sdk.lyn_data_type_t.DT_FLOAT16,
            face_lib_features,
            sdk.lyn_data_type_t.DT_FLOAT16,
            pDevCosineBuf,
        )
        error_check(ret)

        pHostCosineBuf_np = np.zeros(
            len(faceIndexVec)
            * (ctypes.sizeof(ctypes.c_int16) + ctypes.sizeof(ctypes.c_float))
            , dtype=np.uint8)
        pHostCosineBuf = sdk.lyn_numpy_to_ptr(pHostCosineBuf_np)
        error_check(
            sdk.lyn_memcpy_async(
                stream,
                pHostCosineBuf,
                pDevCosineBuf,
                len(faceIndexVec)
                * (ctypes.sizeof(ctypes.c_int16) + ctypes.sizeof(ctypes.c_float)),
                sdk.lyn_memcpy_dir_t.ServerToClient,
            )
        )

        # face_recog_callback labels the boxes and frees pDevCosineBuf.
        pHostCosineBuf_c = pythonapi.PyCapsule_GetPointer(pHostCosineBuf, None)
        error_check(
            sdk.lyn_stream_add_callback(
                stream,
                face_recog_callback,
                [pDevCosineBuf, pHostCosineBuf_c, faceIndexVec,
                 pHostBoxesInfo, face_lib_labels],
            )
        )

        # fix: check the sync result like every other sdk call here.
        error_check(sdk.lyn_synchronize_stream(stream))


    # 10. Draw boxes and labels onto the decoded image via the OSD plugin.
    if pHostBoxesInfo.boxesnum > 0:
        pHostBoxesInfo_np = np.frombuffer(
            pHostBoxesInfo,
            dtype=np.uint8,
            count=ctypes.sizeof(Box) // np.dtype(np.uint8).itemsize,
        )
        pHostBoxesInfo_ptr = sdk.lyn_numpy_to_ptr(pHostBoxesInfo_np)
        pDevBoxesInfo, ret = sdk.lyn_malloc(ctypes.sizeof(Box))
        error_check(ret)  # fix: the malloc result was previously unchecked
        error_check(
            sdk.lyn_memcpy_async(
                stream,
                pDevBoxesInfo,
                pHostBoxesInfo_ptr,
                ctypes.sizeof(Box),
                sdk.lyn_memcpy_dir_t.ClientToServer,
            )
        )

        pDevBoxesInfo_ptr = pythonapi.PyCapsule_GetPointer(pDevBoxesInfo, None)
        decodeImg_data_ptr = pythonapi.PyCapsule_GetPointer(decodeImg.data, None)
        # OSD parameters: boxes ptr, image w/h/format, image ptr, then four
        # ints whose meaning is defined by the plugin — confirm against the
        # OSD plugin source before changing.
        osd_para = struct.pack(
            "1P3i1P4i",
            pDevBoxesInfo_ptr,
            imgInfo.output.width,
            imgInfo.output.height,
            sdk.lyn_pixel_format_t.LYN_PIX_FMT_NV12,
            decodeImg_data_ptr,
            24,
            4,
            2,
            4,
        )
        error_check(
            sdk.lyn_plugin_run_async(
                stream, osdPlugin, "lynDrawBoxAndText", osd_para, len(osd_para)
            )
        )
        error_check(
            sdk.lyn_stream_add_callback(
                stream,
                lyn_free_callback,
                [pDevBoxesInfo],
            )
        )

    # 11. Encode the (possibly annotated) image and save it.
    print("save new image")
    encoder = image_encoder.ImageEncoder()
    encoder.encode_image_and_save(stream, output_file, decodeImg, imgInfo)

    # 12. Tear everything down.
    ipeScrfd.destory()
    # fix: ipeCrop / ipeAffine are only constructed when at least one face
    # passed check_face_box; guard their teardown.
    if ipeCrop is not None:
        ipeCrop.destory()
    if ipeAffine is not None:
        ipeAffine.destory()
    error_check(sdk.lyn_synchronize_stream(stream))
    error_check(sdk.lyn_destroy_stream(stream))
    error_check(sdk.lyn_plugin_unregister(faceDetectPlugin))
    error_check(sdk.lyn_plugin_unregister(osdPlugin))

    error_check(sdk.lyn_free(decodeImg.data))
    error_check(sdk.lyn_free(face_lib_features))
    error_check(sdk.lyn_free(pDetectIpeBuf))
    error_check(sdk.lyn_free(pDevDetectApuBuf))
    error_check(sdk.lyn_free(pDevDetectInfo))

    # The feature buffer only exists when detection found faces.
    if pHostBoxesInfo.boxesnum > 0:
        error_check(sdk.lyn_free(pDevFeatureApuBuf))

    faceDetectModelInfo.unload_model()
    faceFeatureModelInfo.unload_model()
    error_check(sdk.lyn_cosine_uninit())

    error_check(sdk.lyn_destroy_context(ctx))
    print("process over")


# Script entry point: run the whole detect -> recognize -> draw -> encode
# pipeline once for the paths given on the command line.
if __name__ == "__main__":
    main()