#include <android/bitmap.h>
#include <android/log.h>

#include <sys/system_properties.h> // for __system_property_get

#include <jni.h>

#include <math.h>

#include "platform.h"
#if NCNN_OPENCV
#include "opencv.h"
#else
#include <opencv2/core/core.hpp>
#endif

#include "FaceTrackDefine.h"
#include "utils.h"
#include "YTFaceTrackPro.h"
#include <sys/time.h>
#include "YTIlluminationScore.h"


// Resolves the field ID of the Java object's "nativePtr" member,
// which stores the native FaceTrack pointer as a Java long.
static jfieldID getHandleField(JNIEnv *env, jobject obj)
{
    jclass clazz = env->GetObjectClass(obj);
    // "J" is the JNI type signature for a Java long.
    jfieldID fid = env->GetFieldID(clazz, "nativePtr", "J");
    return fid;
}

template <typename T>
T *getHandle(JNIEnv *env, jobject obj)
{
    jlong handle = env->GetLongField(obj, getHandleField(env, obj));
    return reinterpret_cast<T *>(handle);
}

template <typename T>
void setHandle(JNIEnv *env, jobject obj, T *t)
{
    jlong handle = reinterpret_cast<jlong>(t);
    env->SetLongField(obj, getHandleField(env, obj), handle);
}


extern "C" {

// Cached JNI handles for com.tencent.youtufacetrack.YoutuFaceTrack$FaceStatus,
// populated once in nativeInit() and reused by every detection call below.
jclass clsFaceStatus;                // global ref to the FaceStatus class
jmethodID constructorFaceStatus;     // FaceStatus no-arg constructor "()V"
jfieldID xysId;                      // float[] landmark coordinates ("[F")
jfieldID pitchId;                    // float pitch angle ("F")
jfieldID yawId;                      // float yaw angle ("F")
jfieldID rollId;                     // float roll angle ("F")
jfieldID illumination_scoreId;       // double illumination score ("D")

// Caches the FaceStatus class and its constructor/field IDs so that the
// per-frame detection calls do not repeat the reflective JNI lookups.
// Returns JNI_TRUE on success; JNI_FALSE if any lookup fails (in which
// case the corresponding Java exception is already pending in the VM).
JNIEXPORT jboolean JNICALL Java_com_tencent_youtufacetrack_YoutuFaceTrack_nativeInit(JNIEnv* env, jobject thiz)
{
    jclass localCls = env->FindClass("com/tencent/youtufacetrack/YoutuFaceTrack$FaceStatus");
    if (localCls == NULL)
        return JNI_FALSE; // ClassNotFoundException pending

    // Promote to a global ref: the class is used across many JNI calls.
    clsFaceStatus = static_cast<jclass>(env->NewGlobalRef(localCls));
    env->DeleteLocalRef(localCls);
    if (clsFaceStatus == NULL)
        return JNI_FALSE; // OutOfMemoryError pending

    // Check each lookup before issuing the next JNI call: calling further
    // JNI functions with an exception pending is not allowed.
    constructorFaceStatus = env->GetMethodID(clsFaceStatus, "<init>", "()V");
    if (constructorFaceStatus == NULL)
        return JNI_FALSE;
    xysId = env->GetFieldID(clsFaceStatus, "xys", "[F");
    if (xysId == NULL)
        return JNI_FALSE;
    pitchId = env->GetFieldID(clsFaceStatus, "pitch", "F");
    if (pitchId == NULL)
        return JNI_FALSE;
    yawId = env->GetFieldID(clsFaceStatus, "yaw", "F");
    if (yawId == NULL)
        return JNI_FALSE;
    rollId = env->GetFieldID(clsFaceStatus, "roll", "F");
    if (rollId == NULL)
        return JNI_FALSE;
    // Note: illumination_score is a Java double ("D"); writers must use
    // SetDoubleField with this ID.
    illumination_scoreId = env->GetFieldID(clsFaceStatus, "illumination_score", "D");
    if (illumination_scoreId == NULL)
        return JNI_FALSE;

    return JNI_TRUE;
}

// Allocates the native FaceTrack instance and stashes its address
// in the Java object's "nativePtr" field.
JNIEXPORT void JNICALL Java_com_tencent_youtufacetrack_YoutuFaceTrack_NativeConstructor(JNIEnv* env, jobject thiz)
{
    setHandle(env, thiz, new FaceTrack);
}

// Frees the native FaceTrack instance and clears the stored handle, so a
// repeated destructor call deletes a null pointer (a no-op) rather than
// a stale one.
JNIEXPORT void JNICALL Java_com_tencent_youtufacetrack_YoutuFaceTrack_NativeDestructor(JNIEnv* env, jobject thiz)
{
    delete getHandle<FaceTrack>(env, thiz);
    setHandle<FaceTrack>(env, thiz, NULL);
}

// Model blobs copied out of the Java byte arrays in Init(). Kept at file
// scope so the buffers outlive the call — presumably the yt_* init
// functions retain the raw pointers they are given; TODO confirm.
static std::vector<unsigned char> ufdmtcc_bin;
static std::vector<unsigned char> ufat_bin;

// public native boolean Init(byte[] ufdmtccbin, byte[] ufatbin);
// Loads the face-detection (ufdmtcc) and face-alignment (ufat) model blobs
// from the supplied Java byte arrays and initializes both SDK components.
// Returns JNI_TRUE only if both initializations succeed.
JNIEXPORT jboolean JNICALL Java_com_tencent_youtufacetrack_YoutuFaceTrack_Init(JNIEnv* env, jobject thiz, jbyteArray ufdmtccbin, jbyteArray ufatbin)
{
    // init face detection
    int detect_ret;
    {
        int len = env->GetArrayLength(ufdmtccbin);
        __android_log_print(ANDROID_LOG_DEBUG, "YoutuFaceTrack", "ufdmtccbin len %i", len);
        ufdmtcc_bin.resize(len);
        env->GetByteArrayRegion(ufdmtccbin, 0, len, (jbyte*)ufdmtcc_bin.data());
        detect_ret = ytfacedetect::yt_facedetection_init(ufdmtcc_bin.data());
        __android_log_print(ANDROID_LOG_DEBUG, "YoutuFaceTrack", "load ufdmtcc %d %d", detect_ret, len);
    }

    // init face alignment
    int align_ret;
    {
        int len = env->GetArrayLength(ufatbin);
        ufat_bin.resize(len);
        env->GetByteArrayRegion(ufatbin, 0, len, (jbyte*)ufat_bin.data());
        align_ret = ytfacetrack::yt_facealignment_init(ufat_bin.data());
        __android_log_print(ANDROID_LOG_DEBUG, "YoutuFaceTrack", "load ufat %d %d", align_ret, len);
    }

    // Report failure instead of unconditionally returning JNI_TRUE.
    // 0 == success, matching the detect_faceshape() convention used below.
    return (detect_ret == 0 && align_ret == 0) ? JNI_TRUE : JNI_FALSE;
}

// Runs face detection on an NV21/YUV420sp camera frame.
// Returns a FaceStatus[] (as jobject) with one element per detected face,
// or NULL when detection fails or no face is found.
JNIEXPORT jobject JNICALL Java_com_tencent_youtufacetrack_YoutuFaceTrack_DoDetectionProcessYUV(JNIEnv* env, jobject thiz, jbyteArray yuv420sp, jint width, jint height)
{
    FaceTrack* inst = getHandle<FaceTrack>(env, thiz);

    // Convert the YUV420sp frame to packed RGB.
    cv::Mat rgb(height, width, CV_8UC3);
    jbyte* indata = env->GetByteArrayElements(yuv420sp, 0);
    yuv420sp_to_rgb_fast_asm((const unsigned char*)indata, width, height, rgb.data);
    // JNI_ABORT: the buffer was only read, so skip the needless copy-back.
    env->ReleaseByteArrayElements(yuv420sp, indata, JNI_ABORT);

    // Device-specific camera orientation fixup before detection.
    std::string device_model_name = get_device_model_name();
    if (device_model_name == "Nexus 6" || device_model_name == "Nexus 6P")
    {
        // nexus 6/6p rotate back 90 + mirror
        transform_by_orientation_tag(rgb, 5);
    }
    else
    {
        // all other device rotate back 270 + mirror
        transform_by_orientation_tag(rgb, 7);
    }
    __android_log_print(ANDROID_LOG_DEBUG, "pancheng", "000");

    // detect faceshape
    std::vector<TC_FaceShape> faceshapes;
    int r0 = inst->detect_faceshape(rgb, faceshapes);
    if (r0 != 0 || faceshapes.empty())
    {
        // no face
        return NULL;
    }
    __android_log_print(ANDROID_LOG_DEBUG, "pancheng", "111");

    // Build the FaceStatus[] result, one element per face.
    jobjectArray jfaceshapeArray = env->NewObjectArray((jsize)faceshapes.size(), clsFaceStatus, NULL);
    // Cast to int: %i with a size_t argument is a format/argument mismatch.
    __android_log_print(ANDROID_LOG_DEBUG, "pancheng", "faceshapes.size() : %i", (int)faceshapes.size());
    for (size_t i = 0; i < faceshapes.size(); i++)
    {
        const TC_FaceShape& faceshape = faceshapes[i];

        // Copy the 90 (x, y) landmark pairs into the Java float[] field.
        // Assumes TC_FaceShape begins with 180 contiguous floats — TODO confirm.
        jfloatArray jArray = env->NewFloatArray(90 * 2);
        env->SetFloatArrayRegion(jArray, 0, 90 * 2, (const float*)&faceshape);
        jobject newObject = env->NewObject(clsFaceStatus, constructorFaceStatus);
        env->SetObjectField(newObject, xysId, jArray);

        env->DeleteLocalRef(jArray);

        // pose estimate
        float pitch = 0.f;
        float yaw = 0.f;
        float roll = 0.f;
        ytposeestimate::yt_pose_estimate(faceshape, pitch, yaw, roll);

        int illScore = ytsdk::yt_get_face_illumination_score(rgb, faceshape);

        env->SetFloatField(newObject, pitchId, pitch);
        env->SetFloatField(newObject, yawId, yaw);
        env->SetFloatField(newObject, rollId, roll);
        // illumination_score is a Java double ("D"); the int widens implicitly.
        env->SetDoubleField(newObject, illumination_scoreId, illScore);
        __android_log_print(ANDROID_LOG_DEBUG, "pancheng", "illScore ： %i", illScore);

        env->SetObjectArrayElement(jfaceshapeArray, (jsize)i, newObject);
        env->DeleteLocalRef(newObject);
    }
    return jfaceshapeArray;
}







// Runs face detection on an RGBA frame.
// Returns a FaceStatus[] (as jobject) with one element per detected face,
// or NULL when detection fails or no face is found.
JNIEXPORT jobject JNICALL Java_com_tencent_youtufacetrack_YoutuFaceTrack_DoDetectionProcessRGBA(JNIEnv* env, jobject thiz, jbyteArray rgba, jint width, jint height)
{
    FaceTrack* inst = getHandle<FaceTrack>(env, thiz);

    cv::Mat rgb(height, width, CV_8UC3);

    jbyte* indata = env->GetByteArrayElements(rgba, 0);

    bench_start();
    {
        // Wrap the Java buffer without copying, then strip the alpha channel.
        cv::Mat rgbamat(height, width, CV_8UC4, (unsigned char*)indata);
        rgba2rgb(rgbamat, rgb);
    }
    bench_end("rgba2rgb");

    // JNI_ABORT: the buffer was only read, so skip the needless copy-back.
    env->ReleaseByteArrayElements(rgba, indata, JNI_ABORT);

    // detect faceshape
    std::vector<TC_FaceShape> faceshapes;
    int r0 = inst->detect_faceshape(rgb, faceshapes);
    if (r0 != 0 || faceshapes.empty())
    {
        // no face
        return NULL;
    }

    // Build the FaceStatus[] result, one element per face.
    jobjectArray jfaceshapeArray = env->NewObjectArray((jsize)faceshapes.size(), clsFaceStatus, NULL);

    bench_start();
    for (size_t i = 0; i < faceshapes.size(); i++)
    {
        const TC_FaceShape& faceshape = faceshapes[i];

        // Copy the 90 (x, y) landmark pairs into the Java float[] field.
        // Assumes TC_FaceShape begins with 180 contiguous floats — TODO confirm.
        jfloatArray jArray = env->NewFloatArray(90 * 2);
        env->SetFloatArrayRegion(jArray, 0, 90 * 2, (const float*)&faceshape);

        jobject newObject = env->NewObject(clsFaceStatus, constructorFaceStatus);
        env->SetObjectField(newObject, xysId, jArray);

        env->DeleteLocalRef(jArray);

        // pose estimate
        float pitch = 0.f;
        float yaw = 0.f;
        float roll = 0.f;
        ytposeestimate::yt_pose_estimate(faceshape, pitch, yaw, roll);

        env->SetFloatField(newObject, pitchId, pitch);
        env->SetFloatField(newObject, yawId, yaw);
        env->SetFloatField(newObject, rollId, roll);

        // illumination_score
        int illScore = ytsdk::yt_get_face_illumination_score(rgb, faceshape);

        // BUGFIX: illumination_scoreId refers to a Java double field ("D"),
        // so SetIntField was a type-mismatched accessor (undefined behavior).
        // Use SetDoubleField, consistent with the YUV path.
        env->SetDoubleField(newObject, illumination_scoreId, illScore);
        env->SetObjectArrayElement(jfaceshapeArray, (jsize)i, newObject);

        env->DeleteLocalRef(newObject);
    }
    bench_end("pose_estimate and rest");

    return jfaceshapeArray;
}



}
