// sed -i "s/com_iim_caffe_//g" extraction_java_api.cpp
// javac ExtractFeatureService.java
// export LD_LIBRARY_PATH=/home/iim/ego/build/registration_online
// export LD_LIBRARY_PATH=/var/darknet/registration_online/build/:/var/darknet/registration_online/:/opt/ego/cudnn-v7/

#include <jni.h>

#include <mutex>
#include <opencv2/opencv.hpp>
#include <opencv/cv.hpp>
#include <pthread.h>
#include <thread>

#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <unistd.h>

#include "freetype.hpp"
#include "cnn_function.hpp"
#include "util.h"

#ifdef __cplusplus
extern "C" {
#endif
    // JNI entry points exported to Java class com.iim.caffe.LoadLibraryModule.
    // ("_1" in a JNI symbol encodes a literal underscore in the Java method name.)
    // Speaks a welcome prompt for the users recognized by the last camera_face call.
    JNIEXPORT jboolean JNICALL Java_com_iim_caffe_LoadLibraryModule_play_1audio(JNIEnv *env, jobject obj, jobjectArray user_name);
    // Logs a warning string (audio playback not implemented here).
    JNIEXPORT jboolean JNICALL Java_com_iim_caffe_LoadLibraryModule_play_1warnig(JNIEnv *env, jobject obj, jstring content);
    // Blocks until the local camera sees a face; fills feature_save with 512-float features.
    JNIEXPORT jint JNICALL Java_com_iim_caffe_LoadLibraryModule_camera_1face(JNIEnv *env, jobject obj, jobjectArray feature_save);
    // Lazily initializes the recognition network.
    JNIEXPORT jboolean JNICALL Java_com_iim_caffe_LoadLibraryModule_recognition_1start(JNIEnv *env, jobject obj);
    // Detects faces in an encoded image buffer and extracts their features.
    JNIEXPORT jint JNICALL Java_com_iim_caffe_LoadLibraryModule_recognition_1face(
        JNIEnv *env, jobject obj, jbyteArray image_data, jobjectArray feature_save, jlongArray code_ret);
    // Registration path: requires exactly one face; returns a JPEG face patch.
    JNIEXPORT jbyteArray JNICALL Java_com_iim_caffe_LoadLibraryModule_people_1face_1detection(
        JNIEnv *env, jobject obj, jbyteArray image_data, jintArray face_region, jfloatArray feature_save,
        jint model_version, jlongArray code_ret);
#ifdef __cplusplus
}
#endif

// A pthread_create argument packs (thread slot index, client socket fd) into one
// long long: value = index * HANDLE_THREAD_INDEX_OFFSET + fd.
#define HANDLE_THREAD_INDEX_OFFSET 5000000000
// Maximum number of concurrent streaming client handler threads.
#define HANDLE_THREAD_MAX_NUM 3
// Size of the length prefix (bytes) sent before each encoded JPEG frame.
#define IMAGE_SIZE_BYTE 4
// Upper bound on faces/features handled per frame.
#define MAX_FEATURE_NUM 10

std::mutex extraction_lock;        // serializes network init/detection/extraction
bool have_init = false;            // network initialized (guarded by extraction_lock)
bool have_init_camera = false;     // camera + worker threads started
static int image_width = 1280;     // requested capture width
static int image_height = 960;     // requested capture height
static int fps =  15;              // requested capture frame rate
std::mutex read_image_lock;        // guards current_grab_image / have_new_recognition_frame
bool have_new_recognition_frame = false;
std::mutex read_rtsp_image_lock;   // guards current_rtsp_image and the rtsp_* vectors below
bool have_new_rtsp_frame = false;
std::mutex have_detect_lock;       // guards have_detect and the current_user_* vectors
bool have_detect = false;          // camera_face completed; play_audio may consume its result
// Latest camera frame / frame under recognition / frame published to streaming clients.
cv::Mat current_grab_image, current_recognition_image, current_rtsp_image;
std::vector<std::string> current_user_name;      // names from the last recognition pass
std::vector<int> current_user_spoofing;          // 1 = treated as a live (non-spoof) face
std::vector<std::string> rtsp_user_name;         // names to draw on the streamed frame
std::vector<int> rtsp_user_spoofing;             // spoofing flags for the streamed frame
int current_feature_index = 0;
std::vector<cv::Rect> rois_global;               // face boxes from the recognition path
std::vector<cv::Rect> rois_rtsp;                 // face boxes to draw on the streamed frame
//const static cv::Scalar default_color = cv::Scalar(0x3d, 0xc2, 0xff);
//const static cv::Scalar default_color = cv::Scalar(0xff, 0x00, 0x00);
const static cv::Scalar default_color = cv::Scalar(0x00, 0x00, 0xff);  // BGR: red label text

cv::VideoCapture capture;                        // local camera handle
int socket_desc = 0;                             // listening socket (port 8001)
bool listen_thread_stop = false;                 // stop flags polled by the worker threads
bool grab_image_thread_stop = false;
pthread_t stream_thread_pid;                     // listen_thread id
pthread_t grab_image_thread_pid;                 // grab_image_thread id
pthread_t handle_thread_id[HANDLE_THREAD_MAX_NUM];  // per-client worker ids; 0 = slot free

cv::Ptr<cv::freetype::FreeType2> ft2;            // CJK-capable text renderer for overlays

// Per-client streaming worker.  `arg` points to a long long packing the client
// socket fd and this worker's slot in handle_thread_id[]:
//     value = thread_index * HANDLE_THREAD_INDEX_OFFSET + client_sock
// Protocol (client -> server on the same TCP socket):
//   "START_PLAY" -> reply "READY_OK"; each "NEXT_FRAME" -> one JPEG frame
//   prefixed with its 4-byte native-endian length; "STOP_PLAY" ends the session.
void *send_stream_thread(void *arg)
{
    int client_sock = *((long long *)arg) % HANDLE_THREAD_INDEX_OFFSET;
    int handle_thread_index = *((long long *)arg) / HANDLE_THREAD_INDEX_OFFSET;
    // 2-second receive timeout so a dead client cannot wedge this thread.
    struct timeval time_out = {2, 0};
    socklen_t time_out_length = sizeof(time_out);
    setsockopt(client_sock, SOL_SOCKET, SO_RCVTIMEO, &time_out, time_out_length);

    char client_message[128] = {0};
    int read_size;
    unsigned int image_max_size = 3 * 1024 * 1024 + IMAGE_SIZE_BYTE;
    uchar *img_encode_buf = (uchar *)malloc(image_max_size * sizeof(uchar));
    cv::Mat img;
    std::vector<uchar> img_encode;
    bool start_play = true;
    double send_time = 0;   // msec timestamp of the last fps log line
    int send_frame = 0;     // frames sent since send_time
    while((read_size = read(client_sock , client_message , sizeof(client_message))) > 0 ) {
        if(listen_thread_stop) break;
        if(strncmp(client_message, "STOP_PLAY", 9) == 0){
            std::cout << "STOP_PLAY\n" << std::endl;
            start_play = false;
            break;  // Fixed: do not close() here as well; the single close() at the end suffices.
        } else if(strncmp(client_message, "START_PLAY", 10) == 0){
            send_time = getMsecOfNow();
            std::cout << "START_PLAY\n" << std::endl;
            start_play = true;
            usleep(200000);
            send(client_sock, "READY_OK", 8, 0);
        } else if(start_play && strncmp(client_message, "NEXT_FRAME", 10) == 0){
            double now_time = getMsecOfNow();
            bool print_log = false;
            if(now_time - send_time > 20000){
                std::cout << "threader: " << handle_thread_id[handle_thread_index] << ", NEXT_FRAME, fps: "
                          << send_frame * 1000 / (now_time - send_time) << std::endl;
                print_log = true;
                send_frame = 0;
                send_time = now_time;
            }
            send_frame += 1;
            // Poll until the recognition path publishes a fresh frame.
            while(true){
                if(listen_thread_stop) break;
                {
                    // Fixed: take the lock for real.  The original constructed the
                    // guard with std::adopt_lock on a mutex that was never locked,
                    // so there was no mutual exclusion and the guard's destructor
                    // unlocked a non-owned mutex (undefined behavior).
                    std::lock_guard<std::mutex> read_rtsp_image_lock_ws(read_rtsp_image_lock);
                    if(have_new_rtsp_frame) {
                        have_new_rtsp_frame = false;
                        img = current_rtsp_image.clone();
                        int thickness = -1;  // negative: filled glyphs (FreeType2 putText)
                        int linestyle = 4;
                        for(unsigned int m = 0; m < rois_rtsp.size() && m < MAX_FEATURE_NUM; m++){
                            cv::rectangle(img, cvPoint(rois_rtsp[m].x, rois_rtsp[m].y),
                                          cvPoint(rois_rtsp[m].x + rois_rtsp[m].width, rois_rtsp[m].y + rois_rtsp[m].height),
                                          cvScalar(255,0,0), 4);
                            // Fixed: bounds-check the parallel vectors before indexing;
                            // they are only guaranteed in sync by convention.
                            if(m < rtsp_user_name.size() && m < rtsp_user_spoofing.size()
                               && rtsp_user_name[m] != ""){
                                cv::Rect roi = rois_rtsp[m];
                                cv::Point textOrg(roi.x + roi.width, roi.y + roi.height);
                                if(rtsp_user_spoofing[m]){
                                    ft2->putText(img, rtsp_user_name[m], textOrg, img.rows / 25,
                                                 default_color, thickness, linestyle, true);
                                } else {
                                    ft2->putText(img, rtsp_user_name[m] + "_fake", textOrg, img.rows / 25,
                                                 default_color, thickness, linestyle, true);
                                }
                            }
                        }
                        rois_rtsp.clear();
                        rtsp_user_name.clear();
                        rtsp_user_spoofing.clear();
                        break;
                    }
                }
                usleep(30000);
            }
            // Fixed: if we left the wait loop because of the stop flag, `img` may
            // be empty; resizing/encoding it would throw.
            if(listen_thread_stop) break;
            cv::resize(img, img, cv::Size(640, 480), 0, 0, cv::INTER_LINEAR);
            cv::imencode(".jpg", img, img_encode);
            // Grow the send buffer once, to the final size, if the frame does not
            // fit (the original freed and re-malloc'ed on every doubling step).
            if(img_encode.size() > image_max_size - IMAGE_SIZE_BYTE){
                while(img_encode.size() > image_max_size - IMAGE_SIZE_BYTE){
                    image_max_size *= 2;
                }
                free(img_encode_buf);
                img_encode_buf = (uchar *)malloc(image_max_size * sizeof(uchar));
            }
            *((int *)img_encode_buf) = int(img_encode.size());
            memcpy(img_encode_buf + IMAGE_SIZE_BYTE, img_encode.data(), img_encode.size() * sizeof(uchar));
            // Send length prefix + payload, looping over partial writes.
            int bytes_left = img_encode.size() + IMAGE_SIZE_BYTE;
            int written_bytes = 0;
            bool write_error = false;
            while(bytes_left > 0){
                int write_bytes = write(client_sock, img_encode_buf + written_bytes, bytes_left);
                if(write_bytes < 0){
                    write_error = true;
                    std::cout << "write error: " << img_encode.size() * sizeof(uchar) << std::endl;
                    break;
                } else if(write_bytes == 0) {
                    break;
                } else {
                    bytes_left -= write_bytes;
                    written_bytes += write_bytes;
                }
                if(print_log){
                    std::cout << "written_bytes: " << written_bytes
                              << ", img_encode.size: " << img_encode.size() << std::endl;
                }
            }
            memset(client_message, 0, sizeof(client_message));
            if(write_error){
                break;  // Fixed: close() once at the end instead of twice.
            }
        } else {
            std::cout << "client_message not known: " << client_message << std::endl;
        }
    }
    free(img_encode_buf);
    close(client_sock);
    std::cout << "close client_sock: " << read_size << ", handle_thread_index: " << handle_thread_index << std::endl;
    handle_thread_id[handle_thread_index] = 0;  // release this worker slot for reuse
    return 0;
}

void *listen_thread(void *arg)
{
    usleep(10000000);
    socket_desc = socket(AF_INET, SOCK_STREAM , 0);
    if (socket_desc == -1) {
        std::cout << "Could not create socket" << std::endl;
    }
    int opt = 1;
    setsockopt(socket_desc, SOL_SOCKET, SO_REUSEADDR, (const void *)&opt, sizeof(opt));
    struct timeval time_out = {3, 0};
    socklen_t time_out_length = sizeof(time_out);
    setsockopt(socket_desc, SOL_SOCKET, SO_RCVTIMEO, &time_out, time_out_length);

    struct sockaddr_in server , client;
    //Prepare the sockaddr_in structure
    server.sin_family = AF_INET;
    server.sin_addr.s_addr = htonl(INADDR_ANY);
    server.sin_port = htons(8001);
    if(bind(socket_desc, (struct sockaddr *)&server , sizeof(server)) < 0) {
        perror("bind failed");
        exit(-1);
        return 0;
    }
    if(listen(socket_desc, 10) < 0) {
        perror("listen failed");
        exit(-1);
        return 0;
    }
    int c = sizeof(struct sockaddr_in);
    int client_sock;
    for(int i = 0; i < HANDLE_THREAD_MAX_NUM; i++) handle_thread_id[i] = 0;
    while(true){
        if(listen_thread_stop) break;
        client_sock = accept(socket_desc, (struct sockaddr *)&client, (socklen_t *)&c);
        int thread_index = -1;
        if(client_sock < 0) {
            //perror("accept failed");
            continue;
        } else {
            for(int i = 0; i < HANDLE_THREAD_MAX_NUM; i++){
                if(handle_thread_id[i] == 0){
                    thread_index = i;
                    break;
                }
            }
            if(thread_index == -1){
                std::cout << "can not found thread_index" << std::endl;
                close(client_sock);
                continue;
            }
        }
        std::cout << "create send_stream_thread: " << thread_index << std::endl;
        long long send_stream_thread_args = client_sock + thread_index * HANDLE_THREAD_INDEX_OFFSET;
        pthread_create(handle_thread_id + thread_index, NULL, send_stream_thread, &send_stream_thread_args);
    }
    for(int i = 0; i < HANDLE_THREAD_MAX_NUM; i++){
        if(handle_thread_id[i] != 0){
            pthread_join(handle_thread_id[i], NULL);
            handle_thread_id[i] = 0;
        }
    }
    close(socket_desc);
    std::cout << "ThreaderVideoStream::listen_thread quit..." << std::endl;
    return 0;
}

// Opens the default capture device and applies the configured resolution and
// frame rate.  Terminates the process when no camera is available.
void init_camera() {
    capture.open(0);
    if (!capture.isOpened()) {
        std::cout << "Video capture not successfully initialized, exit!\n" << std::endl;
        exit(-1);
        return;
    }
    std::cout << "Video capture successfully initialized\n" << std::endl;
    // Apply the requested capture properties in one pass.
    const int prop_ids[] = {CV_CAP_PROP_FRAME_WIDTH, CV_CAP_PROP_FRAME_HEIGHT, CV_CAP_PROP_FPS};
    const double prop_vals[] = {(double)image_width, (double)image_height, (double)fps};
    for (int i = 0; i < 3; ++i) {
        capture.set(prop_ids[i], prop_vals[i]);
    }
}

// Stops the streaming and capture threads and tears the network down.
// Always returns true.
JNIEXPORT jboolean JNICALL Java_com_iim_caffe_LoadLibraryModule_recognition_1stop(JNIEnv *env, jobject obj)
{
    // Signal the worker loops to exit first.
    listen_thread_stop = true;
    grab_image_thread_stop = true;
    {
        // Fixed: serialize teardown with any in-flight detection/extraction call;
        // the original called uninit_network() with no synchronization while
        // another JNI thread could still be running inference.
        std::lock_guard<std::mutex> lock_ws(extraction_lock);
        uninit_network();
    }
    pthread_join(stream_thread_pid, NULL);
    pthread_join(grab_image_thread_pid, NULL);
    return true;
}

// Camera capture loop: continuously grabs frames into current_grab_image and
// raises have_new_recognition_frame.  Exits the process if the camera yields
// no frame for more than 10 seconds.
void *grab_image_thread(void *arg){
    bool bCap = false;
    static double grab_image_success_time = getMsecOfNow();  // msec of last good grab
    while(!grab_image_thread_stop) {
        {
            // Fixed: take the lock for real.  The original passed std::adopt_lock
            // on a mutex that was never locked, so there was no exclusion and the
            // guard unlocked a non-owned mutex (undefined behavior).
            std::lock_guard<std::mutex> read_image_lock_ws(read_image_lock);
            bCap = capture.read(current_grab_image);
            if (bCap){
                have_new_recognition_frame = true;
            }
        }
        double now = getMsecOfNow();
        if (!bCap) {
            std::cout << "Cannot grab image from camera" << std::endl;
            if(now - grab_image_success_time > 10000){
                std::cout << "long time cannot grab image from camera, exit!" << std::endl;
                exit(-1);  // NOTE(review): exits the whole (JVM) process
            } else {
                usleep(100000);  // brief back-off before retrying
            }
        } else {
            grab_image_success_time = now;
            usleep(20000);  // ~50 Hz polling cap
        }
    }
    std::cout << "grab_image_thread quit." << std::endl;
    return 0;
}

// Logs the given warning text to stdout.  Always returns true.
// (Audio playback is not implemented; this only echoes the string.)
JNIEXPORT jboolean JNICALL Java_com_iim_caffe_LoadLibraryModule_play_1warnig(JNIEnv *env, jobject obj, jstring content)
{
    const char *raw_string = env->GetStringUTFChars(content, 0);
    std::string user_name_str(raw_string);
    std::cout << user_name_str << std::endl;
    env->ReleaseStringUTFChars(content, raw_string);
    // Fixed: the function is declared jboolean but had no return statement,
    // which is undefined behavior in C++.
    return true;
}

// Consumes the result of the most recent camera_face call: caches the user
// names and spoofing flags for on-frame annotation, publishes the annotated
// frame to the streaming thread, and (rate-limited to once per 6 seconds)
// would trigger a welcome audio prompt.  Returns false if camera_face has not
// completed since the last call (have_detect is set at the end of camera_face).
JNIEXPORT jboolean JNICALL Java_com_iim_caffe_LoadLibraryModule_play_1audio(JNIEnv *env, jobject obj, jobjectArray user_name)
{
    static double play_audio_time = 0;  // msec timestamp of the last audio prompt
    // Fixed: actually acquire the lock (the original used std::adopt_lock on an
    // unlocked mutex -- no exclusion, and UB when the guard unlocked it).
    std::lock_guard<std::mutex> have_detect_lock_ws(have_detect_lock);
    if(!have_detect){
        // Fixed typo in the message ("paly_audio" -> "play_audio").
        std::cout << "ERROR: in play_audio, have not call recognition_face first!" << std::endl;
        return false;
    }
    have_detect = false;
    current_user_name.clear();
    current_user_spoofing.clear();
    std::string user_name_audio = "";  // last non-empty name; used for the audio prompt
    for(unsigned int m = 0; m < rois_global.size() && m < MAX_FEATURE_NUM; m++){
        jstring user_name_java = (jstring)(env->GetObjectArrayElement(user_name, m));
        if(user_name_java == NULL) {
            current_user_name.push_back("");
            current_user_spoofing.push_back(1);
            continue;
        }
        const char *raw_string = env->GetStringUTFChars(user_name_java, 0);
        std::string user_name_str(raw_string);
        // Fixed: release the UTF chars -- the original leaked them on every call.
        env->ReleaseStringUTFChars(user_name_java, raw_string);
        if(user_name_str != ""){
            user_name_audio = user_name_str;
            current_user_name.push_back(user_name_str);
            current_user_spoofing.push_back(1);
        } else {
            current_user_name.push_back("");
            current_user_spoofing.push_back(1);
        }
        env->DeleteLocalRef(user_name_java);
    }
    {
        // Publish the recognized frame plus its annotations for the stream.
        std::lock_guard<std::mutex> read_rtsp_image_lock_ws(read_rtsp_image_lock);
        current_rtsp_image = current_recognition_image.clone();
        have_new_rtsp_frame = true;
        rois_rtsp.clear();
        for(unsigned int m = 0; m < rois_global.size() && m < MAX_FEATURE_NUM; m++){
            rois_rtsp.push_back(rois_global[m]);
        }
        rtsp_user_name = current_user_name;
        rtsp_user_spoofing = current_user_spoofing;
    }
    double now = getMsecOfNow();
    if(user_name_audio != "" && now - play_audio_time > 6000){
        play_audio_time = now;
        // Text-to-speech command kept for reference; playback is currently disabled.
        std::string audio_cmd = "export EKHO_DATA_PATH=/home/nvidia/ekho/ekho-data && "
            "/home/nvidia/ekho/ekho --speed 30 --volume=-50 --pitch=-20 -o /home/nvidia/ekho/test.wma 你好," +
            user_name_audio;
        //int system_ret = system(audio_cmd.c_str());
        //system_ret = system("aplay /home/nvidia/ekho/test.wma");
        int system_ret = 0; //system("aplay /opt/ego/runtime/execution/text2speech/audio/welcome_chn.wav &");
        if(system_ret != 0) std::cout << "play audio failed!" << std::endl;
    }
    return true;
}

// One-time network initialization.  Callers must hold extraction_lock;
// have_init is only flipped after init_network() has completed.
void init_all()
{
    init_network();
    have_init = true;
}

// Ensures the recognition network is initialized.  Always returns true.
JNIEXPORT jboolean JNICALL Java_com_iim_caffe_LoadLibraryModule_recognition_1start(JNIEnv *env, jobject obj)
{
    // Fixed: actually acquire the lock.  The original used std::adopt_lock on a
    // mutex that was never locked, so concurrent callers could race into
    // init_all(), and the guard unlocked a non-owned mutex (undefined behavior).
    std::lock_guard<std::mutex> lock_ws(extraction_lock);
    if(!have_init){
        init_all();
    }
    return true;
}

// Decodes an encoded image buffer, detects faces, and writes one 512-float
// feature vector per face into feature_save (a Java float[][]).  code_ret[0]
// receives a status code: 1000 ok, 1005 empty/too-small buffer, 1006 no face.
// Returns the number of detected faces.
JNIEXPORT jint JNICALL Java_com_iim_caffe_LoadLibraryModule_recognition_1face(
    JNIEnv *env, jobject obj, jbyteArray image_data, jobjectArray feature_save, jlongArray code_ret)
{
    double start = getMsecOfNow();
    int face_count = 0;
    jboolean isCopy;
    jlong* code_point = env->GetLongArrayElements(code_ret, &isCopy);
    if(NULL == image_data) {
        std::cout << "recognition: image_data is empty!\n" << std::endl;
        code_point[0] = 1005;
        env->ReleaseLongArrayElements(code_ret, code_point, 0);
        return face_count;
    }
    uchar *image_data_point = (uchar *)env->GetByteArrayElements(image_data, &isCopy);
    jsize image_data_size = env->GetArrayLength(image_data);
    cv::Mat img_input;
    if (image_data_point == NULL || 0 == image_data_size || image_data_size < 1000) {
        std::cout << "recognition: image_data is empty!\n" << std::endl;
        code_point[0] = 1005;
        env->ReleaseLongArrayElements(code_ret, code_point, 0);
        // Fixed: only release the array if GetByteArrayElements actually
        // pinned it; releasing a NULL element pointer is invalid JNI usage.
        if(image_data_point != NULL)
            env->ReleaseByteArrayElements(image_data, (jbyte *)image_data_point, 0);
        return face_count;
    } else {
        // Fixed: bulk copy via the range constructor instead of a byte loop.
        std::vector<uchar> image_raw_data(image_data_point, image_data_point + image_data_size);
        env->ReleaseByteArrayElements(image_data, (jbyte *)image_data_point, 0);
        img_input = cv::imdecode(image_raw_data, CV_LOAD_IMAGE_COLOR);
    }

    const int face_features_length = 512;  // feature vector size per face
    float *face_feature = 0;
    int detection_bbox[MAX_BBOX_NUM * 4];  // x1,y1,x2,y2 per detected face
    // Fixed: actually acquire the lock (the original used std::adopt_lock on an
    // unlocked mutex -- no exclusion, and UB on unlock).
    std::lock_guard<std::mutex> lock_ws(extraction_lock);
    if(!have_init){
        init_all();
    }
    detection_face(img_input, &face_count, detection_bbox);
    if(face_count == 0){
        code_point[0] = 1006;
        env->ReleaseLongArrayElements(code_ret, code_point, 0);
        return face_count;
    } else {
        face_feature = (float *)malloc(face_count * face_features_length * sizeof(float));
        extract_face_feature(img_input, face_count, face_feature, detection_bbox);
        code_point[0] = 1000;
        env->ReleaseLongArrayElements(code_ret, code_point, 0);
    }
    // Copy each face's feature vector into the caller-supplied float[][].
    for(int i = 0; i < face_count && i < MAX_FEATURE_NUM; i++) {  // fixed signed/unsigned mix
        jfloatArray feature_java = (jfloatArray)env->GetObjectArrayElement(feature_save, i);
        if (feature_java == NULL){
            std::cout << "recognition_face: feature_save is NULL! " << i << std::endl;
            continue;
        }
        env->SetFloatArrayRegion(feature_java, 0, face_features_length, face_feature + i * face_features_length);
        env->DeleteLocalRef(feature_java);
    }
    if(face_feature) free(face_feature);
    double finish = getMsecOfNow();
    std::cout << "recognition thread id:" << std::this_thread::get_id()
              << ", spend: " << ((double)(finish - start)) << std::endl;
    return face_count;
}

// Blocks until the camera produces a frame containing at least one face,
// extracts one 512-float feature per face into feature_save, and publishes
// frames to the streaming thread while waiting.  The first call lazily starts
// the camera, the capture thread and the TCP streaming listener.  Returns the
// number of detected faces and sets have_detect so play_audio may consume the
// result (face boxes are presumably published via rois_global by the
// detection helpers -- TODO confirm against cnn_function).
JNIEXPORT jint JNICALL Java_com_iim_caffe_LoadLibraryModule_camera_1face(JNIEnv *env, jobject obj, jobjectArray feature_save)
{
    rois_global.clear();
    const int face_features_length = 512;
    double start = getMsecOfNow();
    // Fixed: actually acquire the lock.  The original constructed the guard
    // with std::adopt_lock on a mutex that was never locked, so there was no
    // exclusion and the guard unlocked a non-owned mutex (undefined behavior).
    // Held for the whole call so play_audio cannot observe a partial result.
    std::lock_guard<std::mutex> have_detect_lock_ws(have_detect_lock);
    if(!have_init_camera){
        std::cout << "init_camera!" << std::endl;
        init_camera();
        ft2 = cv::freetype::createFreeType2();
        ft2->loadFontData("/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc", 0);
        have_init_camera = true;
        pthread_create(&grab_image_thread_pid, NULL, grab_image_thread, NULL);
        pthread_create(&stream_thread_pid, NULL, listen_thread, NULL);
    }

    float *face_feature = 0;
    int face_count = 0;
    int detection_bbox[MAX_BBOX_NUM * 4];
    while (true) {
        // Wait for a fresh frame from grab_image_thread.
        while (true) {
            {
                std::lock_guard<std::mutex> read_image_lock_ws(read_image_lock);  // fixed adopt_lock misuse
                if(have_new_recognition_frame){
                    have_new_recognition_frame = false;
                    current_recognition_image = current_grab_image.clone();
                    break;
                }
            }
            usleep(20000);
        }

        {
            std::lock_guard<std::mutex> lock_ws(extraction_lock);  // fixed adopt_lock misuse
            if(!have_init){
                init_all();
            }
            detection_face(current_recognition_image, &face_count, detection_bbox);
        }
        if(face_count == 0){
            // No face yet: still publish the raw frame so the stream keeps moving.
            std::lock_guard<std::mutex> read_rtsp_image_lock_ws(read_rtsp_image_lock);  // fixed adopt_lock misuse
            current_rtsp_image = current_recognition_image.clone();
            have_new_rtsp_frame = true;
        } else {
            face_feature = (float *)malloc(face_count * face_features_length * sizeof(float));
            {
                std::lock_guard<std::mutex> lock_ws(extraction_lock);  // fixed adopt_lock misuse
                extract_face_feature(current_recognition_image, face_count, face_feature, detection_bbox);
            }
            break;
        }
    }
    // Copy each face's feature vector into the caller-supplied float[][].
    for(int i = 0; i < face_count && i < MAX_FEATURE_NUM; i++) {  // fixed signed/unsigned mix
        jfloatArray feature_java = (jfloatArray)env->GetObjectArrayElement(feature_save, i);
        if (feature_java == NULL){
            std::cout << "recognition_face: feature_save is NULL! " << i << std::endl;
            continue;
        }
        env->SetFloatArrayRegion(feature_java, 0, face_features_length, face_feature + i * face_features_length);
        env->DeleteLocalRef(feature_java);
    }
    if(face_feature) free(face_feature);
    double finish = getMsecOfNow();
    std::cout << "recognition thread id:" << std::this_thread::get_id()
              << ", spend: " << ((double)(finish - start)) << std::endl;
    have_detect = true;  // allow play_audio to consume this frame's result
    return face_count;
}

// Registration path: decodes the image buffer, requires exactly one
// sufficiently large face (>= 112x112), writes its bounding box (x, y, w, h)
// into face_region and its 512-float feature into feature_save, and returns a
// JPEG of the face patch (box expanded ~20% per side, clamped to the image).
// code_ret[0] status: 1000 ok, 1002 multiple faces, 1003 face too small,
// 1005 empty image, 1006 no face.  Failure paths return an empty byte array.
JNIEXPORT jbyteArray JNICALL Java_com_iim_caffe_LoadLibraryModule_people_1face_1detection(
    JNIEnv *env, jobject obj, jbyteArray image_data, jintArray face_region, jfloatArray feature_save,
    jint model_version, jlongArray code_ret)
{
    double start = getMsecOfNow();
    int detection_bbox[MAX_BBOX_NUM * 4];  // x1,y1,x2,y2 per detected face
    jboolean isCopy;  // JNI_TRUE: returned buffer is a copy; JNI_FALSE: direct pointer
    jbyteArray return_patch_image = env->NewByteArray(0);  // empty result for failure paths
    jlong* code_point = env->GetLongArrayElements(code_ret, &isCopy);
    if(NULL == image_data) {
        std::cout << "features extraction: image_data is empty!\n" << std::endl;
        code_point[0] = 1005;
        env->ReleaseLongArrayElements(code_ret, code_point, 0);
        return return_patch_image;
    }
    uchar *image_data_point = (uchar *)env->GetByteArrayElements(image_data, &isCopy);
    jsize image_data_size = env->GetArrayLength(image_data);
    if (image_data_point == NULL || 0 == image_data_size || image_data_size < 1000) {
        std::cout << "features extraction: image_data is empty!\n" << std::endl;
        code_point[0] = 1005;
        env->ReleaseLongArrayElements(code_ret, code_point, 0);
        // Fixed: only release if GetByteArrayElements actually pinned the array.
        if(image_data_point != NULL)
            env->ReleaseByteArrayElements(image_data, (jbyte *)image_data_point, 0);
        return return_patch_image;
    }

    // Fixed: bulk copy via the range constructor instead of a byte loop.
    std::vector<uchar> image_raw_data(image_data_point, image_data_point + image_data_size);
    env->ReleaseByteArrayElements(image_data, (jbyte *)image_data_point, 0);
    cv::Mat img_temp = cv::imdecode(image_raw_data, CV_LOAD_IMAGE_COLOR);

    int face_count = 0;
    // Fixed: actually acquire the lock (the original used std::adopt_lock on an
    // unlocked mutex -- no exclusion, and UB when the guard unlocked it).
    std::lock_guard<std::mutex> lock_ws(extraction_lock);
    if(!have_init){
        init_all();
    }
    detection_face(img_temp, &face_count, detection_bbox);
    /* 1001: blur, 1002: multiple face, 1003: image too small, 1004: image too large, 1005: image empty
       1006: none face, 1007: save aligned-image failed, 1008: save feature failed, 1009: Face is dark
       1010: Face illuminaiton is unbalance, 1011: Image is side face, 1012: Face is noisy
     */
    if(face_count != 1){
        std::cout << "face detected num: " << face_count << std::endl;
        code_point[0] = (face_count == 0) ? 1006 : 1002;
        env->ReleaseLongArrayElements(code_ret, code_point, 0);
        return return_patch_image;
    }
    if(detection_bbox[2] - detection_bbox[0] < 112 || detection_bbox[3] - detection_bbox[1] < 112){
        code_point[0] = 1003;
        env->ReleaseLongArrayElements(code_ret, code_point, 0);
        return return_patch_image;
    }

    // Report the face box to the caller as (x, y, width, height).
    jint* face_region_point = env->GetIntArrayElements(face_region, &isCopy);
    face_region_point[0] = detection_bbox[0];
    face_region_point[1] = detection_bbox[1];
    face_region_point[2] = detection_bbox[2] - detection_bbox[0];
    face_region_point[3] = detection_bbox[3] - detection_bbox[1];
    env->ReleaseIntArrayElements(face_region, face_region_point, 0);

    const int face_features_length = 512;
    float *face_feature = (float *)malloc(face_count * face_features_length * sizeof(float));
    extract_face_feature(img_temp, face_count, face_feature, detection_bbox);

    // Fixed: copy the feature in one JNI call instead of pin + element loop.
    env->SetFloatArrayRegion(feature_save, 0, face_features_length, face_feature);
    free(face_feature);

    // Build the face patch: expand the box ~20% per side, clamped to the image.
    cv::Rect face_rect(detection_bbox[0], detection_bbox[1],
                       detection_bbox[2] - detection_bbox[0], detection_bbox[3] - detection_bbox[1]);
    std::cout << face_rect << std::endl;
    int start_x = face_rect.x - face_rect.width * 0.2;
    int start_y = face_rect.y - face_rect.height * 0.2;
    int roi_width = face_rect.width * 1.4;
    int roi_height = face_rect.height * 1.4;
    if(start_x <= 0) start_x = 1;
    if(start_y <= 0) start_y = 1;
    if(roi_width >= img_temp.cols - start_x - 1 ) roi_width = img_temp.cols - start_x - 1;
    if(roi_height >= img_temp.rows - start_y - 1 ) roi_height = img_temp.rows - start_y - 1;
    cv::Rect patch_roi(start_x, start_y, roi_width, roi_height);
    std::vector<uchar> data_encode;
    cv::imencode(".jpg", img_temp(patch_roi), data_encode);
    jbyteArray return_byte_array = env->NewByteArray(data_encode.size());
    // Fixed: write straight from the encode buffer -- no intermediate malloc/copy.
    env->SetByteArrayRegion(return_byte_array, 0, data_encode.size(),
                            reinterpret_cast<const jbyte *>(data_encode.data()));
    double finish = getMsecOfNow();
    std::cout << "registration thread id " << std::this_thread::get_id()
              << ", spend: " << ((double)(finish - start)) << std::endl;
    code_point[0] = 1000;
    env->ReleaseLongArrayElements(code_ret, code_point, 0);
    env->DeleteLocalRef(return_patch_image);
    return return_byte_array;
}
