
#include "face_sdk.hpp"
#include <iostream>
#include <vector>
#include <cmath>
#include <utility>
#include <algorithm>
#include <numeric>

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wincompatible-property-type"
#pragma GCC diagnostic ignored "-Wincomplete-umbrella"
#pragma GCC diagnostic ignored "-Wdocumentation"
#pragma GCC diagnostic ignored "-Wnonnull"
#pragma GCC diagnostic ignored "-Wmissing-prototypes"

#include "eigen/Eigen/Dense"
#include "eigen/Eigen/Geometry"

#pragma GCC diagnostic pop

using namespace std;
using namespace Eigen;

namespace FaceSDK {
    // Tunable detection thresholds (mutable at namespace scope so they can be adjusted).
    float rotvecLeft = 0.1; // face points LEFT when rotvec[1] is greater than this threshold
    float rotvecRight = -0.1; // face points RIGHT when rotvec[1] is less than this threshold

    float rotvecDown = 0.1; // face points DOWN when rotvec[0] is greater than this threshold
    float rotvecUp = -0.2; // face points UP when rotvec[0] is less than this threshold

    float eyeStatu = 5; // eye score above this => eye reported open; below => closed
    float mouseStatu = 100; // mouse_score above this => mouth reported open; below => closed

    float lightStatu = 80; // img_light above this => brightness OK; below => too dark

    float faceWidthLowStatu = 0.5; // face_width below this => face too small, too far from camera
    float faceWidthHightStatu = 0.8; // face_width above this => face too large, too close to camera

    // Converts the 478 normalized MediaPipe landmarks into a 478x3 matrix of
    // pixel-space coordinates: x scaled by image width, y flipped and scaled by
    // image height, z flipped and scaled by image width.
    MatrixXd process_landmark(double face_landmarks[478][3], int width, int height)
    {
        MatrixXd coords(478, 3);
        for (int row = 0; row < 478; row++)
        {
            coords(row, 0) = face_landmarks[row][0] * width;
            coords(row, 1) = -face_landmarks[row][1] * height;
            coords(row, 2) = -face_landmarks[row][2] * width;
        }
        return coords;
    }

    /// 脸部朝向检测 — estimates head orientation from the 478 face landmarks.
    /// @param face_landmarks normalized landmarks, [i] = {x, y, z}
    /// @param width  image width in pixels (scales x/z)
    /// @param height image height in pixels (scales y)
    /// @return HeadDirect with ver_direct (0 centered, 1 down, 2 up),
    ///         hor_direct (0 centered, 1 left, 2 right) and the raw
    ///         rotation-vector components in facePitch/faceYaw/faceRoll.
    HeadDirect get_face_direction(double face_landmarks[478][3], int width, int height) {

        // All landmarks in pixel coordinates.
        MatrixXd landmark_3d = process_landmark(face_landmarks, width, height);

        // Face "x" axis: sum of several left-minus-right landmark pairs.
        // NOTE(review): the 280/50 pair is accumulated twice — confirm the
        // double weighting is intentional.
        Eigen::Vector3d x_axis = landmark_3d.row(280) - landmark_3d.row(50);
        x_axis += landmark_3d.row(352) - landmark_3d.row(123);
        x_axis += landmark_3d.row(280) - landmark_3d.row(50);
        x_axis += landmark_3d.row(376) - landmark_3d.row(147);
        x_axis += landmark_3d.row(416) - landmark_3d.row(192);
        x_axis += landmark_3d.row(298) - landmark_3d.row(68);
        x_axis += landmark_3d.row(301) - landmark_3d.row(71);

        // Face "y" axis: sum of several top-minus-bottom landmark pairs.
        Eigen::Vector3d y_axis = landmark_3d.row(10) - landmark_3d.row(152);
        y_axis += landmark_3d.row(151) - landmark_3d.row(152);
        y_axis += landmark_3d.row(8) - landmark_3d.row(17);
        y_axis += landmark_3d.row(5) - landmark_3d.row(200);
        y_axis += landmark_3d.row(6) - landmark_3d.row(199);
        y_axis += landmark_3d.row(8) - landmark_3d.row(18);
        y_axis += landmark_3d.row(9) - landmark_3d.row(175);

        // Build an orthonormal frame from the two axes.
        x_axis.normalize();
        y_axis.normalize();

        Eigen::Vector3d z_axis = x_axis.cross(y_axis);
        z_axis.normalize();

        // Re-orthogonalize y against x/z, then pack the axes as matrix columns.
        y_axis = z_axis.cross(x_axis);

        Eigen::Matrix3d rotation_matrix;
        rotation_matrix << x_axis, y_axis, z_axis;

        // Compensate a fixed camera pitch of -0.25 rad about X.
        // NOTE(review): process_face_landmark PRE-multiplies this rotation,
        // here it is POST-multiplied — confirm which order is intended.
        Eigen::AngleAxisd rotation(-0.25, Eigen::Vector3d::UnitX());
        rotation_matrix = rotation_matrix * rotation;

        // Rotation vector = axis * angle. (The original also computed XYZ
        // Euler angles into an unused local; that dead work was removed.)
        Eigen::AngleAxisd rotAA(rotation_matrix);
        Eigen::Vector3d rotvec = rotAA.axis() * rotAA.angle();

        HeadDirect head_direct = HeadDirect();

        // rotvec[0] ~ pitch: above rotvecDown => facing down, below rotvecUp => facing up.
        if (rotvec[0] > rotvecDown) {
            cout << "偏下" << endl;
            head_direct.ver_direct = 1;
        }
        else if (rotvec[0] < rotvecUp) {
            cout << "偏上" << endl;
            head_direct.ver_direct = 2;
        }

        // rotvec[1] ~ yaw: above rotvecLeft => facing left, below rotvecRight => facing right.
        if (rotvec[1] > rotvecLeft) {
            cout << "偏左" << endl;
            head_direct.hor_direct = 1;
        }
        else if (rotvec[1] < rotvecRight) {
            cout << "偏右" << endl;
            head_direct.hor_direct = 2;
        }

        head_direct.facePitch = rotvec[0];
        head_direct.faceYaw = rotvec[1];
        head_direct.faceRoll = rotvec[2];

        return head_direct;
    }


    /// 闭眼检测 左眼 — left-eye open/closed check.
    /// Score = horizontal eye width (corners 263-362) divided by the vertical
    /// lid gap (386-374), measured in 2D landmark space.
    /// @return 1 when the score exceeds eyeStatu, otherwise 0.
    /// NOTE(review): a closing eye shrinks the denominator and RAISES the
    /// score, so "score > eyeStatu" fires for a narrow lid gap; confirm the
    /// "睁开" (open) labeling matches the intended calibration.
    int get_l_eye_status(double face_landmarks[478][3]) {
        // Squared horizontal (corner-to-corner) and vertical (lid-to-lid)
        // distances; sqrt of the ratio replaces the original pow(x, 0.5) /
        // pow(d, 2) calls (same values, clearer and cheaper).
        double dx_w = face_landmarks[263][0] - face_landmarks[362][0];
        double dy_w = face_landmarks[263][1] - face_landmarks[362][1];
        double dx_h = face_landmarks[386][0] - face_landmarks[374][0];
        double dy_h = face_landmarks[386][1] - face_landmarks[374][1];

        double lh_score = sqrt((dx_w * dx_w + dy_w * dy_w) / (dx_h * dx_h + dy_h * dy_h));

        int eye_status = 0;

        if (lh_score > eyeStatu) {
            cout << "左眼睛睁开" << endl;
            eye_status = 1;
        }

        return eye_status;
    }

    /// 闭眼检测 右眼 — right-eye open/closed check.
    /// Score = horizontal eye width (corners 133-33) divided by the vertical
    /// lid gap (159-145), measured in 2D landmark space.
    /// @return 1 when the score exceeds eyeStatu, otherwise 0.
    /// NOTE(review): as with the left eye, a closing eye RAISES the score;
    /// confirm the "睁开" (open) labeling matches the intended calibration.
    int get_r_eye_status(double face_landmarks[478][3]) {
        // Squared horizontal and vertical distances; sqrt of the ratio
        // replaces the original pow(x, 0.5) / pow(d, 2) calls.
        double dx_w = face_landmarks[133][0] - face_landmarks[33][0];
        double dy_w = face_landmarks[133][1] - face_landmarks[33][1];
        double dx_h = face_landmarks[159][0] - face_landmarks[145][0];
        double dy_h = face_landmarks[159][1] - face_landmarks[145][1];

        double rh_score = sqrt((dx_w * dx_w + dy_w * dy_w) / (dx_h * dx_h + dy_h * dy_h));

        int eye_status = 0;

        if (rh_score > eyeStatu) {
            cout << "右眼睛睁开" << endl;
            eye_status = 1;
        }

        return eye_status;
    }

    /// 张嘴检测 — mouth open/closed check.
    /// Score = horizontal mouth width (corners 78-308) divided by the inner-lip
    /// gap (13-14), measured in 2D landmark space.
    /// @return 1 when the score exceeds mouseStatu, otherwise 0.
    /// NOTE(review): closing the lips shrinks the denominator and RAISES the
    /// score, so "score > mouseStatu" fires when the inner lips are nearly
    /// touching; confirm the "张开" (open) labeling matches the calibration.
    int get_mouse_status(double face_landmarks[478][3]) {
        // Squared horizontal and vertical distances; sqrt of the ratio
        // replaces the original pow(x, 0.5) / pow(d, 2) calls.
        double dx_w = face_landmarks[78][0] - face_landmarks[308][0];
        double dy_w = face_landmarks[78][1] - face_landmarks[308][1];
        double dx_h = face_landmarks[13][0] - face_landmarks[14][0];
        double dy_h = face_landmarks[13][1] - face_landmarks[14][1];

        double mouse_score = sqrt((dx_w * dx_w + dy_w * dy_w) / (dx_h * dx_h + dy_h * dy_h));

        int mouse_status = 0;

        if (mouse_score > mouseStatu) {
            cout << "嘴巴张开" << endl;
            mouse_status = 1;
        }

        return mouse_status;
    }

    // Larger of two ints (local overload so mixed int/double call sites
    // convert implicitly, which std::max would reject).
    int max(int a, int b) {
        if (a > b) {
            return a;
        }
        return b;
    }

    // Smaller of two ints (local overload, see max above for rationale).
    int min(int a, int b) {
        if (a < b) {
            return a;
        }
        return b;
    }

    // Computes the head orientation as XYZ Euler angles from the 478 face
    // landmarks: builds an orthonormal face frame from summed landmark-pair
    // directions, compensates a fixed camera pitch (-0.25 rad about X), and
    // converts the resulting rotation matrix to Euler angles.
    Eigen::Vector3d process_face_landmark(double face_landmarks[478][3], int width, int height) {

        // Landmarks converted to pixel-space coordinates (478 x 3).
        MatrixXd pts = process_landmark(face_landmarks, width, height);

        // Face "right" direction: sum of several left-minus-right pairs.
        Eigen::Vector3d right_dir = pts.row(280) - pts.row(50);
        right_dir += pts.row(352) - pts.row(123);
        right_dir += pts.row(280) - pts.row(50);
        right_dir += pts.row(376) - pts.row(147);
        right_dir += pts.row(416) - pts.row(192);
        right_dir += pts.row(298) - pts.row(68);
        right_dir += pts.row(301) - pts.row(71);

        // Face "up" direction: sum of several top-minus-bottom pairs.
        Eigen::Vector3d up_dir = pts.row(10) - pts.row(152);
        up_dir += pts.row(151) - pts.row(152);
        up_dir += pts.row(8) - pts.row(17);
        up_dir += pts.row(5) - pts.row(200);
        up_dir += pts.row(6) - pts.row(199);
        up_dir += pts.row(8) - pts.row(18);
        up_dir += pts.row(9) - pts.row(175);

        // Build an orthonormal frame from the two directions.
        right_dir.normalize();
        up_dir.normalize();
        Eigen::Vector3d forward_dir = right_dir.cross(up_dir);
        forward_dir.normalize();
        up_dir = forward_dir.cross(right_dir); // re-orthogonalize

        Eigen::Matrix3d frame;
        frame << right_dir, up_dir, forward_dir; // columns are the face axes

        // Apply the fixed camera-pitch correction, then convert to Euler angles.
        Eigen::AngleAxisd pitch_fix(-0.25, Eigen::Vector3d::UnitX());
        frame = pitch_fix * frame;

        return frame.eulerAngles(0, 1, 2);
    }

    // Returns the face bounding boxes as a bracketed, comma-separated string:
    // "[x1,y1,x2,y2,x1_old,y1_old,x2_old,y2_old]" where (x1_old..y2_old) is
    // the tight box around the face-outline landmarks and (x1..y2) is the box
    // after extending the chin(152)->forehead(10) vector by 1.5x so the whole
    // head fits. All values are in normalized landmark coordinates.
    string get_head_position(double face_landmarks[478][3]) {
        // MediaPipe face-oval (outline) landmark indices.
        static const int outline[] = {10, 338, 297, 332, 284, 251, 389, 356, 454, 323,
                                      361, 288, 397, 365, 379, 378, 400, 377, 152, 148,
                                      176, 140, 150, 136, 172, 58, 132, 93, 234, 127, 162,
                                      21, 54, 103, 67, 109};

        std::vector<double> xs;
        std::vector<double> ys;
        xs.reserve(37);
        ys.reserve(37);
        for (int idx : outline) {
            xs.push_back(face_landmarks[idx][0]);
            ys.push_back(face_landmarks[idx][1]);
        }

        // Tight bounds around the outline.
        double x1_old = *std::min_element(xs.begin(), xs.end());
        double x2_old = *std::max_element(xs.begin(), xs.end());
        double y1_old = *std::min_element(ys.begin(), ys.end());
        double y2_old = *std::max_element(ys.begin(), ys.end());

        // Extend chin->forehead by 1.5x and recompute the bounds.
        xs.push_back(face_landmarks[152][0] + (face_landmarks[10][0] - face_landmarks[152][0]) * 1.5);
        ys.push_back(face_landmarks[152][1] + (face_landmarks[10][1] - face_landmarks[152][1]) * 1.5);

        double x1 = *std::min_element(xs.begin(), xs.end());
        double x2 = *std::max_element(xs.begin(), xs.end());
        double y1 = *std::min_element(ys.begin(), ys.end());
        double y2 = *std::max_element(ys.begin(), ys.end());

        string out("[");
        out += to_string(x1); out += ',';
        out += to_string(y1); out += ',';
        out += to_string(x2); out += ',';
        out += to_string(y2); out += ',';
        out += to_string(x1_old); out += ',';
        out += to_string(y1_old); out += ',';
        out += to_string(x2_old); out += ',';
        out += to_string(y2_old); out += ']';
        return out;
    }

    /// 头部位置检测 — returns the face/head bounds plus the face-width ratio
    /// as a flat vector: {x1, y1, x2, y2, x1_old, y1_old, x2_old, y2_old,
    /// face_width_rate}. (x1_old..y2_old) tightly bound the face outline;
    /// (x1..y2) are the bounds after extending chin(152)->forehead(10) by
    /// 1.5x so the whole head fits. All returned values stay in normalized
    /// landmark coordinates. Also prints a warning when the face is too
    /// far from / too close to the camera.
    ///
    /// Cleanups vs the original: the extension point was pushed twice and a
    /// HeadPoint was filled with values that were never read (and computed
    /// from the already-extended list); both removed — the returned vector
    /// and the console output are unchanged.
    std::vector<double> get_head_position2(double face_landmarks[478][3], int width, int height) {
        // width/height were only consumed by the removed dead stores.
        (void)width;
        (void)height;

        std::vector<int> face_outline_points = {10, 338, 297, 332, 284, 251, 389, 356, 454, 323,
                                                361, 288, 397, 365, 379, 378, 400, 377, 152, 148,
                                                176, 140, 150, 136, 172, 58, 132, 93, 234, 127, 162,
                                                21, 54, 103, 67, 109};

        std::vector<double> x_list(face_outline_points.size());
        std::vector<double> y_list(face_outline_points.size());

        std::transform(face_outline_points.begin(), face_outline_points.end(), x_list.begin(),
                       [&](int item) { return face_landmarks[item][0]; });
        std::transform(face_outline_points.begin(), face_outline_points.end(), y_list.begin(),
                       [&](int item) { return face_landmarks[item][1]; });

        // Tight face-outline bounds.
        double x1_old = *std::min_element(x_list.begin(), x_list.end());
        double x2_old = *std::max_element(x_list.begin(), x_list.end());

        double y1_old = *std::min_element(y_list.begin(), y_list.end());
        double y2_old = *std::max_element(y_list.begin(), y_list.end());

        // Extend chin->forehead by 1.5x so the box covers the whole head.
        x_list.push_back(
                face_landmarks[152][0] + (face_landmarks[10][0] - face_landmarks[152][0]) * 1.5);
        y_list.push_back(
                face_landmarks[152][1] + (face_landmarks[10][1] - face_landmarks[152][1]) * 1.5);

        double x1 = *std::min_element(x_list.begin(), x_list.end());
        double x2 = *std::max_element(x_list.begin(), x_list.end());

        double y1 = *std::min_element(y_list.begin(), y_list.end());
        double y2 = *std::max_element(y_list.begin(), y_list.end());

        // 求脸型宽度占比 — 2D distance between cheek landmarks 234/454.
        double face_width_rate = sqrt(pow((face_landmarks[234][0] - face_landmarks[454][0]), 2) +
                                      pow((face_landmarks[234][1] - face_landmarks[454][1]), 2));

        if (face_width_rate < faceWidthLowStatu) {
            cout << "距离太远" << endl;
        }
        else if (face_width_rate > faceWidthHightStatu) {
            cout << "距离太近" << endl;
        }

        return {x1, y1, x2, y2, x1_old, y1_old, x2_old, y2_old, face_width_rate};
    }

    ///  头部位置检测 — head position / framing / brightness checks.
    ///  Two boxes are produced:
    ///  (x1_old, y1_old)-(x2_old, y2_old): tight pixel bounds of the face outline;
    ///  (x1, y1)-(x2, y2): expanded pixel bounds covering the whole head.
    /// - Parameters:
    ///   - image: frame used for the brightness estimate
    ///   - face_landmarks: normalized 478x3 landmarks
    ///   - width: image width in pixels
    ///   - height: image height in pixels
    /// Sets light_status (1 = too dark), face_distance (1 = too far, 2 = too
    /// near), x_position (1 = left, 2 = right), y_position (1 = high, 2 = low).
    HeadPoint get_head_info(cv::Mat image, double face_landmarks[478][3], int width, int height) {

        std::vector<int> face_outline_points = { 10, 338, 297, 332, 284, 251, 389, 356, 454, 323, 361, 288, 397, 365, 379, 378, 400, 377, 152, 148, 176, 140, 150, 136, 172, 58, 132, 93, 234, 127, 162, 21, 54, 103, 67, 109 };

        std::vector<double> x_list(face_outline_points.size());
        std::vector<double> y_list(face_outline_points.size());

        std::transform(face_outline_points.begin(), face_outline_points.end(), x_list.begin(), [&](int item) { return face_landmarks[item][0]; });
        std::transform(face_outline_points.begin(), face_outline_points.end(), y_list.begin(), [&](int item) { return face_landmarks[item][1]; });

        HeadPoint p;

        // Tight face box, scaled to pixels.
        p.x1_old = *std::min_element(x_list.begin(), x_list.end()) * width;
        p.x2_old = *std::max_element(x_list.begin(), x_list.end()) * width;

        p.y1_old = *std::min_element(y_list.begin(), y_list.end()) * height;
        p.y2_old = *std::max_element(y_list.begin(), y_list.end()) * height;

        // Extend chin(152)->forehead(10) by 1.5x so the box covers the whole head.
        x_list.push_back(face_landmarks[152][0] + (face_landmarks[10][0] - face_landmarks[152][0]) * 1.5);
        y_list.push_back(face_landmarks[152][1] + (face_landmarks[10][1] - face_landmarks[152][1]) * 1.5);

        p.x1 = *std::min_element(x_list.begin(), x_list.end()) * width;
        p.x2 = *std::max_element(x_list.begin(), x_list.end()) * width;

        p.y1 = *std::min_element(y_list.begin(), y_list.end()) * height;
        p.y2 = *std::max_element(y_list.begin(), y_list.end()) * height;

        // Brightness: ROI clamped to the image, then average of the three
        // channel means.
        cv::Mat roi = image(cv::Range(max(0, min(height, p.y1_old)), max(0, min(height, p.y2_old))), cv::Range(max(0, min(width, p.x1_old)), max(0, min(width, p.x2_old))));

        cv::Scalar scalar = cv::mean(roi);
        float imgChannel1 = scalar.val[0];
        float imgChannel2 = scalar.val[1];
        float imgChannel3 = scalar.val[2];
        p.img_light = (imgChannel1 + imgChannel2 + imgChannel3) / 3;

        if (p.img_light < lightStatu) {
            cout << "亮度太低" << endl;
            p.light_status = 1;
        }

        // 求脸型宽度占比 — face width as the normalized 2D distance between
        // cheek landmarks 234/454.
        p.face_width_rate = sqrt(pow((face_landmarks[234][0] - face_landmarks[454][0]), 2) + pow((face_landmarks[234][1] - face_landmarks[454][1]), 2));

        if (p.face_width_rate < faceWidthLowStatu) {
            cout << "距离太远" << endl;
            p.face_distance = 1;
        }
        else if (p.face_width_rate > faceWidthHightStatu) {
            cout << "距离太近" << endl;
            p.face_distance = 2;
        }

        // Horizontal framing: twice the box center vs 0.8 * width.
        if ((p.x1_old + p.x2_old) < (width * 0.8)) {
            cout << "脸位置偏左" << endl;
            p.x_position = 1;
        }
        else if ((p.x1_old + p.x2_old) > (width * 0.8)) {
            cout << "脸位置偏右" << endl;
            p.x_position = 2;
        }

        // Vertical framing: twice the box center vs 0.9 * height.
        // BUG FIX: the original compared (p.x1_old + p.x2_old) in this
        // else-if, so "face too low" could never trigger correctly.
        if ((p.y1_old + p.y2_old) < (height * 0.9)) {
            cout << "脸位置偏上" << endl;
            p.y_position = 1;
        }
        else if ((p.y1_old + p.y2_old) > (height * 0.9)) {
            cout << "脸位置偏下" << endl;
            p.y_position = 2;
        }

        return p;
    }

    /// Runs the full per-frame analysis (eyes, mouth, head framing/brightness,
    /// head orientation) on a JNI landmark array and returns the results as a
    /// flat float vector:
    /// {l_eye, r_eye, pitch, yaw, roll, hor_direct, ver_direct, x_position,
    ///  y_position, face_distance, light_status, face_width_rate, cols, rows}.
    std::vector<float> detect(JNIEnv *env, cv::Mat image, jdoubleArray points,
                              int width,
                              int height
    )
    {
        cout << fixed;
        cout.precision(16);

        // Copy the 478x3 landmark doubles out of the JNI array.
        double face_landmarks[478][3] = {0};
        jdouble *body = env->GetDoubleArrayElements(points, 0);
        for (int i = 0; i < 478; i++){
            for (int j = 0; j < 3; j++){
                face_landmarks[i][j] = body[i*3 + j];
            }
        }
        // BUG FIX: the original never released the JNI buffer, leaking it on
        // every call. JNI_ABORT: we only read the data, nothing to copy back.
        env->ReleaseDoubleArrayElements(points, body, JNI_ABORT);

        // The original computed length = sizeof(*face_landmarks) /
        // sizeof(face_landmarks[0]); both operands are sizeof(double[3]), so
        // length was always 1 and the non-standard VLA loop ran exactly once
        // over the whole landmark set — modeled here directly.
        DetectModel model = {
                .l_eye_status = get_l_eye_status(face_landmarks),
                .r_eye_status = get_r_eye_status(face_landmarks),
                .mouse_status = get_mouse_status(face_landmarks),
                .head_position = get_head_info(image, face_landmarks, width, height),
                .face_direction = get_face_direction(face_landmarks, width, height)};

        cout << "index:\t" << 0 << endl;
        cout << "image size:\t(" << image.cols << "," << image.rows << ")" << endl;
        cout << "l_eye_status:\t" << model.l_eye_status << endl;
        cout << "r_eye_status:\t" << model.r_eye_status << endl;
        cout << "mouse_status:\t" << model.mouse_status << endl;
        cout << "head_position:\t" << model.head_position.x1 << endl;
        cout << "head_position:\t" << model.head_position.y1 << endl;
        cout << "face_direction:\t" << model.face_direction.hor_direct << endl;
        cout << "face_direction:\t" << model.face_direction.ver_direct << endl;
        cout << "end ......" << endl;

        std::vector<float> resultValues = {static_cast<float>(model.l_eye_status), static_cast<float>(model.r_eye_status),
                                           static_cast<float>(model.face_direction.facePitch),static_cast<float>(model.face_direction.faceYaw),static_cast<float>(model.face_direction.faceRoll),
                                           static_cast<float>(model.face_direction.hor_direct), static_cast<float>(model.face_direction.ver_direct),
                                           static_cast<float>(model.head_position.x_position), static_cast<float>(model.head_position.y_position),
                                           static_cast<float>(model.head_position.face_distance), static_cast<float>(model.head_position.light_status),
                                           static_cast<float>(model.head_position.face_width_rate),
                                           static_cast<float>(image.cols),static_cast<float>(image.rows)};
        return resultValues;
    }

    /// Copies the JNI landmark array and runs the head-position check with a
    /// hard-coded 480x640 frame size (see get_head_position2 for the layout
    /// of the returned vector).
    std::vector<double> detectFinish(JNIEnv *env, jdoubleArray points) {
        double face_landmarks[478][3] = {0};
        jdouble *body = env->GetDoubleArrayElements(points, 0);
        for (int i = 0; i < 478; i++){
            for (int j = 0; j < 3; j++){
                face_landmarks[i][j] = body[i*3 + j];
            }
        }
        // BUG FIX: release the JNI buffer (the original leaked it each call).
        env->ReleaseDoubleArrayElements(points, body, JNI_ABORT);
        return get_head_position2(face_landmarks, 480, 640);
    }

    /// Runs the eye/mouth/head-position checks on a JNI landmark array and
    /// returns a human-readable (Chinese) summary string.
    string detect2(JNIEnv *env, jdoubleArray points)
    {
        string result = "人脸检测结果:";
        double face_landmarks[478][3] = {0};
        jdouble *body = env->GetDoubleArrayElements(points, 0);
        for (int i = 0; i < 478; i++){
            for (int j = 0; j < 3; j++){
                face_landmarks[i][j] = body[i*3 + j];
            }
        }
        // BUG FIX: release the JNI buffer (the original leaked it each call).
        env->ReleaseDoubleArrayElements(points, body, JNI_ABORT);

        string left_eye = to_string(get_l_eye_status(face_landmarks));
        string right_eye = to_string(get_r_eye_status(face_landmarks));
        string mouse_status = to_string(get_mouse_status(face_landmarks));
        string head_position = get_head_position(face_landmarks);
        // The original also called process_face_landmark(face_landmarks, 640,
        // 480) into an unused local; the call is side-effect free, so the
        // dead work was removed.
        result = result.append("\n")
                .append("左眼：").append(left_eye)
                .append("\n")
                .append("右眼：").append(right_eye)
                .append("\n")
                .append("嘴部：").append(mouse_status)
                .append("\n")
                .append("头部：").append(head_position);
        return result;
    }

    /// Runs the detection pipeline on a JNI landmark array and returns
    /// {l_eye, r_eye, pitch, yaw, roll, x1, y1, x2, y2, x1_old, y1_old,
    ///  x2_old, y2_old, face_width_rate} as floats.
    std::vector<float> detect3(JNIEnv *env, jdoubleArray points, int width, int height)
    {
        double face_landmarks[478][3] = {0};
        jdouble *body = env->GetDoubleArrayElements(points, 0);
        for (int i = 0; i < 478; i++){
            for (int j = 0; j < 3; j++){
                face_landmarks[i][j] = body[i*3 + j];
            }
        }
        // BUG FIX: release the JNI buffer (the original leaked it each call).
        env->ReleaseDoubleArrayElements(points, body, JNI_ABORT);

        int left_eye = get_l_eye_status(face_landmarks);
        int right_eye = get_r_eye_status(face_landmarks);
        // Result not used in the output vector; the call is kept for its
        // console logging, matching the original behavior.
        get_mouse_status(face_landmarks);
        HeadDirect face_director = get_face_direction(face_landmarks, width, height);
        // The original also computed get_head_position() and
        // process_face_landmark() into unused locals; both calls are
        // side-effect free, so the dead work was removed.
        std::vector<double> head = get_head_position2(face_landmarks, width, height);

        std::vector<float> resultValues = {static_cast<float>(left_eye), static_cast<float>(right_eye),
                                           static_cast<float>(face_director.facePitch),static_cast<float>(face_director.faceYaw),static_cast<float>(face_director.faceRoll),
                                            static_cast<float>(head[0]), static_cast<float>(head[1]), static_cast<float>(head[2]), static_cast<float>(head[3]),
                                           static_cast<float>(head[4]), static_cast<float>(head[5]), static_cast<float>(head[6]), static_cast<float>(head[7]),
                                           static_cast<float>(head[8])};
        return resultValues;
    }

    /// Converts an Android Bitmap (RGBA_8888) into an owning BGR cv::Mat.
    /// Returns an empty Mat on failure — the original left the error branches
    /// empty and fell through to read an uninitialized pixel pointer.
    cv::Mat convertBitmp2Mat(JNIEnv *env, jobject bitmap) {
        AndroidBitmapInfo infocolor;
        void *pixelscolor;

        if (AndroidBitmap_getInfo(env, bitmap, &infocolor) < 0) {
            return cv::Mat();
        }
        if (infocolor.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
            return cv::Mat();
        }
        if (AndroidBitmap_lockPixels(env, bitmap, &pixelscolor) < 0) {
            return cv::Mat();
        }

        // Wrap the locked pixels without copying. Passing the bitmap's row
        // stride handles padded rows (the original assumed stride == width*4).
        cv::Mat rgbaView(infocolor.height, infocolor.width, CV_8UC4, pixelscolor, infocolor.stride);

        // cvtColor allocates its own buffer, so bgr stays valid after unlock.
        cv::Mat bgr;
        cvtColor(rgbaView, bgr, CV_RGBA2BGR);

        AndroidBitmap_unlockPixels(env, bitmap);
        return bgr;
    }

    /// Copies a cv::Mat's pixel buffer into a new Java byte[].
    /// NOTE(review): this assumes the Mat is continuous (no row padding); a
    /// ROI or strided Mat would need row-wise copying — confirm callers only
    /// pass full frames.
    jbyteArray convertMat2byteArray(JNIEnv *env, cv::Mat mat){
        int len = mat.cols * mat.rows * mat.channels();
        jbyteArray output = env->NewByteArray(len);
        jbyte *result = reinterpret_cast<jbyte *>(mat.data);
        env->SetByteArrayRegion(output, 0, len, result);
        // BUG FIX: the original ended with "~mat;", which computed (and
        // immediately discarded) a bitwise NOT of the entire image. The Mat
        // releases its buffer automatically via its destructor.
        return output;
    }

}
