#include <jni.h>
#include <string>
#include <opencv2/opencv.hpp>

#include <sys/stat.h>
#include <android/log.h>
#include <android/native_window_jni.h>

using namespace cv;

// Log helpers. Note: no trailing semicolon in the macro body — the old
// definition ended with ';', which expands to a double statement and breaks
// unbraced if/else uses. LOGE is defined first so LOG_TIME reads naturally
// (macro bodies expand at the point of use, so either order compiles).
#define LOGE(FORMAT,...) __android_log_print(ANDROID_LOG_ERROR,"hugang",FORMAT,##__VA_ARGS__)
#define LOG_TIME  LOGE


// Guards shared native state across JNI calls; initialized in init().
// NOTE(review): no call site currently locks it — confirm whether the
// camera callback and init/setSurface can race.
pthread_mutex_t mutex;
// Face tracker; owned here, created in init(), used in postData().
DetectionBasedTracker *tracker = NULL;
// Monotonic counter used to build unique image file names.
// NOTE(review): this name shadows POSIX index() from <strings.h>; consider renaming.
int index = 0;
// Native window the processed frames are rendered into (set by setSurface()).
ANativeWindow *window = NULL;

// Timing helpers based on std::chrono (C++ side).
#define millisecond 1000000
#define RUN_TIME(time)  (double)(time).count()/millisecond
auto TIME0 = std::chrono::high_resolution_clock::now();
auto TIME1 = std::chrono::high_resolution_clock::now();
// Timing helper provided by OpenCV.
cv::TickMeter tm;

// 适配器
// Adapter that plugs a cv::CascadeClassifier into the
// DetectionBasedTracker::IDetector interface, so the same cascade XML can
// serve both as the main (full-frame) detector and the tracking detector.
class CascadeDetectorAdapter : public DetectionBasedTracker::IDetector{

public:
    // Takes shared ownership of an already-constructed cascade classifier.
    // explicit: a classifier should never convert implicitly into a detector.
    explicit CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector): IDetector(), Detector(detector){

    }

    // Runs multi-scale detection on `image`; detected rectangles are written
    // into `object`. scaleFactor / minNeighbours / minObjSize / maxObjSize
    // are tunables inherited from IDetector.
    void detect(const cv::Mat &image, std::vector<cv::Rect> &object) override {
        Detector->detectMultiScale(image, object, scaleFactor, minNeighbours, 0, minObjSize, maxObjSize);
    }

private:
    CascadeDetectorAdapter();  // no default construction: a classifier is required
    cv::Ptr<cv::CascadeClassifier> Detector;
};

// Initializes (or re-initializes) the face tracker from a cascade model file.
// model_: absolute path of the cascade XML to load.
extern "C"
JNIEXPORT void JNICALL
Java_com_example_myopencv_MainActivity_init(JNIEnv *env, jobject thiz, jstring model_) {
    // Initialize the mutex only on the first call: calling pthread_mutex_init
    // again on an already-initialized (possibly locked) mutex is undefined
    // behavior. The old code re-initialized it on every init() call.
    static bool mutex_ready = false;
    if (!mutex_ready) {
        pthread_mutex_init(&mutex, 0);
        mutex_ready = true;
    }

    // Tear down any previous tracker so init() may be called repeatedly.
    if (tracker) {
        tracker->stop();
        delete tracker;
        tracker = 0;
    }

    const char *model = env->GetStringUTFChars(model_, 0);
    if (model == NULL) {
        // Out of memory pinning the Java string; an exception is already pending.
        return;
    }
    LOGE("============ model: %s ============", model);

    // Main detector: finds faces from scratch in the full frame.
    Ptr<CascadeClassifier> classifier = makePtr<CascadeClassifier>(model);
    Ptr<CascadeDetectorAdapter> mainDetector = makePtr<CascadeDetectorAdapter>(classifier);

    // Tracking detector: re-detects faces near previously known positions.
    // A separate classifier instance is created from the same model file —
    // presumably so the detection and tracking threads never share one
    // CascadeClassifier (TODO confirm against DetectionBasedTracker docs).
    Ptr<CascadeClassifier> classifier_1 = makePtr<CascadeClassifier>(model);
    Ptr<CascadeDetectorAdapter> trackingDetector = makePtr<CascadeDetectorAdapter>(classifier_1);

    DetectionBasedTracker::Parameters params;
    // DetectionBasedTracker(mainDetector, trackingDetector, params):
    // first argument is the detector, second the tracking detector.
    tracker = new DetectionBasedTracker(mainDetector, trackingDetector, params);

    // Starts the tracker's internal detection thread.
    tracker->run();
    env->ReleaseStringUTFChars(model_, model);
}

// Stores the ANativeWindow that postData() renders processed frames into.
// Passing a null surface (e.g. from surfaceDestroyed) just releases the
// current window.
extern "C"
JNIEXPORT void JNICALL
Java_com_example_myopencv_MainActivity_setSurface(JNIEnv *env, jobject thiz, jobject surface) {

    // Release any previously held window before acquiring a new one.
    if (window){
        ANativeWindow_release(window);
        window = NULL;
    }

    // Guard against a null Surface from Java — ANativeWindow_fromSurface
    // must not be called with a null jobject.
    if (surface != NULL) {
        window = ANativeWindow_fromSurface(env, surface);
    }
}


// Per-frame camera callback: converts the NV21 preview frame to RGBA,
// rotates/mirrors it to the display orientation, runs face tracking on a
// contrast-equalized grayscale copy, draws face rectangles, and blits the
// result into the ANativeWindow.
//
// data_:     NV21 preview bytes (size w * h * 3 / 2)
// w, h:      preview width / height in pixels
// camera_id: 1 for the front camera (rotate CCW + mirror), otherwise back
//            camera (rotate CW).
extern "C"
JNIEXPORT void JNICALL
Java_com_example_myopencv_MainActivity_postData(JNIEnv *env, jobject thiz, jbyteArray data_, jint w,
                                                jint h, jint camera_id) {

    jbyte *data = env->GetByteArrayElements(data_, NULL);
    if (data == NULL) {
        // Out of memory pinning the frame buffer; an exception is pending.
        return;
    }

    // Wrap the NV21 frame without copying. NV21 stores a full-resolution Y
    // plane (h rows) followed by interleaved VU data (h / 2 rows), hence the
    // total height of h + h / 2.
    Mat src(h + h / 2, w, CV_8UC1, data);

    TIME0 = std::chrono::high_resolution_clock::now();
    tm.start();

    // Convert NV21 -> RGBA. OpenCV's conversion may be slower than libYUV
    // (which uses NEON assembly) — a known optimization opportunity.
    cvtColor(src, src, COLOR_YUV2RGBA_NV21);
    tm.stop();
    LOGE("=================  cvtColor  opencv time: %lf", tm.getTimeMilli());
    TIME1= std::chrono::high_resolution_clock::now();
    LOG_TIME("================= cvtColor c++ time: %4.4fms =================" , RUN_TIME(TIME1 - TIME0));

    if (camera_id == 1) {
        // Front camera: rotate 90° counterclockwise, then mirror so the
        // preview matches what the user expects to see.

        tm.reset(); // TickMeter must be reset before each reuse
        tm.start();
        TIME0 = std::chrono::high_resolution_clock::now();
        rotate(src, src, ROTATE_90_COUNTERCLOCKWISE);
        tm.stop();
        LOGE("================= rotate  opencv time: %lf", tm.getTimeMilli());
        TIME1= std::chrono::high_resolution_clock::now();
        LOG_TIME("================= rotate c++ time: %4.4fms =================" , RUN_TIME(TIME1 - TIME0));

        TIME0 = std::chrono::high_resolution_clock::now();
        tm.reset();
        tm.start();

        // Mirror horizontally (flip around the y axis).
        flip(src, src, 1);
        tm.stop();
        LOGE("================= flip  opencv time: %lf", tm.getTimeMilli());
        TIME1 = std::chrono::high_resolution_clock::now();
        LOG_TIME("================= flip c++ time: %4.4fms =================" , RUN_TIME(TIME1 - TIME0));
    } else {
        // Back camera: rotate 90° clockwise.
        rotate(src, src, ROTATE_90_CLOCKWISE);
    }

    // Grayscale copy for the detector (cascades operate on single-channel).
    Mat gray_img;

    TIME0 = std::chrono::high_resolution_clock::now();
    tm.reset();
    tm.start();
    cvtColor(src, gray_img, COLOR_RGBA2GRAY);
    tm.stop();
    LOGE("================= cvtColor  opencv time: %lf", tm.getTimeMilli());
    TIME1= std::chrono::high_resolution_clock::now();
    LOG_TIME("=================  cvtColor : %4.4fms =================" , RUN_TIME(TIME1 - TIME0));

    TIME0 = std::chrono::high_resolution_clock::now();
    tm.reset();
    tm.start();
    // Boost contrast via histogram equalization — improves cascade detection.
    equalizeHist(gray_img, gray_img);
    tm.stop();
    LOGE("=================  equalizeHist  opencv time: %lf", tm.getTimeMilli());
    TIME1 = std::chrono::high_resolution_clock::now();
    LOG_TIME("=================  equalizeHist c++ time: %4.4fms =================" , RUN_TIME(TIME1 - TIME0));

    char picture_name[100];

    // Directory for dumping the processed grayscale frames.
    mkdir("/data/data/com.example.myopencv/cache/test/", 0777);

    // snprintf instead of sprintf: never overflow picture_name.
    snprintf(picture_name, sizeof(picture_name),
             "/data/data/com.example.myopencv/cache/test/%d.png", index++);
    imwrite(picture_name, gray_img);

    // Feed the grayscale frame to the tracker; guard against postData being
    // called before init() created the tracker.
    std::vector<Rect> faces;
    if (tracker) {
        tracker->process(gray_img);
        tracker->getObjects(faces);
    }
    // %zu: faces.size() is size_t — the old %d was a format mismatch.
    LOGE("========= faces size : %zu =========", faces.size());
    // Directory for cropped face images (currently unused, see loop below).
    mkdir("/data/data/com.example.myopencv/cache/face_img_24x24/", 0777);

    TIME0 = std::chrono::high_resolution_clock::now();
    tm.reset();
    tm.start();
    for (const Rect &face : faces) {
        // Keep the crop-and-save code commented out, otherwise the preview
        // stalls on a single frame:
//        snprintf(picture_name, sizeof(picture_name), "/data/data/com.example.myopencv/cache/face_img_24x24/%d.png", index++);
//        Mat face_rect;
//        face_rect = gray_img(face).clone();
//        resize(face_rect, face_rect, Size(24, 24));
//        imwrite(picture_name, face_rect);

        // Draw the face rectangle on the RGBA preview frame.
        rectangle(src, face, Scalar(0, 0, 255));
    }
    tm.stop();
    LOGE("=================  rectangle  opencv time: %lf", tm.getTimeMilli());
    TIME1 = std::chrono::high_resolution_clock::now();
    LOG_TIME("=================  rectangle c++ time: %4.4fms =================" , RUN_TIME(TIME1 - TIME0));

    // Render the annotated frame into the native window.
    if (window){
        ANativeWindow_setBuffersGeometry(window, src.cols, src.rows, WINDOW_FORMAT_RGBA_8888);
        ANativeWindow_Buffer buffer;

        if (ANativeWindow_lock(window, &buffer, 0)){
            // Lock failed (surface likely destroyed): drop the window and skip
            // rendering this frame. BUGFIX: the old code fell through here and
            // wrote into the uninitialized `buffer`, then called
            // ANativeWindow_unlockAndPost on a NULL window.
            ANativeWindow_release(window);
            window = NULL;
        } else {
            // Copy row by row: the window buffer's stride (in pixels) may be
            // wider than the frame, so a single memcpy of the whole image
            // would shear the picture.
            int srcLineSize = src.cols * 4;       // RGBA = 4 bytes/pixel
            int dstLineSize = buffer.stride * 4;
            uint8_t *dstData = static_cast<uint8_t *> (buffer.bits);
            uint8_t *srcData = src.data;

            int count = 0;
            TIME0 = std::chrono::high_resolution_clock::now();
            tm.reset();
            tm.start();

            for (int i = 0; i < buffer.height; ++i) {
                count += srcLineSize;
                memcpy(dstData + dstLineSize * i, srcData + srcLineSize * i, srcLineSize);
            }
            tm.stop();
            LOGE("=================   memcpy  opencv time: %lf, count = %d", tm.getTimeMilli(), count);
            TIME1 = std::chrono::high_resolution_clock::now();
            LOG_TIME("=================  memcpy c++ time: %4.4fms =================" , RUN_TIME(TIME1 - TIME0));

            ANativeWindow_unlockAndPost(window);
        }
    }

    src.release();
    gray_img.release();
    env->ReleaseByteArrayElements(data_, data, 0);
}

