#include <jni.h>
#include <string>
#include "Interpreter.h"
#include "Result.h"
#include <vector>
#include <iostream>
#include <memory>
#include <fstream>
#include <algorithm>
#include <numeric>
#include <functional>

// Normalize uint8 input to float32 using Caffe-style MobileNet constants:
// out = (in - mean[c]) * (1/stddev[c]).
// `input` is expected in PLANAR (CHW) layout — all of channel 0, then channel 1,
// then channel 2 — exactly as produced by color2RGB() below.
// `output` must have room for width*height*channels floats.
// NOTE(review): mean/scale arrays have 3 entries; assumes channels <= 3,
// same as the original — confirm with callers.
void normalize(std::vector<uint8_t>& input, int width, int height, int channels, float* output) {
    // Per-channel mean and reciprocal standard deviation (the value is
    // MULTIPLIED, so 0.017 ~= 1/58.8 is already 1/stddev).
    std::vector<double> mean = {103.94f, 116.78f, 123.68f};   // per-channel mean
    std::vector<double> scale = {0.017f, 0.017f, 0.017f};     // per-channel 1/stddev

    // BUG FIX: the buffer is planar CHW, but the original indexed it as
    // interleaved HWC ((y*width + x)*channels + c), which applied mean[c] /
    // scale[c] to the wrong channel for two thirds of the elements.
    // Index one channel plane at a time instead.
    for (int c = 0; c < channels; ++c) {
        for (int y = 0; y < height; ++y) {
            for (int x = 0; x < width; ++x) {
                int index = (c * height + y) * width + x;   // planar (CHW) offset
                output[index] = (input[index] - mean[c]) * scale[c];
            }
        }
    }
}

// Unpack packed 32-bit pixels (0xAARRGGBB) into planar per-channel bytes.
// Output layout is CHW: all R bytes first, then all G, then all B.
// pColor: nW*nH packed pixels; ch: channel count; vtData: resized to nW*nH*ch.
void color2RGB(int32_t * pColor, int nW, int nH, int ch,  std::vector<uint8_t>& vtData) {
    vtData.resize(nW * nH * ch);
    // FIX: iterate `ch` channels instead of a hard-coded 3, so the buffer that
    // was just resized to nW*nH*ch is always fully written. For ch == 3 the
    // shift (ch-1-i)*8 equals the old (2-i)*8: i=0 -> R, i=1 -> G, i=2 -> B.
    for (int i = 0; i < ch; ++i) {
        int shift = (ch - 1 - i) * 8;              // byte position of channel i
        for (int y = 0; y < nH; ++y) {
            for (int x = 0; x < nW; ++x) {
                int pixel = y * nW + x;            // index within one plane
                int dst = i * nW * nH + pixel;     // planar (CHW) destination
                vtData[dst] = (pColor[pixel] >> shift) & 0xFF;
            }
        }
    }
}

// Prepare model input: unpack packed color pixels into planar RGB bytes, then
// normalize them to float32.
// pImgData:     packed 0xAARRGGBB pixels, one int per pixel (nImgArrayLen of them)
// nImgArrayLen: number of packed pixels (e.g. 224*224)
// vtShape:      expected NCHW input shape, e.g. [1, 3, 224, 224]
// vRealInput:   out — normalized float data; left empty on validation failure
//               (callers treat empty as the error signal).
void prepareInputData(int* pImgData, int nImgArrayLen, std::vector<size_t> vtShape,  std::vector<float>& vRealInput) {
    // FIX: validate the rank BEFORE indexing vtShape[1..3]; the original read
    // those elements first, which is an out-of-bounds read for shorter shapes.
    if (vtShape.size() != 4) {
        printf("error, invalid shape rank=%d", (int)vtShape.size());
        return;
    }
    int nEleNum = std::accumulate(vtShape.begin(), vtShape.end(), 1, std::multiplies<int>());
    int nC = (int)vtShape[1];    // channels, e.g. 3
    int nH = (int)vtShape[2];    // height,   e.g. 224
    int nW = (int)vtShape[3];    // width,    e.g. 224
    if (nEleNum != nImgArrayLen * nC) {
        printf("error, nEleNum=%d, nColorNum=%d", nEleNum, nImgArrayLen);
        return;
    }

    std::vector<uint8_t> vtData(nW * nH * nC);      // planar RGB bytes [3*224*224]
    color2RGB(pImgData, nW, nH, nC, vtData);
    vRealInput.resize(nW * nH * nC);
    normalize(vtData, nW, nH, nC, &vRealInput[0]);
}


extern "C"
JNIEXPORT jlong JNICALL
Java_com_example_mobilenet_InterpreterJni_crateInterpreter(JNIEnv *env, jobject thiz, jint framework,
                                                           jstring lib_path, jstring model_path) {
    // Inference backend type (e.g. MNN / ONNX).
    TNet::FrameworkTypeTag type = static_cast<TNet::FrameworkTypeTag>(framework);
    const char* pszLibPath = nullptr;
    const char* pszModelPath = nullptr;
    if (lib_path != nullptr) {
        pszLibPath = env->GetStringUTFChars(lib_path, nullptr);
    }
    if (model_path != nullptr) {
        pszModelPath = env->GetStringUTFChars(model_path, nullptr);
    }
    if (pszLibPath == nullptr || pszModelPath == nullptr) {
        // FIX: release whichever UTF buffer was pinned before bailing out —
        // the original leaked it on this path.
        if (pszLibPath != nullptr) env->ReleaseStringUTFChars(lib_path, pszLibPath);
        if (pszModelPath != nullptr) env->ReleaseStringUTFChars(model_path, pszModelPath);
        return 0;
    }

    // Engine configuration.
    TNet::Option option;
    option.frameworkTypeTag = type;
    option.modelPath = pszModelPath;
    option.libraryPath = pszLibPath;
    // Create the unified TNet inference engine.
    TNet::Interpreter* pInterpreter = new TNet::Interpreter();
    auto ret = pInterpreter->init(option);

    // FIX: the original never released the UTF strings (per-call leak).
    // NOTE(review): assumes Option stores copies of the paths and init() does
    // not retain the raw pointers past this point — confirm with TNet::Option.
    env->ReleaseStringUTFChars(lib_path, pszLibPath);
    env->ReleaseStringUTFChars(model_path, pszModelPath);

    // FIX: the original ignored the init result and leaked the interpreter on
    // failure; return 0 (the same "failed" value as above) instead.
    if (!ret) {
        delete pInterpreter;
        return 0;
    }
    return reinterpret_cast<jlong>(pInterpreter);
}

extern "C"
JNIEXPORT jstring JNICALL
Java_com_example_mobilenet_InterpreterJni_run(JNIEnv *env, jobject thiz, jlong inst,
                                              jintArray image_data,
                                              jfloatArray result_array) {
    auto pInterpreter = (TNet::Interpreter*)inst;
    if (pInterpreter == nullptr) {
        return env->NewStringUTF("Interpreter is null");
    }
    jint* pImgData = env->GetIntArrayElements(image_data, nullptr);
    if (pImgData == nullptr) {
        return env->NewStringUTF("pImgData is null");
    }
    jfloat *pOutput = env->GetFloatArrayElements(result_array, nullptr);
    if (pOutput == nullptr) {
        // FIX: unpin the image buffer before bailing out (the original leaked it).
        env->ReleaseIntArrayElements(image_data, pImgData, JNI_ABORT);
        return env->NewStringUTF("pOutput is null");
    }
    // FIX: the original never released either pinned array, leaking (and
    // possibly pinning) them on every call. JNI_ABORT = discard changes: we
    // never write through these pointers (results go back via SetFloatArrayRegion).
    auto release = [&]() {
        env->ReleaseIntArrayElements(image_data, pImgData, JNI_ABORT);
        env->ReleaseFloatArrayElements(result_array, pOutput, JNI_ABORT);
    };

    // Model input / output tensors.
    auto inputTensor = pInterpreter->inputTensor();
    auto outputTensor = pInterpreter->outputTensor();

    /* -------------------- fill the model input data -------------------- */
    auto inputTensorShape = inputTensor.shape();    // e.g. [1, 3, 224, 224]
    int nInputEleNum = std::accumulate(inputTensorShape.begin(), inputTensorShape.end(), 1, std::multiplies<int>());
    std::vector<float> vNormalizeInputData;         // normalized input data
    int nImgArrayLen =  env->GetArrayLength(image_data);    // e.g. 224*224 pixels
    prepareInputData(pImgData, nImgArrayLen, inputTensorShape, vNormalizeInputData);

    if (vNormalizeInputData.empty()) {
        release();
        return env->NewStringUTF("vNormalizeInputData is empty");
    }
    auto outputTenserShape = outputTensor.shape();  // e.g. [1, 1000]
    int nOutArrayLen =  env->GetArrayLength(result_array);   // e.g. 1000
    int nOutEleNum = std::accumulate(outputTenserShape.begin(), outputTenserShape.end(), 1, std::multiplies<int>());
    if (nOutArrayLen != nOutEleNum) {
        release();
        return env->NewStringUTF("nOutArrayLen != nOutEleNum");
    }
    std::vector<float> vOutData(nOutEleNum);

    /* -------------------- run inference -------------------- */
    inputTensor.copyToTensor(vNormalizeInputData.data(), nInputEleNum * sizeof(float));
    // NOTE: forward() is called unconditionally, so the original's
    // "notForward" sentinel Result and its dead-code check were removed.
    TNet::Base::Result forwardResult = pInterpreter->forward();
    outputTensor.copyFromTensor(vOutData.data(), nOutEleNum * sizeof(float));
    release();

    if (forwardResult) {
        // forward succeeded — copy the scores back into the Java array.
        env->SetFloatArrayRegion(result_array, 0, (jsize)vOutData.size(), &vOutData[0]);
        return env->NewStringUTF("success");
    }
    // forward failed — NewStringUTF copies the message before the Result dies.
    return env->NewStringUTF(forwardResult.getErrorMessage().data());
}

extern "C"
JNIEXPORT jintArray JNICALL
Java_com_example_mobilenet_InterpreterJni_getInputDataShape(JNIEnv *env, jobject thiz, jlong inst) {
    auto pInterpreter = (TNet::Interpreter* )inst;
    if (!pInterpreter) {
        return nullptr;
    }
    // Shape of the model's input tensor, converted to jint for the Java side.
    auto inputShape = pInterpreter->inputTensor().shape();
    std::vector<jint> vtShape(inputShape.begin(), inputShape.end());
    jintArray result = env->NewIntArray(vtShape.size());
    // FIX: NewIntArray can return nullptr (OOM / pending exception); the
    // original passed it to SetIntArrayRegion unchecked, which would crash.
    if (result == nullptr) {
        return nullptr;
    }
    env->SetIntArrayRegion(result, 0, (jsize)vtShape.size(), vtShape.data());
    return result;
}


extern "C"
JNIEXPORT jintArray JNICALL
Java_com_example_mobilenet_InterpreterJni_getOutputDataShape(JNIEnv *env, jobject thiz, jlong inst) {
    auto pInterpreter = (TNet::Interpreter* )inst;
    if (!pInterpreter) {
        return nullptr;
    }
    // Shape of the model's output tensor, converted to jint for the Java side.
    auto outputShape = pInterpreter->outputTensor().shape();
    std::vector<jint> vtShape(outputShape.begin(), outputShape.end());
    jintArray result = env->NewIntArray(vtShape.size());
    // FIX: NewIntArray can return nullptr (OOM / pending exception); the
    // original passed it to SetIntArrayRegion unchecked, which would crash.
    if (result == nullptr) {
        return nullptr;
    }
    env->SetIntArrayRegion(result, 0, (jsize)vtShape.size(), vtShape.data());
    return result;
}


extern "C"
JNIEXPORT void JNICALL
Java_com_example_mobilenet_InterpreterJni_destroyInterpreter(JNIEnv *env, jobject thiz, jlong inst) {
    auto pInterpreter = (TNet::Interpreter* )inst;
    // FIX: the original had the condition inverted — `if (!pInterpreter)
    // delete pInterpreter;` deleted only when the pointer was NULL, so every
    // interpreter created via crateInterpreter leaked. `delete nullptr` is a
    // well-defined no-op, so no guard is needed at all.
    delete pInterpreter;
}