#include "napi/native_api.h"

#include "common/plugin_common.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <js_native_api.h>
#include <js_native_api_types.h>
#include <ostream>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <iostream>

#include <string>
#include <sys/stat.h>
#include <unistd.h>
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include "mindspore/context.h"
#include "mindspore/data_type.h"
#include "mindspore/format.h"
#include "mindspore/model.h"
#include "mindspore/status.h"
#include "mindspore/tensor.h"
#include "mindspore/types.h"

#include "include/api/model.h"
#include "include/api/context.h"
#include "include/api/types.h"
#include "include/api/serialization.h"
#include "include/dataset/vision_lite.h"
#include "include/dataset/execute.h"
#include "include/dataset/transforms.h"

#include "api/types.h"
#include "dataset/lite_cv/lite_mat.h"
#include "dataset/lite_cv/image_process.h"
#include "dataset/vision_lite.h"
#include "dataset/execute.h"

#include "Detection.h"
#include "hello.h"

using mindspore::Context;
using mindspore::GraphCell;
using mindspore::kSuccess;
using mindspore::Model;
using mindspore::ModelType;
using mindspore::MSTensor;
using mindspore::Serialization;
using mindspore::Status;
using mindspore::dataset::Execute;
using mindspore::dataset::LDataType;
using mindspore::dataset::LiteMat;
using mindspore::dataset::PaddBorderType;
using mindspore::dataset::TensorTransform;
using mindspore::dataset::vision::Crop;
using mindspore::dataset::vision::Decode;
using mindspore::dataset::vision::Normalize;
using mindspore::dataset::vision::Resize;

using namespace mindspore;
using namespace std;

// NAPI binding: add(a: number, b: number) -> number.
// Reads two JS numbers from the callback info and returns their sum
// as a JS double. (Type checking is left to the JS side; napi_get_value_double
// simply yields 0 for non-numbers.)
static napi_value Add(napi_env env, napi_callback_info info)
{
    LOGE("mindsporeTag naive Add");
    size_t argc = 2;
    napi_value args[2] = {nullptr};

    napi_get_cb_info(env, info, &argc, args, nullptr, nullptr);

    // Convert both arguments to C doubles.
    double value0 = 0.0;
    napi_get_value_double(env, args[0], &value0);

    double value1 = 0.0;
    napi_get_value_double(env, args[1], &value1);

    napi_value sum = nullptr;
    napi_create_double(env, value0 + value1, &sum);

    return sum;
}

// NAPI binding: initClassificationModel(modelBuffer: ArrayBuffer,
//                                       modelSize: number,
//                                       imagePath: string) -> string
//
// Loads a MindSpore Lite classification model from the JS ArrayBuffer,
// preprocesses the image at `imagePath` (decode -> resize 256 -> rescale to
// [0,1] -> crop 224 -> ImageNet normalize), runs CPU inference and returns
// the per-category scores encoded as "index:score;index:score;...".
// Returns nullptr (undefined on the JS side) on any fatal failure.
static napi_value InitClassificationModel(napi_env env, napi_callback_info info){
    LOGI("mindsporeTag Native InitClassificationModel");
    size_t argc = 3;
    napi_value args[3] = {nullptr};

    napi_get_cb_info(env, info, &argc, args, nullptr, nullptr);

    napi_valuetype valuetype0;
    napi_typeof(env, args[0], &valuetype0);

    napi_valuetype valuetype1;
    napi_typeof(env, args[1], &valuetype1); // fixed: was inspecting args[0] twice

    napi_valuetype valuetype2;
    napi_typeof(env, args[2], &valuetype2); // fixed: result was written into valuetype0

    int value1 = 0;
    napi_get_value_int32(env, args[1], &value1);
    LOGE("mindsporeTag Native value1>> %{public}d", value1);
    if (value1 <= 0) {
        LOGE("mindsporeTag Native invalid model buffer size");
        return nullptr;
    }

    // Copy the model bytes out of the JS-owned ArrayBuffer. The copy is owned
    // by this function (RAII vector, was a leaked malloc) and stays valid for
    // the whole OH_AI_ModelBuild call.
    size_t ret = 0;
    void *buffer_ptr = nullptr;
    napi_get_arraybuffer_info(env, args[0], &buffer_ptr, &ret);
    LOGE("mindsporeTag Native ret length>> %{public}u", static_cast<unsigned>(ret));
    if (buffer_ptr == nullptr || ret == 0) {
        LOGE("mindsporeTag Native empty model buffer");
        return nullptr;
    }
    // Never copy past the end of the ArrayBuffer even if value1 over-reports.
    vector<char> model_buf(min(static_cast<size_t>(value1), ret));
    memcpy(model_buf.data(), buffer_ptr, model_buf.size());

    // Image file path (fixed-size stack buffer, was a leaked malloc).
    char imagePath[64] = {0};
    size_t typeLen = 0;
    napi_get_value_string_utf8(env, args[2], imagePath, sizeof(imagePath), &typeLen);
    LOGI("mindsporeTag Native InitImage length>> %{public}s", imagePath);

    ifstream ifs(imagePath);
    if (!ifs.is_open() || !ifs.good()) {
        LOGE("mindsporeTag Native fail to load image,check image path");
        return nullptr; // fixed: previously fell through and read a bad stream
    }

    ifs.seekg(0, ios::end);
    size_t size = ifs.tellg();
    LOGI("mindsporeTag Native size_t size %{public}d ", static_cast<int>(size));

    // Tensor created without data; MutableData() allocates the backing buffer.
    mindspore::MSTensor *image = MSTensor::CreateTensor(
        "file", mindspore::DataType::kNumberTypeUInt8, {static_cast<int64_t>(size)}, nullptr, 0);
    if (image == nullptr) {
        LOGE("mindsporeTag Native image is nullptr");
        return nullptr;
    }
    // RAII guard: the tensor from CreateTensor must be released via
    // DestroyTensorPtr on every exit path (was leaked before).
    unique_ptr<MSTensor, void (*)(MSTensor *)> image_guard(
        image, [](MSTensor *t) { MSTensor::DestroyTensorPtr(t); });
    ifs.seekg(0, ios::beg);
    ifs.read(reinterpret_cast<char *>(image->MutableData()), size);
    ifs.close();

    // Preprocess: decode -> resize(256) -> scale to [0,1] -> crop(224) -> normalize.
    shared_ptr<TensorTransform> decode = make_shared<Decode>();
    shared_ptr<TensorTransform> resize = make_shared<Resize>(vector<int>{256, 256});
    shared_ptr<TensorTransform> scale = make_shared<Normalize>(vector<float>{0, 0, 0}, vector<float>{255, 255, 255});
    shared_ptr<TensorTransform> crop = make_shared<Crop>(vector<int>{16, 16}, vector<int>{224, 224});
    shared_ptr<TensorTransform> normalize = make_shared<Normalize>(
        vector<float>{0.485, 0.456, 0.406}, vector<float>{0.229, 0.224, 0.225});

    vector<shared_ptr<TensorTransform>> trans_list = {decode, resize, scale, crop, normalize};
    auto executor = Execute(trans_list);
    Status exec_ret = executor(*image, image);
    if (exec_ret != kSuccess) {
        LOGE("mindsporeTag Native image preprocess fail");
        return nullptr;
    }

    // Create and init context, add CPU device info (2 threads).
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    if (context == NULL) {
        LOGE("mindsporeTag Native ContextCreate FAILED");
        return nullptr;
    }
    LOGI("mindsporeTag Native ContextCreate SUCCESS");
    const int thread_num = 2;
    OH_AI_ContextSetThreadNum(context, thread_num);
    OH_AI_ContextSetThreadAffinityMode(context, 1);

    OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    if (cpu_device_info == NULL) {
        LOGE("mindsporeTag Native OH_AI_DeviceInfoCreate failed");
        OH_AI_ContextDestroy(&context);
        return nullptr; // fixed: previously kept using the null device info
    }
    LOGI("mindsporeTag Native OH_AI_DeviceInfoCreate success");
    OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, false);
    // The context takes ownership of cpu_device_info from here on.
    OH_AI_ContextAddDeviceInfo(context, cpu_device_info);

    // Create model
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    if (model == NULL) {
        LOGE("mindsporeTag Native OH_AI_ModelCreate fail");
        OH_AI_ContextDestroy(&context);
        return nullptr;
    }
    LOGI("mindsporeTag Native OH_AI_ModelCreate success");

    // Build model; on success the model manages the context's lifetime.
    int ret2 = OH_AI_ModelBuild(model, model_buf.data(), model_buf.size(), OH_AI_MODELTYPE_MINDIR, context);
    if (ret2 != OH_AI_STATUS_SUCCESS) {
        LOGE("mindsporeTag Native OH_AI_ModelBuild fail");
        OH_AI_ModelDestroy(&model);
        return nullptr; // fixed: previously continued with a destroyed model
    }
    LOGI("mindsporeTag Native OH_AI_ModelBuild success");

    // Get inputs
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    if (inputs.handle_list == NULL || inputs.handle_num == 0) {
        LOGE("mindsporeTag Native OH_AI_ModelGetInputs fail, inputs.handle_num : %{public}d", inputs.handle_num);
        OH_AI_ModelDestroy(&model);
        return nullptr;
    }
    LOGI("mindsporeTag Native OH_AI_ModelGetInputs success, inputs.handle_num : %{public}d", inputs.handle_num);

    // Feed the preprocessed image. The input tensor only borrows `image`'s
    // buffer, which stays alive (image_guard) until after prediction.
    OH_AI_TensorSetData(inputs.handle_list[0], image->MutableData());
    OH_AI_TensorSetDataType(inputs.handle_list[0], OH_AI_DATATYPE_NUMBERTYPE_FLOAT32);
    OH_AI_TensorSetName(inputs.handle_list[0], image->Name().c_str());
    OH_AI_TensorSetFormat(inputs.handle_list[0], OH_AI_FORMAT_NCHW);

    // Get outputs
    OH_AI_TensorHandleArray outputs = OH_AI_ModelGetOutputs(model);
    if (outputs.handle_list == NULL || outputs.handle_num == 0) {
        LOGE("mindsporeTag Native OH_AI_ModelGetOutputs fail, ret : %{public}u", outputs.handle_num);
        OH_AI_ModelDestroy(&model);
        return nullptr;
    }
    LOGI("mindsporeTag Native OH_AI_ModelGetOutputs success");

    // Model predict
    OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, NULL, NULL);
    if (predict_ret != OH_AI_STATUS_SUCCESS) {
        LOGE("mindsporeTag Native OH_AI_ModelPredict fail");
        OH_AI_ModelDestroy(&model);
        return nullptr;
    }
    LOGI("mindsporeTag Native OH_AI_ModelPredict success");

    // Read raw scores from the first output tensor.
    OH_AI_TensorHandle tensor = outputs.handle_list[0];
    int64_t element_num = OH_AI_TensorGetElementNum(tensor);
    LOGE("mindsporeTag Native OH_Tensor name: %{public}s, tensor size is %{public}u ,elements num: %{public}d",
        OH_AI_TensorGetName(tensor), static_cast<unsigned>(OH_AI_TensorGetDataSize(tensor)),
        static_cast<int>(element_num));
    const float *data = (const float *)OH_AI_TensorGetData(tensor);

    // Copy at most RET_CATEGORY_SUM scores; missing entries stay 0.
    float scores[hello::RET_CATEGORY_SUM] = {0};
    const int score_num = min<int>(hello::RET_CATEGORY_SUM, static_cast<int>(element_num));
    for (int i = 0; i < score_num; ++i) {
        scores[i] = data[i];
    }

    // Rescale raw scores so that each category's own threshold maps to 0.5.
    const float unifiedThre = 0.5;
    const float probMax = 1.0;
    for (int i = 0; i < hello::RET_CATEGORY_SUM; ++i) {
        float threshold = hello::g_thres_map[i];
        float tmpProb = scores[i];
        if (tmpProb < threshold) {
            tmpProb = tmpProb / threshold * unifiedThre;
        } else {
            tmpProb = (tmpProb - threshold) / (probMax - threshold) * unifiedThre + unifiedThre;
        }
        scores[i] = tmpProb;
    }

    // Encode scores as "index:score;index:score;..." for the JS side.
    string categoryScore;
    for (int i = 0; i < hello::RET_CATEGORY_SUM; i++) {
        categoryScore += to_string(i);
        categoryScore += ":";
        categoryScore += to_string(scores[i]);
        categoryScore += ";";
    }
    LOGI("mindsporeTag Native categoryScore : %{public}s", categoryScore.c_str());

    napi_value result = nullptr;
    napi_create_string_utf8(env, categoryScore.c_str(), categoryScore.length(), &result);

    OH_AI_ModelDestroy(&model);

    return result;
}

// NAPI binding: initDetectionModel(modelBuffer: ArrayBuffer,
//                                  modelSize: number,
//                                  imagePath: string) -> string
//
// Loads a MindSpore Lite SSD-style detection model, preprocesses the image
// (decode -> resize 300x300 -> rescale to [0,1] -> normalize), runs CPU
// inference, post-processes the two output branches with `Detection` and
// returns the decoded result string. Returns nullptr on fatal failure.
static napi_value InitDetectionModel(napi_env env, napi_callback_info info){
    LOGI("mindsporeTag Native InitDetectionModel");
    size_t argc = 3;
    napi_value args[3] = {nullptr};

    napi_get_cb_info(env, info, &argc, args, nullptr, nullptr);

    napi_valuetype valuetype0;
    napi_typeof(env, args[0], &valuetype0);

    napi_valuetype valuetype1;
    napi_typeof(env, args[1], &valuetype1); // fixed: was inspecting args[0] twice

    napi_valuetype valuetype2;
    napi_typeof(env, args[2], &valuetype2); // fixed: result was written into valuetype0

    int value1 = 0;
    napi_get_value_int32(env, args[1], &value1);
    if (value1 <= 0) {
        LOGE("mindsporeTag Native invalid model buffer size");
        return nullptr;
    }

    // Copy the model bytes out of the JS-owned ArrayBuffer.
    // FIX: the original malloc'd a buffer, let napi_get_arraybuffer_info
    // overwrite the pointer (leaking the malloc) and later free()'d the
    // engine-owned ArrayBuffer data — heap corruption. The RAII vector below
    // owns its own copy instead.
    size_t ret = 0;
    void *buffer_ptr = nullptr;
    napi_get_arraybuffer_info(env, args[0], &buffer_ptr, &ret);
    LOGI("mindsporeTag Native ret length>> %{public}u", static_cast<unsigned>(ret));
    if (buffer_ptr == nullptr || ret == 0) {
        LOGE("mindsporeTag Native empty model buffer");
        return nullptr;
    }
    vector<char> model_buf(min(static_cast<size_t>(value1), ret));
    memcpy(model_buf.data(), buffer_ptr, model_buf.size());

    // Image file path (fixed-size stack buffer, was a leaked malloc).
    char imagePath[64] = {0};
    size_t typeLen = 0;
    napi_get_value_string_utf8(env, args[2], imagePath, sizeof(imagePath), &typeLen);

    ifstream ifs(imagePath);
    if (!ifs.is_open() || !ifs.good()) {
        LOGE("mindsporeTag Native fail to load image,check image path");
        return nullptr; // fixed: previously fell through and read a bad stream
    }

    LOGI("mindsporeTag Native ifstream success ");

    ifs.seekg(0, ios::end);
    size_t size = ifs.tellg();
    LOGI("mindsporeTag Native size_t size %{public}d ", static_cast<int>(size));

    // Stack-owned tensor created without data; MutableData() allocates it.
    mindspore::MSTensor image("file", mindspore::DataType::kNumberTypeUInt8, {static_cast<int64_t>(size)}, nullptr, 0);
    if (image == nullptr) {
        LOGE("mindsporeTag Native image is nullptr");
        return nullptr;
    }

    ifs.seekg(0, ios::beg);
    ifs.read(reinterpret_cast<char *>(image.MutableData()), size);
    ifs.close();

    // Decode the raw file bytes into an HWC uint8 image.
    auto decode = Decode();
    auto executor = Execute(decode);
    Status decode_ret = executor(image, &image);
    if (decode_ret != kSuccess) {
        LOGE("mindsporeTag Native image decode fail");
        return nullptr;
    }

    // Diagnostic dump of the first decoded bytes.
    uint8_t *temp_scores = static_cast<uint8_t *>(image.MutableData());
    LOGI("mindsporeTag Native image.size(): %{public}d image[0]>>>%{public}d, 1>>>%{public}d,2>>>%{public}d,3>>>%{public}d,4>>>%{public}d, ",
        static_cast<int>(image.DataSize()), temp_scores[0], temp_scores[1], temp_scores[2], temp_scores[3], temp_scores[4]);

    // Preprocess: resize(300x300) -> scale to [0,1] -> normalize.
    shared_ptr<TensorTransform> resize = make_shared<Resize>(vector<int>{300, 300});
    shared_ptr<TensorTransform> scale = make_shared<Normalize>(vector<float>{0, 0, 0},
                                                            vector<float>{255, 255, 255});
    shared_ptr<TensorTransform> normalize = make_shared<Normalize>(
        vector<float>{0.485 * 0.229, 0.456 * 0.224, 0.406 * 0.225}, vector<float>{0.229, 0.224, 0.225});

    mindspore::dataset::Execute preprocess({resize, scale, normalize});
    Status preprocess_ret = preprocess(image, &image);
    if (preprocess_ret != kSuccess) {
        LOGE("mindsporeTag Native image preprocess fail");
        return nullptr;
    }
    // Diagnostic dump of the first preprocessed floats (fixed: was printed with %d).
    float *temp_scores4 = static_cast<float *>(image.MutableData());
    LOGI("mindsporeTag Native image.size(): %{public}d image[0]>>>%{public}f, 1>>>%{public}f,2>>>%{public}f,3>>>%{public}f,4>>>%{public}f, ",
            static_cast<int>(image.DataSize()), temp_scores4[0], temp_scores4[1], temp_scores4[2], temp_scores4[3], temp_scores4[4]);

    // Create and init context, add CPU device info (2 threads).
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    if (context == NULL) {
        LOGE("mindsporeTag Native ContextCreate FAILED");
        return nullptr;
    }
    LOGI("mindsporeTag Native ContextCreate SUCCESS");
    const int thread_num = 2;
    OH_AI_ContextSetThreadNum(context, thread_num);
    OH_AI_ContextSetThreadAffinityMode(context, 1);

    OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    if (cpu_device_info == NULL) {
        LOGE("mindsporeTag Native OH_AI_DeviceInfoCreate failed");
        OH_AI_ContextDestroy(&context);
        return nullptr; // fixed: previously kept using the null device info
    }
    LOGI("mindsporeTag Native OH_AI_DeviceInfoCreate success");
    OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, false);
    // The context takes ownership of cpu_device_info from here on.
    OH_AI_ContextAddDeviceInfo(context, cpu_device_info);

    // Create model
    OH_AI_ModelHandle model = OH_AI_ModelCreate();
    if (model == NULL) {
        LOGE("mindsporeTag Native OH_AI_ModelCreate fail");
        OH_AI_ContextDestroy(&context);
        return nullptr;
    }
    LOGI("mindsporeTag Native OH_AI_ModelCreate success");

    // Build model; on success the model manages the context's lifetime.
    int ret2 = OH_AI_ModelBuild(model, model_buf.data(), model_buf.size(), OH_AI_MODELTYPE_MINDIR, context);
    if (ret2 != OH_AI_STATUS_SUCCESS) {
        LOGE("mindsporeTag Native OH_AI_ModelBuild fail");
        OH_AI_ModelDestroy(&model);
        return nullptr; // fixed: previously continued with a destroyed model
    }
    LOGI("mindsporeTag Native OH_AI_ModelBuild success");

    // Get inputs
    OH_AI_TensorHandleArray inputs = OH_AI_ModelGetInputs(model);
    if (inputs.handle_list == NULL || inputs.handle_num == 0) {
        LOGE("mindsporeTag Native OH_AI_ModelGetInputs fail, inputs.handle_num : %{public}d", inputs.handle_num);
        OH_AI_ModelDestroy(&model);
        return nullptr;
    }
    LOGI("mindsporeTag Native OH_AI_ModelGetInputs success, inputs.handle_num : %{public}d", inputs.handle_num);

    // Feed the preprocessed image; the input tensor borrows `image`'s buffer,
    // which lives on this stack frame until the function returns.
    OH_AI_TensorSetData(inputs.handle_list[0], image.MutableData());
    OH_AI_TensorSetDataType(inputs.handle_list[0], OH_AI_DATATYPE_NUMBERTYPE_FLOAT32);
    OH_AI_TensorSetName(inputs.handle_list[0], image.Name().c_str());
    OH_AI_TensorSetFormat(inputs.handle_list[0], OH_AI_FORMAT_NCHW);

    // Get outputs
    OH_AI_TensorHandleArray outputs = OH_AI_ModelGetOutputs(model);
    if (outputs.handle_list == NULL || outputs.handle_num == 0) {
        LOGE("mindsporeTag Native OH_AI_ModelGetOutputs fail, ret : %{public}u", outputs.handle_num);
        OH_AI_ModelDestroy(&model);
        return nullptr;
    }
    LOGI("mindsporeTag Native OH_AI_ModelGetOutputs success");

    // Model predict
    OH_AI_Status predict_ret = OH_AI_ModelPredict(model, inputs, &outputs, NULL, NULL);
    if (predict_ret != OH_AI_STATUS_SUCCESS) {
        LOGE("mindsporeTag Native OH_AI_ModelPredict fail");
        OH_AI_ModelDestroy(&model);
        return nullptr;
    }
    LOGI("mindsporeTag Native OH_AI_ModelPredict success");

    // Index output tensors by name; the detection model is expected to expose
    // two branches (box coordinates and class scores).
    unordered_map<string, OH_AI_TensorHandle> msOutputs;
    for (size_t i = 0; i < outputs.handle_num; i++) {
        OH_AI_TensorHandle out_tensor = outputs.handle_list[i];
        msOutputs.insert(pair<string, OH_AI_TensorHandle>{OH_AI_TensorGetName(out_tensor), out_tensor});
    }
    if (msOutputs.size() < 2) { // fixed: ++iter below was unguarded
        LOGE("mindsporeTag Native unexpected output count : %{public}u", outputs.handle_num);
        OH_AI_ModelDestroy(&model);
        return nullptr;
    }

    auto iter = msOutputs.begin();
    auto branch2_string = iter->first;
    auto branch2_tensor = iter->second;

    ++iter;
    auto branch1_string = iter->first;
    auto branch1_tensor = iter->second;
    LOGI("mindsporeTag Native Process Runnet Result %{public}s, Process Runnet Result %{public}s", branch1_string.c_str(), branch2_string.c_str());

    float *tmpscores2 = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(branch1_tensor));
    float *tmpdata = reinterpret_cast<float *>(OH_AI_TensorGetMutableData(branch2_tensor));
    LOGI("mindsporeTag Native  Result %{public}f, Result %{public}f", *tmpscores2, *tmpdata);

    // Using ssd model util to process model branch outputs.
    // (local renamed from `Detection Detection` which shadowed the type name)
    Detection detector(500, 500);
    LOGI("mindsporeTag Native  Result %{public}f, Result %{public}f", *tmpscores2, *tmpdata);

    string retStr = detector.getDecodeResult(tmpdata, tmpscores2);
    LOGI("mindsporeTag Native  Process Runnet Result retStr %{public}s", retStr.c_str());

    napi_value result = nullptr;
    napi_create_string_utf8(env, retStr.c_str(), retStr.length(), &result);
    OH_AI_ModelDestroy(&model); // fixed: model was leaked on the success path
    return result;
}

EXTERN_C_START
// Module init hook: attaches every native entry point as a method on the
// module's `exports` object so JS can call add / initClassificationModel /
// initDetectionModel.
static napi_value Init(napi_env env, napi_value exports){
    const napi_property_descriptor properties[] = {
        {"add", nullptr, Add, nullptr, nullptr, nullptr, napi_default, nullptr},
        {"initClassificationModel", nullptr, InitClassificationModel, nullptr, nullptr, nullptr, napi_default, nullptr},
        {"initDetectionModel", nullptr, InitDetectionModel, nullptr, nullptr, nullptr, napi_default, nullptr},
    };
    const size_t property_count = sizeof(properties) / sizeof(properties[0]);
    napi_define_properties(env, exports, property_count, properties);
    return exports;
}
EXTERN_C_END

// Module registration record handed to the NAPI runtime; `Init` runs when JS
// loads the "hello" module.
static napi_module demoModule = {
    .nm_version = 1,
    .nm_flags = 0,
    .nm_filename = nullptr,
    .nm_register_func = Init,
    .nm_modname = "hello",
    .nm_priv = nullptr, // idiom fix: was ((void*)0)
    .reserved = { 0 },
};

// Shared-library constructor: runs automatically at .so load time and
// registers demoModule with the NAPI runtime before any JS code can use it.
extern "C" __attribute__((constructor)) void RegisterApplicationModule(void){
    napi_module_register(&demoModule);
}
