/*
 * Copyright 2023 Unionman Technology Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "napi/native_api.h"
#include <bits/alltypes.h>
#include <iostream>
#include <mindspore/model.h>
#include <mindspore/context.h>
#include <mindspore/status.h>
#include <mindspore/tensor.h>
#include <rawfile/raw_file_manager.h>
#include <hilog/log.h>
#include <unistd.h>
#include <math.h>
#include <algorithm>
#include "mslite_log.h"
#include "mslite_errors.h"
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include "napi_utils.h"
#include <time.h>
#include <mutex>

using namespace cv;
using namespace OHOS::MSAI;
using namespace std;

// Global handle to the compiled MindSpore Lite model; created once in
// ObjectDectionInit and shared by every inference call.
OH_AI_ModelHandle modelms = nullptr;
// Confidence threshold applied during YOLOv5 post-processing.
static float threshold_class = 0.45;
// Per-anchor output vector length: 4 box coords + 1 objectness + 80 class scores.
constexpr int output_dimension = 85;

// Snapshot of one model output tensor, copied out of the runtime buffers so
// post-processing can run while the next inference reuses those buffers.
struct ObjDetectMSOutputData {
    size_t elementNum;    // number of elements in the tensor
    vector<uint8_t> data; // raw tensor bytes
};
using PSMSOutputData = shared_ptr<struct ObjDetectMSOutputData>;

// Serializes access to the (non-reentrant) compiled model — see ObjectDetectPredict.
static mutex g_predictMux;

// Float box as decoded by get_region_box: (x, y) is the box center,
// (w, h) its width/height.
struct box {
    float x;
    float y;
    float w;
    float h;
};

// Label table indexed by the class id the model emits. The first entries
// appear to follow the COCO ordering, the tail holds custom labels —
// TODO confirm the table matches the deployed model's training labels.
static string classes[100] = {"person",        "bicycle",      "car",
                              "motorcycle",    "airplane",     "bus",
                              "train",         "truck",        "boat",
                              "traffic light", "fire hydrant", "stop sign",
                              "parking meter", "bench",        "bird",
                              "cat",           "dog",          "horse",
                              "sheep",         "cow",          "elephant",
                              "bear",          "zebra",        "giraffe",
                              "backpack",      "umbrella",     "handbag",
                              "tie",           "suitcase",     "frisbee",
                              "skis",          "snowboard",    "sports ball",
                              "kite",          "baseball bat", "baseball glove",
                              "skateboard",    "surfboard",    "tennis racket",
                              "bottle",        "wine glass",   "cup",
                              "fork",          "knife",        "spoon",
                              "bowl",          "banana",       "apple",
                              "sandwich",      "orange",       "broccoli",
                              "carrot",        "hot dog",      "pizza",
                              "donut",         "cake",         "chair",
                              "couch",         "potted plant", "bed",
                              "dining table",  "toilet",       "tv",
                              "laptop",        "mouse",        "remote",
                              "keyboard",      "cell phone",   "microwave",
                              "oven",          "toaster",      "sink",
                              "refrigerator",  "book",         "clock",
                              "vase",          "scissors",     "teddy bear",
                              "Bathtub",       "toothbrush",   "Panda",
                              "Piano",         "Door",         "Beer",
                              "Flower",        "Football",     "Headphones",
                              "Koala",         "Rifle",        "Saxophone",
                              "Guitar",        "Handgun",      "Frog",
                              "Goose",         "Parrot",       "Printer",
                              "Sunglasses",    "Violin",       "Washing machine",
                              "Watch"};

// Integer detection box with confidence score and class id.
// NOTE(review): yolo_v5_post_process_onescale pushes {left, top, right,
// bottom} into {x, y, w, h} — so w/h hold the right/bottom EDGES there,
// while iou() reads w/h as width/height. Confirm which convention is
// intended before changing either side.
struct Bbox {
    int x;
    int y;
    int w;
    int h;
    float score;  // objectness * best class probability
    int classes;  // index into the classes[] label table
};

// Select the uint8-quantized model build; comment out to use the int8 build.
#define MODEL_DATA_TYPE_UINT8

// Per-tensor dequantization parameters (zero point / scale) for the two
// post-processed output maps — 40x40 at stride 16 and 20x20 at stride 32 —
// plus the matching .ms model packaged in the app's raw-file resources.
#ifdef MODEL_DATA_TYPE_UINT8
#define CONV_OUTPUT_40_ZPOINT 173
#define CONV_OUTPUT_40_SCALE 0.10785926133394241
#define CONV_OUTPUT_20_ZPOINT 158
#define CONV_OUTPUT_20_SCALE 0.0980067253112793
#define MODEL_FILE_NAME "unm_nchw_uint8.ms"
#else
#define CONV_OUTPUT_40_ZPOINT 46
#define CONV_OUTPUT_40_SCALE 0.10317288339138031
#define CONV_OUTPUT_20_ZPOINT 32
#define CONV_OUTPUT_20_SCALE 0.09866468608379364
#define MODEL_FILE_NAME "unm_nchw_int8.ms"
#endif

static bool sort_score(Bbox box1, Bbox box2) { return box1.score > box2.score ? true : false; }
static float sigmoid(float x) { return (1 / (1 + exp(-x))); }
// Intersection-over-union of two boxes, treating (x, y) as the top-left
// corner and (w, h) as width/height.
// NOTE(review): nms() feeds this Bbox values whose w/h fields were filled
// with right/bottom edges by yolo_v5_post_process_onescale — confirm the
// coordinate convention before relying on absolute IoU values.
static float iou(Bbox box1, Bbox box2) {

    int x1 = max(box1.x, box2.x);
    int y1 = max(box1.y, box2.y);
    int x2 = min(box1.x + box1.w, box2.x + box2.w);
    int y2 = min(box1.y + box1.h, box2.y + box2.h);
    int w = max(0, x2 - x1);
    int h = max(0, y2 - y1);
    float over_area = w * h;
    // Fix: guard a zero/degenerate union so we never divide by zero (the old
    // code returned NaN/inf for two empty boxes).
    float union_area = box1.w * box1.h + box2.w * box2.h - over_area;
    if (union_area <= 0.0f) {
        return 0.0f;
    }
    return over_area / union_area;
}

// Greedy non-maximum suppression: repeatedly keep the highest-scoring box and
// drop every remaining box whose IoU with it exceeds `threshold`.
// `boxes` is consumed (left empty) in the process.
// Fixes: removed the per-pair std::cout debug print (hot-loop console I/O in
// a release NAPI module), fixed the `resluts` typo, and made the index
// unsigned to match vector::size().
static vector<Bbox> nms(std::vector<Bbox> &boxes, float threshold) {
    vector<Bbox> results;
    std::sort(boxes.begin(), boxes.end(), sort_score);
    while (!boxes.empty()) {
        results.push_back(boxes[0]);
        size_t index = 1;
        while (index < boxes.size()) {
            if (iou(boxes[0], boxes[index]) > threshold) {
                boxes.erase(boxes.begin() + index);
            } else {
                index++;
            }
        }
        boxes.erase(boxes.begin());
    }

    return results;
}

// Resize `src` to fit inside w x h while preserving aspect ratio, then pad
// the borders with gray (114) — standard YOLOv5 "letterbox" preprocessing.
// Appends {pad_w, pad_h, scale} to `pad` so detections can later be mapped
// back to original-image coordinates by scale_box().
static cv::Mat letterbox(cv::Mat &src, int h, int w, vector<float> &pad) {

    int in_w = src.cols; // width
    int in_h = src.rows; // height
    int tar_w = w;
    int tar_h = h;
    // Uniform scale factor: the tighter of the two dimensions limits it.
    float r = std::min(float(tar_h) / in_h, float(tar_w) / in_w);
    int inside_w = round(in_w * r);
    int inside_h = round(in_h * r);
    int pad_w = tar_w - inside_w;
    int pad_h = tar_h - inside_h;

    cv::Mat resize_img;

    cv::resize(src, resize_img, cv::Size(inside_w, inside_h));

    // Split the padding evenly between the two sides.
    // NOTE(review): integer division drops an odd pixel here, so the padded
    // result can be one pixel short of (w, h) — confirm the model input
    // tolerates this.
    pad_w = pad_w / 2;
    pad_h = pad_h / 2;

    pad.push_back(pad_w);
    pad.push_back(pad_h);
    pad.push_back(r);

    // The +/-0.1 rounding mirrors the reference YOLOv5 implementation, where
    // the half-padding may be fractional.
    int top = int(round(pad_h - 0.1));
    int bottom = int(round(pad_h + 0.1));
    int left = int(round(pad_w - 0.1));
    int right = int(round(pad_w + 0.1));
    cv::copyMakeBorder(resize_img, resize_img, top, bottom, left, right, 0, cv::Scalar(114, 114, 114));

    return resize_img;
}

// Map a box from letterboxed (padded + scaled) coordinates back into the
// original image's coordinate system, using the {pad_w, pad_h, scale}
// triple recorded by letterbox().
static box scale_box(box boxes, vector<float> &pad) {

    const float pad_x = pad[0];
    const float pad_y = pad[1];
    const float ratio = pad[2];

    box restored;
    restored.x = (boxes.x - pad_x) / ratio;
    restored.y = (boxes.y - pad_y) / ratio;
    restored.w = boxes.w / ratio;
    restored.h = boxes.h / ratio;
    return restored;
}
// Reorder one output tensor in place between channel-major (CHW) and
// interleaved (pixel-major) layouts.
// @param x       buffer of size*layers*batch one-byte elements (uint8 or
//                int8 depending on the model build)
// @param size    spatial element count (e.g. 40*40)
// @param layers  channel count (anchors * output_dimension)
// @param batch   batch count
// @param forward non-zero: CHW -> interleaved; zero: the inverse permutation
static void flatten(void *x, int size, int layers, int batch, int forward) {
    // Fix: compute the element count in size_t so large tensors cannot
    // overflow an int expression, and skip the empty case — calloc(0, ...)
    // may legally return nullptr, which used to be logged as a failure.
    const size_t total = (size_t)size * (size_t)layers * (size_t)batch;
    if (total == 0) {
        return;
    }
#ifdef MODEL_DATA_TYPE_UINT8
    uint8_t *x_p = (uint8_t *)x;
    uint8_t *swap = (uint8_t *)calloc(total, sizeof(uint8_t));
#else
    int8_t *x_p = (int8_t *)x;
    int8_t *swap = (int8_t *)calloc(total, sizeof(int8_t));
#endif
    if (swap == nullptr) {
        LOGE("flatten: calloc failed"); // was misreported as "malloc failed"
        return;
    }
    int i, c, b, i1, i2;
    for (b = 0; b < batch; ++b) {
        for (c = 0; c < layers; ++c) {
            for (i = 0; i < size; ++i) {
                i1 = b * layers * size + c * size + i; // CHW offset
                i2 = b * layers * size + i * layers + c; // interleaved offset
                if (forward)
                    swap[i2] = x_p[i1];
                else
                    swap[i1] = x_p[i2];
            }
        }
    }
    memcpy(x, swap, total); // elements are 1 byte in both builds
    free(swap);
}

// Decode one quantized anchor prediction into a box in letterboxed-image
// pixels using the YOLOv5 decode: xy = (sigmoid*2 - 0.5 + grid) * stride,
// wh = (sigmoid*2)^2 * anchor.
// @param x_in   flattened output tensor (uint8/int8 per model build)
// @param biases anchor table; pair n gives this anchor's (w, h)
// @param n      anchor index within this scale
// @param index  offset of this prediction's 4 coordinates inside x_in
// @param i, j   grid column / row
// @param w, h   grid width / height (unused in the math below)
// @param stride 16 for the 40x40 map, 32 for the 20x20 map — also selects
//               the dequantization zero point / scale
static box get_region_box(void *x_in, float *biases, int n, int index, int i, int j, int w, int h, int stride) {
#ifdef MODEL_DATA_TYPE_UINT8
    uint8_t *x = (uint8_t *)x_in;
#else
    int8_t *x = (int8_t *)x_in;
#endif
    
    float xywh[4];
    for (int k = 0; k < 4; k++) {
        if (stride == 16) {
            // Dequantize with this scale's zero point / scale, then squash to (0, 1).
            xywh[k] = (x[index + k] - CONV_OUTPUT_40_ZPOINT) * CONV_OUTPUT_40_SCALE; 
            xywh[k] = sigmoid(xywh[k]);
        } else if (stride == 32) {
            xywh[k] = (x[index + k] - CONV_OUTPUT_20_ZPOINT) * CONV_OUTPUT_20_SCALE;
            xywh[k] = sigmoid(xywh[k]);
        } else {
            // Unknown stride: degenerate zero box.
            xywh[k] = 0;
        }
    }
    float xx = xywh[0];
    float xy = xywh[1];
    float xw = xywh[2];
    float xh = xywh[3];
    box b;
    b.x = (i + xx * 2 - 0.5) * stride; // center x in input-image pixels
    b.y = (j + xy * 2 - 0.5) * stride; // center y
    b.w = (xw * 2) * (xw * 2) * biases[2 * n]; // width from anchor prior
    b.h = (xh * 2) * (xh * 2) * biases[2 * n + 1]; // height from anchor prior
    return b;
}
 
static int yolo_v5_post_process_onescale(void *predictions_in, float *biases, float threshold_in, int stride,
    vector<float> &pad, vector<Bbox> &input) 
{
#ifdef MODEL_DATA_TYPE_UINT8
    uint8_t *predictions = (uint8_t *)predictions_in;
#else
    int8_t *predictions = (int8_t *)predictions_in;
#endif
    int i, j;
    int num_class = output_dimension - 5;
    int coords = 4;
    int bb_size = coords + num_class + 1;

    float threshold = threshold_in;

    int nn_width = 640;
    int nn_height = 640;

    int num_box = 3;
    int modelWidth = nn_width / stride;
    int modelHeight = nn_height / stride;

    for (i = 0; i < modelWidth * modelHeight; ++i) {
        int row = i / modelWidth;
        int col = i % modelWidth;
        int n = 0;
        for (n = 0; n < num_box; ++n) {
            int index = i * num_box + n;
            int p_index = index * bb_size + 4;
            float scale;
            if(stride == 16){
                scale = (predictions[p_index] - CONV_OUTPUT_40_ZPOINT) * CONV_OUTPUT_40_SCALE;
                scale = sigmoid(scale);
            }
            else if(stride == 32){
                scale = (predictions[p_index] - CONV_OUTPUT_20_ZPOINT) * CONV_OUTPUT_20_SCALE;
                scale = sigmoid(scale);
            }
            else{
                scale = 0;
            }

            int box_index = index * bb_size;
            int class_index = 0;
            class_index = index * bb_size + 5;

            if (scale > threshold) {
                int max_class = 0;
                float probs[num_class];
                for (j = 0; j < num_class; ++j) {
                    float conf_class;
                    if (stride == 16) {
                        conf_class = (predictions[class_index + j] - CONV_OUTPUT_40_ZPOINT) * CONV_OUTPUT_40_SCALE;
                        conf_class = sigmoid(conf_class);
                    } else if (stride == 32) {
                        conf_class = (predictions[class_index + j] - CONV_OUTPUT_20_ZPOINT) * CONV_OUTPUT_20_SCALE;
                        conf_class = sigmoid(conf_class);
                    } else {
                        conf_class = 0;
                    }

                    float prob = scale * conf_class;
                    probs[j] = prob;
                    if (probs[j] > probs[max_class]) {
                        max_class = j;
                    }
                }

                if (probs[max_class] > threshold) {
                    box getbox =
                        get_region_box(predictions, biases, n, box_index, col, row, modelWidth, modelHeight, stride);

                    float l = getbox.x - getbox.w / 2;
                    float t = getbox.y - getbox.h / 2;
                    float r = getbox.x + getbox.w / 2;
                    float d = getbox.y + getbox.h / 2;
//                    LOGI("l:%f, t:%f, r:%f, d:%f\n", l, t, r, d);

                    box scaled_box = scale_box(getbox, pad);
                    l = scaled_box.x - scaled_box.w / 2;
                    t = scaled_box.y - scaled_box.h / 2;
                    r = scaled_box.x + scaled_box.w / 2;
                    d = scaled_box.y + scaled_box.h / 2;
                    input.push_back({(int)l, (int)t, (int)r, (int)d, probs[max_class], max_class});
                }
            }
        }
    }

    return 0;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Read a model file out of the application's raw-file resources into a
// malloc'd buffer. On success *modelSize receives the file size and the
// caller owns the returned buffer (release via DestroyModelBuffer).
// Returns nullptr on any failure.
static void *ReadModelFile(NativeResourceManager *nativeResourceManager, const std::string &modelName,
                           size_t *modelSize) {
    auto rawFile = OH_ResourceManager_OpenRawFile(nativeResourceManager, modelName.c_str());
    if (rawFile == nullptr) {
        LOGE("Open model file failed");
        return nullptr;
    }
    long fileSize = OH_ResourceManager_GetRawFileSize(rawFile);
    // Fix: reject empty/invalid sizes instead of calling malloc(<=0).
    if (fileSize <= 0) {
        LOGE("Get model file size failed");
        OH_ResourceManager_CloseRawFile(rawFile);
        return nullptr;
    }
    void *modelBuffer = malloc(fileSize);
    if (modelBuffer == nullptr) {
        // Fix: this path used to log "Get model file size failed".
        LOGE("Allocate model buffer failed");
        OH_ResourceManager_CloseRawFile(rawFile);
        return nullptr;
    }
    int ret = OH_ResourceManager_ReadRawFile(rawFile, modelBuffer, fileSize);
    if (ret == 0) {
        LOGE("Read model file failed");
        free(modelBuffer); // fix: the buffer leaked on this path
        OH_ResourceManager_CloseRawFile(rawFile);
        return nullptr;
    }
    OH_ResourceManager_CloseRawFile(rawFile);
    *modelSize = fileSize;
    return modelBuffer;
}
// Free a model buffer allocated by ReadModelFile and null the caller's pointer.
// Release a heap buffer through its owner's pointer, leaving that pointer
// nulled so a double call is harmless. Tolerates a null `buffer` argument.
static void DestroyModelBuffer(void **buffer) {
    if (buffer == nullptr) {
        return;
    }
    if (*buffer != nullptr) {
        free(*buffer);
        *buffer = nullptr;
    }
}

// Create a MindSpore Lite context (NNRT accelerator with CPU fallback), then
// build a model from the in-memory .ms buffer.
// Returns the model handle, or nullptr on failure.
static OH_AI_ModelHandle CreateMSLiteModel(void *modelBuffer, size_t modelSize) {
    // Create the inference context.
    auto context = OH_AI_ContextCreate();
    LOGI("Creat Context.\n");
    if (context == nullptr) {
        LOGE("Create MSLite context failed.\n");
        return nullptr;
    }

    // Prefer the NNRT accelerator; add CPU as a fallback device.
    auto nnrt_device_info = OH_AI_CreateNNRTDeviceInfoByType(OH_AI_NNRTDEVICE_ACCELERATOR);
    OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);
    auto cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    OH_AI_ContextAddDeviceInfo(context, cpu_device_info);

    // Load and compile the .ms model.
    auto model = OH_AI_ModelCreate();
    if (model == nullptr) {
        LOGE("Allocate MSLite Model failed.\n");
        OH_AI_ContextDestroy(&context); // fix: context leaked on this path
        return nullptr;
    }

    auto build_ret = OH_AI_ModelBuild(model, modelBuffer, modelSize, OH_AI_MODELTYPE_MINDIR, context);
    LOGI("Loader msfile.\n");
    if (build_ret != OH_AI_STATUS_SUCCESS) {
        OH_AI_ModelDestroy(&model);
        OH_AI_ContextDestroy(&context); // fix: context leaked on this path
        LOGE("Build MSLite model failed.\n");
        return nullptr;
    }
    LOGI("Build MSLite model success.\n");
    return model;
}

// Bytes in one color-channel plane of a 640x640 model input.
constexpr int copyChannelDataLen = 640 * 640 * sizeof(uint8_t);

// Standard napi callback-info preamble: declares argc/argv/thisVar/data and
// fills them from `info`.
#define GET_PARAMS(env, info, num)                                                                                     \
    size_t argc = num;                                                                                                 \
    napi_value argv[num] = {nullptr};                                                                                  \
    napi_value thisVar = nullptr;                                                                                      \
    void *data = nullptr;                                                                                              \
    napi_get_cb_info(env, info, &argc, argv, &thisVar, &data)


// Convert one YUV420sp frame into the model's input layout:
// YUV -> BGR, letterbox to 640x640, then HWC -> CHW into `inputData`.
// Letterbox padding/scale is appended to `imagePad` for later box mapping.
// Returns 0 on success, -1 on failure.
static int ObjectDetectPreprocess(PicDesc &picDesc, std::vector<unsigned char> &picData,
    std::vector<uint8_t>& inputData, std::vector<float>& imagePad, size_t picNo) 
{
    LOGI("ObjectDetectPreprocess: in picno=%d", picNo);
    if (picData.empty()) {
        LOGE("ObjectDetectPreprocess: input pic data is empty.");
        return -1;
    }

    cv::Mat rgbImage;
    // Fix: for YUV420sp the Mat must be (height * 3 / 2) rows by `width`
    // columns; rows/cols were previously swapped, which corrupts the
    // conversion for non-square frames — confirm PicDesc width/height
    // semantics against the caller. Constructing the Mat directly over
    // picData also avoids an unused extra allocation.
    cv::Mat yuv(picDesc.height * 3 / 2, picDesc.width, CV_8UC1, (unsigned char *)(picData.data()));
    cv::cvtColor(yuv, rgbImage, COLOR_YUV420sp2BGR);
    if (rgbImage.data == nullptr || rgbImage.channels() != 3 || rgbImage.rows == 0 || rgbImage.cols == 0) {
        LOGE("cvtColor failed.");
        return -1;
    }
    LOGI("ObjectDetectPreprocess: convert to bgr picno=%d", picNo);

    cv::Mat image = letterbox(rgbImage, 640, 640, imagePad);
#ifndef MODEL_DATA_TYPE_UINT8
    /* Normalize uint8 data [0,255] to int8 [-128,127]. */
    image.convertTo(image, CV_8S, 255.0 / 255.0, -128.0);
#endif

    /* The image is HWC; the model expects CHW, so split and copy per channel. */
    vector<Mat> img_channel(3);
    cv::split(image, img_channel);
    LOGI("ObjectDetectPreprocess: image convert to letterbox picno=%d", picNo);

    inputData.resize(copyChannelDataLen * 3);
    uint8_t *data = inputData.data();
    std::copy(img_channel[0].data, img_channel[0].data + copyChannelDataLen, data);
    std::copy(img_channel[1].data, img_channel[1].data + copyChannelDataLen, data + copyChannelDataLen);
    std::copy(img_channel[2].data, img_channel[2].data + copyChannelDataLen, data + copyChannelDataLen * 2);

    LOGI("ObjectDetectPreprocess: convert to NCHW picno=%d", picNo);
    return 0;
}

// Element counts of the two output tensors we post-process:
// the 40x40 (stride 16) and 20x20 (stride 32) maps, 3 anchors each.
constexpr size_t output40 = 40 * 40 * output_dimension * 3;
constexpr size_t output20 = 20 * 20 * output_dimension * 3;

// Copy the relevant model output tensors into `msOutput` so post-processing
// can run elsewhere while the runtime buffers are reused for the next
// inference. Always returns 0.
static int32_t ObjectDetectGetMSOutput(OH_AI_TensorHandleArray &tensorOutputs, vector<PSMSOutputData> &msOutput)
{
    void *tensorData = nullptr;
    int max_num;
    int dataSize;
    
    for (size_t i = 0; i < tensorOutputs.handle_num; i++) {
        auto tensor = tensorOutputs.handle_list[i];        
        LOGI("- Tensor %{public}d name is: %{public}s.\n", static_cast<int>(i), OH_AI_TensorGetName(tensor));
                
        max_num = (int)OH_AI_TensorGetElementNum(tensor);
        // Skip the 80x80 small-object map: only the 40x40 and 20x20 tensors
        // are post-processed.
        if ((max_num != output40) && (max_num != output20)) {
            continue;
        }
        auto output = make_shared<struct ObjDetectMSOutputData>();
        
        dataSize = (int)OH_AI_TensorGetDataSize(tensor);
        tensorData = const_cast<void *>(OH_AI_TensorGetData(tensor));
        LOGI("- Tensor %{public}d size is: %{public}d.\n", static_cast<int>(i), dataSize);

        output->elementNum = max_num;
        output->data.resize(dataSize);
        std::copy((uint8_t *)tensorData, (uint8_t *)tensorData + dataSize, output->data.begin());
        msOutput.emplace_back(output);
    }
    
    return 0;
}

// Run one inference: copy `inputData` into the model's single input tensor,
// execute the model, and snapshot the relevant output tensors into `msOutput`.
// Returns 0 on success, -1 on failure.
static int32_t ObjectDetectPredict(OH_AI_ModelHandle model, vector<uint8_t> &inputData,
    vector<PSMSOutputData> &msOutput, int picNo)
{
    // A compiled MindSpore Lite model does not support concurrent inference,
    // so the whole input/predict/output sequence runs under one mutex.
    std::lock_guard<std::mutex> mslock(g_predictMux);
    LOGI("ObjectDetectPredict: start inference picNo=%d", picNo);

    // Fetch and validate the model input (this model has exactly one tensor).
    auto inputTensor = OH_AI_ModelGetInputs(model);
    if (inputTensor.handle_num != 1) {
        LOGE("input tensor number is %d", inputTensor.handle_num);
        return -1;
    }

    size_t inputSize = inputData.size() * sizeof(uint8_t);
    auto tensorSize = OH_AI_TensorGetDataSize(inputTensor.handle_list[0]);
    if (tensorSize < inputSize) {
        LOGE("tensor data buffer isn't enough tensor_size=%d, input=%d", tensorSize, inputSize);
        return -1;
    }
    uint8_t *data = (uint8_t *)OH_AI_TensorGetMutableData(inputTensor.handle_list[0]);
    // Fix: copy only inputSize bytes — copying tensorSize bytes read past the
    // end of inputData whenever the tensor buffer is larger than the input.
    memcpy(data, inputData.data(), inputSize);

    OH_AI_TensorHandleArray tensorOutputs = OH_AI_ModelGetOutputs(model);

    OH_AI_Status status = OH_AI_ModelPredict(model, inputTensor, &tensorOutputs, nullptr, nullptr);
    if (status != OH_AI_STATUS_SUCCESS) {
        LOGE("Predict MSLite model error.\n");
        return -1;
    }

    // Copy the outputs out now so this function returns quickly and the next
    // frame's inference can start while post-processing runs in other threads.
    return ObjectDetectGetMSOutput(tensorOutputs, msOutput);
}

namespace {
    // JS argument positions / counts used by the NAPI handlers below.
    constexpr size_t PARAM0 = 0;
    constexpr size_t PARAM1 = 1;
    constexpr size_t PARAM2 = 2;
    constexpr size_t ARGS_ONE = 1;
    constexpr size_t ARGS_THREE = 3;
    constexpr int SIZE = 1048576; // 1 MiB
// Validate that `args` is an object carrying the numeric width/height/dataSize
// properties of a PicDesc and copy them into `picDescNapi`.
// Returns RETCODE_SUCCESS, RETCODE_JS_NOT_EXISTED_PARAM when the object or a
// property is missing, or RETCODE_JS_INVALID_OPERATION on a napi failure.
int32_t ParsePicDesc(napi_env env, napi_value args, PicDescNapi &picDescNapi) {
    napi_valuetype valueType;
    napi_status status = napi_typeof(env, args, &valueType);
    if ((status != napi_ok) || (valueType != napi_object)) {
        LOGE("PicDesc is invalid.");
        return RETCODE_JS_NOT_EXISTED_PARAM;
    }

    // The three properties were handled by three identical copy-pasted
    // stanzas (with broken indentation); share one reader instead.
    int32_t retCode = RETCODE_SUCCESS;
    auto readInt32 = [&](const char *name, int32_t &value) {
        bool hasProperty = false;
        napi_status st = napi_has_named_property(env, args, name, &hasProperty);
        if (st != napi_ok) {
            LOGE("can not find %{public}s property.", name);
            retCode = RETCODE_JS_INVALID_OPERATION;
            return false;
        }
        if (!hasProperty) {
            retCode = RETCODE_JS_NOT_EXISTED_PARAM;
            return false;
        }
        if (!GetPropertyInt32(env, args, name, value)) {
            retCode = RETCODE_JS_INVALID_OPERATION;
            return false;
        }
        return true;
    };

    int32_t width = 0;
    int32_t height = 0;
    int32_t dataSize = 0;
    if (!readInt32("width", width) || !readInt32("height", height) || !readInt32("dataSize", dataSize)) {
        return retCode;
    }

    picDescNapi.width = width;
    picDescNapi.height = height;
    picDescNapi.dataSize = dataSize;

    return RETCODE_SUCCESS;
}

// Build the JS result payload: result.objects = array of {left, top, right,
// bottom, prop, name} taken from inferResult, plus result.count.
// Returns the napi status observed at the checkpoints, napi_ok on success.
// NOTE(review): within each group the intermediate napi_create_*/
// napi_set_named_property statuses overwrite each other and only the last is
// checked — an early failure in a group can go unnoticed.
napi_status SetInferResult(napi_env env, napi_value result, InferResult& inferResult) {
    napi_value objects = nullptr;
    napi_create_array_with_length(env, inferResult.count, &objects);
    for (int i = 0; i < inferResult.count; i++) {
        napi_value object = nullptr;
        napi_status status = napi_create_object(env, &object);
        if (status != napi_ok) {
            LOGE("napi_create_object failed.");
            return status;
        }
        // Bounding-box edges in original-image coordinates.
        napi_value left  = nullptr;
        status = napi_create_double(env, inferResult.objects[i].left, &left);
        status = napi_set_named_property(env, object, "left", left);

        napi_value top  = nullptr;
        status = napi_create_double(env, inferResult.objects[i].top, &top);
        status = napi_set_named_property(env, object, "top", top);

        napi_value right  = nullptr;
        status = napi_create_double(env, inferResult.objects[i].right, &right);
        status = napi_set_named_property(env, object, "right", right);

        napi_value bottom  = nullptr;
        status = napi_create_double(env, inferResult.objects[i].bottom, &bottom);
        status = napi_set_named_property(env, object, "bottom", bottom);

        // Detection confidence.
        napi_value prop  = nullptr;
        status = napi_create_double(env, inferResult.objects[i].prop, &prop);
        status = napi_set_named_property(env, object, "prop", prop);

        // Class label string.
        napi_value name  = nullptr;
        status = napi_create_string_utf8(env, inferResult.objects[i].name.c_str(), NAPI_AUTO_LENGTH, &name);
        status = napi_set_named_property(env, object, "name", name);

        if (status != napi_ok) {
            LOGE("napi_create_xxx or napi_set_named_property failed. code: %{public}d", status);
            return status;
        }
 
        status = napi_set_element(env, objects, i, object);
        if (status != napi_ok) {
            LOGE("napi_set_element failed. code: %{public}d", status);
            return status;
        }
    }

    napi_status status = napi_set_named_property(env, result, "objects", objects);
    if (status != napi_ok) {
        LOGE("napi_set_named_property failed. code: %{public}d", status);
        return status;
    }

    napi_value count  = nullptr;
    status = napi_create_int32(env, inferResult.count, &count);
    status = napi_set_named_property(env, result, "count", count);
    if (status != napi_ok) {
        LOGE("napi_create_int32 or napi_set_named_property failed. code: %{public}d", status);
        return status;
    }

    return napi_ok;
}

// Run YOLOv5 post-processing over the copied output tensors: decode both
// scales, apply NMS to the merged detections and fill `result`.
// NOTE(review): res[i].w / res[i].h actually hold the right/bottom edges
// (see the push_back in yolo_v5_post_process_onescale), which is why they
// are stored into result.objects[i].right / .bottom here.
static void ObjectDetectParseOutput(vector<PSMSOutputData> &msOutput, vector<float> &imgPad, InferResult &result)
{
    float iou_threshold = 0.45;
    // YOLOv5 anchor pairs; [6..11] belong to stride 16, [12..17] to stride 32.
    float biases[18] = {10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326};
    vector<Bbox> input;
    vector<Bbox> res;
    
    for (auto output : msOutput) {        
        if (output->elementNum == output40) {
            // 40x40 map: convert CHW -> interleaved, then decode at stride 16.
            flatten(output->data.data(), 40 * 40, output_dimension * 3, 1, 1);
            yolo_v5_post_process_onescale(output->data.data(),&biases[6],
                threshold_class,16,imgPad,input);
            continue;
        }
        
        if (output->elementNum == output20) {
            // 20x20 map: stride 32.
            flatten(output->data.data(), 20 * 20, output_dimension * 3, 1, 1);
            yolo_v5_post_process_onescale(output->data.data(), &biases[12],
                threshold_class, 32,imgPad,input);
        }
    }

    res = nms(input, iou_threshold);
    // NOTE(review): result.objects is indexed without a capacity check —
    // confirm InferResult can hold res.size() entries.
    for (int i = 0; i < res.size(); i++) {
        LOGD("result 00000%s :%d %d %d %d %f\n", classes[res[i].classes].c_str(), res[i].x, res[i].y, res[i].w,
             res[i].h, res[i].score);
        result.objects[i].left = res[i].x;
        result.objects[i].top = res[i].y;
        result.objects[i].right = res[i].w;
        result.objects[i].bottom = res[i].h;
        result.objects[i].name = classes[res[i].classes];
        result.objects[i].prop = res[i].score;
    }
    result.count = res.size();
}

// Async worker: preprocess the frame, run inference, parse detections, and
// record status/results on the MsAIAsyncContext for PromiseCompleteCB.
static void ExecuteCB(napi_env env, void *data)
{
    int32_t ret, picNo;
    MsAIAsyncContext *context = (MsAIAsyncContext *)data;
    vector<uint8_t> inputData;
    vector<float> imgPad;
    vector<PSMSOutputData> msOutput;

    picNo = context->picNo;
    LOGI("====================ExecuteCB start picNo=%d===========", picNo);
    ret = ObjectDetectPreprocess(context->picDesc, context->data, inputData, imgPad, picNo);
    if (ret != 0) {
        LOGE("ObjectDetectPreprocess failed");
        context->status = FAIL;
        context->inferResult.count = 0;
        return;
    }
    LOGI("====================ExecuteCB seg 1: preprocess picNo=%d=========\n", picNo);

    ret = ObjectDetectPredict(modelms, inputData, msOutput, picNo);
    if (ret != 0) {
        LOGE("ObjectDetectPredict failed");
        context->status = FAIL;
        context->inferResult.count = 0;
        return;
    }
    LOGI("====================ExecuteCB seg 2: predict picNo=%d=========\n", picNo);

    // ObjectDetectParseOutput returns void; the old `if (ret != 0)` branch
    // after it re-tested the already-zero predict result and was dead code.
    InferResult inferResult{0};
    ObjectDetectParseOutput(msOutput, imgPad, inferResult);
    context->status = SUCCESS;
    context->inferResult = inferResult;
    LOGI("====================ExecuteCB end: parse output picNo=%d=========\n", picNo);
}

// Promise completion callback: builds the JS result object, settles the
// deferred exactly once, then releases all per-request resources.
static void PromiseCompleteCB(napi_env env, napi_status status, void *data) {
    LOGD("PromiseCompleteCB start.");
    MsAIAsyncContext *context = (MsAIAsyncContext *)data;
    napi_value result = nullptr;
    napi_value undefinedResult = nullptr;
    napi_get_undefined(env, &undefinedResult);
    int32_t picNo = context->picNo;

    // Fix: settle the deferred exactly once — the old code kept going after
    // a rejection and could reject or resolve the same deferred again.
    bool settled = false;

    status = napi_create_object(env, &result);
    if (status != napi_ok) {
        LOGE("napi_create_object failed.");
        napi_reject_deferred(env, context->deferred, undefinedResult);
        settled = true;
    }

    if (!settled) {
        status = SetInferResult(env, result, context->inferResult);
        if (status != napi_ok) {
            LOGE("SetInferResult failed.");
            napi_reject_deferred(env, context->deferred, undefinedResult);
            settled = true;
        }
    }

    if (!settled) {
        if (!context->status) {
            LOGE("PromiseCompleteCB failed.");
            napi_reject_deferred(env, context->deferred, undefinedResult);
        } else {
            LOGI("PromiseCompleteCB success.");
            napi_resolve_deferred(env, context->deferred, result);
        }
    }

    // Per-request cleanup always runs, regardless of how the promise settled.
    if (context->callbackRef != nullptr) {
        napi_delete_reference(env, context->callbackRef);
    }
    LOGI("=================PromiseCompleteCB  pic %d inference complete", picNo);
    napi_delete_async_work(env, context->asyncWork);
    delete context;
}

// Copy the picture descriptor, model id and raw image bytes into the async
// context that the worker thread will consume.
static void SetData(std::unique_ptr<MsAIAsyncContext>& asyncContext, int32_t modelId,
 PicDescNapi& picDescNapi, unsigned char *data, size_t dataLen)
{
    auto &desc = asyncContext->picDesc;
    desc.width = picDescNapi.width;
    desc.height = picDescNapi.height;
    desc.dataSize = picDescNapi.dataSize;
    asyncContext->modelId = modelId;

    // assign() performs the resize + copy of the image bytes in one call.
    asyncContext->data.assign(data, data + dataLen);
}

// Parse the JS arguments of ObjectDectionProcess:
//   [0] modelId (int >= 0), [1] PicDesc object, [2] image ArrayBuffer,
//   [3] picture number (int >= 0)
// and fill the async context. Returns 0 on success, -1 on invalid input.
static int ParseNapiParams(napi_env env, napi_callback_info info, std::unique_ptr<MsAIAsyncContext>& context) {
    napi_status status;
    size_t argc = ARGS_THREE + 1;
    napi_value argv[ARGS_THREE + 1] = {nullptr};
    napi_value thisArg;
    int32_t ret = napi_get_cb_info(env, info, &argc, argv, &thisArg, nullptr);
    int32_t modelId = 0;
    PicDescNapi picDescNapi{};
    // Fix: initialize picNo — it was read uninitialized below whenever fewer
    // than four arguments were passed.
    int32_t picNo = 0;
    unsigned char *array_buffer_data = nullptr;
    size_t array_buffer_total = 0;

    for (size_t i = PARAM0; i < argc; i++) {
        if (i == PARAM0) {
            status = napi_get_value_int32(env, argv[i], &modelId);
            if ((status != napi_ok) || (modelId < 0)) {
                return -1;
            }
        } else if (i == PARAM1) {
            ret = ParsePicDesc(env, argv[i], picDescNapi);
            if (ret != RETCODE_SUCCESS) {
                return -1;
            }
        } else if (i == PARAM2) {
            status = napi_get_arraybuffer_info(env, argv[i], reinterpret_cast<void **>(&array_buffer_data),
                                               &array_buffer_total);
            if ((status != napi_ok) || (array_buffer_total <= 0) || (array_buffer_data == nullptr)) {
                LOGE("get image array buffer info failed");
                return -1;
            }
        } else if (i == ARGS_THREE) { // was a bare `3`; use the named constant
            status = napi_get_value_int32(env, argv[i], &picNo);
            if ((status != napi_ok) || (picNo < 0)) {
                return -1;
            }
        } else {
            LOGE("Invalid input params.");
            return -1;
        }
    }

    LOGD("modelId: %{public}d, width: %{public}d, height: %{public}d, dataSize: %{public}d", modelId,
         (int)picDescNapi.width, (int)picDescNapi.height, (int)picDescNapi.dataSize);
    LOGD("array_buffer_total: %{public}d,", (int)array_buffer_total);
    LOGI("===============ObjectDectionProcess start to handle pic %d", picNo);

    SetData(context, modelId, picDescNapi, array_buffer_data, array_buffer_total);
    context->picNo = picNo;
    return 0;
}
}

//读取ms模型文件，指定CPU推理创建好模型并编译模型，modelms为全局变量
// NAPI entry: loads the .ms model from the app's rawfile resources, builds it
// for CPU inference and stores the handle in the global `modelms`.
// argv[0] is the JS ResourceManager object.
// Returns 0 (the model id) to JS on success, -1 on any failure.
static napi_value ObjectDectionInit(napi_env env, napi_callback_info info)
{
    napi_value error_ret;
    napi_create_int32(env, -1, &error_ret);

    GET_PARAMS(env, info, 1);

    const std::string modelName = MODEL_FILE_NAME;
    size_t modelSize = 0;
    auto resourcesManager = OH_ResourceManager_InitNativeResourceManager(env, argv[0]);
    auto modelBuffer = ReadModelFile(resourcesManager, modelName, &modelSize);
    // The native resource manager is only needed while reading the raw file.
    // Release it right away — previously it was never released, leaking the
    // native handle on every Init call.
    OH_ResourceManager_ReleaseNativeResourceManager(resourcesManager);
    if (modelBuffer == nullptr) {
        LOGE("Read model failed");
        return error_ret;
    }
    LOGI("Read model file success");

    modelms = CreateMSLiteModel(modelBuffer, modelSize);
    // The model buffer is copied/consumed during model creation; free it in
    // both the success and failure cases.
    DestroyModelBuffer(&modelBuffer);
    if (modelms == nullptr) {
        LOGE("MSLiteFwk Build model failed.\n");
        return error_ret;
    }
    LOGI("Create model file success");

    // This module manages a single model, so the returned model id is always 0.
    napi_value success_ret;
    napi_create_int32(env, 0, &success_ret);
    LOGI("ObjectDectionInit");

    return success_ret;
}

// NAPI entry: parses the JS arguments into an MsAIAsyncContext, queues the
// detection work onto a worker thread (ExecuteCB) and returns a promise that
// PromiseCompleteCB settles with the result.
// Returns the promise on success, `undefined` on any setup failure.
static napi_value ObjectDectionProcess(napi_env env, napi_callback_info info) {
    napi_status status;
    napi_value undefinedResult = nullptr;
    napi_get_undefined(env, &undefinedResult);

    // make_unique throws on allocation failure, so no null-check is needed.
    std::unique_ptr<MsAIAsyncContext> asyncContext = std::make_unique<MsAIAsyncContext>();

    int ret = ParseNapiParams(env, info, asyncContext);
    if (ret != 0) {
        LOGE("ParseNapiParams failed.");
        return undefinedResult;
    }
    LOGI("=============ObjectDectionProcess parse napi params for picNo %d", asyncContext->picNo);

    napi_value promise = nullptr;
    napi_value resourceName = nullptr;
    napi_create_string_utf8(env, "Process", NAPI_AUTO_LENGTH, &resourceName);
    status = napi_create_promise(env, &asyncContext->deferred, &promise);
    if (status != napi_ok) {
        LOGE("create callback failed.");
        return undefinedResult;
    }

    status = napi_create_async_work(env, nullptr, resourceName, ExecuteCB, PromiseCompleteCB,
                                    static_cast<void *>(asyncContext.get()), &asyncContext->asyncWork);
    if (status != napi_ok) {
        LOGE("napi_create_async_work failed.");
        return undefinedResult;
    }

    status = napi_queue_async_work(env, asyncContext->asyncWork);
    if (status != napi_ok) {
        LOGE("===================napi_queue_async_work failed.");
        // The work item was created but never queued: free it here, otherwise
        // the napi_async_work handle leaks (the context itself is freed by the
        // unique_ptr destructor).
        napi_delete_async_work(env, asyncContext->asyncWork);
        return undefinedResult;
    }
    // Ownership of the context passes to the queued work; the completion
    // callback is responsible for freeing it.
    asyncContext.release();
    LOGD("===================napi_queue_async_work ok.");

    LOGD("process async end.");
    return promise;
}

// NAPI entry: sets the detection confidence threshold. Expects one numeric
// argument in percent (e.g. 45); stores it into the global `threshold_class`
// as a fraction (0.45). Returns the percent value back to JS, or nullptr on
// any argument error.
static napi_value setconf(napi_env env, napi_callback_info info) {
    if ((nullptr == env) || (nullptr == info)) {
        return nullptr;
    }

    // Number of parameters.
    size_t argc = 1;

    // Declare parameter array.
    napi_value args[1] = {nullptr};

    // Gets the arguments passed in and puts them in the argument array.
    if (napi_ok != napi_get_cb_info(env, info, &argc, args, nullptr, nullptr)) {
        return nullptr;
    }
    // Guard against a zero-argument call: args[0] would remain nullptr.
    if (argc < 1) {
        return nullptr;
    }

    // Converts the argument passed in to type double.
    double valueX = 25;
    if (napi_ok != napi_get_value_double(env, args[0], &valueX)) {
        return nullptr;
    }

    LOGI("================prethreshold_class:%f", threshold_class);
    // Convert percent -> fraction.
    // NOTE(review): threshold_class is presumably also read by the async
    // prediction path; this unsynchronized write may race with that read —
    // consider guarding with g_predictMux. TODO confirm against ExecuteCB.
    threshold_class = valueX / 100;
    LOGI("================threshold_class:%f valueX:%f", threshold_class, valueX);

    napi_value napiResult;
    if (napi_ok != napi_create_double(env, valueX, &napiResult)) {
        return nullptr;
    }
    return napiResult;
}

// NAPI entry: releases the global MindSpore Lite model created by
// ObjectDectionInit. Safe to call when no model is loaded; always returns 0.
static napi_value ObjectDectionDeInit(napi_env env, napi_callback_info info) {
    LOGI("ObjectDectionDeinit");

    if (nullptr != modelms) {
        OH_AI_ModelDestroy(&modelms);
        // Clear the handle so a double DeInit is a no-op.
        modelms = nullptr;
    }

    napi_value okValue;
    napi_create_int32(env, 0, &okValue);
    return okValue;
}

EXTERN_C_START
// Module registration callback: attaches the JS-visible methods to exports.
static napi_value Init(napi_env env, napi_value exports) {
    const napi_property_descriptor properties[] = {
        {"setconf", nullptr, setconf, nullptr, nullptr, nullptr, napi_default, nullptr},
        {"Init", nullptr, ObjectDectionInit, nullptr, nullptr, nullptr, napi_default, nullptr},
        {"Process", nullptr, ObjectDectionProcess, nullptr, nullptr, nullptr, napi_default, nullptr},
        {"DeInit", nullptr, ObjectDectionDeInit, nullptr, nullptr, nullptr, napi_default, nullptr},
    };
    const size_t propertyCount = sizeof(properties) / sizeof(properties[0]);
    napi_define_properties(env, exports, propertyCount, properties);
    return exports;
}
EXTERN_C_END

/*
 * NAPI module definition
 */
static napi_module msLiteModule = {
    .nm_version = 1,
    .nm_flags = 0,
    .nm_filename = nullptr,
    // Called by the NAPI runtime on load to attach the exported methods.
    .nm_register_func = Init,
    // Module name the JS side uses when importing this native library.
    .nm_modname = "mslite_napi",
    .nm_priv = ((void *)0),
    .reserved = {0},
};

/*
 * Module registration function
 */
// Runs automatically at library load time (constructor attribute) and
// registers the NAPI module with the runtime.
extern "C" __attribute__((constructor)) void RegisterModule(void)
{
    napi_module_register(&msLiteModule);
}
