//
// Created on 2025/5/14; revised 2025/10/15.
//
// Node APIs are not fully supported. To solve the compilation error of the interface cannot be found,
// please include "napi/native_api.h".

#include "ms_model.h"

#include "common.h"

#include <algorithm>
#include <cinttypes>
#include <cstring>
#include <string>
#include <vector>
// Approach 1: flatten all elements into a one-dimensional float array.
// Flatten a 3-D nested vector into a newly-allocated contiguous float array,
// in outer -> middle -> inner order.
//
// Ownership: the caller owns the returned buffer and must release it with
// delete[]. Returns a valid (possibly zero-length) allocation for empty input.
// Ragged inner vectors are supported; the total size is the sum of all
// innermost sizes.
float *convertToFlatFloatArray(const std::vector<std::vector<std::vector<float>>> &data) {
    // First pass: count the total number of scalar elements.
    size_t totalElements = 0;
    for (const auto &outerVec : data) {
        for (const auto &middleVec : outerVec) {
            totalElements += middleVec.size();
        }
    }

    // Single contiguous allocation for the flattened result.
    float *result = new float[totalElements];

    // Second pass: copy each innermost row as one contiguous chunk.
    size_t index = 0;
    for (const auto &outerVec : data) {
        for (const auto &middleVec : outerVec) {
            std::copy(middleVec.begin(), middleVec.end(), result + index);
            index += middleVec.size();
        }
    }

    return result;
}
int MSModel::FillInputTensor(size_t tensor_index, std::vector<std::vector<std::vector<float>>> &input_data) {
    GetHandle();
    if (handle == nullptr) {
        LOGE("FillInputTensor error, there is no model handle\n");
        return -1;
    }
    auto inputs = OH_AI_ModelGetInputs(handle);
    if (tensor_index >= inputs.handle_num)
        return -1;


    // 分配连续内存
    size_t totalElements = input_data[0].size() * input_data[0][0].size() * input_data.size();
    LOGI("dim0: %{public}d, dim1: %{public}d, dime2: %{public}d", input_data.size(), input_data[0].size(),
         input_data[0][0].size());
    // 分配连续内存
    float vec_data[totalElements];

    // 复制数据
    size_t index = 0;
    for (const auto &outerVec : input_data) {
        for (const auto &middleVec : outerVec) {
            for (float value : middleVec) {
                vec_data[index++] = value;
            }
        }
    }
    std::vector<OH_AI_ShapeInfo> shape_infos = {
        {4, {1, int64_t(input_data.size()), int64_t(input_data[0].size()), int64_t(input_data[0][0].size())}},
    };
    OH_AI_ModelResize(handle, inputs, shape_infos.data(), shape_infos.size());
    

    LOGI("tensor size %{public}d", OH_AI_TensorGetElementNum(inputs.handle_list[0]));
    LOGI("totalElements: %{public}d", totalElements);
    float *data = (float *)OH_AI_TensorGetMutableData(inputs.handle_list[tensor_index]);
        // 使用 memcpy()
    memcpy(data, vec_data, totalElements * sizeof(float));
    // LOGI("FillInputTensor success! inputs index: %{public}d\n", tensor_index);

    return OH_AI_STATUS_SUCCESS;
}

void MSModel::CreateMsLiteModelCPUAsync(const std::string file_name) {
    future_load_model_ = std::async([file_name]() -> OH_AI_ModelHandle {
        OH_AI_ModelHandle handle = CreateMSLiteModelCPU(file_name);
        LOGI("cpu model %{public}s load success!\n", file_name.c_str());
        return handle;
    });
}

void MSModel::CreateMsLiteModelNNRTAsync(const std::string file_name) {
    future_load_model_ = std::async([file_name]() -> OH_AI_ModelHandle {
        OH_AI_ModelHandle handle = CreateMSLiteModelNNRT(file_name);
        LOGI("nnrt model %{public}s load success!\n", file_name.c_str());
        return handle;
    });
}

// Build a MindSpore Lite model that runs inference on an NNRT accelerator
// (e.g. NPU) in high-performance mode.
//
// Params:  file_name - path to the .ms (MINDIR) model file.
// Returns: a model handle on success, nullptr on any failure.
OH_AI_ModelHandle MSModel::CreateMSLiteModelNNRT(const std::string file_name) {
    // Set executing context for model.
    OH_AI_ContextHandle context = OH_AI_ContextCreate();
    if (context == NULL) {
        LOGE("MS_LITE_ERR: OH_AI_ContextCreate failed.\n");
        return nullptr;
    }
    // Prefer NNRT inference: take the first ACCELERATOR-class NNRT device and
    // configure it for high-performance mode. (OH_AI_GetAllNNRTDeviceDescs()
    // could instead be used to select a specific device by name/type.)
    OH_AI_DeviceInfoHandle nnrt_device_info = OH_AI_CreateNNRTDeviceInfoByType(OH_AI_NNRTDEVICE_ACCELERATOR);
    if (nnrt_device_info == NULL) {
        LOGE("MS_LITE_ERR: OH_AI_DeviceInfoCreate failed.\n");
        OH_AI_ContextDestroy(&context);
        return nullptr;
    }
    OH_AI_DeviceInfoSetPerformanceMode(nnrt_device_info, OH_AI_PERFORMANCE_HIGH);
    OH_AI_ContextAddDeviceInfo(context, nnrt_device_info);

    // Create model
    auto model = OH_AI_ModelCreate();
    if (model == nullptr) {
        LOGE("MS_LITE_ERR: Allocate MSLite Model failed.\n");
        OH_AI_ContextDestroy(&context); // was leaked on this path before
        return nullptr;
    }

    // Build model object. After a successful build the context belongs to the
    // model, so it is only destroyed explicitly on pre-build failure paths.
    auto build_ret = OH_AI_ModelBuildFromFile(model, file_name.c_str(), OH_AI_MODELTYPE_MINDIR, context);
    if (build_ret != OH_AI_STATUS_SUCCESS) {
        OH_AI_ModelDestroy(&model);
        LOGE("MS_LITE_ERR: Build MSLite model failed.\n");
        return nullptr;
    }
    LOGI("MS_LITE_LOG: Build MSLite model success.\n");
    return model;
}

// Build a MindSpore Lite model that runs inference on CPU (2 threads,
// FP16 disabled).
//
// Params:  file_name - path to the .ms (MINDIR) model file.
// Returns: a model handle on success, nullptr on any failure.
OH_AI_ModelHandle MSModel::CreateMSLiteModelCPU(const std::string file_name) {
    // Set executing context for model.
    auto context = OH_AI_ContextCreate();
    if (context == nullptr) {
        LOGE("MS_LITE_ERR: Create MSLite context failed.\n");
        return nullptr;
    }
    const int thread_num = 2;
    OH_AI_ContextSetThreadNum(context, thread_num);
    // Affinity mode 1 — NOTE(review): presumably binds threads to big cores;
    // confirm against the OH_AI_ContextSetThreadAffinityMode documentation.
    OH_AI_ContextSetThreadAffinityMode(context, 1);
    // Run on CPU without Float16 inference.
    OH_AI_DeviceInfoHandle cpu_device_info = OH_AI_DeviceInfoCreate(OH_AI_DEVICETYPE_CPU);
    if (cpu_device_info == NULL) {
        LOGE("MS_LITE_ERR: OH_AI_DeviceInfoCreate failed.\n");
        OH_AI_ContextDestroy(&context);
        return nullptr;
    }
    OH_AI_DeviceInfoSetEnableFP16(cpu_device_info, false);
    OH_AI_ContextAddDeviceInfo(context, cpu_device_info);

    // Create model
    auto model = OH_AI_ModelCreate();
    if (model == nullptr) {
        LOGE("MS_LITE_ERR: Allocate MSLite Model failed.\n");
        OH_AI_ContextDestroy(&context); // was leaked on this path before
        return nullptr;
    }

    // Build model object. After a successful build the context belongs to the
    // model, so it is only destroyed explicitly on pre-build failure paths.
    auto build_ret = OH_AI_ModelBuildFromFile(model, file_name.c_str(), OH_AI_MODELTYPE_MINDIR, context);
    if (build_ret != OH_AI_STATUS_SUCCESS) {
        OH_AI_ModelDestroy(&model);
        LOGE("MS_LITE_ERR: Build MSLite model failed.\n");
        return nullptr;
    }
    LOGI("MS_LITE_LOG: Build MSLite model success.\n");
    return model;
}


// Switch the inference backend: destroy the current model handle (if any) and
// start loading the requested backend asynchronously. Note the log fires when
// the switch is initiated, not when the background load completes.
void MSModel::SwitchPredictContext(const std::string file_name, bool is_enable_npu) {
    // Claim any pending async-loaded handle so we destroy the live one.
    GetHandle();
    if (handle != nullptr) {
        OH_AI_ModelDestroy(&handle);
        handle = nullptr;
    }
    // Dispatch to the requested backend loader.
    is_enable_npu ? CreateMsLiteModelNNRTAsync(file_name) : CreateMsLiteModelCPUAsync(file_name);
    LOGI("SwitchPredictContext %{public}s success", is_enable_npu ? "npu" : "cpu");
}

const float *MSModel::Predict() {
    auto inputs = OH_AI_ModelGetInputs(handle);
    auto outputs = OH_AI_ModelGetOutputs(handle);
    LOGI("MS_LITE_LOG: Run MSLite model Predict ....\n");

    // Predict model.
    auto predict_ret = OH_AI_ModelPredict(handle, inputs, &outputs, nullptr, nullptr);
    if (predict_ret != OH_AI_STATUS_SUCCESS) {
        LOGE("MS_LITE_LOG: Run MSLite model Predict faild.\n");
    }
    LOGI("MS_LITE_LOG: Run MSLite model Predict success.\n");

    auto tensor = outputs.handle_list[0];
    auto num = (int)OH_AI_TensorGetElementNum(tensor);
    auto out_data = reinterpret_cast<const float *>(OH_AI_TensorGetData(tensor));
    // std::memcpy(out_data, image_embedding_, num);
    size_t shape_num;
    const int64_t *shape = OH_AI_TensorGetShape(tensor, &shape_num);
    for (int i = 0; i < shape_num; ++i) {
        LOGI("shape %{public}d: %{public}d", i, shape[i]);
    }

    LOGI("%{public}s has been save.\n", OH_AI_TensorGetName(tensor));
    return out_data;
}


// Lazily claim the model handle produced by an async load. No-op when a
// handle is already held or no load is pending.
void MSModel::GetHandle() {
    if (handle != nullptr) {
        return;
    }
    if (future_load_model_.valid()) {
        // Blocks until the background load finishes, then takes the result.
        handle = future_load_model_.get();
    }
}
