#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

#include "hilog/log.h"
#include "neural_network_runtime/neural_network_runtime.h"

#define LOG_DOMAIN 0xD002101
#define LOG_TAG "NNRt"
// Thin wrappers over the HiLog macros so call sites stay short.
#define LOGD(...) OH_LOG_DEBUG(LOG_APP, __VA_ARGS__)
#define LOGI(...) OH_LOG_INFO(LOG_APP, __VA_ARGS__)
#define LOGW(...) OH_LOG_WARN(LOG_APP, __VA_ARGS__)
#define LOGE(...) OH_LOG_ERROR(LOG_APP, __VA_ARGS__)
#define LOGF(...) OH_LOG_FATAL(LOG_APP, __VA_ARGS__)

// Return-value check macros.
// CHECKNEQ: if realRet != expectRet, print the message and return retValue
// from the ENCLOSING function. retValue must be convertible to the enclosing
// function's return type.
#define CHECKNEQ(realRet, expectRet, retValue, ...) \
    do { \
        if ((realRet) != (expectRet)) { \
            printf(__VA_ARGS__); \
            return (retValue); \
        } \
    } while (0)

// CHECKEQ: if realRet == expectRet (typically a nullptr check), print the
// message and return retValue from the enclosing function.
#define CHECKEQ(realRet, expectRet, retValue, ...) \
    do { \
        if ((realRet) == (expectRet)) { \
            printf(__VA_ARGS__); \
            return (retValue); \
        } \
    } while (0)

// 设置输入数据用于推理
// Fill each input tensor with the ramp 0, 1, 2, ... in its own data type.
// Only OH_NN_FLOAT32 and OH_NN_INT32 tensors are supported; any other type
// makes the whole call fail with OH_NN_FAILED.
OH_NN_ReturnCode SetInputData(NN_Tensor* inputTensor[], size_t inputSize)
{
    for (size_t idx = 0; idx < inputSize; ++idx) {
        // Raw data buffer of the tensor.
        auto buffer = OH_NNTensor_GetDataBuffer(inputTensor[idx]);
        CHECKEQ(buffer, nullptr, OH_NN_FAILED, "Failed to get data buffer.");
        // Tensor description (type + element count live here).
        auto tensorDesc = OH_NNTensor_GetTensorDesc(inputTensor[idx]);
        CHECKEQ(tensorDesc, nullptr, OH_NN_FAILED, "Failed to get desc.");

        OH_NN_DataType type{OH_NN_FLOAT32};
        OH_NN_ReturnCode rc = OH_NNTensorDesc_GetDataType(tensorDesc, &type);
        CHECKNEQ(rc, OH_NN_SUCCESS, OH_NN_FAILED, "Failed to get data type.");

        size_t count = 0;
        rc = OH_NNTensorDesc_GetElementNum(tensorDesc, &count);
        CHECKNEQ(rc, OH_NN_SUCCESS, OH_NN_FAILED, "Failed to get element number.");

        if (type == OH_NN_FLOAT32) {
            auto* values = reinterpret_cast<float*>(buffer);
            for (size_t k = 0; k < count; ++k) {
                values[k] = static_cast<float>(k);
            }
        } else if (type == OH_NN_INT32) {
            auto* values = reinterpret_cast<int*>(buffer);
            for (size_t k = 0; k < count; ++k) {
                values[k] = static_cast<int>(k);
            }
        } else {
            // Unsupported element type.
            return OH_NN_FAILED;
        }
    }
    return OH_NN_SUCCESS;
}

// Dump every element of every output tensor to stdout, one line per element.
// Supports OH_NN_FLOAT32 and OH_NN_INT32; any other type fails the call.
OH_NN_ReturnCode Print(NN_Tensor* outputTensor[], size_t outputSize)
{
    for (size_t idx = 0; idx < outputSize; ++idx) {
        auto buffer = OH_NNTensor_GetDataBuffer(outputTensor[idx]);
        CHECKEQ(buffer, nullptr, OH_NN_FAILED, "Failed to get data buffer");
        auto tensorDesc = OH_NNTensor_GetTensorDesc(outputTensor[idx]);
        CHECKEQ(tensorDesc, nullptr, OH_NN_FAILED, "Failed to get desc");

        OH_NN_DataType type{OH_NN_FLOAT32};
        OH_NN_ReturnCode rc = OH_NNTensorDesc_GetDataType(tensorDesc, &type);
        CHECKNEQ(rc, OH_NN_SUCCESS, OH_NN_FAILED, "Failed to get data type");

        size_t count = 0;
        rc = OH_NNTensorDesc_GetElementNum(tensorDesc, &count);
        CHECKNEQ(rc, OH_NN_SUCCESS, OH_NN_FAILED, "Failed to get element num");

        if (type == OH_NN_FLOAT32) {
            auto* values = reinterpret_cast<float*>(buffer);
            for (size_t k = 0; k < count; ++k) {
                std::cout << "Output index: " << k << ", value is: " << values[k] << "." << std::endl;
            }
        } else if (type == OH_NN_INT32) {
            auto* values = reinterpret_cast<int*>(buffer);
            for (size_t k = 0; k < count; ++k) {
                std::cout << "Output index: " << k << ", value is: " << values[k] << "." << std::endl;
            }
        } else {
            // Unsupported element type.
            return OH_NN_FAILED;
        }
    }

    return OH_NN_SUCCESS;
}

// Build a single-operator Add model:
//   inputs : two float32 tensors of shape [1, 2, 2, 3] (indices 0, 1)
//   param  : one int8 scalar selecting the fused activation (index 2)
//   output : one float32 tensor of shape [1, 2, 2, 3] (index 3)
// On success, *pmodel receives the finished model; the caller owns it and must
// destroy it with OH_NNModel_Destroy.
// NOTE(review): the early-return check macros leak `model` and any pending
// TensorDesc on failure — acceptable for a sample, not for production code.
OH_NN_ReturnCode BuildModel(OH_NNModel** pmodel)
{
    // Create the model instance to start graph construction.
    OH_NNModel* model = OH_NNModel_Construct();
    CHECKEQ(model, nullptr, OH_NN_FAILED, "Create model failed.");

    // Fix: returnCode was used without a declaration; also return
    // OH_NN_FAILED instead of -1 (this function returns OH_NN_ReturnCode).
    OH_NN_ReturnCode returnCode = OH_NN_FAILED;

    // First input tensor of Add: float32, shape [1, 2, 2, 3].
    NN_TensorDesc* tensorDesc = OH_NNTensorDesc_Create();
    CHECKEQ(tensorDesc, nullptr, OH_NN_FAILED, "Create TensorDesc failed.");

    int32_t inputDims[4] = {1, 2, 2, 3};
    returnCode = OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set TensorDesc shape failed.");

    returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set TensorDesc data type failed.");

    returnCode = OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set TensorDesc format failed.");

    returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Add first TensorDesc to model failed.");

    returnCode = OH_NNModel_SetTensorType(model, 0, OH_NN_TENSOR);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set model tensor type failed.");

    // Second input tensor of Add: float32, shape [1, 2, 2, 3].
    tensorDesc = OH_NNTensorDesc_Create();
    CHECKEQ(tensorDesc, nullptr, OH_NN_FAILED, "Create TensorDesc failed.");

    returnCode = OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set TensorDesc shape failed.");

    returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set TensorDesc data type failed.");

    returnCode = OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set TensorDesc format failed.");

    returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Add second TensorDesc to model failed.");

    returnCode = OH_NNModel_SetTensorType(model, 1, OH_NN_TENSOR);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set model tensor type failed.");

    // Parameter tensor of Add: an int8 scalar selecting the activation type.
    tensorDesc = OH_NNTensorDesc_Create();
    CHECKEQ(tensorDesc, nullptr, OH_NN_FAILED, "Create TensorDesc failed.");

    int32_t activationDims = 1;
    returnCode = OH_NNTensorDesc_SetShape(tensorDesc, &activationDims, 1);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set TensorDesc shape failed.");

    returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_INT8);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set TensorDesc data type failed.");

    returnCode = OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set TensorDesc format failed.");

    returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Add third TensorDesc to model failed.");

    returnCode = OH_NNModel_SetTensorType(model, 2, OH_NN_ADD_ACTIVATIONTYPE);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set model tensor type failed.");

    // OH_NN_FUSED_NONE: the operator fuses no activation function.
    int8_t activationValue = OH_NN_FUSED_NONE;
    returnCode = OH_NNModel_SetTensorData(model, 2, &activationValue, sizeof(int8_t));
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set model tensor data failed.");

    // Output tensor of Add: float32, shape [1, 2, 2, 3].
    tensorDesc = OH_NNTensorDesc_Create();
    CHECKEQ(tensorDesc, nullptr, OH_NN_FAILED, "Create TensorDesc failed.");

    returnCode = OH_NNTensorDesc_SetShape(tensorDesc, inputDims, 4);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set TensorDesc shape failed.");

    returnCode = OH_NNTensorDesc_SetDataType(tensorDesc, OH_NN_FLOAT32);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set TensorDesc data type failed.");

    returnCode = OH_NNTensorDesc_SetFormat(tensorDesc, OH_NN_FORMAT_NONE);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set TensorDesc format failed.");

    returnCode = OH_NNModel_AddTensorToModel(model, tensorDesc);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Add fourth TensorDesc to model failed.");

    returnCode = OH_NNModel_SetTensorType(model, 3, OH_NN_TENSOR);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Set model tensor type failed.");

    // Input, parameter and output indices of the Add operator.
    // Fix: OH_NN_UInt32Array.size is the ELEMENT count, not a byte count —
    // the original passed 2 * 4 / 1 * 4.
    uint32_t inputIndicesValues[2] = {0, 1};
    uint32_t paramIndicesValues = 2;
    uint32_t outputIndicesValues = 3;
    OH_NN_UInt32Array paramIndices = {&paramIndicesValues, 1};
    OH_NN_UInt32Array inputIndices = {inputIndicesValues, 2};
    OH_NN_UInt32Array outputIndices = {&outputIndicesValues, 1};

    // Add the Add operator to the model instance.
    returnCode = OH_NNModel_AddOperation(model, OH_NN_OPS_ADD, &paramIndices, &inputIndices, &outputIndices);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Add operation to model failed.");

    // Declare the model's overall inputs and outputs.
    returnCode = OH_NNModel_SpecifyInputsAndOutputs(model, &inputIndices, &outputIndices);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Specify model inputs and outputs failed.");

    // Finish graph construction.
    returnCode = OH_NNModel_Finish(model);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Build model failed.");

    // Hand the finished model to the caller.
    *pmodel = model;
    return OH_NN_SUCCESS;
}

void GetAvailableDevices(std::vector<size_t>& availableDevice)
{
    availableDevice.clear();

    // 获取可用的硬件ID
    const size_t* devices = nullptr;
    uint32_t deviceCount = 0;
    OH_NN_ReturnCode ret = OH_NNDevice_GetAllDevicesID(&devices, &deviceCount);
    if (ret != OH_NN_SUCCESS) {
        std::cout << "GetAllDevicesID failed, get no available device." << std::endl;
        return;
    }

    for (uint32_t i = 0; i < deviceCount; i++) {
        availableDevice.emplace_back(devices[i]);
    }
}

// Compile `model` for the first device in `availableDevice` and hand the
// compilation back through *pCompilation.
// On SUCCESS this function destroys `model` (the caller's pointer becomes
// dangling and must not be destroyed again); on failure `model` is left
// intact for the caller to destroy.
OH_NN_ReturnCode CreateCompilation(OH_NNModel* model, const std::vector<size_t>& availableDevice,
    OH_NNCompilation** pCompilation)
{
    // Create the compilation instance from the constructed model.
    OH_NNCompilation* compilation = OH_NNCompilation_Construct(model);
    CHECKEQ(compilation, nullptr, OH_NN_FAILED, "OH_NNCore_ConstructCompilationWithNNModel failed.");

    // Fix: returnCode was used without a declaration; also return
    // OH_NN_FAILED instead of -1 (this function returns OH_NN_ReturnCode).
    OH_NN_ReturnCode returnCode = OH_NN_FAILED;

    // Compile on the first available device.
    returnCode = OH_NNCompilation_SetDevice(compilation, availableDevice[0]);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "OH_NNCompilation_SetDevice failed.");

    // Cache the compiled model under /data/local/tmp with cache version 1.
    returnCode = OH_NNCompilation_SetCache(compilation, "/data/local/tmp", 1);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "OH_NNCompilation_SetCache failed.");

    // Hardware performance mode.
    returnCode = OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_EXTREME);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "OH_NNCompilation_SetPerformanceMode failed.");

    // Inference scheduling priority.
    returnCode = OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_HIGH);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "OH_NNCompilation_SetPriority failed.");

    // Keep float16 low-precision computation disabled.
    returnCode = OH_NNCompilation_EnableFloat16(compilation, false);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "OH_NNCompilation_EnableFloat16 failed.");

    // Run the actual compilation.
    returnCode = OH_NNCompilation_Build(compilation);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "OH_NNCompilation_Build failed.");

    // The model is no longer needed once compiled.
    OH_NNModel_Destroy(&model);

    *pCompilation = compilation;
    return OH_NN_SUCCESS;
}

// Create an executor from a finished compilation, then destroy the
// compilation. Returns nullptr on failure (compilation left intact so the
// caller can destroy it). On success the caller's compilation pointer is
// dangling and must not be destroyed again.
OH_NNExecutor* CreateExecutor(OH_NNCompilation* compilation)
{
    // Fix: the original returned -1 from a pointer-returning function;
    // the failure value must be nullptr.
    OH_NNExecutor* executor = OH_NNExecutor_Construct(compilation);
    CHECKEQ(executor, nullptr, nullptr, "OH_NNExecutor_Construct failed.");

    // The compilation is no longer needed once the executor exists.
    OH_NNCompilation_Destroy(&compilation);

    return executor;
}

// Run one synchronous inference on `executor`: create input/output tensors,
// fill the inputs with ramp data, run, print the outputs, and clean up.
// `deviceID` selects the device the tensors are allocated on. Fix: the
// original referenced an undeclared `deviceID`; it is now a parameter with a
// backward-compatible default of 0 (pass an ID from
// OH_NNDevice_GetAllDevicesID to target a specific device).
// NOTE(review): the early-return check macros leak tensors/descriptions
// created before a failure — acceptable for a sample, not for production.
OH_NN_ReturnCode Run(OH_NNExecutor* executor, size_t deviceID = 0)
{
    // Fix: returnCode was used without a declaration; also return
    // OH_NN_FAILED instead of -1 (this function returns OH_NN_ReturnCode).
    OH_NN_ReturnCode returnCode = OH_NN_FAILED;

    // Number of input tensors expected by the executor.
    size_t inputCount = 0;
    returnCode = OH_NNExecutor_GetInputNum(executor, &inputCount);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "OH_NNExecutor_GetInputNum failed.");
    std::vector<NN_TensorDesc*> inputTensorDescs;
    for (size_t i = 0; i < inputCount; ++i) {
        // Description of input tensor i.
        NN_TensorDesc* desc = OH_NNExecutor_CreateInputTensorDesc(executor, i);
        CHECKEQ(desc, nullptr, OH_NN_FAILED, "OH_NNExecutor_CreateInputTensorDesc failed.");
        inputTensorDescs.emplace_back(desc);
    }

    // Number of output tensors produced by the executor.
    size_t outputCount = 0;
    returnCode = OH_NNExecutor_GetOutputNum(executor, &outputCount);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "OH_NNExecutor_GetOutputNum failed.");
    std::vector<NN_TensorDesc*> outputTensorDescs;
    for (size_t i = 0; i < outputCount; ++i) {
        // Description of output tensor i.
        NN_TensorDesc* desc = OH_NNExecutor_CreateOutputTensorDesc(executor, i);
        CHECKEQ(desc, nullptr, OH_NN_FAILED, "OH_NNExecutor_CreateOutputTensorDesc failed.");
        outputTensorDescs.emplace_back(desc);
    }

    // Create the input and output tensors.
    // Fix: std::vector replaces the non-standard C99-style VLAs.
    std::vector<NN_Tensor*> inputTensors(inputCount, nullptr);
    for (size_t i = 0; i < inputCount; ++i) {
        inputTensors[i] = OH_NNTensor_Create(deviceID, inputTensorDescs[i]);
        CHECKEQ(inputTensors[i], nullptr, OH_NN_FAILED, "OH_NNTensor_Create failed.");
    }
    std::vector<NN_Tensor*> outputTensors(outputCount, nullptr);
    for (size_t i = 0; i < outputCount; ++i) {
        outputTensors[i] = OH_NNTensor_Create(deviceID, outputTensorDescs[i]);
        CHECKEQ(outputTensors[i], nullptr, OH_NN_FAILED, "OH_NNTensor_Create failed.");
    }

    // Fill the input tensors with test data.
    returnCode = SetInputData(inputTensors.data(), inputCount);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "SetInputData failed.");

    // Execute the inference synchronously.
    returnCode = OH_NNExecutor_RunSync(executor, inputTensors.data(), inputCount,
                                       outputTensors.data(), outputCount);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "OH_NNExecutor_RunSync failed.");

    // Print the output data. Fix: the original ignored Print's return value.
    returnCode = Print(outputTensors.data(), outputCount);
    CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "Print failed.");

    // Destroy the tensors and their descriptions.
    for (size_t i = 0; i < inputCount; ++i) {
        returnCode = OH_NNTensor_Destroy(&inputTensors[i]);
        CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "OH_NNTensor_Destroy failed.");
        returnCode = OH_NNTensorDesc_Destroy(&inputTensorDescs[i]);
        CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "OH_NNTensorDesc_Destroy failed.");
    }
    for (size_t i = 0; i < outputCount; ++i) {
        returnCode = OH_NNTensor_Destroy(&outputTensors[i]);
        CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "OH_NNTensor_Destroy failed.");
        returnCode = OH_NNTensorDesc_Destroy(&outputTensorDescs[i]);
        CHECKNEQ(returnCode, OH_NN_SUCCESS, OH_NN_FAILED, "OH_NNTensorDesc_Destroy failed.");
    }

    return OH_NN_SUCCESS;
}

// Demo entry point: build the Add model, compile it for the first available
// device, run one inference, and clean up. Returns 0 on success, -1 on any
// failure.
int main() {
    OH_NNCompilation* compilation = nullptr;
    OH_NNExecutor* executor = nullptr;
    std::vector<size_t> availableDevices;

    // Build the model.
    // Fix: `model` was declared twice (redefinition, compile error).
    OH_NNModel* model = nullptr;
    OH_NN_ReturnCode ret = BuildModel(&model);
    if (ret != OH_NN_SUCCESS) {
        std::cout << "BuildModel failed." << std::endl;
        OH_NNModel_Destroy(&model);
        return -1;
    }

    // Discover devices the model can run on.
    GetAvailableDevices(availableDevices);
    if (availableDevices.empty()) {
        std::cout << "No available device." << std::endl;
        OH_NNModel_Destroy(&model);
        return -1;
    }

    // Compile the model (consumes `model` on success).
    ret = CreateCompilation(model, availableDevices, &compilation);
    if (ret != OH_NN_SUCCESS) {
        std::cout << "CreateCompilation failed." << std::endl;
        OH_NNModel_Destroy(&model);
        OH_NNCompilation_Destroy(&compilation);
        return -1;
    }

    // Create the inference executor (consumes `compilation` on success).
    executor = CreateExecutor(compilation);
    if (executor == nullptr) {
        std::cout << "CreateExecutor failed, no executor is created." << std::endl;
        OH_NNCompilation_Destroy(&compilation);
        return -1;
    }

    // Run inference with the executor created above.
    ret = Run(executor);
    if (ret != OH_NN_SUCCESS) {
        std::cout << "Run failed." << std::endl;
        OH_NNExecutor_Destroy(&executor);
        return -1;
    }

    // Destroy the executor.
    OH_NNExecutor_Destroy(&executor);

    return 0;
}