/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


#include "nnexecutor.h"

#include <utility>

#include "nntensor.h"
// #include "nnbackend.h"
// #include "backend_manager.h"
#include "common/log.h"
#include "cpp_type.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
// Constructs an executor bound to a compiled model (preparedModel) on the device
// identified by deviceID. The input/output tensor descriptions are captured so
// later shape/metadata queries do not need to go back to the backend.
// Heavy by-value parameters (shared_ptrs and descriptor vectors) are moved into
// the members instead of being copied a second time.
NNExecutor::NNExecutor(size_t deviceID, std::shared_ptr<Device> device, std::shared_ptr<PreparedModel> preparedModel,
    std::vector<std::shared_ptr<TensorDesc>> inputTensorDescs,
    std::vector<std::shared_ptr<TensorDesc>> outputTensorDescs)
    : m_deviceID(deviceID),
      m_device(std::move(device)),
      m_preparedModel(std::move(preparedModel)),
      m_inputTensorDescs(std::move(inputTensorDescs)),
      m_outputTensorDescs(std::move(outputTensorDescs))
{
}

// Queries the backend for the legal dimension range of input `inputIndex`.
// On success, *minInputDims/*maxInputDims point at arrays of *shapeNum elements.
//
// BUG FIX: the previous implementation returned `minInputDimVec.data()` where
// `minInputDimVec` was a function-local vector — the out-pointers dangled as
// soon as this function returned. It also cast uint32_t* to size_t*, whose
// element widths differ on LP64 targets. The dims are now widened element-wise
// into storage that outlives the call. The pointers stay valid until the next
// GetInputDimRange call on the same thread.
// NOTE(review): mutable member caches would be the cleaner home for this
// storage — confirm against the class declaration before moving it there.
OH_NN_ReturnCode NNExecutor::GetInputDimRange(
    size_t inputIndex, size_t** minInputDims, size_t** maxInputDims, size_t* shapeNum) const
{
    if (minInputDims == nullptr) {
        LOGE("NNExecutor::GetInputDimRange failed, minInputDims is nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }
    if (maxInputDims == nullptr) {
        LOGE("NNExecutor::GetInputDimRange failed, maxInputDims is nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }
    if (shapeNum == nullptr) {
        LOGE("NNExecutor::GetInputDimRange failed, shapeNum is nullptr.");
        return OH_NN_INVALID_PARAMETER;
    }

    std::vector<std::vector<uint32_t>> minInputDimsVec;
    std::vector<std::vector<uint32_t>> maxInputDimsVec;
    OH_NN_ReturnCode oldRet = m_preparedModel->GetInputDimRanges(minInputDimsVec, maxInputDimsVec);
    if (oldRet != OH_NN_SUCCESS) {
        LOGW("NNExecutor::GetInputDimRange failed, current version don't support get input dim ranges.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (minInputDimsVec.size() != maxInputDimsVec.size()) {
        LOGE("NNExecutor::GetInputDimRange failed, size of minInputDimsVec is not equal to maxInputDimsVec.");
        return OH_NN_INVALID_PARAMETER;
    }
    if (inputIndex >= minInputDimsVec.size()) {
        LOGE("NNExecutor::GetInputDimRange failed, inputIndex[%{public}zu] is out of range.", inputIndex);
        return OH_NN_INVALID_PARAMETER;
    }

    const std::vector<uint32_t>& minInputDimVec = minInputDimsVec[inputIndex];
    const std::vector<uint32_t>& maxInputDimVec = maxInputDimsVec[inputIndex];
    if (minInputDimVec.size() != maxInputDimVec.size()) {
        LOGE("NNExecutor::GetInputDimRange failed, size of the min input dims is not equal to the max input"
             " dims of the %{public}zuth input.", inputIndex);
        return OH_NN_INVALID_PARAMETER;
    }

    // Widen uint32_t dims to size_t and park them in thread-local storage so
    // the returned pointers remain valid after this frame unwinds.
    static thread_local std::vector<size_t> minDimsHolder;
    static thread_local std::vector<size_t> maxDimsHolder;
    minDimsHolder.assign(minInputDimVec.begin(), minInputDimVec.end());
    maxDimsHolder.assign(maxInputDimVec.begin(), maxInputDimVec.end());

    *shapeNum = minDimsHolder.size();
    *minInputDims = minDimsHolder.data();
    *maxInputDims = maxDimsHolder.data();
    return OH_NN_SUCCESS;
}

// Reports the cached shape of the output tensor at `outputIndex` by delegating
// to the stored tensor description. *shape points at memory owned by the
// descriptor; *shapeNum receives the rank.
OH_NN_ReturnCode NNExecutor::GetOutputShape(uint32_t outputIndex, int32_t** shape, uint32_t* shapeNum) const
{
    if (outputIndex >= m_outputTensorDescs.size()) {
        LOGE("NNExecutor::GetOutputShape failed, outputIndex must be smaller than m_outputTensorDescs.size.");
        return OH_NN_INVALID_PARAMETER;
    }

    const auto& outputDesc = m_outputTensorDescs[outputIndex];
    OH_NN_ReturnCode retCode = outputDesc->GetShape(shape, shapeNum);
    if (retCode != OH_NN_SUCCESS) {
        LOGE("NNExecutor::GetOutputShape failed, failed to get shape from tensor desc.");
        return retCode;
    }

    return OH_NN_SUCCESS;
}

// Number of model inputs, taken from the cached input tensor descriptions.
size_t NNExecutor::GetInputNum() const
{
    const size_t inputCount = m_inputTensorDescs.size();
    return inputCount;
}

// Number of model outputs, taken from the cached output tensor descriptions.
size_t NNExecutor::GetOutputNum() const
{
    const size_t outputCount = m_outputTensorDescs.size();
    return outputCount;
}

// Returns a heap-allocated copy of the input tensor description at `index`,
// exposed through the opaque NN_TensorDesc C handle. The caller takes
// ownership of the returned object. Returns nullptr on a bad index or on
// allocation failure.
NN_TensorDesc* NNExecutor::CreateInputTensorDesc(size_t index) const
{
    if (index >= m_inputTensorDescs.size()) {
        LOGE("NNExecutor::CreateInputTensorDesc failed, index must be smaller than m_inputTensorDescs.size.");
        return nullptr;
    }

    auto* descCopy = new (std::nothrow) TensorDesc();
    if (descCopy == nullptr) {
        LOGE("NNExecutor::CreateInputTensorDesc failed, failed to create tensor desc.");
        return nullptr;
    }

    // Hand back a detached copy so the caller's object is independent of the
    // executor's lifetime.
    *descCopy = *m_inputTensorDescs[index];

    return reinterpret_cast<NN_TensorDesc*>(descCopy);
}

// Returns a heap-allocated copy of the output tensor description at `index`,
// exposed through the opaque NN_TensorDesc C handle. The caller takes
// ownership of the returned object. Returns nullptr on a bad index or on
// allocation failure.
NN_TensorDesc* NNExecutor::CreateOutputTensorDesc(size_t index) const
{
    if (index >= m_outputTensorDescs.size()) {
        LOGE("NNExecutor::CreateOutputTensorDesc failed, index must be smaller than m_outputTensorDescs.size.");
        return nullptr;
    }

    auto* descCopy = new (std::nothrow) TensorDesc();
    if (descCopy == nullptr) {
        LOGE("NNExecutor::CreateOutputTensorDesc failed, failed to create tensor desc.");
        return nullptr;
    }

    // Hand back a detached copy so the caller's object is independent of the
    // executor's lifetime.
    *descCopy = *m_outputTensorDescs[index];

    return reinterpret_cast<NN_TensorDesc*>(descCopy);
}

// Run-completion callbacks are not supported by this executor implementation;
// the call is rejected unconditionally.
OH_NN_ReturnCode NNExecutor::SetOnRunDone(NN_OnRunDone onRunDone)
{
    (void)onRunDone;  // intentionally unused
    LOGE("NNExecutor::SetOnRunDone failed, SetOnRunDone is not supported.");
    return OH_NN_OPERATION_FORBIDDEN;
}

// Service-death callbacks are not supported by this executor implementation;
// the call is rejected unconditionally.
OH_NN_ReturnCode NNExecutor::SetOnServiceDied(NN_OnServiceDied onServiceDied)
{
    (void)onServiceDied;  // intentionally unused
    LOGE("NNExecutor::SetOnServiceDied failed, SetOnServiceDied is not supported.");
    return OH_NN_OPERATION_FORBIDDEN;
}

// Runs inference synchronously: validates input shapes against the backend's
// reported dim ranges, executes the prepared model, then writes the actual
// (possibly dynamic) output shapes back into the output tensors' descriptors.
//
// FIX: the two failure logs below used "%zu" while every other log line in
// this file uses "%{public}zu"; without the {public} annotation HiLog redacts
// the value, hiding the failing output index from the log.
OH_NN_ReturnCode NNExecutor::RunSync(NN_Tensor* inputTensors[], size_t inputSize,
    NN_Tensor* outputTensors[], size_t outputSize)
{
    OH_NN_ReturnCode ret {OH_NN_FAILED};
    ret = CheckInputDimRanges(inputTensors, inputSize);
    // OH_NN_OPERATION_FORBIDDEN means the backend cannot report dim ranges at
    // all; that is treated as "nothing to validate", not as a failure.
    if (ret != OH_NN_OPERATION_FORBIDDEN && ret != OH_NN_SUCCESS) {
        LOGE("NNExecutor::RunSync failed, failed to check input dim ranges.");
        return ret;
    }

    OHOS::NeuralNetworkRuntime::IOTensor tensor;
    std::vector<NN_Tensor*> inputTensorsVec;
    for (size_t i = 0; i < inputSize; ++i) {
        inputTensorsVec.emplace_back(inputTensors[i]);
    }

    std::vector<NN_Tensor*> outputTensorsVec;
    for (size_t i = 0; i < outputSize; ++i) {
        outputTensorsVec.emplace_back(outputTensors[i]);
    }

    std::vector<std::vector<int32_t>> outputsDims;
    std::vector<bool> isSufficientDataBuffer;
    // TODO: re-enable once PreparedModel::Run is available in this build.
    // While it is stubbed out, outputsDims stays empty and the size check
    // below rejects any call with outputSize > 0.
    // auto oldRet = m_preparedModel->Run(inputTensorsVec, outputTensorsVec, outputsDims, isSufficientDataBuffer);
    // if (oldRet != OH_NN_SUCCESS) {
    //     LOGE("NNExecutor::RunSync failed, failed to run in prepared model.");
    //     return OH_NN_FAILED;
    // }

    // Set the output NNTensor2_0's dimensions from output IOTensor if it is dynamic.
    // NNTensor2_0::SetDimensions will check if the tensor buffer is enough for the new dimensions.
    if (outputsDims.size() != outputSize) {
        LOGE("NNExecutor::RunSync failed, size of outputsDims is not equal to outputTensors.");
        return OH_NN_INVALID_PARAMETER;
    }
    for (size_t i = 0; i < outputSize; ++i) {
        NNTensor2_0* nnTensor = reinterpret_cast<NNTensor2_0*>(outputTensors[i]);
        TensorDesc* nnTensorDesc = nnTensor->GetTensorDesc();
        if (nnTensorDesc == nullptr) {
            LOGE("NNExecutor::RunSync failed, failed to get desc from tensor.");
            return OH_NN_NULL_PTR;
        }
        // Propagate the actual output shape to the caller-visible tensor...
        ret = nnTensorDesc->SetShape(outputsDims[i].data(), outputsDims[i].size());
        if (ret != OH_NN_SUCCESS) {
            LOGE("NNExecutor::RunSync failed, error happened when setting output tensor's dimensions,"
                 " output id: %{public}zu.", i);
            return ret;
        }
        // ...and to the executor's own cached descriptor so later
        // GetOutputShape calls see the dynamic shape too.
        ret = m_outputTensorDescs[i]->SetShape(outputsDims[i].data(), outputsDims[i].size());
        if (ret != OH_NN_SUCCESS) {
            LOGE("NNExecutor::RunSync failed, error happened when setting inner output tensor's dimensions,"
                 " output id: %{public}zu.", i);
            return ret;
        }
    }
    return OH_NN_SUCCESS;
}

// Asynchronous execution is not supported by this executor implementation;
// the call is rejected unconditionally.
OH_NN_ReturnCode NNExecutor::RunAsync(NN_Tensor* inputTensors[], size_t inputSize,
    NN_Tensor* outputTensors[], size_t outputSize, int32_t timeout, void* userData)
{
    (void)inputTensors;
    (void)inputSize;
    (void)outputTensors;
    (void)outputSize;
    (void)timeout;
    (void)userData;
    LOGE("NNExecutor::RunAsync failed, RunAsync is not supported.");
    return OH_NN_OPERATION_FORBIDDEN;
}

// Identifier of the device this executor was constructed for.
size_t NNExecutor::GetDeviceID()
{
    return m_deviceID;
}

// Validates each input tensor's shape against the min/max dimension ranges
// reported by the backend. Returns OH_NN_OPERATION_FORBIDDEN when the backend
// cannot report ranges (caller treats that as "skip validation").
//
// BUG FIX: the loop previously indexed the member caches m_minInputDims /
// m_maxInputDims, but the size checks above validate only the freshly fetched
// LOCAL vectors — the members were never shown to be populated here, so the
// validation was dead and the member indexing could run out of bounds. The
// loop now reads the validated local results.
OH_NN_ReturnCode NNExecutor::CheckInputDimRanges(NN_Tensor* inputTensors[], size_t inputSize)
{
    std::vector<std::vector<uint32_t>> minInputDims;
    std::vector<std::vector<uint32_t>> maxInputDims;
    OH_NN_ReturnCode oldRet = m_preparedModel->GetInputDimRanges(minInputDims, maxInputDims);
    if (oldRet != OH_NN_SUCCESS) {
        LOGW("NNExecutor::CheckInputDimRanges failed, current version don't support get input dim ranges.");
        return OH_NN_OPERATION_FORBIDDEN;
    }

    if (inputSize != minInputDims.size()) {
        LOGE("NNExecutor::CheckInputDimRanges failed, size of minInputDims is not equal to inputSize.");
        return OH_NN_INVALID_PARAMETER;
    }

    if (inputSize != maxInputDims.size()) {
        LOGE("NNExecutor::CheckInputDimRanges failed, size of maxInputDims is not equal to inputSize.");
        return OH_NN_INVALID_PARAMETER;
    }

    const NNTensor2_0* nnTensor = nullptr;
    OH_NN_ReturnCode ret {OH_NN_FAILED};
    for (size_t i = 0; i < inputSize; ++i) {
        // Use the locally fetched, size-validated ranges (not the member caches).
        const std::vector<uint32_t>& minSingleInputDims = minInputDims[i];
        const std::vector<uint32_t>& maxSingleInputDims = maxInputDims[i];
        nnTensor = reinterpret_cast<const NNTensor2_0*>(inputTensors[i]);
        if (nnTensor == nullptr) {
            LOGE("NNExecutor::CheckInputDimRanges failed, input %{public}zu is nullptr.", i);
            return OH_NN_NULL_PTR;
        }
        ret = nnTensor->CheckDimRanges(minSingleInputDims, maxSingleInputDims);
        if (ret != OH_NN_SUCCESS) {
            LOGE("NNExecutor::CheckInputDimRanges failed, failed to check input dim ranges of input %{public}zu", i);
            return ret;
        }
    }

    return OH_NN_SUCCESS;
}
}  // namespace NeuralNetworkRuntime
}  // namespace OHOS