/*
 * Copyright (c) 2022 Shenzhen Kaihong Digital Industry Development Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cerrno>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <string>
#include <vector>

#include "paddlelite_wrapper.h"

PaddleliteWrapper::PaddleliteWrapper()
    : pPaddlePredictor_(nullptr)
{
}

PaddleliteWrapper::~PaddleliteWrapper()
{
    // ClearModelAndSession is a no-op when no predictor was ever loaded,
    // so it is safe to call unconditionally here.
    ClearModelAndSession();
    pPaddlePredictor_ = nullptr;
}

// Store the algorithm configuration (model path etc.) for a later Load().
// Returns the result of SetRawConfig unchanged.
AiRetCode PaddleliteWrapper::Init(const AlgorithmInfo &algoConfig)
{
    const AiRetCode ret = pdAlgoConfig_.SetRawConfig(algoConfig);
    if (ret != AiRetCode::AI_RETCODE_SUCCESS)
    {
        HILOGE("set Paddlelite fail");
    }
    return ret;
}
// Create the Paddle Lite predictor from the configured model and cache its input tensors.

// Build the predictor from the model path configured in Init() and cache
// the model's input tensor handles for later SetInputData() calls.
// Returns AI_RETCODE_SUCCESS on success; on any failure the predictor and
// cached tensors are released again.
AiRetCode PaddleliteWrapper::Load()
{
    // 1. Create PaddlePredictor from the configured model file.
    pPaddlePredictor_ = createPaddlePredictor(pdAlgoConfig_.modelPath_);
    if (pPaddlePredictor_ == nullptr)
    {
        HILOGE("get pPaddlePredictor_ ponter error, errno: %d.", errno);
        return AiRetCode::AI_RETCODE_FAILURE;
    }

    // 2. Cache pointers to the model's input tensors (pdInputs_).
    const AiRetCode ret = GetInputsPointer();
    if (ret != AiRetCode::AI_RETCODE_SUCCESS)
    {
        HILOGE("Get Inputs and Outputs Pointer fail");
        ClearModelAndSession();
        return ret;
    }

    return AiRetCode::AI_RETCODE_SUCCESS;
}
std::shared_ptr<paddle::lite_api::PaddlePredictor> PaddleliteWrapper::createPaddlePredictor(std::string model_dir)
{
    paddle::lite_api::MobileConfig mobile_config;
    mobile_config.set_model_from_file(model_dir);
    std::shared_ptr<paddle::lite_api::PaddlePredictor> predictor = nullptr;
    predictor = paddle::lite_api::CreatePaddlePredictor(mobile_config);
    return predictor;
}
void PaddleliteWrapper::SetInputData(const std::vector<IOTensor> &inputs, std::vector<paddle::lite_api::Tensor> pdInputs_)
{
    size_t inputs_num = inputs.size();
    if (inputs_num != pdInputs_.size())
    {
        HILOGE("input tensor nums not equal model needed!");
        return;
    }
    for (size_t inputIndex = 0; inputIndex < inputs_num; ++inputIndex)
    {
        if (inputs[inputIndex].shape.size() == 4)
        {
            pdInputs_[inputIndex].Resize({inputs[inputIndex].shape[0],
                                          inputs[inputIndex].shape[1],
                                          inputs[inputIndex].shape[2],
                                          inputs[inputIndex].shape[3]});
        }
        else
        {
            HILOGE("inputs[inputIndex].shape.size !=4");
            return;
        }
        paddle::lite_api::PrecisionType dataType = SetPdTensorType(inputs[inputIndex]);
        pdInputs_[inputIndex].SetPrecision(dataType);
        void *inputData = pdInputs_[inputIndex].mutable_data<float>();
        paddle::lite_api::shape_t shape = pdInputs_[inputIndex].shape();

        int64_t res = 1;
        for (auto i : shape)
            res *= i;
        size_t inputDataSize = res;
        if (inputs[inputIndex].buffer.first == nullptr)
        {
            HILOGE("inputs[inputIndex].buffer.first == nullptr");
        }
        memcpy(inputData, inputs[inputIndex].buffer.first, inputDataSize);
    }
}
// Populate pdInputs_ with the predictor's input tensors, one per input name.
// Returns INVALID_POINTER when no predictor is loaded and AI_RETCODE_FAILURE
// when the model reports zero inputs.
AiRetCode PaddleliteWrapper::GetInputsPointer()
{
    if (pPaddlePredictor_ == nullptr)
    {
        HILOGE("pPaddlePredictor_ is nullptr");
        return AiRetCode::INVALID_POINTER;
    }

    const size_t inputCount = pPaddlePredictor_->GetInputNames().size();
    if (inputCount == 0)
    {
        HILOGE("Failed to get inputTensorNames");
        return AiRetCode::AI_RETCODE_FAILURE;
    }

    for (size_t idx = 0; idx < inputCount; ++idx)
    {
        pdInputs_.emplace_back(*(pPaddlePredictor_->GetInput(idx)));
    }

    return AiRetCode::AI_RETCODE_SUCCESS;
}

// Drop the predictor reference and all cached input/output tensors.
// Safe to call when nothing is loaded (it simply does nothing).
void PaddleliteWrapper::ClearModelAndSession()
{
    if (pPaddlePredictor_ == nullptr)
    {
        return;
    }
    pPaddlePredictor_ = nullptr; // shared_ptr releases its reference here
    pdInputs_.clear();
    pdOutputs_.clear();
}

// Run one synchronous inference: copy `inputs` into the model, execute,
// and convert the model's output tensors into `outputs`.
// Returns INVALID_POINTER when no predictor is loaded.
AiRetCode PaddleliteWrapper::SynInfer(const std::vector<IOTensor> &inputs, std::vector<IOTensor> &outputs)
{
    if (pPaddlePredictor_ == nullptr)
    {
        HILOGE("pPaddlePredictor_ is nullptr");
        return AiRetCode::INVALID_POINTER;
    }
    SetInputData(inputs, pdInputs_);
    // Run
    pPaddlePredictor_->Run();

    // pdOutputs_ is moved-from at the end of every inference; clear it so a
    // previous run's (unspecified) residue can never leak into this run.
    pdOutputs_.clear();
    size_t output_tensor_num = pPaddlePredictor_->GetOutputNames().size();
    for (size_t tidx = 0; tidx < output_tensor_num; ++tidx)
    {
        std::unique_ptr<const paddle::lite_api::Tensor> output_tensor =
            pPaddlePredictor_->GetOutput(tidx);
        pdOutputs_.emplace_back(std::move(output_tensor));
    }

    // convert pdOutputs to IOTensors
    GetOutputData(std::move(pdOutputs_), outputs);

    return AiRetCode::AI_RETCODE_SUCCESS;
}

// Release the predictor and cached tensors. Always succeeds; a no-op when
// nothing is loaded (ClearModelAndSession guards the null case itself).
AiRetCode PaddleliteWrapper::Unload()
{
    ClearModelAndSession();
    return AiRetCode::AI_RETCODE_SUCCESS;
}
// Convert each Paddle Lite output tensor into an IOTensor appended to
// `outputs`. Takes ownership of the tensor list by value.
// (Previously iterated with a signed `int` against size() — signed/unsigned
// mismatch; a range-for avoids the issue entirely. Parameter renamed so it
// no longer shadows the pdOutputs_ member.)
void PaddleliteWrapper::GetOutputData(
    std::vector<std::unique_ptr<const paddle::lite_api::Tensor>> pdTensors,
    std::vector<IOTensor> &outputs)
{
    for (const auto &pdTensor : pdTensors)
    {
        IOTensor outTensor;
        SetIOTensor(*pdTensor, outTensor);
        outputs.emplace_back(outTensor);
    }
}
// Byte width of one element for the given Paddle Lite precision.
// Unknown precisions fall back to the float32 element size.
int PaddleliteWrapper::DataTypeSize(const paddle::lite_api::PrecisionType &dataType) const
{
    switch (dataType)
    {
    case paddle::lite_api::PrecisionType::kFloat:
        return FLOAT32_SIZE;
    case paddle::lite_api::PrecisionType::kInt8:
        return INT8_SIZE;
    case paddle::lite_api::PrecisionType::kUInt8:
        return UINT8_SIZE;
    case paddle::lite_api::PrecisionType::kInt16:
        return INT16_SIZE;
    case paddle::lite_api::PrecisionType::kInt32:
        return INT32_SIZE;
    case paddle::lite_api::PrecisionType::kInt64:
        return INT64_SIZE;
    default:
        return FLOAT32_SIZE;
    }
}
// Total element count of a tensor shape: the product of all its extents.
int64_t PaddleliteWrapper::ShapeProduction(const paddle::lite_api::shape_t &shape)
{
    int64_t product = 1;
    for (size_t idx = 0; idx < shape.size(); ++idx)
    {
        product *= shape[idx];
    }
    return product;
}

// In-param: pdTensor; out-param: tensor.
// Fill `tensor` from `pdTensor`: shape first, then data type, then a copied
// data buffer (the buffer step reads the shape, so the order matters).
void PaddleliteWrapper::SetIOTensor(paddle::lite_api::Tensor const &pdTensor, IOTensor &tensor) const
{
    SetIOTensorShape(pdTensor, tensor);
    SetIOTensorType(pdTensor, tensor);
    SetIOTensorBuffer(pdTensor, tensor);
}
// Append each dimension extent of the Paddle tensor to tensor.shape.
void PaddleliteWrapper::SetIOTensorShape(paddle::lite_api::Tensor const &pdTensor,
                                         IOTensor &tensor) const
{
    for (const auto dim : pdTensor.shape())
    {
        tensor.shape.emplace_back(static_cast<size_t>(dim));
    }
}
// Map the Paddle Lite precision of `pdTensor` onto the wrapper's TensorType.
// Unrecognized precisions default to FLOAT32.
void PaddleliteWrapper::SetIOTensorType(paddle::lite_api::Tensor const &pdTensor,
                                        IOTensor &tensor) const
{
    switch (pdTensor.precision())
    {
    case paddle::lite_api::PrecisionType::kFloat:
        tensor.type = TensorType::FLOAT32;
        break;
    case paddle::lite_api::PrecisionType::kInt8:
        tensor.type = TensorType::INT8;
        break;
    case paddle::lite_api::PrecisionType::kUInt8:
        tensor.type = TensorType::UINT8;
        break;
    case paddle::lite_api::PrecisionType::kInt16:
        tensor.type = TensorType::INT16;
        break;
    case paddle::lite_api::PrecisionType::kInt32:
        tensor.type = TensorType::INT32;
        break;
    case paddle::lite_api::PrecisionType::kInt64:
        tensor.type = TensorType::INT64;
        break;
    default:
        tensor.type = TensorType::FLOAT32;
        break;
    }
}
// Allocate a heap buffer, copy the Paddle tensor's data into it, and hand
// ownership to tensor.buffer (first = data pointer, second = byte size).
// NOTE(review): the caller is presumably responsible for free()ing
// tensor.buffer.first — confirm against the IOTensor contract.
void PaddleliteWrapper::SetIOTensorBuffer(paddle::lite_api::Tensor const &pdTensor,
                                          IOTensor &tensor) const
{
    // int64_t, not int: the element count of a large tensor can overflow int.
    int64_t elementCount = 1;
    const paddle::lite_api::shape_t pdShape =
        pdTensor.shape(); // shape_t = std::vector<int64_t>;
    for (size_t dim = 0; dim < pdShape.size(); ++dim)
    {
        elementCount *= pdShape[dim];
    }
    // const_cast kept: precision() is invoked through a non-const Tensor here,
    // matching the lite API usage elsewhere in this file.
    paddle::lite_api::Tensor *pdNonConstTensor =
        const_cast<paddle::lite_api::Tensor *>(&pdTensor);
    const size_t byteSize = static_cast<size_t>(elementCount) *
                            static_cast<size_t>(DataTypeSize(pdNonConstTensor->precision()));
    char *outputData = static_cast<char *>(malloc(byteSize));
    if (outputData == nullptr)
    {
        // Previously memcpy'd into a null pointer on allocation failure.
        HILOGE("malloc output buffer fail, errno: %d.", errno);
        tensor.buffer.first = nullptr;
        tensor.buffer.second = 0;
        return;
    }
    tensor.buffer.second = byteSize;
    memcpy(outputData, pdTensor.data<float>(), tensor.buffer.second);
    tensor.buffer.first = outputData;
}

// Map the wrapper's TensorType onto the Paddle Lite precision enum.
// Unrecognized types default to kFloat.
paddle::lite_api::PrecisionType PaddleliteWrapper::SetPdTensorType(IOTensor const &tensor) const
{
    switch (tensor.type)
    {
    case TensorType::FLOAT32:
        return paddle::lite_api::PrecisionType::kFloat;
    case TensorType::INT8:
        return paddle::lite_api::PrecisionType::kInt8;
    case TensorType::UINT8:
        return paddle::lite_api::PrecisionType::kUInt8;
    case TensorType::INT16:
        return paddle::lite_api::PrecisionType::kInt16;
    case TensorType::INT32:
        return paddle::lite_api::PrecisionType::kInt32;
    case TensorType::INT64:
        return paddle::lite_api::PrecisionType::kInt64;
    default:
        return paddle::lite_api::PrecisionType::kFloat;
    }
}