/*
* Copyright (c) 2022 Shenzhen Kaihong Digital Industry Development Co., Ltd. 
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef PADDLELITE_WRAPPER_H
#define PADDLELITE_WRAPPER_H

#include <algorithm>
#include <dlfcn.h>
#include <map>
#include <vector>

#include "utils/log/infer_log.h"
#include "infer_framework.h"
#include "paddlelite_config.h"
#include "paddle_api.h"

// Byte widths of supported tensor element types. Presumably consumed by
// DataTypeSize() to convert an element count into a buffer size in bytes
// — TODO confirm against the .cpp implementation.
// constexpr (instead of plain const) guarantees compile-time evaluation;
// namespace-scope const/constexpr ints have internal linkage, so defining
// them in this header causes no ODR violations.
constexpr int FLOAT32_SIZE = 4;
constexpr int FLOAT16_SIZE = 2;
constexpr int INT8_SIZE = 1;
constexpr int UINT8_SIZE = 1;
constexpr int INT16_SIZE = 2;
constexpr int UINT16_SIZE = 2;
constexpr int INT32_SIZE = 4;
constexpr int UINT32_SIZE = 4;
constexpr int INT64_SIZE = 8;
constexpr int UINT64_SIZE = 8;

// Paddle-Lite backend implementation of the InferFramework interface.
// Wraps a paddle::lite_api::PaddlePredictor to load a model, run synchronous
// inference on IOTensor inputs/outputs, and release the model afterwards.
// Method bodies are defined out of this header (in the corresponding .cpp).
class PaddleliteWrapper : public InferFramework {
public:
    PaddleliteWrapper();
    virtual ~PaddleliteWrapper();
    // Initializes the wrapper from the given algorithm configuration.
    // NOTE(review): unlike Load/SynInfer/Unload below, this is not marked
    // `override` — confirm whether InferFramework declares a virtual Init()
    // and add `override` (or drop `virtual`) for consistency.
    virtual AiRetCode Init(const AlgorithmInfo &algoConfig);
    // Loads the model and creates the underlying PaddlePredictor.
    virtual AiRetCode Load() override;
    // Runs one synchronous inference pass: copies `inputs` into the
    // predictor's input tensors, executes, and fills `outputs`.
    virtual AiRetCode SynInfer(const std::vector<IOTensor> &inputs, std::vector<IOTensor> &outputs) override;
    // Releases the model/session resources created by Load().
    virtual AiRetCode Unload() override;

protected:
    // Fetches pointers to the predictor's input tensors (presumably filling
    // pdInputs_ below — TODO confirm against the .cpp).
    AiRetCode GetInputsPointer();
    // Drops the predictor and any cached tensors.
    void ClearModelAndSession();
    // Returns the size in bytes of one element of the given precision type.
    int DataTypeSize(const paddle::lite_api::PrecisionType &dataType) const;
    // Builds a PaddlePredictor from the model located at model_dir.
    std::shared_ptr<paddle::lite_api::PaddlePredictor> createPaddlePredictor(std::string model_dir);
    // Copies user-supplied IOTensor data into the predictor's input tensors.
    // NOTE(review): the vector parameter is taken by value, copying every
    // Tensor on each call — likely intended to be a (const) reference; it
    // also shadows the pdInputs_ member. Verify against the definition.
    void SetInputData(const std::vector<IOTensor> &inputs,std::vector<paddle::lite_api::Tensor> pdInputs_);
    // Copies the predictor's output tensors into user-facing IOTensors.
    // NOTE(review): passing a vector of unique_ptr by value forces callers
    // to std::move it (the copy is deleted) and shadows the pdOutputs_
    // member — a reference parameter is probably intended; confirm.
    void GetOutputData(std::vector<std::unique_ptr<const paddle::lite_api::Tensor>> pdOutputs_,std::vector<IOTensor> &outputs);

    // Fills `tensor` (shape, type, buffer) from a Paddle-Lite tensor,
    // delegating to the three helpers below.
    void SetIOTensor(paddle::lite_api::Tensor const& pdTensor, IOTensor &tensor) const;
    // Copies the dimension list from pdTensor into tensor.
    void SetIOTensorShape(paddle::lite_api::Tensor const& pdTensor,IOTensor &tensor) const;
    // Maps the Paddle-Lite precision type onto the IOTensor type field.
    void SetIOTensorType(paddle::lite_api::Tensor const& pdTensor,IOTensor &tensor) const;
    // Copies the raw data buffer from pdTensor into tensor.
    void SetIOTensorBuffer(paddle::lite_api::Tensor const& pdTensor,IOTensor &tensor) const;
    // Reverse mapping: IOTensor type -> Paddle-Lite precision type.
    paddle::lite_api::PrecisionType SetPdTensorType(IOTensor const &tensor) const;
    // Product of all dimensions, i.e. the total element count of a shape.
    int64_t ShapeProduction(const paddle::lite_api::shape_t &shape);


protected:
    // Paddle-Lite specific algorithm/model configuration.
    // (The "Dynamic link library handle" comment appears to be stale —
    // no dlopen handle member is declared here despite the <dlfcn.h> include.)
    PaddleliteConfig pdAlgoConfig_;
    // The predictor instance created by Load(); shared with any code that
    // needs to keep it alive across calls.
    std::shared_ptr<paddle::lite_api::PaddlePredictor> pPaddlePredictor_;
    // Cached input tensors of the current predictor.
    std::vector<paddle::lite_api::Tensor> pdInputs_;
    // Cached (read-only) output tensors of the current predictor.
    std::vector<std::unique_ptr<const paddle::lite_api::Tensor>> pdOutputs_;
};

#endif // PADDLELITE_WRAPPER_H
