/*
* Copyright (c) 2022 Shenzhen Kaihong Digital Industry Development Co., Ltd. 
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "mnn_wrapper.h"

#include <cerrno>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <map>
#include <memory>
#include <string>
#include <vector>

// Construct an empty wrapper. Init() and Load() must be called before
// SynInfer() can be used.
MnnWrapper::MnnWrapper()
    : pMnnInterpreter_(nullptr)
{
    // BUGFIX: `session` was left uninitialized until Load(); make sure it is
    // a well-defined null pointer from construction onwards.
    session = nullptr;
}

// Tear down the wrapper: release the model and session if one was loaded.
// ClearModelAndSession() is itself guarded against a null interpreter and
// nulls the pointer, so no extra checks are needed here.
MnnWrapper::~MnnWrapper()
{
    ClearModelAndSession();
}

// Cache the raw algorithm configuration (model path, etc.) for Load().
// Returns the status reported by SetRawConfig; logs on failure.
AiRetCode MnnWrapper::Init(const AlgorithmInfo &algoConfig)
{
    const AiRetCode ret = pdAlgoConfig_.SetRawConfig(algoConfig);
    if (ret != AiRetCode::AI_RETCODE_SUCCESS)
    {
        HILOGE("set Mnn fail");
    }
    return ret;
}

// Create the MNN interpreter from the model file configured in Init() and
// open an inference session on it (CPU backend, 4 threads, high precision).
// Returns INVALID_POINTER when the model file cannot be loaded and
// AI_RETCODE_FAILURE when session creation fails.
AiRetCode MnnWrapper::Load()
{
    // 1. Create the interpreter from the configured model file.
    pMnnInterpreter_ = MNN::Interpreter::createFromFile(pdAlgoConfig_.modelPath_.c_str());
    // BUGFIX: createFromFile returns nullptr on a missing/corrupt model; the
    // original code dereferenced it unconditionally.
    if (pMnnInterpreter_ == nullptr)
    {
        HILOGE("create interpreter from model file failed");
        return AiRetCode::INVALID_POINTER;
    }

    // 2. Build the schedule config. createSession copies the config, so
    // pointing backendConfig at a stack local is safe.
    MNN::ScheduleConfig config;
    config.type = MNN_FORWARD_CPU;
    config.numThread = 4;  // fixed CPU thread count used by this wrapper
    MNN::BackendConfig backendConfig;
    backendConfig.precision = MNN::BackendConfig::Precision_High;
    config.backendConfig = &backendConfig;

    // 3. Create the session and verify it exists before reporting success.
    session = pMnnInterpreter_->createSession(config);
    if (session == nullptr)
    {
        HILOGE("create session failed");
        ClearModelAndSession();
        return AiRetCode::AI_RETCODE_FAILURE;
    }

    return AiRetCode::AI_RETCODE_SUCCESS;
}

// Translate the MNN tensor's halide element type (type code + byte width)
// into the framework's TensorType and store it in tensor.type.
// tensor.type is left untouched for unsupported code/width combinations,
// which are only logged.
void MnnWrapper::SetIOTensorType(MNN::Tensor const &mnnTensor,
                                 IOTensor &tensor)
{
    // Hoist the repeated getType() lookups.
    const int bytes = mnnTensor.getType().bytes();
    switch (mnnTensor.getType().code)
    {
    case halide_type_uint:
    {
        if (1 == bytes)
        {
            tensor.type = TensorType::UINT8;
        }
        else if (4 == bytes)
        {
            tensor.type = TensorType::UINT32;
        }
        else if (8 == bytes)
        {
            tensor.type = TensorType::UINT64;
        }
        else
        {
            // BUGFIX: message said "unit" instead of "uint".
            HILOGE("Not support uint data type");
            return;
        }
        break;
    }
    case halide_type_int:
    {
        if (1 == bytes)
        {
            tensor.type = TensorType::INT8;
        }
        else if (2 == bytes)
        {
            tensor.type = TensorType::INT16;
        }
        else if (4 == bytes)
        {
            tensor.type = TensorType::INT32;
        }
        else if (8 == bytes)
        {
            tensor.type = TensorType::INT64;
        }
        else
        {
            HILOGE("Not support int data type");
            return;
        }
        break;
    }
    case halide_type_float:
    {
        if (2 == bytes)
        {
            tensor.type = TensorType::FLOAT16;
        }
        else if (4 == bytes)
        {
            tensor.type = TensorType::FLOAT32;
        }
        else
        {
            // BUGFIX: copy-paste error — this branch logged "int" instead
            // of "float".
            HILOGE("Not support float data type");
            return;
        }
        break;
    }
    case halide_type_handle:
    {
        HILOGE("Not support halide_type_handle data type");
        break;
    }
    default:
    {
        HILOGE("Not support default data type");
        break;
    }
    }
}
// Translate the framework TensorType into the corresponding MNN DataType
// and stamp it onto the MNN tensor. Any type without an explicit mapping
// (including FLOAT32 itself) falls back to 32-bit float.
void MnnWrapper::SetMnnTensorType(IOTensor const &tensor, MNN::Tensor &mnnTensor)
{
    // Resolve the MNN type first, then apply it with a single setType call.
    int mnnType = DataType_DT_FLOAT;
    switch (tensor.type)
    {
    case TensorType::INT8:
        mnnType = DataType_DT_INT8;
        break;
    case TensorType::INT16:
        mnnType = DataType_DT_INT16;
        break;
    case TensorType::INT32:
        mnnType = DataType_DT_INT32;
        break;
    case TensorType::INT64:
        mnnType = DataType_DT_INT64;
        break;
    case TensorType::UINT8:
        mnnType = DataType_DT_UINT8;
        break;
    case TensorType::FLOAT32:
    default:
        // FLOAT32 and all unmapped types use the float default.
        mnnType = DataType_DT_FLOAT;
        break;
    }
    mnnTensor.setType(mnnType);
}

// Map the framework memory layout onto MNN's dimension-type convention:
// NCHW -> CAFFE, NC4HW4 -> CAFFE_C4, NHWC (and anything else) -> TENSORFLOW.
MNN::Tensor::DimensionType MnnWrapper::SetMnnTensorShape(IOTensor const &tensor)
{
    if (tensor.layout == NCHW)
    {
        return MNN::Tensor::CAFFE;
    }
    if (tensor.layout == NC4HW4)
    {
        return MNN::Tensor::CAFFE_C4;
    }
    // NHWC and unrecognised layouts both use the TensorFlow convention.
    return MNN::Tensor::TENSORFLOW;
}

// Map MNN's dimension type back onto the framework layout enum:
// TENSORFLOW -> NHWC, CAFFE_C4 -> NC4HW4, CAFFE (and anything else) -> NCHW.
void MnnWrapper::SetIOTensorShape(MNN::Tensor const &mnnTensor, IOTensor &tensor)
{
    const MNN::Tensor::DimensionType dimType = mnnTensor.getDimensionType();
    if (dimType == MNN::Tensor::TENSORFLOW)
    {
        tensor.layout = NHWC;
    }
    else if (dimType == MNN::Tensor::CAFFE_C4)
    {
        tensor.layout = NC4HW4;
    }
    else
    {
        // CAFFE and any unknown dimension type are treated as NCHW.
        tensor.layout = NCHW;
    }
}

void MnnWrapper::ClearModelAndSession()
{
    if (pMnnInterpreter_ != nullptr)
    {
        pMnnInterpreter_->releaseModel();
        pMnnInterpreter_->releaseSession(session);
        pMnnInterpreter_ = nullptr;
    }
}
// Copy one MNN output tensor into the framework IOTensor: shape, layout,
// payload bytes and element type. The payload buffer is malloc'ed here and
// ownership transfers to the caller through output.buffer
// (first = data pointer, second = byte count).
void MnnWrapper::GetOutputData(MNN::Tensor mnnOutput_, IOTensor &output)
{
    // DimensionType: 0-TENSORFLOW-NHWC, 1-CAFFE-NCHW
    std::vector<int> v_shape = mnnOutput_.shape();
    if (4 != v_shape.size())
    {
        HILOGE("!!!shape size wrong,only 4 is support by mnn");
        return;
    }
    for (size_t i = 0; i < v_shape.size(); ++i)
    {
        output.shape.emplace_back(size_t(v_shape[i]));
    }
    SetIOTensorShape(mnnOutput_, output);

    // Map the (possibly device-resident) tensor into host memory first.
    auto copy = std::shared_ptr<MNN::Tensor>(MNN::Tensor::createHostTensorFromDevice(&mnnOutput_));
    if (copy == nullptr || copy->host<char>() == nullptr)
    {
        HILOGE("createHostTensorFromDevice failed");
        return;
    }

    // BUGFIX: elementSize() is the element COUNT, not the byte size; using
    // it as a byte count truncated every multi-byte payload (e.g. only a
    // quarter of a float tensor was copied). Tensor::size() is in bytes.
    output.buffer.second = copy->size();
    char *outputData = static_cast<char *>(malloc(output.buffer.second));
    if (outputData == nullptr)
    {
        HILOGE("malloc output buffer failed");
        output.buffer.second = 0;
        return;
    }
    memcpy(outputData, copy->host<char>(), output.buffer.second);
    output.buffer.first = outputData;
    SetIOTensorType(mnnOutput_, output);
}
// Synchronous inference: copy each request IOTensor into the matching MNN
// session input, run the session once, and append the (single) output as an
// IOTensor to `outputs`. Returns INVALID_POINTER when Load() was never
// called and AI_RETCODE_FAILURE on input mismatch or allocation failure.
AiRetCode MnnWrapper::SynInfer(const std::vector<IOTensor> &inputs, std::vector<IOTensor> &outputs)
{
    if (pMnnInterpreter_ == nullptr)
    {
        HILOGE("pMnnInterpreter_ is nullptr");
        return AiRetCode::INVALID_POINTER;
    }

    // Must get outTensor before runSession
    MNN::Tensor *outTensor = pMnnInterpreter_->getSessionOutput(session, NULL);
    // BUGFIX: getSessionOutput can return nullptr; it was dereferenced
    // unconditionally below.
    if (outTensor == nullptr)
    {
        HILOGE("getSessionOutput failed");
        return AiRetCode::AI_RETCODE_FAILURE;
    }
    MNN::Tensor outputTensorUser(outTensor, outTensor->getDimensionType());

    std::map<std::string, MNN::Tensor *> mnnInputTensors = pMnnInterpreter_->getSessionInputAll(session);
    if (mnnInputTensors.size() != inputs.size())
    {
        HILOGE("The input Tensor nums is not equal request Tensor nums");
        return AiRetCode::AI_RETCODE_FAILURE;
    }

    // Set input data: pair session inputs (map order) with `inputs` by index.
    size_t i = 0;
    for (auto &entry : mnnInputTensors)
    {
        SetMnnTensorType(inputs[i], *(entry.second));
        std::vector<int> v_shape;
        v_shape.reserve(inputs[i].shape.size());
        for (size_t j = 0; j < inputs[i].shape.size(); j++)
        {
            v_shape.emplace_back(int(inputs[i].shape[j]));
        }
        MNN::Tensor::DimensionType mnnDimType = SetMnnTensorShape(inputs[i]);
        // BUGFIX: the staging tensor was raw-allocated by Tensor::create and
        // leaked on every iteration and on the early-return path below; own
        // it with a smart pointer (same pattern as GetOutputData).
        std::shared_ptr<MNN::Tensor> mnnInput_(MNN::Tensor::create<float>(v_shape, NULL, mnnDimType));
        float *data = (mnnInput_ != nullptr) ? mnnInput_->host<float>() : nullptr;
        if (data == nullptr)
        {
            HILOGE("The input Tensor data is nullptr");
            return AiRetCode::AI_RETCODE_FAILURE;
        }
        memcpy(data, inputs[i].buffer.first, inputs[i].buffer.second);
        entry.second->copyFromHostTensor(mnnInput_.get());
        ++i;
    }

    // Run
    pMnnInterpreter_->runSession(session);

    // Convert outputTensorUser to an IOTensor and hand it back.
    IOTensor outIOTensor;
    GetOutputData(&outputTensorUser, outIOTensor);
    outputs.emplace_back(outIOTensor);

    return AiRetCode::AI_RETCODE_SUCCESS;
}

// Release the model and session. ClearModelAndSession() is a no-op when
// nothing is loaded, so Unload() is safe to call repeatedly.
AiRetCode MnnWrapper::Unload()
{
    ClearModelAndSession();
    return AiRetCode::AI_RETCODE_SUCCESS;
}