/*
* Copyright (c) 2022 Shenzhen Kaihong Digital Industry Development Co., Ltd. 
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <cerrno>
#include <cstring>
#include <fstream>
#include <string>
#include <vector>

#include "ncnn_wrapper.h"
#include "net.h"

NcnnWrapper::NcnnWrapper()
{
    // Intentionally empty: the ncnn::Net is created on the stack inside
    // SynInfer, so there is no persistent state to initialize here.
}

NcnnWrapper::~NcnnWrapper()
{
    // Release any model/session state before the wrapper is destroyed
    // (currently a no-op, kept for symmetry with the interface).
    ClearModelAndSession();
}

// Store the raw algorithm configuration (model/weight paths etc.) for later
// use by SynInfer. Returns the status of SetRawConfig; logs on failure.
AiRetCode NcnnWrapper::Init(const AlgorithmInfo &algoConfig)
{
    const AiRetCode status = pdAlgoConfig_.SetRawConfig(algoConfig);
    if (status == AiRetCode::AI_RETCODE_SUCCESS)
    {
        return status;
    }
    HILOGE("set Ncnn fail");
    return status;
}
// No-op: the model is loaded per-call on the stack inside SynInfer,
// so there is nothing to load up front. Always reports success.
AiRetCode NcnnWrapper::Load()
{
    return AiRetCode::AI_RETCODE_SUCCESS;
}
// No-op: no long-lived model or session is held by this wrapper
// (everything lives on the stack inside SynInfer).
void NcnnWrapper::ClearModelAndSession()
{
}
// No-op counterpart of Load(): there is no persistent model to release,
// so unloading trivially succeeds.
AiRetCode NcnnWrapper::Unload()
{
    return AiRetCode::AI_RETCODE_SUCCESS;
}

// Convert one ncnn output blob into an IOTensor, copying the blob data into
// a freshly malloc'ed buffer whose ownership passes to the caller.
// On allocation failure the buffer is left as {nullptr, 0} and an error is
// logged.
void NcnnWrapper::GetOutputData(ncnn::Mat ncnnOutputs_, IOTensor &outputs)
{
    outputs.layout = TensorLayout::NCHW;
    // NOTE(review): Mat::dims is the dimension *count*, not a batch extent;
    // it is kept in shape[0] to preserve the existing consumer contract --
    // confirm downstream readers expect {dims, c, h, w}.
    outputs.shape.emplace_back(ncnnOutputs_.dims);
    outputs.shape.emplace_back(ncnnOutputs_.c);
    outputs.shape.emplace_back(ncnnOutputs_.h);
    outputs.shape.emplace_back(ncnnOutputs_.w);
    SetIOTensorType(ncnnOutputs_, outputs);
    // Bug fix: the byte count is element count * element size. The previous
    // code multiplied by 'dims' (the dimension count) and ignored elemsize,
    // which under-copied 4-byte data and over-copied 1-byte data.
    // NOTE(review): assumes the Mat is densely packed (cstep == w * h);
    // per-channel padding, if present, is not compacted here -- confirm.
    outputs.buffer.second = static_cast<size_t>(ncnnOutputs_.c) *
                            ncnnOutputs_.h * ncnnOutputs_.w * ncnnOutputs_.elemsize;
    char *outputData = (char *)malloc(outputs.buffer.second);
    if (outputData == nullptr)
    {
        HILOGE("!!! NcnnWrapper:GetOutputData malloc failed!!");
        outputs.buffer.first = nullptr;
        outputs.buffer.second = 0;
        return;
    }
    memcpy(outputData, ncnnOutputs_, outputs.buffer.second);
    outputs.buffer.first = (void *)outputData;
}
// Run one synchronous inference pass.
// Builds a stack-local ncnn::Net from the configured model/weight paths,
// feeds every entry of 'inputs' to the matching input blob, then extracts
// every output blob into 'outputs'.
// Returns AI_RETCODE_FAILURE when the model cannot be loaded, the input
// count does not match the model, an input shape is not 4-D, or an input
// layout is unsupported; AI_RETCODE_SUCCESS otherwise.
AiRetCode NcnnWrapper::SynInfer(const std::vector<IOTensor> &inputs, std::vector<IOTensor> &outputs)
{
    // to create Ncnn Extracor
    ncnn::Net model;
    model.opt.use_vulkan_compute = true;
    // Bug fix: load_param/load_model return non-zero on failure; the old
    // code ignored the result and ran inference on an empty net.
    if (model.load_param(pdAlgoConfig_.modelPath_.c_str()) != 0)
    {
        HILOGE("!!! NcnnWrapper:SynInfer load_param failed!!");
        return AiRetCode::AI_RETCODE_FAILURE;
    }
    if (model.load_model(pdAlgoConfig_.weightPath_.c_str()) != 0)
    {
        HILOGE("!!! NcnnWrapper:SynInfer load_model failed!!");
        return AiRetCode::AI_RETCODE_FAILURE;
    }
    std::vector<const char *> v_inBlobs = model.input_names();
    std::vector<const char *> v_outBlobs = model.output_names();
    ncnn::Extractor pNcnnExtractor_ = model.create_extractor();

    // IOtensor inputs convert to ncnn inputs
    size_t in_blobs = v_inBlobs.size();
    if (inputs.size() != in_blobs)
    {
        HILOGE("!!! Dim of InputTensors not equal the model needed!!");
        return AiRetCode::AI_RETCODE_FAILURE;
    }
    for (size_t i = 0; i < in_blobs; ++i)
    {
        int h, w, c, n;
        void *inputData = inputs[i].buffer.first;
        if (inputs[i].shape.size() != 4)
        {
            HILOGE("!!! NcnnWrapper:SynInfer inputs shape is error!!");
            return AiRetCode::AI_RETCODE_FAILURE;
        }
        // Map the 4-D tensor shape onto (n, c, h, w) according to layout.
        switch (inputs[i].layout)
        {
        case NCHW:
        {
            h = inputs[i].shape[2];
            w = inputs[i].shape[3];
            c = inputs[i].shape[1];
            n = inputs[i].shape[0];
            break;
        }
        case NHWC:
        {
            h = inputs[i].shape[1];
            w = inputs[i].shape[2];
            c = inputs[i].shape[3];
            n = inputs[i].shape[0];
            break;
        }
        default:
        {
            HILOGE("!!! UNsupport NCNN layout!!");
            return AiRetCode::AI_RETCODE_FAILURE;
        }
        }
        // NOTE(review): this resolves to ncnn's 4-D Mat(w, h, d, c, data)
        // constructor, so 'c' lands in the depth slot and 'n' in the channel
        // slot -- confirm this matches what the models expect.
        ncnn::Mat in_mat(w, h, c, n, inputData);
        // NOTE(review): overwriting elemsize after construction does not
        // recompute the Mat's internal strides -- confirm non-float32 inputs
        // are handled correctly.
        SetMnnTensorElemsize(inputs[i], in_mat);
        // input ncnninputs to Extractor
        pNcnnExtractor_.input(v_inBlobs[i], in_mat);
    }
    // to extract
    ncnn::Mat ncnnOutput_;
    IOTensor outIOTensor;
    size_t out_blobs = v_outBlobs.size();
    for (size_t i = 0; i < out_blobs; ++i)
    {
        pNcnnExtractor_.extract(v_outBlobs[i], ncnnOutput_);
        GetOutputData(ncnnOutput_, outIOTensor);
        outputs.emplace_back(outIOTensor);
    }
    return AiRetCode::AI_RETCODE_SUCCESS;
}

// Map an ncnn element size (in bytes) onto the IOTensor element type.
// ncnn encodes its storage type only through Mat::elemsize:
//   1 byte -> int8/uint8, 2 bytes -> float16, 4 bytes -> float32/int32,
//   0 = empty Mat.
// Any other width (including 4 and 0) falls back to FLOAT32, matching the
// previous behaviour.
void NcnnWrapper::SetIOTensorType(ncnn::Mat const &ncnnTensor, IOTensor &tensor)
{
    const size_t byteWidth = ncnnTensor.elemsize;
    if (byteWidth == 1)
    {
        tensor.type = TensorType::INT8;
    }
    else if (byteWidth == 2)
    {
        tensor.type = TensorType::FLOAT16;
    }
    else
    {
        // 4-byte elements and any unexpected width are treated as float32.
        tensor.type = TensorType::FLOAT32;
    }
}

// Mirror of SetIOTensorType: derive ncnn Mat::elemsize (bytes per element)
// from the IOTensor element type. FLOAT32/INT32 and any unknown type map to
// 4 bytes, INT8/UINT8 to 1 byte, FLOAT16 to 2 bytes.
void NcnnWrapper::SetMnnTensorElemsize(IOTensor const &tensor, ncnn::Mat &ncnnTensor)
{
    size_t width = 4; // FLOAT32/INT32 and the fallback for unknown types
    switch (tensor.type)
    {
    case TensorType::INT8:
    case TensorType::UINT8:
        width = 1;
        break;
    case TensorType::FLOAT16:
        width = 2;
        break;
    default:
        break;
    }
    ncnnTensor.elemsize = width;
}
