#include "mnn_infer_api.h"

// Default-construct; members are set up later by Init().
mnnModel::mnnModel() = default;
mnnModel::~mnnModel(){
    // Guard against Init() having failed (or never been called): mnnNet would
    // be a null shared_ptr and dereferencing it here would crash on teardown.
    if (mnnNet) {
        mnnNet->releaseModel();
        mnnNet->releaseSession(session);
    }
    // BUG FIX: the previous `if(!prob) delete [] prob;` had an inverted
    // condition, so the delete only ran when `prob` was null (a no-op).
    // That dead code is removed rather than "fixed": doInference() points
    // `prob` into an MNN-tensor-owned host buffer, so this class does not
    // own the allocation and must not delete it.
    // NOTE(review): if `prob` is ever changed to an owned buffer, free it here.
}

// Load the serialized .mnn model, create a CPU session, and cache the
// input/output tensors plus the flattened output element count.
// On failure an error is logged and the object is left unusable (no throw).
void mnnModel::Init(std::string model_path,std::string input_blob_name,std::string out_blob_name){

    mnnNet = std::shared_ptr<MNN::Interpreter>(MNN::Interpreter::createFromFile(model_path.c_str()));
    if (!mnnNet){
        std::cout << "load MNN model failed.\n";
        return;
    }

    // CPU backend, 4 threads.
    MNN::ScheduleConfig netConfig;
    netConfig.type = MNN_FORWARD_CPU;
    netConfig.numThread = 4;

    // Low precision trades accuracy for speed (fp16 where the backend supports it).
    // MNN copies the schedule/backend config inside createSession, so a stack
    // local is safe here.
    MNN::BackendConfig backendConfig;
    backendConfig.precision = MNN::BackendConfig::Precision_Low;
    netConfig.backendConfig = &backendConfig;

    session = mnnNet->createSession(netConfig);
    input = mnnNet->getSessionInput(session, input_blob_name.c_str());
    output = mnnNet->getSessionOutput(session, out_blob_name.c_str());
    if (input == nullptr || output == nullptr){
        std::cout << "get session input/output tensor failed.\n";
        return;
    }

    // Total number of output elements.
    // BUG FIX: reset to 1 before accumulating — the previous code multiplied
    // into whatever value the member already held, so a repeated Init() (or an
    // uninitialized member) silently corrupted the size.
    output_size = 1;
    for(int i =0;i<output->dimensions();i++){
        output_size *= output->shape()[i];
    }
    // Second output dimension (e.g. detection candidates). Guarded: the old
    // unconditional shape()[1] read out of bounds for 1-D outputs.
    OUTPUT_CANDIDATES = (output->dimensions() > 1) ? output->shape()[1] : 0;
    // if (input->elementSize() <= 4) {
    //     mnnNet->resizeTensor(input, {1, sp.INPUT_C, sp.INPUT_H, sp.INPUT_W}); // nchw
    //     // mnnNet->resizeTensor(input, {1, sp.INPUT_H, sp.INPUT_W ,sp.INPUT_C}); // nhwc

    //     mnnNet->resizeSession(session);
    // }	
}

// Run one forward pass.
// input_arr: caller-owned float buffer in NCHW layout, at least as large as
// the session's input tensor (givenTensor.size() bytes) — TODO confirm caller
// contract. Results are left in `prob` (output_size floats).
void mnnModel::doInference(float *input_arr){
        // Stage the caller's data in a host tensor matching the input layout.
        MNN::Tensor givenTensor(input, MNN::Tensor::CAFFE);  // nchw
        // MNN::Tensor givenTensor(input, MNN::Tensor::TENSORFLOW); // nhwc

        auto inputData = givenTensor.host<float>();
        // Tensor::size() is in bytes, which is what memcpy expects.
        ::memcpy(inputData,input_arr,givenTensor.size());

        // Upload to the (possibly backend-specific) session input tensor.
        input->copyFromHostTensor(&givenTensor);

        // run session
        mnnNet->runSession(session);

        // Download the output into a host-side tensor.
        auto output_host = std::make_shared<MNN::Tensor>(output, MNN::Tensor::CAFFE);
        output->copyToHostTensor(output_host.get());

        // BUG FIX: the previous code stored output_host->host<float>() directly
        // in `prob`, but output_host and its buffer are destroyed when this
        // function returns — every later read through `prob` was a dangling
        // access. Copy the results into a lazily allocated buffer held by the
        // object instead (output_size is computed in Init()).
        // NOTE(review): this buffer is intentionally never reallocated; it
        // should be freed in the destructor once ownership is formalized.
        if (prob == nullptr) {
            prob = new float[output_size];
        }
        ::memcpy(prob, output_host->host<float>(), output_size * sizeof(float));
}

