#include "OnnxRuntimeObj.h"

#include "onnxruntime_cxx_api.h"
#include "cuda_provider_factory.h"
#include "onnxruntime_c_api.h"

#include <cstring>

#include <QDebug>

static Ort::Env *env=nullptr;
static Ort::SessionOptions *session_options=nullptr;
static Ort::Session *session=nullptr;

static Ort::AllocatorWithDefaultOptions *allocator=nullptr;

const char TensorTypeStrs[][100]={"TENSOR_DATA_TYPE_UNDEFINED",
                                  "TENSOR_DATA_TYPE_FLOAT",   // maps to c type float
                                  "TENSOR_DATA_TYPE_UINT8",   // maps to c type uint8_t
                                  "TENSOR_DATA_TYPE_INT8",    // maps to c type int8_t
                                  "TENSOR_DATA_TYPE_UINT16",  // maps to c type uint16_t
                                  "TENSOR_DATA_TYPE_INT16",   // maps to c type int16_t
                                  "TENSOR_DATA_TYPE_INT32",   // maps to c type int32_t
                                  "TENSOR_DATA_TYPE_INT64",   // maps to c type int64_t
                                  "TENSOR_DATA_TYPE_STRING",  // maps to c++ type std::string
                                  "TENSOR_DATA_TYPE_BOOL",
                                  "TENSOR_DATA_TYPE_FLOAT16",
                                  "TENSOR_DATA_TYPE_DOUBLE",      // maps to c type double
                                  "TENSOR_DATA_TYPE_UINT32",      // maps to c type uint32_t
                                  "TENSOR_DATA_TYPE_UINT64",      // maps to c type uint64_t
                                  "TENSOR_DATA_TYPE_COMPLEX64",   // complex with float32 real and imaginary components
                                  "TENSOR_DATA_TYPE_COMPLEX128",  // complex with float64 real and imaginary components
                                  "TENSOR_DATA_TYPE_BFLOAT16"     // Non-IEEE floating-point format based on IEEE754 single-precision
                                 };


OnnxRuntimeObj::OnnxRuntimeObj(QObject *parent):QObject (parent)
{
    if(!env) env=new Ort::Env(ORT_LOGGING_LEVEL_WARNING, "onnx_runtime");
    if(!session_options) session_options=new Ort::SessionOptions;
    if(!allocator) allocator=new Ort::AllocatorWithDefaultOptions;
}

OnnxRuntimeObj::~OnnxRuntimeObj()
{
    if(!session) delete session;
    session = nullptr;
}

///
/// \brief initSession
/// \param filename
/// \return
///初始化计算图
bool OnnxRuntimeObj::initSession(const QString& filename)
{



    session_options->SetIntraOpNumThreads(1);

    // If onnxruntime.dll is built with CUDA enabled, we can uncomment out this line to use CUDA for this
    // session (we also need to include cuda_provider_factory.h above which defines it)
    // #include "cuda_provider_factory.h"
    // OrtSessionOptionsAppendExecutionProvider_CUDA(session_options, 1);

    // Sets graph optimization level
    // Available levels are
    // ORT_DISABLE_ALL -> To disable all optimizations
    // ORT_ENABLE_BASIC -> To enable basic optimizations (Such as redundant node removals)
    // ORT_ENABLE_EXTENDED -> To enable extended optimizations (Includes level 1 + more complex optimizations like node fusions)
    // ORT_ENABLE_ALL -> To Enable All possible opitmizations
    session_options->SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED);

    if(!session) delete session;
    std::wstring wstr = filename.toStdWString();
    try {
        session = new Ort::Session(*env, wstr.data(),*session_options);
    } catch (...) {
        emit toastInfo("New ORT session failed!","red");
    }


    if(!session){
        return false;
    }

    // print number of model input nodes
    size_t num_input_nodes = session->GetInputCount();
    std::vector<const char*> input_node_names(num_input_nodes);
    std::vector<int64_t> input_node_dims;  // simplify... this model has only 1 input node {1, 3, 224, 224}.
    // Otherwise need vector<vector<>>

    emit toastInfo(QString("Number of inputs = %1").arg(num_input_nodes),"green");

    // iterate over all input nodes
    for (int i = 0; i < num_input_nodes; i++) {
        // print input node names
        char* input_name = session->GetInputName(i, *allocator);
        emit toastInfo(QString("Input %1 : name=%2").arg(i).arg(input_name));
        input_node_names[i] = input_name;

        // print input node types
        Ort::TypeInfo type_info = session->GetInputTypeInfo(i);
        auto tensor_info = type_info.GetTensorTypeAndShapeInfo();

        ONNXTensorElementDataType type = tensor_info.GetElementType();
        if(type>=ONNXTensorElementDataType::ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED
                &&
                type<=ONNXTensorElementDataType::ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16
                ){
            emit toastInfo(QString("Input %1 : type= %2").arg(i).arg(TensorTypeStrs[type]));
        }

        // print input shapes/dims
        input_node_dims = tensor_info.GetShape();
        emit toastInfo(QString("Input %1 : num_dims=%2")
                  .arg(i)
                  .arg(input_node_dims.size())
                  );
        for (int j = 0; j < input_node_dims.size(); j++)
            emit toastInfo(QString("Input %1 : dim %2=%3")
                      .arg(i)
                      .arg(j)
                      .arg(input_node_dims[j])
                      );
    }

    return true;
}

///
/// \brief getSessionInputShape
/// \param shape
/// \return
///获取计算图中输入的形状写入到shape,成功返回true
bool OnnxRuntimeObj::getSessionInputShape(std::vector<int64_t> &shape)
{
    if(!session){
        return false;
    }

    size_t num_input_nodes = session->GetInputCount();
    if(num_input_nodes!=1){
        toastInfo("number of input nodes not equal to 1!","red");
        return false;
    }

    //获取输入的形状
    Ort::TypeInfo type_info = session->GetInputTypeInfo(0);
    auto tensor_info = type_info.GetTensorTypeAndShapeInfo();
    shape = tensor_info.GetShape();

    if(shape.size()!=4){
        toastInfo("input shape rand not equal to 4!","red");
        return false;
    }

    if( (shape[0]==1 && shape[1]==1 && shape[2]>0 && shape[3]>0)
            ||
            (shape[0]==-1 && shape[1]==1 && shape[2]==-1 && shape[3]==-1)
            ){
        if(shape[0]==-1){
            shape[0]=1;
            shape[2]=512;
            shape[3]=512;
        }
    }else{
        shape.clear();
        toastInfo("input shape is invalid!","red");
        return false;
    }

    return true;
}

///
/// \brief sessionInfer
/// \return
///执行计算图
bool OnnxRuntimeObj::sessionInfer(float *input,const std::vector<int64_t> & inshape,float **output,std::vector<int64_t> & outshape)
{
    if(inshape.size()!=4 || input==nullptr){
        emit toastInfo("invalid input shape","red");
        return false;
    }

    size_t num_input_nodes = session->GetInputCount();
    size_t num_output_nodes = session->GetOutputCount();
    Q_ASSERT("Input Count != 1" && (num_input_nodes==1));
    Q_ASSERT("Output Count != 1" && (num_output_nodes==1));

    Ort::TypeInfo type_info = session->GetInputTypeInfo(0);
    auto tensor_info = type_info.GetTensorTypeAndShapeInfo();

    ONNXTensorElementDataType type = tensor_info.GetElementType();
    Q_ASSERT("Input Type != float" && type==ONNXTensorElementDataType::ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT);

    std::vector<int64_t> input_node_dims;
    size_t inout_tensor_size;//1LL*1*512*512;

//    input_node_dims.push_back(1);   //batch size
//    input_node_dims.push_back(1);   //channel
//    input_node_dims.push_back(512); //width
//    input_node_dims.push_back(512); //height

    input_node_dims = tensor_info.GetShape();
    Q_ASSERT("Input dims invalid" && (inshape.size()==4 && input_node_dims.size() ==4));
    Q_ASSERT("Input dims batch size invalid" && (input_node_dims[0]==-1 || input_node_dims[0]==inshape[0]));
    Q_ASSERT("Input dims channel invalid" && (input_node_dims[1]==1 && input_node_dims[1]==inshape[1]));
    Q_ASSERT("Input dims width invalid" && (input_node_dims[2]==-1 || input_node_dims[2]==inshape[2]));
    Q_ASSERT("Input dims height invalid" && (input_node_dims[3]==-1 || input_node_dims[3]==inshape[3]));

    inout_tensor_size=inshape[0];
    inout_tensor_size=inout_tensor_size*inshape[1]*inshape[2]*inshape[3];
    Q_ASSERT(inout_tensor_size<1LL*1024*1024*1024);
    input_node_dims[0]=inshape[0];
    input_node_dims[1]=inshape[1];
    input_node_dims[2]=inshape[2];
    input_node_dims[3]=inshape[3];


    // create input tensor object from data values
    auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
    Ort::Value input_tensor = Ort::Value::CreateTensor<float>(memory_info, input, inout_tensor_size, input_node_dims.data(), 4);
    Q_ASSERT(input_tensor.IsTensor());


    std::string inputname = session->GetInputName(0,*allocator);
    std::string outputname = session->GetOutputName(0,*allocator);
    const char* innameptr[1] = {inputname.c_str()};
    const char* outnameptr[1] = {outputname.c_str()};

    // score model & input tensor, get back output tensor
    std::vector<Ort::Value> output_tensors;
    try {
        output_tensors = session->Run(Ort::RunOptions{nullptr},
                                      (const char* const*)&innameptr,
                                      &input_tensor,
                                      1,     //input count
                                      (const char* const*)&outnameptr,
                                      1  //output count
                                      );
        Q_ASSERT(output_tensors.size() == 1 && output_tensors.front().IsTensor());
    } catch (...) {
        return false;
    }

    auto out_tensor_info=output_tensors.front().GetTypeInfo();
    Q_ASSERT("Output Type != float" && out_tensor_info.GetTensorTypeAndShapeInfo().GetElementType()==ONNXTensorElementDataType::ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT);
    auto out_tensor_dims = out_tensor_info.GetTensorTypeAndShapeInfo().GetShape();
    Q_ASSERT("Output dims invalid" && (out_tensor_dims.size() ==4));
    outshape.clear();
    outshape.push_back(out_tensor_dims[0]);
    outshape.push_back(out_tensor_dims[1]);
    outshape.push_back(out_tensor_dims[2]);
    outshape.push_back(out_tensor_dims[3]);

    inout_tensor_size=outshape[0];
    inout_tensor_size=inout_tensor_size*outshape[1]*outshape[2]*outshape[3];
    Q_ASSERT(inout_tensor_size<1LL*1024*1024*1024);

    // Get pointer to output tensor float values
    float* outfloatptr = output_tensors.front().GetTensorMutableData<float>();

    *output = new float[inout_tensor_size];

    std::memcpy(*output,outfloatptr,inout_tensor_size*sizeof (float));

    return true;
}


