/***
 * Official Document:
 *      1. https://onnxruntime.ai/docs/build/eps.html#openvino
 *      2. https://onnxruntime.ai/docs/get-started/with-cpp
 *      3. https://onnxruntime.ai/getting-started
 *      4. https://github.com/microsoft/onnxruntime-inference-examples/blob/main/c_cxx/MNIST/MNIST.cpp
 *      This part only uses the pure-CPU provider to validate the pipeline; for real
 *      deployments, ONNX Runtime should be built with the OpenVINO execution provider.
 * About the xtensor libraries:
 *  xtensor: https://github.com/xtensor-stack/xtensor
 *      - xtl (data structures): https://github.com/xtensor-stack/xtl
 *      - xsimd (SIMD wrappers): https://github.com/xtensor-stack/xsimd
 */
#include <cstdint>

#include "onnxruntime_cxx_api.h"
#include <opencv2/opencv.hpp>
#include "xtensor.hpp"
#include "xtensor/xarray.hpp"
#include "xtensor/xio.hpp"
#include "xtensor/xnoalias.hpp"
#include "xtensor/xview.hpp"


/**
 * Load the sample image, resize it to 512x512 if needed, and z-score
 * normalize it channel-wise: (pixel - mean) / (std + eps).
 *
 * @return a 1x3x512x512 NCHW float blob (cv::dnn::blobFromImage layout)
 *         ready to feed the network.
 */
cv::Mat preprocessSampleInput(){
    const std::string sampleImage = "../../../resource/raw_resize/1.jpg";
    cv::Mat mat = cv::imread(sampleImage);
    // Fail fast on a missing/unreadable file instead of crashing later in resize().
    CV_Assert(!mat.empty());

    // BUG FIX: the original used `&&`, so an image with exactly one side equal
    // to 512 (e.g. 512x600) was never resized. Resize whenever either side differs.
    if(mat.rows != 512 || mat.cols != 512){
        cv::resize(mat, mat, cv::Size(512, 512));
    }

    mat.convertTo(mat, CV_32F);

    // Per-channel mean/std of this image; epsilon guards against a zero std
    // (e.g. a constant-color channel).
    cv::Scalar matMean; cv::Mat matStd;
    cv::meanStdDev(mat, matMean, matStd);
    matStd += 1e-8;
    const double *stdPtr = matStd.ptr<double>();
    cv::Scalar stdScalar(stdPtr[0], stdPtr[1], stdPtr[2]);

    cv::subtract(mat, matMean, mat);
    mat /= stdScalar;

    // HWC -> NCHW (1x3x512x512) float blob.
    return cv::dnn::blobFromImage(mat);
}

/**
 * Numerically stable softmax over axis 1 (the class/channel axis), mirroring
 * torch.nn.functional.softmax(x, dim=1).
 *
 * Subtracting the per-position channel max before exp() avoids overflow.
 * BUG FIX: the original reductions dropped axis 1, and the subsequent
 * broadcast only lined up correctly because the batch dimension was 1.
 * xt::keep_dims keeps the reduced axis as size 1 so the result is correct
 * for any batch size (identical output for batch == 1).
 */
xt::xarray<float> torchStyleSoftmaxImpl(xt::xarray<float> &x){
    xt::xarray<float> e_x = xt::exp(x - xt::amax(x, {1}, xt::keep_dims));
    return e_x / xt::sum(e_x, {1}, xt::keep_dims);
}

/**
 * Flip x along each axis in `axis`, mirroring numpy.flip(x, axis=tuple).
 *
 * @param x     array to flip (not modified)
 * @param axis  axes to flip along, applied in order
 * @return the flipped array; an empty axis list returns an unflipped copy.
 *         (BUG FIX: the original returned a default-constructed,
 *         uninitialized array when `axis` was empty.)
 */
xt::xarray<float> numpyStyleFlip(xt::xarray<float> &x, std::vector<int> axis){
    xt::xarray<float> result = x;  // start from a copy; safe for an empty axis list
    for(int each : axis){
        result = xt::flip(result, each);
    }
    return result;
}

int main(){
    // Load the model and create InferenceSession
    Ort::Env env;
    std::string model_path = "../../../3-NNUnetWithOnnxAndOpenVino/v1_onnx.onnx";
    Ort::Session session(env,  std::wstring(model_path.begin(), model_path.end()).c_str(), Ort::SessionOptions{nullptr});

    // load preprocess
    cv::Mat sampleImage = preprocessSampleInput();
    xt::xarray<float> sampleArray = xt::adapt((float *)sampleImage.data, 1 * 3 * 512 * 512, xt::no_ownership(), std::vector<std::size_t>{1, 3, 512, 512});
    xt::xarray<float> inputArray = xt::zeros<float>({1, 3, 512, 512});
    xt::xarray<float> outputArray = xt::zeros<float>({1, 2, 512, 512});
    xt::xarray<float> sampleOutArray = xt::zeros<float>({1, 2, 512, 512});

    // run
    Ort::MemoryInfo memoryInfo = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
    std::array<int64_t, 4> inputShape{1, 3, 512, 512};
    std::array<int64_t, 4> outputShape{1, 2, 512, 512};
    Ort::Value inputTensor = Ort::Value::CreateTensor<float>(memoryInfo, inputArray.data(), inputArray.size(),
                                                             inputShape.data(), inputShape.size());
    Ort::Value outputTensor = Ort::Value::CreateTensor<float>(memoryInfo, outputArray.data(), outputArray.size(),
                                                              outputShape.data(), outputShape.size());

    Ort::RunOptions options;
    const char *input_names[] = {"input:0"};
    const char *output_names[] = {"output:0"};

    //  x = torch.from_numpy(mat)
    //  pred = F.softmax(model(x), 1)
    //  pred += torch.flip(F.softmax(model(torch.flip(x, (3,))), 1), (3,))
    //  pred += torch.flip(F.softmax(model(torch.flip(x, (2,))), 1), (2,))
    //  pred += torch.flip(F.softmax(model(torch.flip(x, (3, 2))), 1), (3, 2))
    inputArray = sampleArray;
    session.Run(options, input_names, &inputTensor, 1, output_names, &outputTensor, 1);
    sampleOutArray = torchStyleSoftmaxImpl(outputArray);

    // 无效，应为修改了数据的指向
    inputArray = numpyStyleFlip(sampleArray, {3});
    session.Run(options, input_names, &inputTensor, 1, output_names, &outputTensor, 1);
    outputArray = torchStyleSoftmaxImpl(outputArray);
    sampleOutArray += numpyStyleFlip(outputArray, {3});

    inputArray = numpyStyleFlip(sampleArray, {2});
    session.Run(options, input_names, &inputTensor, 1, output_names, &outputTensor, 1);
    outputArray = torchStyleSoftmaxImpl(outputArray);
    sampleOutArray += numpyStyleFlip(outputArray, {2});

    inputArray = numpyStyleFlip(sampleArray, {3, 2});
    session.Run(options, input_names, &inputTensor, 1, output_names, &outputTensor, 1);
    outputArray = torchStyleSoftmaxImpl(outputArray);
    sampleOutArray += numpyStyleFlip(outputArray, {3, 2});

    sampleOutArray /= 4.0;
    sampleOutArray = xt::argmax(sampleOutArray, 1) * 255;

    cv::Mat outMat(512, 512, CV_8UC1, sampleOutArray.data());
    cv::imwrite("onnx_output.png", outMat);
}