//
// Created by tacom on 2024/12/20.
//

#ifndef SIMPLEIMPLWITHONNX_UNETONNX_H
#define SIMPLEIMPLWITHONNX_UNETONNX_H

#include <iostream>
#include <openvino_provider_factory.h>
#include <onnxruntime_cxx_api.h>
#include <opencv2/opencv.hpp>
#include <xtensor.hpp>
#include <xtensor/xarray.hpp>
#include <xtensor/xio.hpp>
#include <xtensor/xview.hpp>

/// ONNX Runtime wrapper for a UNet segmentation model.
///
/// Owns the Ort environment/session plus pre-allocated input/output buffers,
/// and exposes a single `infer` entry point that maps an image file to a
/// segmentation mask (`cv::Mat`).
class UNetOnnx {
public:
    /// Loads the ONNX model at `modelPath` and prepares the runtime session.
    explicit UNetOnnx(std::string modelPath);

    /// Runs the full pipeline (load image -> preprocess -> forward pass ->
    /// argmax/postprocess) and returns the resulting mask image.
    cv::Mat infer(const std::string &imagePath);

private:
    /// Resizes/normalizes the raw image into the network's expected layout.
    cv::Mat preprocessInputMat(cv::Mat &inputMat);
    /// Runs (possibly flip-augmented, per `axis`) inference on `inputArray`.
    xt::xarray<float> getInferData(xt::xarray<float> inputArray, const std::vector<int> &axis);
    /// Executes a single forward pass through the ONNX session.
    xt::xarray<float> inferStart(const xt::xarray<float> &inputArray);

    /// Copies `inputMat` pixels into an xtensor array of `size` elements with
    /// the given tensor `shape`.
    xt::xarray<float> convertMatToArray(const cv::Mat &inputMat, size_t size, std::vector<int64_t> &shape);
    /// Converts a class-index (argmax) array back into a displayable cv::Mat.
    cv::Mat convertArrayToMat(const xt::xarray<uint8_t> &argmaxArray);
    /// In-place softmax (PyTorch-style); `useOtherActivation` selects an
    /// alternative activation — see the implementation for the exact choice.
    void torchStyleSoftmaxImpl(xt::xarray<float> &x, bool useOtherActivation);
    /// In-place flip along `axis`, mimicking numpy.flip semantics.
    void numpyStyleFlip(xt::xarray<float> &x, const std::vector<int> &axis);

private:
    // basic info
    std::string modelPath;

    // onnx runtime
    //
    // NOTE: declaration order matters — members are initialized in
    // declaration order, not initializer-list order. `ortEnv` and
    // `ortSessionOptions` must be fully constructed before `ortSession`,
    // because Ort::Session is built from them in the constructor's
    // initializer list. (Previously `ortSessionOptions` was declared after
    // `ortSession`, which would make such an initializer read an
    // uninitialized member — UB and a -Wreorder hazard.)
    Ort::Env ortEnv;
    Ort::SessionOptions ortSessionOptions;
    Ort::Session ortSession;
    Ort::MemoryInfo ortMemoryInfo;
    Ort::RunOptions ortRunOptions;

    // onnx infer tensor — flat buffers backing the input/output Ort::Value
    // tensors; sizes are the element counts of the respective shapes.
    int64_t inputSize, outputSize;
    std::vector<float> ortInputArray;
    std::vector<float> ortOutputArray;
    Ort::Value inputTensor;
    Ort::Value outputTensor;

    // model info — shapes and (single) input/output tensor names as required
    // by Ort::Session::Run.
    std::vector<int64_t> ortInputShape;
    std::vector<int64_t> ortOutputShape;
    const char* ortInputNames[1];
    const char* ortOutputNames[1];
};


#endif //SIMPLEIMPLWITHONNX_UNETONNX_H
