//
// Created by tacom on 2024/12/20.
//
// PATH=D:\Development\Envs\onnxruntime-win-x64-1.20.1\lib\;D:\Development\Envs\OpenCV460\Binary\x64\vc16\bin

#include "UNetOnnx.h"

#include <chrono>
#include <filesystem>

// Build the ONNX Runtime session and pre-allocate the pinned input/output
// tensors used by every infer() call.
//   modelPath - filesystem path to the .onnx model file.
// Shapes are fixed by the model: input 1x3x512x512, output 1x2x512x512.
UNetOnnx::UNetOnnx(std::string modelPath)
    :modelPath(modelPath), ortSession{nullptr},
     inputTensor{nullptr}, outputTensor{nullptr}, ortMemoryInfo{nullptr}, ortRunOptions{nullptr},
     ortInputShape{1, 3, 512, 512}, ortOutputShape{1, 2, 512, 512},
     inputSize{1 * 3 * 512 * 512}, outputSize{1 * 2 * 512 * 512},
     ortInputNames{"input:0"}, ortOutputNames{"output:0"}
{
    ortEnv = {ORT_LOGGING_LEVEL_ERROR, "UNetOnnx"};
//    ortSessionOptions.SetIntraOpNumThreads(8);
//    ortSessionOptions.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
    // Diagnostic: list the execution providers this build exposes
    // (e.g. AzureExecutionProvider, CPUExecutionProvider).
    for(const std::string &provider : Ort::GetAvailableProviders()){
        std::cout << provider << " ";
    }std::cout << std::endl;
    // Prefer DirectML (device 0); ORT falls back to CPU if unavailable.
    OrtSessionOptionsAppendExecutionProvider_DML(ortSessionOptions, 0);

    // std::filesystem::path performs a proper narrow->wide conversion on
    // Windows (where Ort::Session wants wchar_t*); the previous
    // std::wstring(begin, end) trick only widened bytes and broke non-ASCII
    // paths. On POSIX, path::c_str() is char* and matches ORT's overload too.
    const std::filesystem::path onnxPath(modelPath);
    ortSession = {ortEnv, onnxPath.c_str(), ortSessionOptions};

    // Pinned host buffers wrapped once as ORT tensors; Run() reads/writes
    // these same buffers on every call, avoiding per-call tensor creation.
    ortInputArray.resize(inputSize);
    ortOutputArray.resize(outputSize);
    ortMemoryInfo = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
    inputTensor = Ort::Value::CreateTensor<float>(ortMemoryInfo, ortInputArray.data(), inputSize,
                                                  ortInputShape.data(), ortInputShape.size());
    outputTensor = Ort::Value::CreateTensor<float>(ortMemoryInfo, ortOutputArray.data(), outputSize,
                                                   ortOutputShape.data(), ortOutputShape.size());
}

// Run segmentation on the image at imagePath and return a 0/255 uint8 mask.
// Uses flip-based test-time augmentation: the model is run on the original
// image plus its axis-3, axis-2, and axis-3+2 flips, and the four
// (flipped-back) outputs are averaged before the per-pixel argmax.
cv::Mat UNetOnnx::infer(const std::string &imagePath){
    // Load and normalize the image into the network's NCHW float layout.
    cv::Mat rawMat = cv::imread(imagePath);
    cv::Mat blobMat = preprocessInputMat(rawMat);

    // Wrap the blob as an xtensor array shaped like the model input.
    xt::xarray<float> modelInput = convertMatToArray(blobMat, inputSize, ortInputShape);

    // Accumulate predictions over all flip combinations, then average.
    const std::vector<std::vector<int>> flipAxes{{}, {3}, {2}, {3, 2}};
    xt::xarray<float> summed = getInferData(modelInput, flipAxes[0]);
    for(std::size_t i = 1; i < flipAxes.size(); ++i){
        summed += getInferData(modelInput, flipAxes[i]);
    }
    summed /= 4.0;

    // Argmax over the class axis, scaled to a visible 0/255 mask.
    xt::xarray<uint8_t> maskArray = xt::argmax(summed, 1) * 255;
    return convertArrayToMat(maskArray);
}

// One TTA pass: flip the input along the given axes, run the model, then
// flip the prediction back along the same axes so it aligns with the
// un-flipped input. An empty axis list runs the model unmodified.
//   inputArray - model input, taken by value because it is flipped in place.
//   axis       - axes to flip (e.g. {3}, {2}, {3, 2}).
// Returns the model output in the original (un-flipped) orientation.
xt::xarray<float> UNetOnnx::getInferData(xt::xarray<float> inputArray, const std::vector<int> &axis){
    numpyStyleFlip(inputArray, axis);

    // steady_clock measures wall time; the old clock() reported CPU time on
    // POSIX, which under-counts work offloaded to the DML device.
    const auto start = std::chrono::steady_clock::now();
    xt::xarray<float> inferArray = inferStart(inputArray);
    const auto end = std::chrono::steady_clock::now();
    std::cout << "onnx running time: "
              << std::chrono::duration<double>(end - start).count() << " s" << std::endl;

//    torchStyleSoftmaxImpl(inferArray, true);  // optional activation, currently disabled

    numpyStyleFlip(inferArray, axis);
    return inferArray;
}

// Execute one forward pass through the ORT session.
// inputTensor/outputTensor were created in the constructor and alias the
// member buffers ortInputArray/ortOutputArray, so Run() needs no per-call
// tensor allocation: we copy into the input buffer, run, and read the
// output buffer back out.
xt::xarray<float> UNetOnnx::inferStart(const xt::xarray<float> &inputArray){
    // copy to tensor array (the buffer inputTensor was built on)
    std::copy(inputArray.begin(), inputArray.end(), ortInputArray.begin());

    // start (writes results into ortOutputArray via the pinned outputTensor)
    ortSession.Run(ortRunOptions, ortInputNames, &inputTensor, 1,
                   ortOutputNames, &outputTensor, 1);

    // copy to a xarray — the adapt() view borrows ortOutputArray
    // (no_ownership), but returning it as xt::xarray<float> materializes a
    // copy, so the result stays valid after the next Run() overwrites the buffer.
    return xt::adapt(ortOutputArray.data(), outputSize, xt::no_ownership(), ortOutputShape);
//    return xt::adapt(ortOutputArray.data(), ortOutputShape);
}


// Resize to 512x512, convert to float, apply per-image per-channel
// standardization (x - mean) / (std + eps), and pack into an NCHW blob.
// NOTE: mutates the caller's Mat in place (resize + convertTo + normalize).
cv::Mat UNetOnnx::preprocessInputMat(cv::Mat &inputMat){
    if(inputMat.rows != 512 || inputMat.cols != 512){
        cv::resize(inputMat, inputMat, cv::Size(512, 512));
    }
    inputMat.convertTo(inputMat, CV_32F);

    // Use the cv::Scalar output form of meanStdDev: the previous code read
    // matStd.ptr<double>()[0..2] out of a cv::Mat, which assumed at least
    // three channels and read out of bounds for grayscale input.
    cv::Scalar matMean, matStd;
    cv::meanStdDev(inputMat, matMean, matStd);
    matStd += cv::Scalar::all(1e-8);   // epsilon guards against divide-by-zero

    cv::subtract(inputMat, matMean, inputMat);
    inputMat /= matStd;                // per-channel divide by std

    // HWC uint8-range Mat -> 1x3x512x512 float blob
    return cv::dnn::blobFromImage(inputMat);
}

// Reinterpret the Mat's contiguous float buffer as an xtensor array with the
// given shape. adapt() only borrows the buffer (no_ownership); returning it
// as xt::xarray<float> copies the data into an owning array.
xt::xarray<float> UNetOnnx::convertMatToArray(const cv::Mat &inputMat, size_t size, std::vector<int64_t> &shape){
    auto *dataPtr = reinterpret_cast<float *>(inputMat.data);
    return xt::adapt(dataPtr, size, xt::no_ownership(), shape);
}

// Copy the argmax mask into a single-channel 8-bit Mat.
// Rows/cols are taken from the array's trailing two dimensions instead of
// the previous hard-coded 512x512, so other model resolutions also work.
// Assumes any leading (batch) dims multiply to 1 — true for the (1,H,W)
// output of infer().
cv::Mat UNetOnnx::convertArrayToMat(const xt::xarray<uint8_t> &argmaxArray){
    const auto &shape = argmaxArray.shape();
    const std::size_t dims = argmaxArray.dimension();
    const int rows = dims >= 2 ? static_cast<int>(shape[dims - 2]) : 1;
    const int cols = static_cast<int>(shape[dims - 1]);
    cv::Mat argmaxMat(rows, cols, CV_8UC1);
    std::copy(argmaxArray.begin(), argmaxArray.end(), argmaxMat.ptr<uint8_t>());
    return argmaxMat;
}

// Apply an activation over the class axis (axis 1), in place.
//   useOtherActivation == true  -> ReLU (sigmoid variant left commented out)
//   useOtherActivation == false -> numerically stable softmax over axis 1
// NOTE(review): the softmax branch relies on xtensor broadcasting the
// axis-1 reductions back against x; this lines up for batch size 1 — the
// only case produced by infer() — but verify before using larger batches.
void UNetOnnx::torchStyleSoftmaxImpl(xt::xarray<float> &x, bool useOtherActivation){
    if (!useOtherActivation){
        // subtract the per-position max before exp for numerical stability
        x = xt::exp(x - xt::amax(x, 1));
        x /= xt::sum(x, 1);
        return;
    }
//    x = 1.0 / (1.0 + xt::exp(-1 * x));
    x = xt::where(x > 0, x, 0.0);
}

// Mirror x along each requested axis in order (numpy.flip style).
// An empty axis list leaves x untouched.
void UNetOnnx::numpyStyleFlip(xt::xarray<float> &x, const std::vector<int> &axis){
    for(std::size_t i = 0; i < axis.size(); ++i){
        x = xt::flip(x, axis[i]);
    }
}
