#include "LayerSegInference.h"

// Class-id -> display color palette used when rendering the segmentation mask.
// OpenCV stores pixels in BGR order, so cv::Vec3b(255, 0, 0) renders as blue.
// Index 0 (black) is the background class.
// NOTE(review): only 6 entries — a model emitting more classes than this would
// index past the end of the vector; confirm against the model's class count.
std::vector<cv::Vec3b> colors = {
        cv::Vec3b(0, 0, 0),     // Black
        cv::Vec3b(255, 0, 0),   // Blue
        cv::Vec3b(0, 255, 0),   // Green
        cv::Vec3b(0, 0, 255),   // Red
        cv::Vec3b(255, 255, 0), // Cyan
        cv::Vec3b(255, 0, 255), // Magenta
};

// Constructs the wrapper and eagerly builds the ONNX Runtime session.
// NOTE(review): the char* status returned by CreateSession is discarded here;
// on failure the only signal is the message CreateSession prints to stdout.
OrtLayerSeg::OrtLayerSeg() {
    CreateSession();
}

// Releases the ONNX Runtime session (owned via raw pointer, created with
// `new` in CreateSession).
// FIX: reset the pointer after deletion so any accidental reuse or a second
// destruction pass hits a well-defined null instead of a dangling pointer.
OrtLayerSeg::~OrtLayerSeg() {
    delete session;    // delete on nullptr is well-defined, no guard needed
    session = nullptr; // prevent dangling-pointer reuse
}


// Copies a single-channel 8-bit image into a caller-allocated float buffer,
// normalizing each pixel from [0, 255] to [0.0, 1.0].
//
// @param iImg  grayscale (CV_8UC1) input image; this routine reads exactly one
//              byte per pixel, so multi-channel inputs are not supported here.
// @param iBlob destination buffer (e.g. float*) with at least rows*cols
//              elements, indexed in row-major order.
// @return RET_OK always.
template<typename T>
char* OrtLayerSeg::BlobFromImage(cv::Mat& iImg, T& iBlob) {
    using Elem = typename std::remove_pointer<T>::type;
    const int imgHeight = iImg.rows;
    const int imgWidth = iImg.cols;

    // Input images are grayscale, so two loops suffice; an RGB image would
    // need an additional channel loop.
    for (int h = 0; h < imgHeight; h++)
    {
        // Row pointer avoids the per-pixel bounds arithmetic of at<>().
        const uchar* row = iImg.ptr<uchar>(h);
        for (int w = 0; w < imgWidth; w++)
        {
            iBlob[h * imgWidth + w] = Elem(row[w] / 255.0f);
        }
    }
    return RET_OK;
}


// Builds the ONNX Runtime environment and inference session from `modelPath`,
// optionally attaching the CUDA execution provider.
//
// @return RET_OK on success, or a static failure message on any exception
//         (the exception text is also printed to stdout).
char* OrtLayerSeg::CreateSession()
{
    try
    {
        env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, "LayerSeg");
        Ort::SessionOptions sessionOption;
        // GPU inference requires explicitly appending the CUDA provider.
        if (cudaEnable)
        {
            OrtCUDAProviderOptions cudaOption;
            cudaOption.device_id = 0;
            sessionOption.AppendExecutionProvider_CUDA(cudaOption);
        }
        sessionOption.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
        sessionOption.SetIntraOpNumThreads(intraOpNumThreads);
        sessionOption.SetLogSeverityLevel(logSeverityLevel);

        session = new Ort::Session(env, modelPath, sessionOption);

        // The model's input/output node names are known at build time, so the
        // Get*NameAllocated enumeration below is intentionally disabled (the
        // original author also noted leak concerns with the Get approach).
        /*Ort::AllocatorWithDefaultOptions allocator;
		size_t inputNodesNum = session->GetInputCount();
		for (size_t i = 0; i < inputNodesNum; i++)
		{
		    auto input_node_name = session->GetInputNameAllocated(i, allocator);
		    inputNodeNames.push_back(input_node_name.get());
		}

		size_t OutputNodesNum = session->GetOutputCount();
		for (size_t i = 0; i < OutputNodesNum; i++)
		{
		    auto output_node_name = session->GetOutputNameAllocated(i, allocator);
		    outputNodeNames.push_back(output_node_name.get());
		}*/

        options = Ort::RunOptions{ nullptr };

        return RET_OK;
    }
    catch (const std::exception& e)
    {
        std::cout << "[ONNX LayerSeg]:" << e.what() << std::endl;
        // BUG FIX: the original returned a pointer to a function-local array,
        // which dangles as soon as this frame unwinds (undefined behavior for
        // the caller). `static` gives the message permanent storage.
        static char failureMsg[] = "[ONNX LayerSeg]: Create session failed.";
        return failureMsg;
    }
}


// Runs one inference pass: pads the input to the model's stride, converts it
// to a normalized float blob, and delegates execution and post-processing to
// TensorProcess.
//
// @param iImg       grayscale (CV_8UC1) input image.
// @param layerSuface out-parameter forwarded to TensorProcess (name kept as
//                   declared in the header).
// @param outputImg  receives the colorized segmentation overlay.
// @return status from TensorProcess (RET_OK on success).
char* OrtLayerSeg::RunSession(cv::Mat& iImg, std::vector<int>& layerSuface, cv::Mat& outputImg)
{
    cv::Mat processedImg;
    PreProcess(iImg, imgSize, processedImg); // also refreshes imgSize with the padded dims

    // One float per pixel (single channel). Ownership of this buffer passes
    // to TensorProcess, which delete[]s it after building the input tensor —
    // do not free it here.
    float* blob = new float[processedImg.total()];
    BlobFromImage(processedImg, blob);

    // NHWC tensor layout: { batch, height, width, channels }.
    std::vector<int64_t> inputNodeDims = { 1, imgSize.at(0), imgSize.at(1), 1 };

    // FIX: propagate TensorProcess's status instead of unconditionally
    // returning RET_OK and discarding any failure.
    return TensorProcess(iImg, blob, inputNodeDims, layerSuface, outputImg);
}


// Zero-pads the image on its right and bottom edges so both dimensions become
// multiples of 32 (a common encoder/decoder stride requirement), and reports
// the padded size back through ImgSize as { rows, cols }.
char* OrtLayerSeg::PreProcess(cv::Mat& iImg, std::vector<int>& ImgSize, cv::Mat& oImg)
{
    // (32 - x % 32) % 32 yields 0 when x is already a multiple of 32,
    // otherwise the amount needed to reach the next multiple.
    const int padRight  = (32 - iImg.cols % 32) % 32;
    const int padBottom = (32 - iImg.rows % 32) % 32;
    cv::copyMakeBorder(iImg, oImg, 0, padBottom, 0, padRight, cv::BORDER_CONSTANT, 0);
    ImgSize = { oImg.rows, oImg.cols };
    return RET_OK;
}


// Wraps `blob` in an input tensor, runs the ONNX session, converts the
// model's softmax probability map to a per-pixel argmax color mask, and
// blends that mask over the original image.
//
// Ownership: `blob` must be a heap array allocated with new[] (see
// RunSession); this function delete[]s it once Run() has consumed it.
//
// @param iImg       original (unpadded) grayscale image; used to crop away
//                   the padding and as the base of the blended overlay.
// @param blob       normalized pixel buffer (consumed and freed here).
// @param inputNodeDims NHWC dims { 1, H, W, 1 } of the padded image.
// @param layerSuface NOTE(review): never written by this function — confirm
//                   whether callers expect it to be filled.
// @param outputImg  receives the BGR overlay (input blended with the mask).
// @return RET_OK always.
template<typename N>
char* OrtLayerSeg::TensorProcess(cv::Mat& iImg, N& blob, std::vector<int64_t>& inputNodeDims,
    std::vector<int>& layerSuface, cv::Mat& outputImg)
{
    Ort::Value inputTensor = Ort::Value::CreateTensor<typename std::remove_pointer<N>::type>(
        Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, imgSize.at(0) * imgSize.at(1) * 1,
        inputNodeDims.data(), inputNodeDims.size());
    auto outputTensor = session->Run(options, inputNodeNames.data(), &inputTensor, 1, outputNodeNames.data(),
        outputNodeNames.size());
    Ort::TypeInfo typeInfo = outputTensor.front().GetTypeInfo();
    auto tensor_info = typeInfo.GetTensorTypeAndShapeInfo();
    std::vector<int64_t> outputNodeDims = tensor_info.GetShape(); // (b, h, w, c)
    auto output = outputTensor.front().GetTensorMutableData<typename std::remove_pointer<N>::type>();
    delete[] blob; // input was consumed by Run(); freed here per the RunSession contract

    // The model output is a softmax-activated probability map; below we take
    // the highest-probability channel as the segmentation class per pixel.
    const int height = static_cast<int>(outputNodeDims[1]);
    const int width = static_cast<int>(outputNodeDims[2]);
    const int classNum = static_cast<int>(outputNodeDims[3]);
    cv::Mat prediction = cv::Mat::zeros(height, width, CV_8UC3);

    // The output buffer is flat: height * width * classNum floats.
    float* rawData = (float*)output;
    const int colorCount = static_cast<int>(colors.size());
    for (int i = 0; i < height; ++i) {
        for (int j = 0; j < width; ++j) {
            // Non-owning view over this pixel's classNum probabilities.
            cv::Mat prob(1, classNum, CV_32FC1, rawData);
            cv::Point class_id;
            double maxLabelScore;
            cv::minMaxLoc(prob, 0, &maxLabelScore, 0, &class_id);

            // FIX: clamp the class index so a model with more classes than
            // palette entries cannot read past the end of `colors`.
            prediction.at<cv::Vec3b>(i, j) = colors[class_id.x < colorCount ? class_id.x : 0];
            rawData += classNum;
        }
    }

    // Drop the padding added by PreProcess, then blend the mask over the
    // original image. NOTE(review): weights 0.3 + 0.9 sum to 1.2, which
    // brightens the result — confirm this is intentional.
    cv::Mat prediction_crop = prediction(cv::Rect(0, 0, iImg.cols, iImg.rows));
    cv::cvtColor(iImg, outputImg, cv::COLOR_GRAY2BGR);
    cv::addWeighted(prediction_crop, 0.3, outputImg, 0.9, 0, outputImg);

    return RET_OK;
}
