#ifndef DENSEGRADEWRAPPER_H
#define DENSEGRADEWRAPPER_H

// https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#python_topics
// https://docs.nvidia.com/deeplearning/tensorrt/quick-start-guide/index.html#run-engine-c

#include <fstream>
#include <iostream>
#include <memory>
#include <sstream>

#include <QObject>
#include <QDebug>

#include <cuda_runtime_api.h>
#include <NvInfer.h>
#include <NvInferRuntime.h>

#include <opencv2/opencv.hpp>

#include "Network/NvLogger.h"
#include "Network/NetworkTools.h"

// Result of one dense-grade inference pass.
// Named struct (not a C-style unnamed-struct typedef) and zero-initialized
// members, so a default-constructed result is in a well-defined state.
struct DenseInferResult {
    int index = 0;       // presumably the predicted class index — confirm against postProcess()
    float pie[5] = {};   // per-class scores; presumably softmax probabilities — confirm against softmax()/postProcess()
};

/// Qt wrapper around a serialized TensorRT engine that classifies a single
/// image and returns a DenseInferResult (class index + 5 per-class scores).
/// Pipeline (see infer()): preprocess -> _infer -> softmax -> postProcess.
class DenseGradeWrapper : public QObject
{
    Q_OBJECT
public:
    /// Loads/deserializes the TensorRT engine from `enginePath` and sets up
    /// the runtime objects and device buffers.
    explicit DenseGradeWrapper(QString enginePath, QObject *parent = nullptr);
    ~DenseGradeWrapper();

public:
    /// Runs the full pipeline on the image at `imageSrc` and returns the
    /// classification result.
    DenseInferResult infer(QString imageSrc);

private:
    // tensorrt tools
    /// Allocates the device-side input/output buffers bound to the engine.
    void allocateBuffers();
    /// Byte size of a tensor: product of `dims` extents times `elem_size`.
    size_t getMemorySize(const nvinfer1::Dims& dims, const int32_t elem_size);

private:
    // infer process
    /// Executes the engine on the prepared (dimension-transposed) image and
    /// returns the raw output logits/scores.
    QVector<float> _infer(cv::Mat transDim);

    // post process
    /// In-place softmax over the raw output scores.
    void softmax(QVector<float> &buffer);
    /// Converts the (softmaxed) score buffer into a DenseInferResult.
    DenseInferResult postProcess(const QVector<float> &buffer);

    // pre process
    /// Loads the image and converts it to the network's input layout.
    /// NOTE(review): exact resize/normalization contract lives in the .cpp —
    /// confirm there; presumably uses circleCrop() below.
    cv::Mat preprocess(QString imageSrc);
    /// Crops the image to a circular region — presumably a fundus/lens-style
    /// circle crop; confirm in the implementation.
    cv::Mat circleCrop(QString imageSrc);

private:
    // runtime
    // Declaration order matters: members are destroyed in reverse order, so
    // context is released before engine, and engine before runtime — the
    // teardown order TensorRT requires.
    std::unique_ptr<nvinfer1::IRuntime> runtime;
    std::unique_ptr<nvinfer1::ICudaEngine> engine;
    std::unique_ptr<nvinfer1::IExecutionContext> context;
    cudaStream_t stream;    // CUDA stream for async copies/execution; freed in the destructor — confirm
    NvLogger logger;        // TensorRT logger sink (project-local)

    // binding
    // Raw device pointers owned by this class (cudaMalloc'd in
    // allocateBuffers, presumably freed in the destructor — confirm).
    std::vector<void *> inputs;    // device buffers for input tensors
    std::vector<void *> outputs;   // device buffers for output tensors
    std::vector<void *> bindings;  // input+output pointers in engine binding order
};

#endif // DENSEGRADEWRAPPER_H
