/**
 * \brief Utility helpers for TensorRT inference: CUDA memory helpers, a logger, a layer-time profiler, and a tensor binding descriptor.
* \author pengcheng (pengcheng@yslrpch@126.com)
* \date 2020-05-30
 * \attention Copyright © ADC Technology (Tianjin) Co., Ltd.
* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
*/

#ifndef DETECTION_VISION_TENSORRT_IMPL_UTILS_HPP
#define DETECTION_VISION_TENSORRT_IMPL_UTILS_HPP
#include <stdint.h>

#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <string>

#include <tensor_rt.pb.h>
#include <NvInferPlugin.h>
#include <cuda_runtime_api.h>
// #include <cudnn.h>

#include "onnx_tensorrt/NvOnnxParser.h"
#include "onnx_tensorrt/NvOnnxParserRuntime.h"

namespace tensorrt_inference
{

#ifndef CUDA_CHECK
/// Evaluate a CUDA runtime call and abort (via assert) if it fails.
/// Wrapped in do { } while (0) so the macro expands to a single statement
/// and is safe inside an unbraced if/else.
/// The message is terminated with std::endl so it is flushed to stderr
/// before assert() aborts the process.
#define CUDA_CHECK(callstr)                                                                    \
    do {                                                                                       \
        cudaError_t error_code = callstr;                                                      \
        if (error_code != cudaSuccess) {                                                       \
            std::cerr << "CUDA error " << cudaGetErrorString(error_code)                       \
                      << " (" << error_code << ") at " << __FILE__ << ":" << __LINE__          \
                      << std::endl;                                                            \
            assert(0);                                                                         \
        }                                                                                      \
    } while (0)
#endif

/// Allocate memSize bytes of GPU memory; terminates the process on failure.
/// \param memSize number of bytes to allocate with cudaMalloc.
/// \return device pointer, never nullptr on return.
inline void* SafeCudaMalloc(size_t memSize)
{
    // Initialize to nullptr: if cudaMalloc fails and NDEBUG disables the
    // assert inside CUDA_CHECK, the check below must not read an
    // indeterminate pointer.
    void* deviceMem = nullptr;
    CUDA_CHECK(cudaMalloc(&deviceMem, memSize));
    if (deviceMem == nullptr)
    {
        std::cerr << "Out of memory" << std::endl;
        exit(1);
    }
    return deviceMem;
}

/// Release GPU memory previously obtained from SafeCudaMalloc.
/// Passing nullptr is a harmless no-op.
inline void SafeCudaFree(void* device_mem)
{
    if (device_mem == nullptr)
    {
        return;
    }
    CUDA_CHECK(cudaFree(device_mem));
}

/// Minimal nvinfer1::ILogger implementation that writes messages at or
/// above a configurable severity threshold to std::cerr.
class Logger : public nvinfer1::ILogger
{
public:
    Logger(Severity severity = Severity::kWARNING)
            : reportableSeverity(severity)
    {
    }

    /// Print "<LEVEL>: <msg>" for messages at least as important as
    /// reportableSeverity; silently drop everything less severe.
    void log(Severity severity, const char* msg) override
    {
        // Larger enum values mean lower severity; skip below-threshold messages.
        if (severity > reportableSeverity)
        {
            return;
        }

        const char* prefix = "UNKNOWN: ";
        if (severity == Severity::kINTERNAL_ERROR)
        {
            prefix = "INTERNAL_ERROR: ";
        }
        else if (severity == Severity::kERROR)
        {
            prefix = "ERROR: ";
        }
        else if (severity == Severity::kWARNING)
        {
            prefix = "WARNING: ";
        }
        else if (severity == Severity::kINFO)
        {
            prefix = "INFO: ";
        }
        std::cerr << prefix << msg << std::endl;
    }

    Severity reportableSeverity; ///< minimum severity that gets printed
};

class Profiler : public nvinfer1::IProfiler
{
public:
    struct Record
    {
        float time{0};
        int count{0};
    };
    void printTime(const int& runTimes)
    {
        //std::cout << "========== " << mName << " profile ==========" << std::endl;
        float totalTime = 0;
        std::string layerNameStr = "TensorRT layer name";
        int maxLayerNameLength = std::max(static_cast<int>(layerNameStr.size()), 70);
        for (const auto& elem : mProfile)
        {
            totalTime += elem.second.time;
            maxLayerNameLength = std::max(maxLayerNameLength, static_cast<int>(elem.first.size()));
        }
        std::cout<< " total runtime = " << totalTime/runTimes << " ms " << std::endl;
    }

    virtual void reportLayerTime(const char* layerName, float ms)
    {
        mProfile[layerName].count++;
        mProfile[layerName].time += ms;
    }
private:
    std::map<std::string, Record> mProfile;
};

/// Lightweight descriptor of one TensorRT binding: its name, NCHW
/// dimensions, binding index, and the GPU buffer backing it.
/// NOTE(review): this class does not own p_gpu_buffer_ — the engine
/// wrapper that allocated the device memory is responsible for freeing it.
class Tensor
{
public:

    /// smart pointer
    typedef std::shared_ptr<Tensor> Ptr;
    typedef std::shared_ptr<const Tensor> ConstPtr;

    /// Default constructor: all dimensions zero, null buffer
    /// (previously left every member uninitialized).
    Tensor() {}

    /// \param name         binding name
    /// \param channel      channel count (C)
    /// \param height       height (H)
    /// \param width        width (W)
    /// \param batch        batch size (N)
    /// \param index        binding index managed by TensorRT
    /// \param p_gpu_buffer device memory for this binding (not owned)
    /// Fix: the original omitted batch_ from the init list, leaving the
    /// batch member uninitialized and Size() undefined.
    Tensor(std::string name, size_t channel,
           size_t height, size_t width, size_t batch, size_t index, void* p_gpu_buffer)
           : name_ (name), channel_ (channel), height_ (height), width_ (width),
             index_ (index), batch_ (batch), p_gpu_buffer_ (p_gpu_buffer)
    {
    }

    /// getter and setter
    inline const std::string& name() const noexcept
    {
        return name_;
    }

    inline void name(std::string name)
    {
        name_ = name;
    }

    inline size_t channel() const noexcept
    {
        return channel_;
    }

    inline void channel(size_t channel)
    {
        channel_ = channel;
    }

    inline size_t height() const noexcept
    {
        return height_;
    }

    inline void height(size_t height)
    {
        height_ = height;
    }

    inline size_t width() const noexcept
    {
        return width_;
    }

    inline void width(size_t width)
    {
        width_ = width;
    }

    inline size_t batch() const noexcept
    {
        return batch_;
    }

    inline void batch(size_t batch)
    {
        batch_ = batch;
    }

    inline size_t index() const noexcept
    {
        return index_;
    }

    inline void index(size_t index)
    {
        index_ = index;
    }

    inline void* p_gpu_buffer() const noexcept
    {
        return p_gpu_buffer_;
    }

    inline void p_gpu_buffer(void* p_gpu_buffer)
    {
        p_gpu_buffer_ = p_gpu_buffer;
    }

    /// Element count of the whole tensor (N*C*H*W) — note this is the
    /// number of elements, not the buffer size in bytes.
    inline size_t Size() const
    {
        return channel_ * height_ * width_ * batch_;
    }

private:
    std::string name_;
    size_t channel_{0};
    size_t height_{0};
    size_t width_{0};
    size_t index_{0}; /// binding index managed by TensorRT
    size_t batch_{0};
    void* p_gpu_buffer_{nullptr}; /// device memory managed by TensorRT (not owned)
};

/// Total element count of a dims struct (product of all dimensions).
/// The accumulator is seeded with int64_t{1}: std::accumulate's result
/// type is the type of the init value, so seeding with a plain int would
/// perform (and potentially overflow) the product in 32-bit int despite
/// the multiplies<int64_t> functor.
inline int64_t Volume(const nvinfer1::Dims& d)
{
    return std::accumulate(d.d, d.d + d.nbDims, int64_t{1}, std::multiplies<int64_t>());
}

/// Size in bytes of one element of the given TensorRT data type.
/// \throws std::runtime_error for an unrecognized data type.
inline unsigned int GetElementSize(nvinfer1::DataType t)
{
    if (t == nvinfer1::DataType::kINT32 || t == nvinfer1::DataType::kFLOAT)
    {
        return 4;
    }
    if (t == nvinfer1::DataType::kHALF)
    {
        return 2;
    }
    if (t == nvinfer1::DataType::kINT8)
    {
        return 1;
    }
    throw std::runtime_error("Invalid DataType.");
}
}

#endif
