/**
 * \brief INT8 entropy calibrator for TensorRT, fed by a BatchStream with on-disk calibration-cache support.
* \author pengcheng (pengcheng@yslrpch@126.com)
* \date 2020-05-30
 * \attention Copyright © ADC Technology (Tianjin) Co., Ltd
* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
*/
#ifndef _TENSORRT_CALIBRATOR_H_
#define _TENSORRT_CALIBRATOR_H_

#include <cassert>
#include <fstream>
#include <iostream>
#include <iterator>
#include <sstream>
#include <string>
#include <vector>

#include <cuda_runtime_api.h>

#include <NvInfer.h>

#include "detection_vision/tensorrt/batch_stream.h"

namespace tensorrt_inference
{

#ifndef CUDACHECK
// Abort on any CUDA runtime-API failure, printing a human-readable error
// (via cudaGetErrorString) together with the failing file/line.
// Wrapped in do { } while (0) so the macro expands to a single statement
// and composes safely with unbraced if/else.
#define CUDACHECK(callstr)                                                            \
    do {                                                                              \
        cudaError_t error_code = callstr;                                             \
        if (error_code != cudaSuccess) {                                              \
            std::cerr << "CUDA error " << cudaGetErrorString(error_code)              \
                      << " (" << error_code << ") at "                                \
                      << __FILE__ << ":" << __LINE__ << std::endl;                    \
            assert(0);                                                                \
        }                                                                             \
    } while (0)
#endif
class Int8CacheCalibrator : public nvinfer1::IInt8EntropyCalibrator
{
public:
    Int8CacheCalibrator(BatchStream& stream, int32_t firstBatch, std::string calibrationTableName, bool readCache = true);

    virtual ~Int8CacheCalibrator()
    {
        CUDACHECK(cudaFree(device_input_));
    }

    int getBatchSize() const;

    bool getBatch(void* bindings[], const char* names[], int nbBindings) override;

    const void* readCalibrationCache(size_t& length) override;

    void writeCalibrationCache(const void* cache, size_t length) override;
private:
    BatchStream stream_;
    std::string calibration_table_name_;
    bool read_cache_;
    size_t input_count_;
    void* device_input_;
    std::vector<char> calibration_cache_;
};

}

#endif