/**
 * \brief Batch stream that feeds image batches read from a calibration list
 *        (e.g. as input to a TensorRT INT8 calibrator).
* \author pengcheng (pengcheng@yslrpch@126.com)
* \date 2020-05-30
 * \attention Copyright © ADC Technology (Tianjin) Co., Ltd.
* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
*/


#ifndef _TENSORRT_BATCH_STREAM_H_
#define _TENSORRT_BATCH_STREAM_H_

#include <assert.h>

#include <algorithm>
#include <fstream>
#include <iomanip>
#include <string>
#include <vector>

#include <NvInfer.h>

#include "detection_vision/tensorrt/image_processor.h"


namespace tensorrt_inference
{

// TODO(haowentian): we may use TensorRT inner type of batch stream to replace this file
/**
 * \brief Serves fixed-size image batches read from a calibration image list,
 *        buffering file contents into host memory for consumption via
 *        getBatch() (e.g. by a TensorRT INT8 calibrator).
 */
class BatchStream
{
public:

	/**
	 * \brief Construct a stream over the images named in \p calibration_list.
	 * \param calibration_list path of the file listing the calibration images
	 * \param firstBatch index of the first batch to serve (earlier batches
	 *        are skipped)
	 * \param batchSize number of images per batch
	 * \param maxBatches maximum number of batches to read from the stream
	 * \param image_info image geometry/format used to size the buffers
	 */
	BatchStream(std::string calibration_list,
	            int firstBatch,
	            int batchSize,
	            int maxBatches,
	            ImageProcessor::ImageInfo image_info);

	// Default constructor: members now carry in-class initializers, so a
	// default-constructed stream no longer holds indeterminate scalar values.
	BatchStream() {}

	/// Rewind the stream and position it at batch \p firstBatch.
	void reset(int firstBatch);

	/// Advance to the next batch; presumably returns false once the stream
	/// is exhausted — confirm against the out-of-line definition.
	bool next();

	/// Skip \p skipCount batches without serving them.
	void skip(int skipCount);

	/// Raw pointer to the current batch data; invalidated by the next
	/// next()/reset() call that refills batch_.
	inline float *getBatch() { return batch_.data(); }
	// float *getLabels() { return labels_.data(); }
	/// Number of batches read so far.
	inline int getBatchesRead() const { return batch_count_; }
	/// Number of images per batch.
	inline int getBatchSize() const { return batch_size_; }
	/// Batch dimensions in NCHW order.
	inline nvinfer1::DimsNCHW getDims() const { return dims_; }

private:

	/// Raw pointer into the per-file staging buffer.
	inline float* getFileBatch() { return file_batch_.data(); }
	// inline float* getFileLabels() { return file_labels_.data(); }
	/// Refill the staging buffer from the next file; presumably returns
	/// false when no more data is available — confirm against definition.
	bool update();

private:

    // calibration list
    std::string calibration_list_;
    // batch size
    // NOTE: scalar members below use in-class initializers so that the
    // default constructor does not leave them indeterminate.
    int batch_size_{0};
    // max batches
    int max_batches_{0};
    // Image info
    ImageProcessor::ImageInfo image_info_;
    // Dimension in NCHW format
    nvinfer1::DimsNCHW dims_;
    // Image size
    int image_size_{0};
    // batch counter
    int batch_count_{0};
    // file counter
    int file_count_{0};
    // counter for current batch position
    int file_batch_pos_{0};
	// images batch buffer
    std::vector<float> batch_;
    // file batch (batch_size * image_size) buffer
    std::vector<float> file_batch_;
};

}  // namespace tensorrt_inference
#endif