/**
 * \brief Batch stream that feeds calibration images for TensorRT INT8 calibration.
* \author pengcheng (pengcheng@yslrpch@126.com)
* \date 2020-05-30
 * \attention Copyright © ADC Technology(tianjin)Co.Ltd
* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
*/

#include "detection_vision/tensorrt/batch_stream.h"

#include <algorithm>
#include <cassert>
#include <fstream>
#include <string>
#include <utility>
#include <vector>

namespace tensorrt_inference
{
/**
 * \brief Construct a batch stream over a calibration image list.
 * \param calibration_list path to a text file with one image path per line
 * \param firstBatch number of leading batches to skip before streaming
 * \param batchSize number of images per batch
 * \param maxBatches maximum number of batches next() will serve
 * \param image_info image preprocessing parameters (height/width define dims)
 */
BatchStream::BatchStream(std::string calibration_list,
                         int firstBatch,
                         int batchSize,
                         int maxBatches,
                         ImageProcessor::ImageInfo image_info) :
    // Move the by-value string instead of copying it a second time.
    calibration_list_(std::move(calibration_list)),
    batch_size_(batchSize),
    max_batches_(maxBatches),
    // NOTE: image_info is deliberately copied (not moved) — it is also read
    // below for dims_, and member-initialization order is not visible here.
    image_info_(image_info),
    dims_(batchSize, 3, image_info.height, image_info.width),
    batch_count_(0),
    file_count_(0),
    file_batch_pos_(0)
{
    LOG(INFO) << "create batch stream";
    // Floats per single image: C * H * W.
    image_size_ = dims_.c() * dims_.h() * dims_.w();
    // Staging buffers each hold one full batch, zero-initialized.
    batch_.resize(batch_size_ * image_size_, 0);
    file_batch_.resize(batch_size_ * image_size_, 0);
#if 0
    labels_.resize(batch_size_, 0);
    file_labels_.resize(batch_size_, 0);
#endif
    reset(firstBatch);
}

/**
 * \brief Rewind the stream, then skip the first \p firstBatch batches.
 */
void BatchStream::reset(int firstBatch)
{
    LOG(INFO) << "reset batch stream";
    // Mark the file buffer as fully consumed so the next read refills it.
    file_batch_pos_ = batch_size_;
    file_count_ = 0;
    batch_count_ = 0;
    // Position the stream past the requested number of leading batches.
    skip(firstBatch);
}

/**
 * \brief Advance to the next calibration batch.
 * \return true if a full batch was assembled into batch_; false when
 *         max_batches_ have been served or the file buffer cannot be refilled.
 */
bool BatchStream::next()
{
    LOG(INFO) << "next batch stream";
    if (batch_count_ == max_batches_)
    {
        return false;
    }
    // Fill batch_ (batchPos images done so far) from file_batch_
    // (file_batch_pos_ images already consumed), refilling via update()
    // whenever the file buffer is exhausted.
    for (int csize = 1, batchPos = 0; batchPos < batch_size_; batchPos += csize, file_batch_pos_ += csize)
    {
        assert(file_batch_pos_ > 0 && file_batch_pos_ <= batch_size_);
        // file_batch_pos_ == batch_size_ means the buffer is empty; update()
        // reloads it and resets file_batch_pos_ to 0.
        if (file_batch_pos_ == batch_size_ && !update())
            return false;

        // copy the smaller of: elements left to fulfill the request, or elements left in the file buffer.
        csize = std::min(batch_size_ - batchPos, batch_size_ - file_batch_pos_);
        std::copy_n(getFileBatch() + file_batch_pos_ * image_size_, csize * image_size_, getBatch() + batchPos * image_size_);
    }
    batch_count_++;
    return true;
}

/**
 * \brief Skip \p skipCount batches without counting them against max_batches_.
 */
void BatchStream::skip(int skipCount)
{
    LOG(INFO) << "skip batch stream";
    // Fast path: batch size is an exact multiple of the file-batch size
    // dims_.n() and the file buffer is exactly exhausted, so skipping is
    // pure bookkeeping on file_count_ — no image data needs to be decoded.
    // NOTE(review): dims_ is constructed with batchSize as n, so here
    // batch_size_ == dims_.n() and this path is taken right after reset().
    if (batch_size_ >= dims_.n() && batch_size_ % dims_.n() == 0 && file_batch_pos_ == dims_.n())
    {
        file_count_ += skipCount * batch_size_ / dims_.n();
        return;
    }

    // Slow path: physically consume skipCount batches via next(), then
    // restore batch_count_ so the skipped batches are not counted.
    int x = batch_count_;
    for (int i = 0; i < skipCount; i++)
    {
        next();
    }
    batch_count_ = x;
}

bool BatchStream::update()
{
    LOG(INFO) << "update batch stream";
    std::vector<std::string> fNames;

    std::ifstream file(calibration_list_.c_str());
    if(!file)
    {
        LOG(ERROR) << "Failed to open file " << calibration_list_.c_str();
        return false;
    }
    LOG(INFO) << "Batch #" << file_count_;
    // TODO(haowentian): inefficient implementation here!
    for(int i = 0; i < (batch_count_ + 1) * batch_size_; i++)
    {
        std::string sName;
        std::getline(file, sName, '\n');
        if (i < batch_count_ * batch_size_)
        {
            continue;
        }
        LOG(INFO) << "Calibrating with file " << sName;
        fNames.emplace_back(sName);
    }
    file_count_++;

    std::vector<float> images;
    image_info_.paths = fNames;
    ImageProcessor image_processor(image_info_);
    image_processor.LoadImageAsNCHW(images);
    std::copy_n(images.data(), batch_size_ * image_size_, getFileBatch());
    file_batch_pos_ = 0;
    return true;
}
}