/**
* \brief 
 * \author pengcheng (yslrpch@126.com)
* \date 2020-05-30
 * \attention Copyright © ADC Technology (Tianjin) Co., Ltd.
* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
*/
#include "detection_vision/tensorrt/tensorrt_inference.h"
#include <glog/logging.h>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include <boost/lexical_cast.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/thread/shared_mutex.hpp>
#include <boost/thread/shared_lock_guard.hpp>

namespace tensorrt_inference
{


/// Split `input` on any character contained in `delim` (adjacent
/// delimiters are collapsed) and lexically convert each token to T.
/// Throws boost::bad_lexical_cast if a token cannot be converted.
template<typename T>
std::vector<T> ParseStringAsVec(const std::string& input,
                                const std::string& delim)
{
	std::vector<std::string> tokens;
	boost::split(tokens, input, boost::is_any_of(delim), boost::token_compress_on);

	std::vector<T> result;
	result.reserve(tokens.size());
	for (const auto& token : tokens)
	{
		result.push_back(boost::lexical_cast<T>(token));
	}
	return result;
}

/// Construct the inference wrapper. Two paths, selected by
/// config.serialize_model():
///  - true:  parse the ONNX model, build a TensorRT engine (optionally with
///           INT8 calibration) and serialize it to serialize_model_name().
///  - false: deserialize a previously written engine file.
/// On unrecoverable errors the build path calls exit(); file-open failures
/// on the serialize/deserialize paths log and return, leaving the object
/// only partially initialized.
TRTInfernce::TRTInfernce(const TensorRTConfig& config)
{
    config_ = config;
    cudaSetDevice(config_.gpu_id());
    CUDA_CHECK(cudaStreamCreate(&stream_));

    tensor_type_ = TensorType(config_.tensor_type());

    if(config_.serialize_model())
    {
        // Build path: ONNX -> engine -> serialized file on disk.
        LOG(INFO)<<"begin transform onnx model to gie, model file: "
            <<config_.model_file();
        std::shared_ptr<Int8CacheCalibrator> calibrator = nullptr;

        if(TensorType::INT8 == tensor_type_)
        {
            const std::string string_calibration_file = config_.calibration_config().file();
            if(config_.calibration_config().read_calibration_table_only())
            {
                // Reuse an existing calibration table; no image batches needed.
                std::ifstream file(string_calibration_file.c_str());
                if(!file)
                {
                    LOG(ERROR)<<"open calibration file failed";
                    exit(1);
                }
                BatchStream batch_stream;
                calibrator = std::make_shared<Int8CacheCalibrator>(batch_stream, 0, string_calibration_file);
            }
            else
            {
                // Run full calibration from the image list in the config.
                ImageProcessor::ImageInfo image_inifo;
                image_inifo.height = config_.calibration_config().height();
                image_inifo.width = config_.calibration_config().width();
                image_inifo.scale = config_.calibration_config().scale();

                // NOTE(review): this splits on '.' and ' ' -- looks like ", "
                // was intended; confirm against the config string format.
                const std::string delim(". ");
                image_inifo.mean = ParseStringAsVec<float>(config_.calibration_config().mean(), delim);
                image_inifo.dev = ParseStringAsVec<float>(config_.calibration_config().dev(), delim);
                image_inifo.padding_with = config_.calibration_config().padding_with();

                const std::string calibration_list = config_.calibration_config().calibration_list();
                BatchStream batch_stream(calibration_list,
                                            config_.calibration_config().first_batch(),
                                            config_.calibration_config().batch_size(),
                                            config_.calibration_config().num_batches(),
                                            image_inifo);
                calibrator = std::make_shared<Int8CacheCalibrator>(batch_stream, 
                                                                config_.calibration_config().first_batch(),
                                                                "calibrator");
            }
        }
        // FIXME(review): for FP32/FP16 `calibrator` is still null, so
        // `*calibrator` is undefined behavior; OnnxToGIEModel should take a
        // pointer instead (requires a header change, so only flagged here).
        OnnxToGIEModel(*calibrator);
        InitEngine();
        // Persist the built engine so later runs can take the fast load path.
        nvinfer1::IHostMemory* data = engine_->serialize();
        std::ofstream file;
        file.open(config_.serialize_model_name(),std::ios::binary | std::ios::out);
        if(!file.is_open())
        {
            LOG(ERROR)<< "write create engine file " << config_.serialize_model_name() <<" failed";
            data->destroy();  // fixed: blob was leaked on this error path
            return;
        }
        LOG(INFO)<<"write create engine file to: "<<config_.serialize_model_name();
        file.write((const char*)data->data(), data->size());
        file.close();
        data->destroy();  // fixed: serialized blob was never released
    }
    else    
    {
        // Load path: read and deserialize a previously written engine file.
        std::string engine_model_file = config_.serialize_model_name();
        LOG(INFO)<<"begin read enging model";
        std::fstream in_file(engine_model_file.c_str(), std::ios::in | std::ios::binary);
        if(!in_file.is_open())
        {
            LOG(ERROR)<<engine_model_file<<" open file failed";
            return;
        }
        // Measure the file, then slurp the whole blob into host memory.
        in_file.seekg(0, std::ios::end);
        size_t length = in_file.tellg();
        in_file.seekg(0, std::ios::beg);
        std::unique_ptr<unsigned char[]> engine_data(new unsigned char[length]);
        in_file.read((char*)engine_data.get(), length);
        in_file.close();
        plugin_factory_ = nvonnxparser::createPluginFactory(gLogger_);
        LOG(INFO)<<"deserializing";
        runtime_ = nvinfer1::createInferRuntime(gLogger_);
        assert(runtime_ != nullptr);
        engine_ = runtime_->deserializeCudaEngine(engine_data.get(), length, plugin_factory_);
        assert(engine_ != nullptr);  // fixed: previously asserted undefined `mEngine`
        InitEngine();
    }

}

/// Release per-binding device buffers and the CUDA stream created in the
/// constructor.
TRTInfernce::~TRTInfernce()
{
    for(auto ptr : cuda_cuffers_)
    {
        if(ptr != nullptr)
        {
            SafeCudaFree(ptr);
        }
    }
    // Fixed: the stream created by cudaStreamCreate in the constructor was
    // previously leaked.
    cudaStreamDestroy(stream_);
    // NOTE(review): context_/engine_/runtime_/plugin_factory_ are never
    // destroyed (TensorRT objects need ->destroy(), context before engine
    // before runtime). Their initialization on early-constructor-return paths
    // is not visible from this file -- confirm before adding guarded destroys.
}

/// Describe binding `index` as a Tensor (name = stringified index, no data
/// pointer attached). Throws a C-string if `index` is out of range.
/// For a 4-D binding the four dimensions are passed through; for anything
/// else the first three are used and the fourth defaults to 1.
Tensor TRTInfernce::GetTensor(const int64_t index)
{
    if(index >= nb_bindings_ || index < 0)
    {
        LOG(ERROR)<<"index outside of nb_bindings_: "<<nb_bindings_;
        throw "index outside of nb_bindings";
    }
    nvinfer1::Dims dims = engine_->getBindingDimensions(index);
    if(dims.nbDims == 4)
    {
        // Fixed: previously read d[1]..d[4]; d[4] is past the 4 valid
        // dimensions (indices 0..3), yielding garbage for the last extent.
        return Tensor(std::to_string(index), dims.d[0], dims.d[1], dims.d[2], dims.d[3], index, nullptr);
    }
    else
    {
        return Tensor(std::to_string(index), dims.d[0], dims.d[1], dims.d[2], 1, index, nullptr);
    }
}


void TRTInfernce::InitEngine()
{
    context_ = engine_->createExecutionContext();
    assert(context_ != nullptr);
    context_->setProfiler(&profiler_);
    nb_bindings_ = engine_->getNbBindings();
    cuda_cuffers_.resize(nb_bindings_);
    bind_buffer_sizes_.resize(nb_bindings_);

    int64_t total_size = 0;
    
    for(int i =0; i < nb_bindings_; i++ )
    {
        nvinfer1::Dims dims = engine_->getBindingDimensions(i);
        nvinfer1::DataType dtype = engine_->getBindingDataType(i);
        total_size = Volume(dims) * config_.batch_size() * GetElementSize(dtype);
        bind_buffer_sizes_[i] = total_size;
        cuda_cuffers_[i] = SafeCudaMalloc(total_size);
        // output_size_ += total_size;
    }
    // cuda_output_buffer_ = SafeCudaMalloc(output_size_);
}

/// Parse the ONNX model file, build a CUDA engine at the configured
/// precision (FP32 / FP16 / INT8 with `calibrator`), then serialize and
/// immediately deserialize it into engine_ (initializing runtime_ and
/// plugin_factory_ on the way). Exits the process on parse/build failure.
void TRTInfernce::OnnxToGIEModel(Int8CacheCalibrator& calibrator)
{
    nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(gLogger_);
    nvinfer1::INetworkDefinition* network = builder->createNetwork();
    plugin_factory_ = nvonnxparser::createPluginFactory(gLogger_);
    nvonnxparser::IParser* parser =  nvonnxparser::createParser(*network, gLogger_);
    int verbosity = (int) nvinfer1::ILogger::Severity::kWARNING;
    if(!parser->parseFromFile(config_.model_file().c_str(), verbosity))
    {
        std::string err = "failed to parse file";
        gLogger_.log(nvinfer1::ILogger::Severity::kERROR, err.c_str());
        exit(EXIT_FAILURE);
    }
    builder->setMaxBatchSize(config_.batch_size());
    builder->setMaxWorkspaceSize(config_.work_space());
    if(TensorType::INT8 == tensor_type_)
    {
        LOG(INFO)<<"set int8 mode";
        if(!builder->platformHasFastInt8())
        {
            // Fixed wording of the warning message.
            LOG(ERROR)<<"Notice: the platform does not have fast int8";
        }
        builder->setInt8Mode(true);
        builder->setInt8Calibrator(&calibrator);
    }
    else if (TensorType::HALF == tensor_type_)
    {
        LOG(INFO)<<"set half mode";
        if(!builder->platformHasFastFp16())
        {
            // Fixed wording of the warning message.
            LOG(ERROR)<<"Notice: the platform does not have fast fp16";
        }
        builder->setFp16Mode(true);
    }

    LOG(INFO)<<"Begin building engine...";
    nvinfer1::ICudaEngine* engine = builder->buildCudaEngine(*network);
    if (!engine)
    {
        std::string error_message ="Unable to create engine";
        LOG(ERROR)<<"Unable to create engine";
        gLogger_.log(nvinfer1::ILogger::Severity::kERROR, error_message.c_str());
        exit(-1);
    }
    LOG(INFO)<<"End building engine...";

    // Round-trip through host memory so engine_ is owned by runtime_ (the
    // builder's engine is destroyed right after serialization).
    nvinfer1::IHostMemory* gieModelStream = engine->serialize();
    engine->destroy();
    assert(gieModelStream != nullptr);
    runtime_ = nvinfer1::createInferRuntime(gLogger_);
    assert(runtime_ != nullptr);  // fixed: previously asserted undefined `runtime`
    engine_= runtime_->deserializeCudaEngine(gieModelStream->data(), gieModelStream->size(), plugin_factory_);
    network->destroy();
    builder->destroy();
    parser->destroy();
    gieModelStream->destroy();
}
void TRTInfernce::Inferrence(const void *input_data)
{
    CUDA_CHECK(cudaMemcpyAsync(cuda_cuffers_[0], input_data, bind_buffer_sizes_[0], cudaMemcpyHostToDevice, stream_));
    context_->execute(config_.batch_size(), &cuda_cuffers_[0]);
}
}