#include "trt_predictor.h"


TRTPredictor::TRTPredictor()
{
    // Start in an unconfigured state: init() must succeed before inference().
    config = false;
    device_id = 0;

    // Empty node names until the caller sets them.
    input_blob_name[0] = '\0';
    output_blob_name[0] = '\0';

    // No serialized model and no TensorRT objects yet.
    trtModelStream = nullptr;
    runtime = nullptr;
    engine = nullptr;
    context = nullptr;

    input_typesize = 0;
    output_typesize = 0;

    // Multiplicative identity; init() multiplies the output dims into this.
    output_size = 1;
}

TRTPredictor::~TRTPredictor()
{
    // Release the model buffer and all TensorRT objects.
    destroy();
}

void TRTPredictor::destroy()
{
    // Drop back to the unconfigured state; this function is safe to call
    // more than once (every branch nulls its pointer after releasing it).
    config = false;

    // delete[] on a null pointer is a no-op, so no guard is needed.
    delete[] trtModelStream;
    trtModelStream = nullptr;

    // Tear down in reverse order of creation: context -> engine -> runtime.
    if (context != nullptr)
    {
        context->destroy();
        context = nullptr;
    }
    if (engine != nullptr)
    {
        engine->destroy();
        engine = nullptr;
    }
    if (runtime != nullptr)
    {
        runtime->destroy();
        runtime = nullptr;
    }
}

// Read the serialized TensorRT engine at model_path into trtModelStream.
// Returns false if the predictor is already configured, a model was already
// loaded, the file cannot be opened, is empty, or the read fails.
bool TRTPredictor::load_model(std::string model_path)
{
    if (config)
    {
        std::cout << "You have already init the predictor." << std::endl;
        return false;
    }
    if (trtModelStream)
    {
        std::cout << "You have already loaded the model." << std::endl;
        return false;
    }
    std::ifstream file(model_path, std::ios::binary);
    if (!file.good())
    {
        return false;
    }
    // Determine the file size; tellg() reports -1 on failure and 0 for an
    // empty file — both are unusable as an engine blob.
    file.seekg(0, file.end);
    std::streamoff file_size = file.tellg();
    file.seekg(0, file.beg);
    if (file_size <= 0)
    {
        return false;
    }
    model_size = file_size;
    // NOTE: plain new throws std::bad_alloc rather than returning NULL, so
    // the old post-allocation null check was dead code and is removed.
    trtModelStream = new char[model_size];
    file.read(trtModelStream, model_size);
    if (!file)
    {
        // Short read: discard the partial buffer so a later retry is possible.
        delete[] trtModelStream;
        trtModelStream = NULL;
        return false;
    }
    file.close();
    return true;
}

// Record the name of the engine's input binding. Must be called before
// init(); returns false once the predictor is configured.
bool TRTPredictor::set_input_name(std::string input_name)
{
    if (config)
    {
        std::cout << "You have already init the predictor." << std::endl;
        return false;
    }
    // Bounds check: input_blob_name is a fixed-size char array (see the
    // constructor's input_blob_name[0] = 0), so an unchecked strcpy could
    // overflow it on a long tensor name. Reserve one byte for the NUL.
    if (input_name.size() >= sizeof(input_blob_name))
    {
        std::cout << "Input node name is too long." << std::endl;
        return false;
    }
    strcpy(input_blob_name, input_name.c_str());
    return true;
}

// Record the name of the engine's output binding. Must be called before
// init(); returns false once the predictor is configured.
bool TRTPredictor::set_output_name(std::string output_name)
{
    if (config)
    {
        std::cout << "You have already init the predictor." << std::endl;
        return false;
    }
    // Bounds check: output_blob_name is a fixed-size char array, so an
    // unchecked strcpy could overflow it. Reserve one byte for the NUL.
    if (output_name.size() >= sizeof(output_blob_name))
    {
        std::cout << "Output node name is too long." << std::endl;
        return false;
    }
    strcpy(output_blob_name, output_name.c_str());
    return true;
}

// Select which CUDA device the predictor should use. Rejected once init()
// has locked the configuration.
bool TRTPredictor::set_device_id(int id)
{
    if (!config)
    {
        device_id = id;
        return true;
    }
    std::cout << "You have already init the predictor." << std::endl;
    return false;
}

// Set the per-element byte size of the input tensor (e.g. 4 for float).
// Rejected once init() has locked the configuration.
bool TRTPredictor::set_input_typesize(size_t typesize)
{
    if (!config)
    {
        input_typesize = typesize;
        return true;
    }
    std::cout << "You have already init the predictor." << std::endl;
    return false;
}

// Set the per-element byte size of the output tensor (e.g. 4 for float).
// Rejected once init() has locked the configuration.
bool TRTPredictor::set_output_typesize(size_t typesize)
{
    if (!config)
    {
        output_typesize = typesize;
        return true;
    }
    std::cout << "You have already init the predictor." << std::endl;
    return false;
}

bool TRTPredictor::init()
{
    if (config)
    {
        std::cout << "You have already init the predictor." << std::endl;
        return false;
    }
    if (!trtModelStream)
    {
        std::cout << "You have not set the path of trt model." << std::endl;
        return false;
    }
    if (strlen(input_blob_name) <= 0)
    {
        std::cout << "You have not set the name of input node." << std::endl;
        return false;
    }
    if (strlen(output_blob_name) <= 0)
    {
        std::cout << "You have not set the name of output node." << std::endl;
        return false;
    }
    if (input_typesize <= 0)
    {
        std::cout << "You have not set the type-size of input node." << std::endl;
        return false;
    }
    if (output_typesize <= 0)
    {
        std::cout << "You have not set the type-size of output node." << std::endl;
        return false;
    }
    runtime = createInferRuntime(gLogger);
    if (!runtime)
    {
        std::cout << "Can not init infer runtime." << std::endl;
        return false;
    }
    engine = runtime->deserializeCudaEngine(trtModelStream, model_size);
    if (!engine)
    {
        std::cout << "Can not init cuda engine." << std::endl;
        return false;
    }
    context = engine->createExecutionContext();
    if (!context)
    {
        std::cout << "Can not init execution context." << std::endl;
        return false;
    }
    out_dims = engine->getBindingDimensions(1);
    for (int i = 0; i < out_dims.nbDims; i++)
    {
        output_size *= out_dims.d[i];
    }
    config = true;
    return true;
}

// Total number of output elements (product of the output dims), or 0 when
// the predictor has not been initialized yet.
size_t TRTPredictor::get_output_size()
{
    return config ? output_size : 0;
}

// Dimensions of the output binding as cached by init(). NOTE(review): no
// config guard here, so before init() this returns whatever out_dims holds.
Dims TRTPredictor::get_output_dims()
{
    return this->out_dims;
}

// Run one synchronous inference: copy `input` (input_size elements of
// input_typesize bytes each) to the device, execute the engine, and copy the
// result back into `output` (which must hold get_output_size() elements of
// output_typesize bytes). Device buffers and the stream are allocated and
// released on every call.
void TRTPredictor::inference(void* input, size_t input_size, void* output)
{
    if (!config)
    {
        std::cout << "You have not init the predictor." << std::endl;
        return;
    }

    const ICudaEngine& engine = context->getEngine();

    // Engine requires exactly IEngine::getNbBindings() number of buffers.
    assert(engine.getNbBindings() == 2);
    void* buffers[2];

    // Resolve binding slots by name. getBindingIndex returns -1 for an
    // unknown name, which would index buffers[] out of bounds below, so
    // validate both before touching the GPU.
    const int inputIndex = engine.getBindingIndex(input_blob_name);
    const int outputIndex = engine.getBindingIndex(output_blob_name);
    if (inputIndex < 0 || outputIndex < 0)
    {
        std::cout << "Can not find input/output binding in the engine." << std::endl;
        return;
    }

    // Create GPU buffers on device.
    CHECK(cudaMalloc(&buffers[inputIndex], input_size * input_typesize));
    CHECK(cudaMalloc(&buffers[outputIndex], output_size * output_typesize));

    // Create stream.
    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));

    // DMA input to device, infer asynchronously, DMA output back to host.
    CHECK(cudaMemcpyAsync(buffers[inputIndex], input, input_size * input_typesize, cudaMemcpyHostToDevice, stream));
    // enqueue returns false on failure; report it but still fall through to
    // the copy/cleanup so the stream and buffers are always released.
    if (!context->enqueue(1, buffers, stream, nullptr))
    {
        std::cout << "TensorRT enqueue failed." << std::endl;
    }
    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], output_size * output_typesize, cudaMemcpyDeviceToHost, stream));
    CHECK(cudaStreamSynchronize(stream));

    // Release stream and buffers.
    CHECK(cudaStreamDestroy(stream));
    CHECK(cudaFree(buffers[inputIndex]));
    CHECK(cudaFree(buffers[outputIndex]));
}