//
// Created by zhangcc on 2020/7/4.
//
#include "osnet_reid.h"

// Abort the process when a CUDA runtime call fails, printing file, line and
// the error string/code. The expression's own result is captured once (the
// original re-queried cudaGetLastError(), which could report a different,
// already-cleared error). The do/while(0) wrapper makes the macro behave as a
// single statement, so `if (...) CHECK(x); else ...` parses correctly.
#define CHECK(exp) \
do { \
    cudaError_t err_ = (exp); \
    if (err_ != cudaSuccess) { \
        printf("[ERROR] [%s:%d] %s (%d)\n", __FILE__, __LINE__, cudaGetErrorString(err_), (int)err_); \
        exit(-1); \
    } \
} while (0)

// Minimal nvinfer1::ILogger implementation: forwards TensorRT messages to
// stderr, dropping anything less severe than `reportableSeverity`.
class Logger : public nvinfer1::ILogger
{
public:

    Logger(): Logger(Severity::kWARNING) {}

    // explicit: prevents accidental implicit Severity -> Logger conversions.
    explicit Logger(Severity severity): reportableSeverity(severity) {}

    void log(Severity severity, const char* msg) override
    {
        // Suppress messages with severity enum value greater than the
        // reportable threshold (in TensorRT, a larger value is LESS severe).
        if (severity > reportableSeverity) return;

        switch (severity)
        {
            case Severity::kINTERNAL_ERROR: std::cerr << "INTERNAL_ERROR: "; break;
            case Severity::kERROR: std::cerr << "ERROR: "; break;
            case Severity::kWARNING: std::cerr << "WARNING: "; break;
            case Severity::kINFO: std::cerr << "INFO: "; break;
            default: std::cerr << "UNKNOWN: "; break;  // e.g. kVERBOSE
        }
        std::cerr << msg << std::endl;
    }

    // Messages above this severity (numerically) are discarded.
    Severity reportableSeverity{Severity::kWARNING};
};

// Build (or load a cached) TensorRT engine from the given ONNX model file and
// allocate the device buffers used by extract().
// Returns 0 on success, -1 on any failure.
int OsnetReid::init(const std::string &model_file)
{
    Logger gLogger(nvinfer1::ILogger::Severity::kERROR);//kWARNING, kINFO, kINTERNAL_ERROR

    // Select a valid GPU; fall back to device 0 when the configured id is out of range.
    int device_count = 0;
    cudaGetDeviceCount(&device_count);
    assert(device_count > 0);
    if (device_id >= device_count)
    {
        device_id = 0;
    }
    std::cout << "GPU_nums: " << device_count << std::endl;
    std::cout << "OsnetReid GPU ID: " << device_id << std::endl;
    cudaSetDevice(device_id);

    std::unique_ptr<nvinfer1::IRuntime, nvinfer1::Destroy<nvinfer1::IRuntime>> runtime{nvinfer1::createInferRuntime(gLogger)};
    if (!runtime)
    {
        return -1;
    }

    // Derive the cached-engine path from the model path: ".onnx" -> ".engine".
    // Guard against find() returning npos, which would make replace() throw.
    const std::string key_name = ".onnx";
    std::string engine_file = model_file;
    const std::string::size_type key_pos = engine_file.find(key_name);
    if (key_pos == std::string::npos)
    {
        std::cout << "ERROR: model file name does not contain '.onnx': " << model_file << std::endl;
        return -1;
    }
    engine_file.replace(key_pos, key_name.size(), ".engine");

    // Deserialize a cached engine if present; otherwise build one from ONNX.
    std::string buffer = nvinfer1::readBuffer(engine_file.c_str());
    if (!buffer.size() || alway_serialize)
    {
        std::unique_ptr<nvinfer1::IBuilder, nvinfer1::Destroy<nvinfer1::IBuilder>> builder{nvinfer1::createInferBuilder(gLogger)};
        if(!builder)
        {
            return -1;
        }

        // ONNX parsing requires an explicit-batch network.
        const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
        std::unique_ptr<nvinfer1::INetworkDefinition, nvinfer1::Destroy<nvinfer1::INetworkDefinition>> network{builder->createNetworkV2(explicitBatch)};
        if(!network)
        {
            return -1;
        }

        std::unique_ptr<nvonnxparser::IParser, nvinfer1::Destroy<nvonnxparser::IParser>> parser{nvonnxparser::createParser(*network, gLogger)};
        if(!parser)
        {
            return -1;
        }

        std::unique_ptr<nvinfer1::IBuilderConfig, nvinfer1::Destroy<nvinfer1::IBuilderConfig>> config{builder->createBuilderConfig()};
        if(!config)
        {
            return -1;
        }

        auto parsed = parser->parseFromFile(model_file.c_str(), static_cast<int>(nvinfer1::ILogger::Severity::kINFO));
        if (!parsed)
        {
            std::cout << "ERROR: could not parse input engine." << std::endl;
            return -1;
        }

        builder->setMaxBatchSize(max_batch_size);
        config->setFlag(nvinfer1::BuilderFlag::kGPU_FALLBACK);
        config->setFlag(nvinfer1::BuilderFlag::kSTRICT_TYPES);
        switch (dataType) {
            case 0:
                // FP16 build (setFp16Mode kept for older TensorRT releases).
                builder->setFp16Mode(true);
                config->setFlag(nvinfer1::BuilderFlag::kFP16);
                break;
            case 1:
                break;
            default:
                std::cout << "use float32 engine " << std::endl;
                break;
        }

        if(use_dynamic)
        {
            // NOTE(review): all three selectors use identical dims, so this
            // "dynamic" profile is effectively fixed. Also the dims are ordered
            // {N, H, W, C} while the buffers below are sized N*C*H*W — confirm
            // against the model's actual input layout.
            auto profile = builder->createOptimizationProfile();
            profile->setDimensions(network->getInput(0)->getName(), nvinfer1::OptProfileSelector::kMIN,
                                   nvinfer1::Dims4{max_batch_size, inputH, inputW, channels});
            profile->setDimensions(network->getInput(0)->getName(), nvinfer1::OptProfileSelector::kOPT,
                                   nvinfer1::Dims4{max_batch_size, inputH, inputW, channels});
            profile->setDimensions(network->getInput(0)->getName(), nvinfer1::OptProfileSelector::kMAX,
                                   nvinfer1::Dims4{max_batch_size, inputH, inputW, channels});
            config->addOptimizationProfile(profile);
        }

        mEngine.reset(builder->buildEngineWithConfig(*network, *config));
        if (!mEngine)
        {
            // The original dereferenced mEngine unconditionally here, crashing
            // on a failed build instead of reporting the error.
            std::cout << "ERROR: failed to build engine." << std::endl;
            return -1;
        }

        // Serialize the engine to disk so later runs can skip the build.
        std::unique_ptr<nvinfer1::IHostMemory, nvinfer1::Destroy<nvinfer1::IHostMemory>> trtModelStream{mEngine->serialize()};
        nvinfer1::writeBuffer(trtModelStream->data(), trtModelStream->size(), engine_file.c_str());
    }

    // Load the engine from the serialized file (freshly written or cached).
    buffer = nvinfer1::readBuffer(engine_file.c_str());
    mEngine.reset(runtime->deserializeCudaEngine(buffer.data(), buffer.size(), nullptr));
    if (!mEngine)
    {
        std::cout << "ERROR: could not deserialize engine." << std::endl;
        return -1;
    }

    assert(mEngine->getNbBindings() == MAX_BINDING_NUMS);

    context.reset(mEngine->createExecutionContext());
    if (!context)
    {
        return -1;
    }
//    context->setOptimizationProfile(0);

    // Device input buffer: max_batch_size images of C*H*W floats.
    int inputBufferSize = max_batch_size * channels * inputH * inputW;
    CHECK(cudaMalloc(&bindings[0], inputBufferSize * sizeof(float)));

    // Device output buffer: max_batch_size feature vectors of feats_dim floats.
    int outputBufferSize = max_batch_size * feats_dim;
    CHECK(cudaMalloc(&bindings[1], outputBufferSize * sizeof(float)));

    // Stream creation was previously unchecked, unlike the other CUDA calls.
    CHECK(cudaStreamCreate(&stream_t));

    return 0;
}

// Run the re-id network over `inputs` in chunks of at most max_batch_size and
// return one feats_dim-length feature vector per input image.
std::vector<std::vector<float>> OsnetReid::extract(std::vector<cv::Mat> &inputs)
{
    cudaSetDevice(device_id);

    const int batch_size = static_cast<int>(inputs.size());
    const int single_image_size = channels * inputH * inputW;

    std::vector<std::vector<float>> feats(batch_size, std::vector<float>(feats_dim));
    if (batch_size == 0)
    {
        // Nothing to do — avoid enqueueing an empty batch.
        return feats;
    }

    // Integer ceiling division (the original used float ceil()).
    const int iters = (batch_size + max_batch_size - 1) / max_batch_size;
    for (int iter = 0; iter < iters; iter++)
    {
        // Stage this chunk's images into the device input buffer.
        for (int i = 0; i < max_batch_size; i++)
        {
            const int batch_id = i + max_batch_size * iter;
            if (batch_id > batch_size - 1)
            {
                break;
            }

            if (preprocess)
            {
                // Convert to float32 and resize to the network input size.
                inputs[batch_id].convertTo(input, CV_32FC3, 1.0f);
                cv::resize(input, input, cv::Size(inputW, inputH));
            }
            else
            {
                // NOTE(review): this path assumes the caller already supplies
                // CV_32FC3 images of inputW x inputH — confirm at call sites.
                input = inputs[batch_id];
            }

            // NOTE(review): `input` is reused every iteration; this relies on
            // cudaMemcpyAsync staging pageable host memory before returning.
            CHECK(cudaMemcpyAsync((float *) bindings[0] + i * single_image_size,
                (float *) input.data, single_image_size * sizeof(float),
                cudaMemcpyHostToDevice, stream_t));
        }

        bool status = context->enqueueV2(bindings, stream_t, nullptr);
        if (!status) {
            std::cout << "Enqueue failed" << std::endl;
        }

        // Copy this chunk's feature vectors back to the host.
        for (int i = 0; i < max_batch_size; i++)
        {
            const int batch_id = i + max_batch_size * iter;
            if (batch_id > batch_size - 1)
            {
                break;
            }

            CHECK(cudaMemcpyAsync((float *) feats[batch_id].data(),
                (float *) bindings[1] + i * feats_dim,
                feats_dim * sizeof(float), cudaMemcpyDeviceToHost, stream_t));
        }

        // BUG FIX: synchronize AFTER the device-to-host copies. The original
        // synced only after enqueueV2 (before issuing the D2H copies), so the
        // async copies into `feats` could still be in flight when this
        // function returned, yielding garbage feature vectors.
        cudaStreamSynchronize(stream_t);
    }

    return feats;
}

// Release the device buffers and the CUDA stream.
OsnetReid::~OsnetReid()
{
    // Drain any work still enqueued on the stream before freeing the device
    // buffers it references (the original freed first, risking use-after-free
    // on the device side). cudaFree(nullptr) is a safe no-op for unallocated
    // bindings.
    cudaStreamSynchronize(stream_t);

    for (void* ptr : bindings)
    {
        cudaFree(ptr);
    }

    cudaStreamDestroy(stream_t);
}