#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>

#include <NvOnnxParser.h>

#include "common.h"
#include "Logger.h"
#include "Utils.h"
#include "plugins/top_pool_plugin.h"
#include "plugins/bottom_pool_plugin.h"
#include "plugins/left_pool_plugin.h"
#include "plugins/right_pool_plugin.h"

using namespace nvinfer1;
using namespace std;

// Process-wide console logger for the simplelogger framework (TRACE verbosity).
// NOTE(review): raw owning pointer, never freed — intentional for a
// process-lifetime singleton, but a leak checker will flag it.
simplelogger::Logger *logger = simplelogger::LoggerFactory::CreateConsoleLogger(simplelogger::TRACE);

namespace
{
    // TensorRT ILogger implementation handed to the builder and ONNX parser.
    // File-local via anonymous namespace.
    hackathon::Logger gLogger;
}

/// @brief Parse an ONNX model, build a TensorRT engine, and serialize it to
///        "cn_lite_trt.plan" in the working directory.
/// @param command_param Parsed CLI options: model path, batch size, INT8 flag.
/// @return true on success; false on any builder/parser/build/IO failure
///         (a diagnostic is printed to stdout in each case).
bool OnnxToTrt(const hackathon::CommandParam &command_param)
{
    auto builder = hackathon::UniquePtr<IBuilder>(createInferBuilder(gLogger));
    if (!builder)
    {
        std::cout << "create infer builder failure\n";
        return false;
    }

    // ONNX models require an explicit-batch network definition.
    const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    auto network = hackathon::UniquePtr<INetworkDefinition>(builder->createNetworkV2(explicitBatch));
    if (!network)
    {
        std::cout << "create networkV2 failure\n";
        return false;
    }

    auto config = hackathon::UniquePtr<IBuilderConfig>(builder->createBuilderConfig());
    if (!config)
    {
        std::cout << "create builder config failure\n";
        return false;
    }

    auto parser = hackathon::UniquePtr<nvonnxparser::IParser>(nvonnxparser::createParser(*network, gLogger));
    if (!parser)
    {
        std::cout << "create onnx parser failure\n";
        return false;
    }

    auto parsed = parser->parseFromFile(command_param.model_path.c_str(), static_cast<int>(gLogger.reportableSeverity));
    if (!parsed)
    {
        std::cout << "parse model failure\n";
        return false;
    }

    builder->setMaxBatchSize(command_param.batch_size);
    config->setMaxWorkspaceSize(static_cast<size_t>(32) << 20); // 32 MiB workspace

    // Input dims assumed NCHW: d[1]=C, d[2]=H, d[3]=W (explicit-batch layout).
    BuildEngineParam param;
    const auto input_dims = network->getInput(0)->getDimensions();
    param.nChannel = input_dims.d[1];
    param.nHeight = input_dims.d[2];
    param.nWidth = input_dims.d[3];

    // The calibrator must stay alive until buildEngineWithConfig() returns;
    // it is only constructed (and its cache file touched) when INT8 is enabled.
    std::unique_ptr<Calibrator> calib;
    if (command_param.use_int8)
    {
        calib = std::make_unique<Calibrator>(2, &param, "int8_cache.CNLite");
        config->setFlag(BuilderFlag::kINT8);
        config->setInt8Calibrator(calib.get());
    }
    // else: FP16 was considered but is currently disabled:
    // config->setFlag(BuilderFlag::kFP16);

    // Build the engine; check for null BEFORE dereferencing (build can fail,
    // e.g. unsupported layer or insufficient workspace).
    auto engine = hackathon::UniquePtr<ICudaEngine>(builder->buildEngineWithConfig(*network, *config));
    if (!engine)
    {
        std::cout << "build engine failure\n";
        return false;
    }

    IHostMemory *trt_model_stream = engine->serialize();
    if (!trt_model_stream)
    {
        std::cout << "serialize model failure\n";
        return false;
    }

    // The plan is raw bytes — the stream must be opened in binary mode, or
    // the file is corrupted on platforms that translate line endings.
    string save_trt_file = "cn_lite_trt.plan";
    ofstream ofs(save_trt_file, std::ios::binary);
    if (!ofs)
    {
        std::cout << "open " + save_trt_file + " failure\n";
        trt_model_stream->destroy();
        return false;
    }
    ofs.write(static_cast<const char *>(trt_model_stream->data()), trt_model_stream->size());
    ofs.close();
    std::cout << "Save TensorRT Model:" + save_trt_file + " Success\n";

    trt_model_stream->destroy();

    return true;
}

/// @brief Parse command-line arguments into @p command_param.
///        Usage: tool <onnx path> <batch size> <enable int8: 1|0>
/// @param argc          Argument count from main().
/// @param argv          Argument vector from main().
/// @param command_param Out: populated on success.
/// @return true if all mandatory arguments are present and valid.
/// @note Calls exit(0) after printing the banner when run with no arguments
///       (legacy behavior preserved).
bool parseArgs(int argc, char *argv[], hackathon::CommandParam &command_param)
{
    if (argc == 1)
    {
        printf("CornetNetLite TensorRT Tool\n");
        exit(0);
    }

    if (argc < 4)
    {
        printf("\n");
        printf("Mandatory params:\n");
        printf("  [onnx path]   : onnx model path\n");
        printf("  [batch size]  : batch size\n");
        printf("  [enable int8] : enable int8(only 1 or 0)\n");
        return false;
    }

    command_param.model_path = std::string(argv[1]);
    command_param.batch_size = std::atoi(argv[2]);
    command_param.use_int8 = std::atoi(argv[3]) == 1;

    // atoi() returns 0 for non-numeric input; reject that (and negatives)
    // here rather than letting setMaxBatchSize(0) fail downstream.
    if (command_param.batch_size <= 0)
    {
        printf("batch size: %s invalid (must be a positive integer)\n", argv[2]);
        return false;
    }

    if (hackathon::check_file_exist(command_param.model_path) == false)
    {
        printf("onnx path: %s not exist\n", command_param.model_path.c_str());
        return false;
    }

    return true;
}

/// Entry point: parse the CLI, then run the ONNX -> TensorRT conversion.
/// Returns 0 on success, -1 on any failure.
int main(int argc, char *argv[])
{
    hackathon::CommandParam command_param;

    // Bail out early if the command line is malformed.
    if (!parseArgs(argc, argv, command_param))
    {
        cout << "Parse Command Failure\n";
        return -1;
    }

    // Convert; any builder/parser/IO error has already been reported.
    const bool converted = OnnxToTrt(command_param);
    if (!converted)
    {
        cout << "Onnx TensorRT Convert Failure\n";
        return -1;
    }

    cout << "Onnx TensorRT Convert Success" << endl;
    return 0;
}
