#include "trt_builder.h"

#include <chrono>
#include <NvOnnxParser.h>

#include "utils/logger.h"
#include "utils/nv_utils.h"
#include "utils/file_utils.h"
using namespace std;
_TRT_INFER_BEGIN

namespace TrtBuilder {
// Render a dimension vector as "d0 x d1 x ... x dn" (empty string for no dims).
static string join_dims(const vector<int> &dims)
{
    stringstream output;
    // Stream values directly; avoids the snprintf buffer and the
    // signed/unsigned loop comparison of the previous implementation.
    for (size_t i = 0; i < dims.size(); ++i)
    {
        if (i != 0)
            output << " x ";
        output << dims[i];
    }
    return output.str();
}

// True when at least one of the layer's inputs is a network-level input tensor.
static bool layer_has_input_tensor(nvinfer1::ILayer *layer)
{
    const int count = layer->getNbInputs();
    for (int idx = 0; idx < count; ++idx)
    {
        auto *tensor = layer->getInput(idx);
        if (tensor != nullptr && tensor->isNetworkInput())
            return true;
    }
    return false;
}

// True when at least one of the layer's outputs is a network-level output tensor.
static bool layer_has_output_tensor(nvinfer1::ILayer *layer)
{
    const int count = layer->getNbOutputs();
    for (int idx = 0; idx < count; ++idx)
    {
        auto *tensor = layer->getOutput(idx);
        if (tensor != nullptr && tensor->isNetworkOutput())
            return true;
    }
    return false;
}

// Human-readable name for a TensorRT activation type (used in layer dumps).
static const char *activation_type_name(nvinfer1::ActivationType activation_type)
{
    switch (activation_type)
    {
    case nvinfer1::ActivationType::kRELU:
        return "ReLU";
    case nvinfer1::ActivationType::kSIGMOID:
        return "Sigmoid";
    case nvinfer1::ActivationType::kTANH:
        return "TanH";
    case nvinfer1::ActivationType::kLEAKY_RELU:
        return "LeakyRelu";
    case nvinfer1::ActivationType::kELU:
        return "Elu";
    case nvinfer1::ActivationType::kSELU:
        return "Selu";
    case nvinfer1::ActivationType::kSOFTSIGN:
        return "Softsign";
    case nvinfer1::ActivationType::kSOFTPLUS:
        return "Parametric softplus";
    case nvinfer1::ActivationType::kCLIP:
        return "Clip";
    case nvinfer1::ActivationType::kHARD_SIGMOID:
        return "Hard sigmoid";
    case nvinfer1::ActivationType::kSCALED_TANH:
        return "Scaled tanh";
    case nvinfer1::ActivationType::kTHRESHOLDED_RELU:
        return "Thresholded ReLU";
    }
    // Fixed typo: was "Unknow activation type".
    return "Unknown activation type";
}

// Human-readable name for a TensorRT pooling type (used in layer dumps).
static const char *pooling_type_name(nvinfer1::PoolingType type)
{
    switch (type)
    {
    case nvinfer1::PoolingType::kMAX:
        return "MaxPooling";
    case nvinfer1::PoolingType::kAVERAGE:
        return "AveragePooling";
    case nvinfer1::PoolingType::kMAX_AVERAGE_BLEND:
        return "MaxAverageBlendPooling";
    }
    // Fixed typo: was "Unknow pooling type".
    return "Unknown pooling type";
}

// Human-readable name for a layer's type; activation and pooling layers are
// further resolved to their specific sub-type name.
static string layer_type_name(nvinfer1::ILayer *layer)
{
    switch (layer->getType())
    {
    case nvinfer1::LayerType::kCONVOLUTION:
        return "Convolution";
    case nvinfer1::LayerType::kFULLY_CONNECTED:
        return "Fully connected";
    case nvinfer1::LayerType::kACTIVATION:
    {
        // The type tag guarantees the concrete class, so static_cast is safe
        // (replaces the old C-style cast).
        auto *act = static_cast<nvinfer1::IActivationLayer *>(layer);
        return activation_type_name(act->getActivationType());
    }
    case nvinfer1::LayerType::kPOOLING:
    {
        auto *pool = static_cast<nvinfer1::IPoolingLayer *>(layer);
        return pooling_type_name(pool->getPoolingType());
    }
    case nvinfer1::LayerType::kLRN:
        return "LRN";
    case nvinfer1::LayerType::kSCALE:
        return "Scale";
    case nvinfer1::LayerType::kSOFTMAX:
        return "SoftMax";
    case nvinfer1::LayerType::kDECONVOLUTION:
        return "Deconvolution";
    case nvinfer1::LayerType::kCONCATENATION:
        return "Concatenation";
    case nvinfer1::LayerType::kELEMENTWISE:
        return "Elementwise";
    case nvinfer1::LayerType::kPLUGIN:
        return "Plugin";
    case nvinfer1::LayerType::kUNARY:
        return "UnaryOp operation";
    case nvinfer1::LayerType::kPADDING:
        return "Padding";
    case nvinfer1::LayerType::kSHUFFLE:
        return "Shuffle";
    case nvinfer1::LayerType::kREDUCE:
        return "Reduce";
    case nvinfer1::LayerType::kTOPK:
        return "TopK";
    case nvinfer1::LayerType::kGATHER:
        return "Gather";
    case nvinfer1::LayerType::kMATRIX_MULTIPLY:
        return "Matrix multiply";
    case nvinfer1::LayerType::kRAGGED_SOFTMAX:
        return "Ragged softmax";
    case nvinfer1::LayerType::kCONSTANT:
        return "Constant";
    case nvinfer1::LayerType::kRNN_V2:
        return "RNNv2";
    case nvinfer1::LayerType::kIDENTITY:
        return "Identity";
    case nvinfer1::LayerType::kPLUGIN_V2:
        return "PluginV2";
    case nvinfer1::LayerType::kSLICE:
        return "Slice";
    case nvinfer1::LayerType::kSHAPE:
        return "Shape";
    case nvinfer1::LayerType::kPARAMETRIC_RELU:
        return "Parametric ReLU";
    case nvinfer1::LayerType::kRESIZE:
        return "Resize";
    }
    // Fixed typo: was "Unknow layer type".
    return "Unknown layer type";
}

// Format a TensorRT Dims as "d0 x d1 x ..." by delegating to join_dims.
static string dims_str(const nvinfer1::Dims &dims)
{
    vector<int> values(dims.d, dims.d + dims.nbDims);
    return join_dims(values);
}

// One-line description of a layer's hyper-parameters; empty string for layer
// types that carry no extra parameters worth printing.
static string layer_descript(nvinfer1::ILayer *layer)
{
    switch (layer->getType())
    {
    case nvinfer1::LayerType::kCONVOLUTION:
    {
        // Type tag guarantees the concrete class; static_cast replaces the
        // old C-style casts throughout this function.
        auto *conv = static_cast<nvinfer1::IConvolutionLayer *>(layer);
        return format("channel: %d, kernel: %s, padding: %s, stride: %s, dilation: %s, group: %d",
                      conv->getNbOutputMaps(),
                      dims_str(conv->getKernelSizeNd()).c_str(),
                      dims_str(conv->getPaddingNd()).c_str(),
                      dims_str(conv->getStrideNd()).c_str(),
                      dims_str(conv->getDilationNd()).c_str(),
                      conv->getNbGroups());
    }
    case nvinfer1::LayerType::kFULLY_CONNECTED:
    {
        auto *fully = static_cast<nvinfer1::IFullyConnectedLayer *>(layer);
        return format("output channels: %d", fully->getNbOutputChannels());
    }
    case nvinfer1::LayerType::kPOOLING:
    {
        auto *pool = static_cast<nvinfer1::IPoolingLayer *>(layer);
        return format(
            "window: %s, padding: %s",
            dims_str(pool->getWindowSizeNd()).c_str(),
            dims_str(pool->getPaddingNd()).c_str());
    }
    case nvinfer1::LayerType::kDECONVOLUTION:
    {
        auto *conv = static_cast<nvinfer1::IDeconvolutionLayer *>(layer);
        return format("channel: %d, kernel: %s, padding: %s, stride: %s, group: %d",
                      conv->getNbOutputMaps(),
                      dims_str(conv->getKernelSizeNd()).c_str(),
                      dims_str(conv->getPaddingNd()).c_str(),
                      dims_str(conv->getStrideNd()).c_str(),
                      conv->getNbGroups());
    }
    case nvinfer1::LayerType::kACTIVATION:
    case nvinfer1::LayerType::kPLUGIN:
    case nvinfer1::LayerType::kLRN:
    case nvinfer1::LayerType::kSCALE:
    case nvinfer1::LayerType::kSOFTMAX:
    case nvinfer1::LayerType::kCONCATENATION:
    case nvinfer1::LayerType::kELEMENTWISE:
    case nvinfer1::LayerType::kUNARY:
    case nvinfer1::LayerType::kPADDING:
    case nvinfer1::LayerType::kSHUFFLE:
    case nvinfer1::LayerType::kREDUCE:
    case nvinfer1::LayerType::kTOPK:
    case nvinfer1::LayerType::kGATHER:
    case nvinfer1::LayerType::kMATRIX_MULTIPLY:
    case nvinfer1::LayerType::kRAGGED_SOFTMAX:
    case nvinfer1::LayerType::kCONSTANT:
    case nvinfer1::LayerType::kRNN_V2:
    case nvinfer1::LayerType::kIDENTITY:
    case nvinfer1::LayerType::kPLUGIN_V2:
    case nvinfer1::LayerType::kSLICE:
    case nvinfer1::LayerType::kSHAPE:
    case nvinfer1::LayerType::kPARAMETRIC_RELU:
    case nvinfer1::LayerType::kRESIZE:
        return "";
    }
    // Fixed typo: was "Unknow layer type".
    return "Unknown layer type";
}

// Convert an STL dimension vector into a TensorRT Dims struct.
// Returns an empty Dims (nbDims == 0) when the vector exceeds MAX_DIMS.
static nvinfer1::Dims convert_to_trt_dims(const std::vector<int> &dims)
{
    nvinfer1::Dims output{};
    output.nbDims = 0;
    // Cast avoids a signed/unsigned comparison warning.
    if (dims.size() > static_cast<size_t>(nvinfer1::Dims::MAX_DIMS))
    {
        LOGE << "convert failed, dims.size: " << dims.size() << " " << nvinfer1::Dims::MAX_DIMS;
        return output;
    }

    if (!dims.empty())
    {
        output.nbDims = static_cast<int>(dims.size());
        // Element-wise copy instead of memcpy: stays correct even if the
        // element type of Dims::d differs from int in a future TRT version.
        for (size_t i = 0; i < dims.size(); ++i)
            output.d[i] = dims[i];
    }
    return output;
}

// Right-pad `input` with `blank` up to `align_size` characters.
// Inputs already at least that long (or a non-positive align_size) are
// returned unchanged.
static string align_blank(const string &input, int align_size, char blank = ' ')
{
    // Guard first: in the old code a negative align_size made
    // `align_size - input.size()` wrap to a huge size_t, causing an
    // effectively unbounded append loop.
    if (align_size <= 0 || input.size() >= static_cast<size_t>(align_size))
        return input;
    string output = input;
    output.append(static_cast<size_t>(align_size) - input.size(), blank);
    return output;
}

// Wall-clock time in whole milliseconds since the Unix epoch.
static long long timestamp_now()
{
    using namespace std::chrono;
    const auto since_epoch = system_clock::now().time_since_epoch();
    return duration_cast<milliseconds>(since_epoch).count();
}

// Wall-clock time in fractional milliseconds (microsecond resolution)
// since the Unix epoch.
static double timestamp_now_float()
{
    using namespace std::chrono;
    const auto us = duration_cast<microseconds>(system_clock::now().time_since_epoch()).count();
    return us / 1000.0;
}

// Printable name of the requested build precision.
const char *mode_string(ModelType type)
{
    if (type == ModelType::FP32)
        return "FP32";
    if (type == ModelType::FP16)
        return "FP16";
    if (type == ModelType::INT8)
        return "INT8";
    return "UnknownTRTModelType";
}

// Build a serialized TensorRT engine from an ONNX model and write it to disk.
//
// model_type        precision flag (FP32/FP16/INT8 label; only FP16 toggles
//                   a builder flag here)
// max_batch_size    upper bound used for the optimization profile's kMAX dim
// source_model_path path to the input ONNX file
// save_model_path   path the serialized engine is written to
// maxWorkspaceSize  builder scratch-memory budget in bytes
//
// Returns true on success, false on any builder/parser/IO failure.
bool compile(
    ModelType model_type,
    unsigned int max_batch_size,
    const std::string &source_model_path,
    const std::string &save_model_path,
    const size_t maxWorkspaceSize)
{
    // TODO Send logger appender
    LOGER_INST.init();
    LOGI << "Start compile....";
    LOGI << "model_type: " << mode_string(model_type);
    LOGI << "max_batch_size: " << max_batch_size;
    LOGI << "source_model_path: " << source_model_path;
    LOGI << "save_model_path: " << save_model_path;
    LOGI << "maxWorkspaceSize: " << (maxWorkspaceSize >> 20) << " MB";
    LOGI << "Device : " << CUDATools::device_description();

    auto builder = make_nvshared(nvinfer1::createInferBuilder(LOGER_INST.getTRTLogger()));
    if (builder == nullptr)
    {
        LOGE << "Failed to create InferBuilder";
        return false;
    }

    // Previously unchecked: createBuilderConfig can return null.
    auto config = make_nvshared(builder->createBuilderConfig());
    if (config == nullptr)
    {
        LOGE << "Failed to create BuilderConfig";
        return false;
    }
    if (model_type == ModelType::FP16)
    {
        if (!builder->platformHasFastFp16())
        {
            LOGW << "Platform does not support FP16";
        }
        config->setFlag(nvinfer1::BuilderFlag::kFP16);
    }

    const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    // Previously unchecked: createNetworkV2 can return null.
    auto network = make_nvshared(builder->createNetworkV2(explicitBatch));
    if (network == nullptr)
    {
        LOGE << "Failed to create NetworkDefinition";
        return false;
    }
    auto onnxParser = make_nvshared(nvonnxparser::createParser(*network, LOGER_INST.getTRTLogger()));
    if (onnxParser == nullptr)
    {
        LOGE << "Failed to create OnnxParser: ";
        return false;
    }

    if (!onnxParser->parseFromFile(source_model_path.c_str(), 1))
    {
        LOGE << "Failed to parse ONNX file: " << source_model_path;
        return false;
    }

    // Guard before getInput(0): a model without inputs would dereference null.
    int net_num_input = network->getNbInputs();
    if (net_num_input <= 0)
    {
        LOGE << "Parsed network has no inputs: " << source_model_path;
        return false;
    }

    auto inputTensor = network->getInput(0);
    auto inputDims = inputTensor->getDimensions();

    LOGI << "InputDims: " << join_dims(vector<int>(inputDims.d, inputDims.d + inputDims.nbDims));
    LOGI << "set max batch size: " << max_batch_size;
    LOGI << "set max workspace size: " << (maxWorkspaceSize >> 20) << " MB";
    LOGI << "Device : " << CUDATools::device_description();

    LOGI << "Network has " << net_num_input << " inputs.";
    vector<string> input_names(net_num_input);
    for (int i = 0; i < net_num_input; ++i)
    {
        auto tensor = network->getInput(i);
        auto dims = tensor->getDimensions();
        auto dims_str = join_dims(vector<int>(dims.d, dims.d + dims.nbDims));
        LOGI << "      " << i << " [ " << tensor->getName() << " ] "
             << "shape is " << dims_str;
        input_names[i] = tensor->getName();
    }

    int net_num_output = network->getNbOutputs();
    LOGI << "Network has " << net_num_output << " outputs.";
    for (int i = 0; i < net_num_output; ++i)
    {
        auto tensor = network->getOutput(i);
        auto dims = tensor->getDimensions();
        auto dims_str = join_dims(vector<int>(dims.d, dims.d + dims.nbDims));
        LOGI << "      " << i << " [ " << tensor->getName() << " ] "
             << "shape is " << dims_str;
    }

    // Dump every layer: type, input/output shapes and hyper-parameters.
    // ">>>" marks layers fed by a network input, "***" layers producing a
    // network output.
    int net_num_layers = network->getNbLayers();
    LOGI << "Network has " << net_num_layers << " layers.";
    for (int i = 0; i < net_num_layers; ++i)
    {
        auto layer = network->getLayer(i);
        auto name = layer->getName();
        auto type_str = layer_type_name(layer);
        auto input0 = layer->getInput(0);
        if (input0 == nullptr)
            continue;

        auto output0 = layer->getOutput(0);
        auto input_dims = input0->getDimensions();
        auto output_dims = output0->getDimensions();
        bool has_input = layer_has_input_tensor(layer);
        bool has_output = layer_has_output_tensor(layer);
        auto descript = layer_descript(layer);
        type_str = align_blank(type_str, 18);
        auto input_dims_str = align_blank(dims_str(input_dims), 18);
        auto output_dims_str = align_blank(dims_str(output_dims), 18);
        auto number_str = align_blank(format("%d.", i), 4);

        const char *token = "      ";
        if (has_input)
            token = "  >>> ";
        else if (has_output)
            token = "  *** ";

        LOGI << token << number_str << type_str << " " << input_dims_str << " -> " << output_dims_str << " " << descript;
    }

    builder->setMaxBatchSize(max_batch_size);
    config->setMaxWorkspaceSize(maxWorkspaceSize);

    // One optimization profile covering batch sizes [1, max_batch_size];
    // kOPT is pinned to batch 1.
    auto profile = builder->createOptimizationProfile();
    if (profile == nullptr)
    {
        LOGE << "Failed to create OptimizationProfile";
        return false;
    }
    for (int i = 0; i < net_num_input; ++i)
    {
        auto input = network->getInput(i);
        auto input_dims = input->getDimensions();
        input_dims.d[0] = 1;
        profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims);
        profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims);
        input_dims.d[0] = max_batch_size;
        profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims);
    }
    config->addOptimizationProfile(profile);

    LOGI << "Start building engine...";
    auto time_start = timestamp_now();
    auto engine = make_nvshared(builder->buildEngineWithConfig(*network, *config));
    if (engine == nullptr)
    {
        LOGE << "Failed to build engine";
        return false;
    }
    LOGI << "Engine build success, cost time: " << timestamp_now() - time_start << "ms";
    auto seridata = make_nvshared(engine->serialize());

    return FileUtils::save_file(save_model_path, seridata->data(), seridata->size());
}

} // namespace TrtBuilder
_TRT_INFER_END