//
// Created by zhangcc on 2020/7/4.
//
#include<algorithm>
#include<cstdio>
#include<cmath>
#include "angle.h"
#include "utils/RrConfig.h"
#include "utils/utils.h"
#include <fstream>

//#define CHECK(exp) \
//if (exp != cudaSuccess) { \
//printf("[ERROR] [%s:%d] %d\n", __FILE__, __LINE__, cudaGetLastError()); \
//exit(-1); \
//}
//
//class Logger : public nvinfer1::ILogger {
//public:
//
//    Logger() : Logger(Severity::kWARNING) {}
//
//    Logger(Severity severity) : reportableSeverity(severity) {}
//
//    void log(Severity severity, const char *msg) override {
//        // suppress messages with severity enum value greater than the reportable
//        if (severity > reportableSeverity) return;
//
//        switch (severity) {
//            case Severity::kINTERNAL_ERROR:
//                std::cerr << "INTERNAL_ERROR: ";
//                break;
//            case Severity::kERROR:
//                std::cerr << "ERROR: ";
//                break;
//            case Severity::kWARNING:
//                std::cerr << "WARNING: ";
//                break;
//            case Severity::kINFO:
//                std::cerr << "INFO: ";
//                break;
//            default:
//                std::cerr << "UNKNOWN: ";
//                break;
//        }
//        std::cerr << msg << std::endl;
//    }
//
//    Severity reportableSeverity{Severity::kWARNING};
//};

int AgnleNet::serialization(const std::string &model_file, const std::string &engine_file){
    // Build a TensorRT engine from the ONNX file `model_file`, serialize it
    // to `engine_file`, then deserialize the result into `mEngine`.
    // Returns 0 on success, -1 on any failure.
    //
    // BUG FIX: the original declared an int return type but fell off the end
    // without returning a value (undefined behavior), and never checked that
    // buildEngineWithConfig() succeeded before dereferencing the engine.
    Logger gLogger(nvinfer1::ILogger::Severity::kERROR);//kWARNING, kINFO, kINTERNAL_ERROR
    int device_count = 0;
    cudaGetDeviceCount(&device_count);
    assert(device_count > 0);
    if ((device_id + 1) > device_count) {
        device_id = 0; // requested device unavailable — fall back to GPU 0
    }
    std::cout << "GPU_nums: " << device_count << std::endl;
    std::cout << "CraftNet GPU ID: " << device_id << std::endl;
    cudaSetDevice(device_id);

    std::unique_ptr <nvinfer1::IRuntime, nvinfer1::Destroy<nvinfer1::IRuntime>> runtime{
            nvinfer1::createInferRuntime(gLogger)};
    if (!runtime) {
        return -1;
    }

    std::unique_ptr <nvinfer1::IBuilder, nvinfer1::Destroy<nvinfer1::IBuilder>> builder{
            nvinfer1::createInferBuilder(gLogger)};
    if (!builder) {
        return -1;
    }

    // Explicit-batch network definition, as required by the ONNX parser.
    const auto explicitBatch =
            1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    std::unique_ptr <nvinfer1::INetworkDefinition, nvinfer1::Destroy<nvinfer1::INetworkDefinition>> network{
            builder->createNetworkV2(explicitBatch)};
    if (!network) {
        return -1;
    }

    std::unique_ptr <nvonnxparser::IParser, nvinfer1::Destroy<nvonnxparser::IParser>> parser{
            nvonnxparser::createParser(*network, gLogger)};
    if (!parser) {
        return -1;
    }

    std::unique_ptr <nvinfer1::IBuilderConfig, nvinfer1::Destroy<nvinfer1::IBuilderConfig>> config{
            builder->createBuilderConfig()};
    if (!config) {
        return -1;
    }

    auto parsed = parser->parseFromFile(model_file.c_str(), static_cast<int>(nvinfer1::ILogger::Severity::kINFO));
    if (!parsed) {
        std::cout << "ERROR: could not parse input engine." << std::endl;
        return -1;
    }

    builder->setMaxBatchSize(max_batch_size);
    builder->setMaxWorkspaceSize(MAX_WORKSPACE_SIZE);

    // Restrict tactic selection to cuBLAS / cuBLASLt kernels.
    uint32_t cublas = 1U << static_cast<uint32_t>(nvinfer1::TacticSource::kCUBLAS);
    uint32_t cublasLt = 1U << static_cast<uint32_t>(nvinfer1::TacticSource::kCUBLAS_LT);
    config->setTacticSources(cublas | cublasLt);
    config->setFlag(nvinfer1::BuilderFlag::kGPU_FALLBACK);
    config->setFlag(nvinfer1::BuilderFlag::kSTRICT_TYPES);
#if defined(USE_FP16)
    {
            builder->setFp16Mode(true);
            config->setFlag(nvinfer1::BuilderFlag::kFP16);
        }
#endif

    mEngine.reset(builder->buildEngineWithConfig(*network, *config));
    if (!mEngine) {
        std::cout << "ERROR: engine build failed." << std::endl;
        return -1;
    }

    // Serialize the engine to disk, then reload it through the runtime so
    // `mEngine` matches what a later deserializing run would produce.
    std::unique_ptr <nvinfer1::IHostMemory, nvinfer1::Destroy<nvinfer1::IHostMemory>> trtModelStream{
            mEngine->serialize()};
    nvinfer1::writeBuffer(trtModelStream->data(), trtModelStream->size(), engine_file.c_str());
    mEngine.reset(runtime->deserializeCudaEngine(trtModelStream->data(), trtModelStream->size(), nullptr));

    return mEngine ? 0 : -1;
}

int AgnleNet::init(const std::string &model_file) {
    // Initialize the inference context: pick a GPU, obtain an engine (from a
    // cached .engine file, via trtexec, or by building it in-process), create
    // the execution context, and allocate the fixed-size device bindings.
    // Returns 0 on success.
    Logger gLogger(nvinfer1::ILogger::Severity::kERROR);//kWARNING, kINFO, kINTERNAL_ERROR
    int device_count = 0;
    cudaGetDeviceCount(&device_count);
    assert(device_count > 0);
    if ((device_id + 1) > device_count) {
        device_id = 0; // requested device unavailable — fall back to GPU 0
    }
    std::cout << "GPU_nums: " << device_count << std::endl;
    std::cout << "CraftNet GPU ID: " << device_id << std::endl;
    cudaSetDevice(device_id);

    std::unique_ptr <nvinfer1::IRuntime, nvinfer1::Destroy<nvinfer1::IRuntime>> runtime{
            nvinfer1::createInferRuntime(gLogger)};

    // Derive the engine path from the model path (".pth" -> ".engine").
    // BUG FIX: std::string::replace throws std::out_of_range when find()
    // returns npos; guard against model paths without a ".pth" substring.
    const std::string key_name = ".pth";
    std::string engine_file = model_file;
    const std::size_t key_pos = engine_file.find(key_name);
    if (key_pos != std::string::npos) {
        engine_file.replace(key_pos, key_name.size(), ".engine");
    } else {
        engine_file += ".engine"; // no ".pth" in the path — just append
    }
    if (trt_serialize) {
        std::string buffer = nvinfer1::readBuffer(engine_file.c_str());
        if (buffer.empty()) {
            // No cached engine yet: build one with the external trtexec tool.
            // NOTE(review): model_file/engine_file are interpolated into a
            // shell command — ensure they never carry untrusted input
            // (command injection risk).
            std::string TrtRoot = RrConfigMgr::GetInstance().ReadString("ocrConfig", "TrtRoot", "");
            std::string name = TrtRoot + "/bin/trtexec --onnx=";
            name += model_file + " --tacticSources=-cublasLt,+cublas --workspace=1024 ";
            //name += "--minShapes=input_0:1x3x48x192 --optShapes=input_0:1x3x48x192 --maxShapes=input_0:1x3x48x192 --shapes=input_0:1x3x48x192 ";
            name += "--fp16 --saveEngine=" + engine_file;
            if (system(name.c_str()) != 0) {
                std::cout << "ERROR: trtexec failed to build engine for " << model_file << std::endl;
            }
            buffer = nvinfer1::readBuffer(engine_file.c_str());
        }
        mEngine.reset(runtime->deserializeCudaEngine(buffer.data(), buffer.size(), nullptr));
    } else {
        serialization(model_file, engine_file);
    }

    assert(mEngine->getNbBindings() == MAX_BINDING_NUMS);

    context.reset(mEngine->createExecutionContext());

    cudaStreamCreate(&stream_t);
    // Fixed network I/O: input 3x48x192 floats, output 2 floats
    // (presumably two angle-class scores — TODO confirm against the model).
    CHECK(cudaMalloc(&bindings[0], 3 * 48 * 192 * sizeof(float)));
    CHECK(cudaMalloc(&bindings[1], 2 * sizeof(float)));

    return 0;
}

bool AgnleNet::Input(const cv::Mat &src, float *data) {
    // Repack an 8-bit 3-channel HWC (BGR) image into planar CHW order with
    // channels swapped to RGB, scaling each value from [0, 255] to [-1, 1].
    // `data` must hold at least 3 * src.rows * src.cols floats.
    const int plane = src.rows * src.cols;
    int idx = 0;

    for (int r = 0; r < src.rows; ++r) {
        // src.step accounts for possible row padding in the Mat layout.
        const uchar *px = src.data + r * src.step;
        for (int c = 0; c < src.cols; ++c, px += 3, ++idx) {
            data[idx]             = 2.0f * ((float) px[2] / 255.0f - 0.5f); // R plane
            data[idx + plane]     = 2.0f * ((float) px[1] / 255.0f - 0.5f); // G plane
            data[idx + 2 * plane] = 2.0f * ((float) px[0] / 255.0f - 0.5f); // B plane
        }
    }

    return true;
}

std::vector<float> AgnleNet::extract(const cv::Mat &inputs) {
    // Run angle inference on one image: resize to the fixed network height
    // keeping aspect ratio, pad on the right to the fixed width, normalize,
    // copy to the GPU, execute, and return the 2-float output vector.
    cudaSetDevice(device_id);

    const int imgh = 48, imgw = 192; // fixed network input H x W (see init())
    // BUG FIX: the original read `w` from inputs.rows as well, so the aspect
    // ratio was always 1.0 and every image was distorted to 48x48.
    const int h = inputs.rows, w = inputs.cols;
    float ratio = float(w) / float(h);
    // Width after scaling to height 48, capped at the network width and
    // clamped to at least 1 so cv::resize never sees an empty size.
    int resize_w = imgh * ratio > imgw ? imgw : int(imgh * ratio);
    if (resize_w < 1) resize_w = 1;
    cv::Mat process, padded_im;
    cv::resize(inputs, process, cv::Size(resize_w, imgh));
    cv::copyMakeBorder(
            process, padded_im, 0, 0, 0, imgw - resize_w,
            cv::BORDER_CONSTANT,
            cv::Scalar::all(0)); // pad right edge with black
    //
    inputBufferSize = 3 * padded_im.rows * padded_im.cols;
    outputBufferSize = 2;
    // std::vector instead of raw new[]/delete[]: exception-safe, no leak.
    std::vector<float> data(inputBufferSize);
    std::vector<float> feats(outputBufferSize);

    Input(padded_im, data.data());

    CHECK(cudaMemcpyAsync(bindings[0],
                          data.data(), inputBufferSize * sizeof(float),
                          cudaMemcpyHostToDevice, stream_t));
    bool status = context->enqueueV2(bindings, stream_t, nullptr);
    if (!status) {
        std::cout << "Enqueue failed" << std::endl;
    }
    CHECK(cudaMemcpyAsync(feats.data(),
                          bindings[1],
                          outputBufferSize * sizeof(float), cudaMemcpyDeviceToHost, stream_t));
    // BUG FIX: synchronize AFTER the device-to-host copy. The original
    // synchronized before it and then read the host buffer while the async
    // copy could still be in flight.
    cudaStreamSynchronize(stream_t);

    return feats;
}

AgnleNet::~AgnleNet() {
    // Release the device-side binding buffers first, then tear down the
    // CUDA stream they were used with.
    for (auto &binding : bindings) {
        cudaFree(binding);
    }
    cudaStreamDestroy(stream_t);
}