#ifndef RT_MODEL_OPERATION_H
#define RT_MODEL_OPERATION_H
#include <iostream>
#include <cuda_runtime_api.h>
#include <string>
#include <vector>
#include <assert.h>
#include <opencv2/opencv.hpp>

#include "cuda_runtime.h"

#include "NvOnnxParser.h"
#include "NvInfer.h"
#include "base_common/rt_mem_fly.hpp"

using namespace nvinfer1;

namespace algocommon
{

    // Generic success/failure status code returned by the model utilities below.
    enum Result
    {
        SUCCESS = 0,
        FAILED = 1,
    };

    // Build-time options for converting an ONNX model into a TensorRT engine.
    // Defaults mirror the OnnxModeltoTRT(int, bool, bool) constructor parameters.
    struct OnnxModeltoTRTParam
    {
        int max_batch_size = 1;  // largest batch size the built engine will accept
        bool use_fp16 = true;    // request FP16 precision when building the engine
        bool encrypted = false;  // input ONNX file is encrypted (simple XOR scheme; see OnnxModeltoTRT::SimpleXor)
    };

    // Builds/loads TensorRT engines from ONNX models and runs inference.
    //
    // Typical flow:
    //   1. ErializeOnnxModel() / ErializeEncryptedOnnxModel()  (build + save), or
    //      ReadTRTErializedFile()                              (load prebuilt)
    //   2. Fill GetInputTensor(), SetBatchSize()
    //   3. Forward() -> output tensors
    //
    // NOTE(review): "Erialize"/"Mollc" look like typos for "Serialize"/"Malloc",
    // but they are part of the public interface and are kept as-is.
    class OnnxModeltoTRT
    {
    public:
        OnnxModeltoTRT(const int max_batch_size, const bool use_fp16, const bool encrypted);
        OnnxModeltoTRT(const OnnxModeltoTRTParam &param);
        OnnxModeltoTRT();
        virtual ~OnnxModeltoTRT();

    public:
        // Parse an encrypted ONNX file, build a TensorRT engine, and write the
        // serialized engine to rt_erialize_model.
        Result ErializeEncryptedOnnxModel(const std::string &onnx_model, const std::string &rt_erialize_model);
        // Parse a plain ONNX file, build a TensorRT engine, and write the
        // serialized engine to rt_erialize_model.
        Result ErializeOnnxModel(const std::string &onnx_model, const std::string &rt_erialize_model);
        // Load a previously serialized TensorRT engine from disk and prepare it
        // for inference.
        Result ReadTRTErializedFile(const std::string &rt_mode_path);
        // Run inference on the current input tensor; returns the output tensors.
        std::vector<TensorFloat> &Forward();
        // Input tensor the caller fills before invoking Forward().
        TensorFloat &GetInputTensor();

        // Network input shape recorded when the model was parsed/loaded.
        Shape GetNetInputShape() const
        {
            return input_shape_;
        }

        // Set the batch size used by Forward(); values above the configured
        // maximum are silently clamped to max_batch_size_.
        inline void SetBatchSize(int batch_size)
        {
            batch_size_ = batch_size;
            if (batch_size_ > max_batch_size_)
            {
                batch_size_ = max_batch_size_;
            }
        }
        inline int GetBatchSize() const
        {
            return batch_size_;
        }

        inline int GetMaxBatchSize() const
        {
            return max_batch_size_;
        }

    protected:
        // Minimal TensorRT logger: forwards warnings/errors to stderr and
        // suppresses INFO-level chatter.
        class Logger : public nvinfer1::ILogger
        {
        public:
            void log(nvinfer1::ILogger::Severity severity, const char *msg) override
            {
                // INFO is filtered out up front, so the switch below only ever
                // sees warnings and errors.
                if (severity == Severity::kINFO)
                    return;
                switch (severity)
                {
                case Severity::kINTERNAL_ERROR:
                    std::cerr << "INTERNAL_ERROR:" << msg << std::endl;
                    break;
                case Severity::kERROR:
                    std::cerr << "ERROR:" << msg << std::endl;
                    break;
                case Severity::kWARNING:
                    std::cerr << "WARNING:" << msg << std::endl;
                    break;
                default:
                    break;
                }
            }
        };
        // Write the serialized engine held in model_host_memory_ to disk.
        Result SaveTRTErializedFile(const std::string &rt_mode_path);
        // Destroy TensorRT objects and free device buffers.
        void Release();

    private:
        // Deserialize an in-memory engine blob and allocate runtime buffers.
        Result DeserializeModelAndMollocData(void *model_host_data, std::size_t size);
        // Symmetric XOR obfuscation used for "encrypted" model files.
        bool SimpleXor(const std::string &info, const std::string &key, std::string &result);
        // Read an entire file into str.
        void GetFileString(const std::string &file, std::string &str);
        bool CheckFileExist(const std::string &path);

        // Allocate the device buffers for the engine bindings.
        Result MollcData();

    protected:
        cudaStream_t stream_ = nullptr;  // CUDA stream used for inference

    private:
        std::vector<void *> buffers_;                         // one device buffer per engine binding
        nvinfer1::ICudaEngine *cuda_engine_ = nullptr;
        nvinfer1::IHostMemory *model_host_memory_ = nullptr;  // serialized engine blob
        nvinfer1::IRuntime *runtime_ = nullptr;
        nvinfer1::IExecutionContext *context_ = nullptr;
        bool release_ = false;                                // guards double Release()
        std::vector<TensorFloat> output_tensors_;
        TensorFloat input_tensor_;
        int max_batch_size_ = 1;   // upper bound enforced by SetBatchSize()
        int batch_size_ = 1;       // current batch size used by Forward()
        bool use_fp16_ = true;     // request FP16 precision when building the engine
        Logger gLogger_;
        Shape input_shape_;        // network input shape
        bool encrypted_ = false;   // ONNX file is XOR-"encrypted"
    };

    inline TensorCopyFromMat(TensorUint8 &target, int offset_image, const cv::Mat &source)
    {
        cudaMemcpy(target.mutable_gpu_data(offset_image),
                   reinterpret_cast<unsigned char *>(source.data),
                   target.GetSize() / target.GetNums() * sizeof(unsigned char),
                   cudaMemcpyHostToDevice);
    }

    inline TensorCopyFromMat(TensorUint8 &target, const cv::Mat &source, cudaStream_t stream)
    {

        target.Resize(1, source.channels(), source.rows, source.cols);

        cudaMemcpyAsync(target.mutable_gpu_data(),
                        reinterpret_cast<unsigned char *>(source.data),
                        target.GetSize() * sizeof(unsigned char),
                        cudaMemcpyHostToDevice, stream);
    }
    inline TensorCopyFromMat(TensorUint8 &target, int offset_image, const cv::Mat &source, cudaStream_t stream)
    {
        target.Resize(1, source.channels(), source.rows, source.cols);
        cudaMemcpyAsync(target.mutable_gpu_data(offset_image),
                        reinterpret_cast<unsigned char *>(source.data),
                        target.GetSize() * sizeof(unsigned char),
                        cudaMemcpyHostToDevice, stream);
    }
    inline TensorCopyFromMat(TensorUint8 &target, const cv::Mat &source)
    {

        target.Resize(1, source.channels(), source.rows, source.cols);

        cudaMemcpy(target.mutable_gpu_data(),
                   reinterpret_cast<unsigned char *>(source.data),
                   target.GetSize() * sizeof(unsigned char),
                   cudaMemcpyHostToDevice);
    }

} //namespace algocommon

#endif
