#ifndef NVBUFFERS_H
#define NVBUFFERS_H

#include <NvInfer.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <stdlib.h>

#include <cstdlib>
#include <exception>
#include <memory>
#include <new>
#include <string>
#include <vector>

#include "nvcommon.h"

namespace weilin {

// RAII buffer of `mSize` elements of a TensorRT data type; the storage
// location is decided by the Allocator policy (CudaAllocator -> device
// memory, HostAllocator -> pageable host memory).
//
// Move-only: the class owns a raw pointer, so copying would double-free.
template <typename Allocator>
class GenericBuffer {
 public:
  //! Creates an empty buffer; no allocation is performed.
  GenericBuffer(nvinfer1::DataType type = nvinfer1::DataType::kFLOAT)
      : mSize(0), mCapacity(0), mType(type), mBuffer(nullptr) {}

  //! Allocates `size` elements of `type`.
  //! Throws std::bad_alloc when the allocator fails.
  //! Bug fix: mCapacity was previously left uninitialized here, so the
  //! capacity check in resize() read indeterminate memory.
  GenericBuffer(size_t size, nvinfer1::DataType type)
      : mSize(size), mCapacity(size), mType(type), mBuffer(nullptr) {
    if (!allocator.alloc(&mBuffer, this->nb_bytes())) {
      throw std::bad_alloc();
    }
  }

  // Copying is forbidden: the buffer owns a raw pointer.
  GenericBuffer(const GenericBuffer&) = delete;
  GenericBuffer& operator=(const GenericBuffer&) = delete;

  //! Move constructor: steals ownership and leaves `buf` empty.
  GenericBuffer(GenericBuffer&& buf)
      : mSize(buf.mSize),
        mCapacity(buf.mCapacity),
        mType(buf.mType),
        mBuffer(buf.mBuffer) {
    buf.mSize = 0;
    buf.mCapacity = 0;
    buf.mType = nvinfer1::DataType::kFLOAT;
    buf.mBuffer = nullptr;
  }

  //! Move assignment: releases the current storage, then steals `buf`'s.
  GenericBuffer& operator=(GenericBuffer&& buf) {
    if (this != &buf) {
      allocator.free(mBuffer);
      mSize = buf.mSize;
      mCapacity = buf.mCapacity;
      mType = buf.mType;
      mBuffer = buf.mBuffer;

      buf.mSize = 0;
      buf.mCapacity = 0;
      buf.mType = nvinfer1::DataType::kFLOAT;  // consistent with move ctor
      buf.mBuffer = nullptr;
    }
    return *this;
  }

  //! Raw pointer to the storage (host or device, depending on Allocator).
  void* data() const { return mBuffer; }

  //! Number of elements (not bytes).
  size_t size() const { return mSize; }

  //! Total byte size: element count times the per-element byte width.
  size_t nb_bytes() const {
    return this->size() * get_nv_data_type_size(mType);
  }

  //! Grows the storage to hold `newSize` elements. Existing contents are
  //! NOT preserved when a reallocation happens; shrinking only updates mSize.
  //! Throws std::bad_alloc when the allocator fails.
  void resize(size_t newSize) {
    mSize = newSize;
    if (mCapacity < newSize) {
      allocator.free(mBuffer);
      // Bug fix: clear the pointer (and capacity) before reallocating so a
      // failed alloc cannot leave a dangling pointer that the destructor
      // would double-free.
      mBuffer = nullptr;
      mCapacity = 0;
      if (!allocator.alloc(&mBuffer, this->nb_bytes())) {
        throw std::bad_alloc();
      }
      mCapacity = newSize;
    }
  }

  //! Convenience overload: resize to the volume of a TensorRT Dims.
  void resize(const nvinfer1::Dims& dims) { this->resize(volume(dims)); }

  //! Releases the owned storage.
  ~GenericBuffer() { allocator.free(mBuffer); }

 private:
  size_t mSize;              // current logical element count
  size_t mCapacity;          // allocated element capacity (>= mSize)
  nvinfer1::DataType mType;  // element type; fixes the per-element byte size
  void* mBuffer;             // owned raw storage; nullptr when empty
  Allocator allocator;       // stateless allocation policy object
};

class CudaAllocator {
 public:
  bool alloc(void** ptr, size_t size) const {
    return cudaMalloc(ptr, size) == cudaSuccess;
  }

  void free(void* ptr) const { cudaFree(ptr); }
};

// Allocation policy backed by ordinary pageable host memory (std::malloc).
// Intended as the Allocator parameter of GenericBuffer.
class HostAllocator {
 public:
  // Reserves `size` bytes on the host into *ptr.
  // Returns true iff the allocation succeeded.
  bool alloc(void** ptr, size_t size) const {
    void* mem = std::malloc(size);
    *ptr = mem;
    return mem != nullptr;
  }

  // Returns memory obtained from alloc() to the C runtime heap.
  void free(void* ptr) const {
    std::free(ptr);
  }
};

// Owns one device/host buffer pair per engine binding and provides lookup by
// binding name or index, plus bulk host<->device copy helpers.
// NOTE(review): built on the legacy TensorRT binding API (getNbBindings,
// getBindingDimensions, ...), which is deprecated in TensorRT >= 8.5 --
// confirm the TensorRT version this project targets.
class TensorBuffer {
 public:
  // Allocates a device buffer and a matching host buffer for every binding
  // of `engine`. The element count is volume(dims); for vectorized formats
  // the vectorized dimension is first rounded up to whole vectors (div_up)
  // and the volume is then scaled back by scalarsPerVec, so the padding
  // introduced by the round-up is included in the allocation.
  // Throws std::bad_alloc (from GenericBuffer) if any allocation fails.
  TensorBuffer(std::shared_ptr<nvinfer1::ICudaEngine> engine)
      : mEngine(engine), mBatchSize(0) {
    for (int i = 0; i < mEngine->getNbBindings(); ++i) {
      auto dims = mEngine->getBindingDimensions(i);
      size_t vol = 1;
      nvinfer1::DataType type = mEngine->getBindingDataType(i);
      int vecDim = mEngine->getBindingVectorizedDim(i);
      if (-1 != vecDim) {  // -1 means the binding is not vectorized
        int scalarsPerVec = mEngine->getBindingComponentsPerElement(i);
        dims.d[vecDim] = div_up(dims.d[vecDim], scalarsPerVec);
        vol *= scalarsPerVec;
      }
      vol *= volume(dims);
      std::unique_ptr<TManageBuffer> manBuf{new TManageBuffer()};
      manBuf->deviceBuffer = GenericBuffer<CudaAllocator>(vol, type);
      manBuf->hostBuffer = GenericBuffer<HostAllocator>(vol, type);
      // Device pointers are recorded in binding order, as expected by
      // IExecutionContext execute/enqueue calls.
      mDeviceBindings.emplace_back(manBuf->deviceBuffer.data());
      mManageBuffers.emplace_back(std::move(manBuf));
    }
  }

  // Raw device pointers in binding order, suitable for execute()/enqueue().
  std::vector<void*>& get_device_bindings() { return mDeviceBindings; }

  const std::vector<void*>& get_device_bindings() const {
    return mDeviceBindings;
  }

  // Device buffer for binding `index` (resolved via its binding name);
  // returns nullptr if the lookup fails.
  // NOTE(review): non-const, unlike the name-based overload below.
  void* get_device_buffer(const int index) {
    return get_buffer(false, mEngine->getBindingName(index));
  }

  // Device buffer for the named tensor; nullptr if the name is unknown.
  void* get_device_buffer(const std::string& tensorName) const {
    return get_buffer(false, tensorName);
  }

  // Host buffer for binding `index` (resolved via its binding name);
  // returns nullptr if the lookup fails.
  void* get_host_buffer(const int index) {
    return get_buffer(true, mEngine->getBindingName(index));
  }

  // Host buffer for the named tensor; nullptr if the name is unknown.
  void* get_host_buffer(const std::string& tensorName) const {
    return get_buffer(true, tensorName);
  }

  // Byte size of the named tensor's buffer. Returns the sentinel
  // ~size_t(0) (i.e. SIZE_MAX) when the tensor name is not found.
  size_t size(const std::string& tensorName) const {
    int index = mEngine->getBindingIndex(tensorName.c_str());
    if (index == -1) return ~size_t(0);
    return mManageBuffers[index]->hostBuffer.nb_bytes();
  }

  void dump_buffer() {
    // TODO: dump the buffered data
  }

  // Synchronously copies all input bindings host -> device.
  void copy_input_to_device() { memcpy_buffer(false, true); }

  // Synchronously copies all output bindings device -> host.
  void copy_output_to_host() { memcpy_buffer(true, false); }

  // Asynchronous variant of copy_input_to_device() on `stream`.
  // Host buffers are pageable, so the copy may not fully overlap.
  void copy_input_to_device_async(const cudaStream_t stream) {
    memcpy_buffer(false, true, true, stream);
  }

  // Asynchronous variant of copy_output_to_host() on `stream`.
  void copy_output_to_host_async(const cudaStream_t stream) {
    memcpy_buffer(true, false, true, stream);
  }

 private:
  // Resolves `tensorName` to a binding index and returns the host (isHost)
  // or device buffer pointer; nullptr when the name is unknown.
  void* get_buffer(const bool isHost, const std::string& tensorName) const {
    int index = mEngine->getBindingIndex(tensorName.c_str());
    if (index == -1) return nullptr;
    return (isHost ? mManageBuffers[index]->hostBuffer.data()
                   : mManageBuffers[index]->deviceBuffer.data());
  }

  // Copies every input (isInput) or every output (!isInput) binding.
  // Direction: isToHost ? device -> host : host -> device.
  // NOTE(review): cudaMemcpy/cudaMemcpyAsync return codes are ignored, so
  // copy failures are silent -- consider checking them.
  void memcpy_buffer(const bool isToHost, const bool isInput,
                     const bool isAsync = false,
                     const cudaStream_t stream = nullptr) {
    for (int i = 0; i < mEngine->getNbBindings(); ++i) {
      if ((isInput && mEngine->bindingIsInput(i)) ||
          (!isInput && !mEngine->bindingIsInput(i))) {
        void* dstPtr = isToHost ? mManageBuffers[i]->hostBuffer.data()
                                : mManageBuffers[i]->deviceBuffer.data();
        void* srcPtr = isToHost ? mManageBuffers[i]->deviceBuffer.data()
                                : mManageBuffers[i]->hostBuffer.data();
        // Host and device buffers were sized identically in the constructor.
        const size_t bytesSize = mManageBuffers[i]->hostBuffer.nb_bytes();
        const cudaMemcpyKind memcpyType =
            isToHost ? cudaMemcpyDeviceToHost : cudaMemcpyHostToDevice;
        if (isAsync)
          cudaMemcpyAsync(dstPtr, srcPtr, bytesSize, memcpyType, stream);
        else
          cudaMemcpy(dstPtr, srcPtr, bytesSize, memcpyType);
      }
    }
  }

  // Paired storage for one binding: device-side plus host-side staging.
  struct TManageBuffer {
    GenericBuffer<CudaAllocator> deviceBuffer;
    GenericBuffer<HostAllocator> hostBuffer;
  };
  std::shared_ptr<nvinfer1::ICudaEngine> mEngine;  // shared engine handle
  int mBatchSize;  // set to 0 in the ctor; never read in this file
  std::vector<void*> mDeviceBindings;  // device pointers, binding order
  std::vector<std::unique_ptr<TManageBuffer>> mManageBuffers;  // per binding
};

}  // namespace weilin

#endif /* #ifndef NVBUFFERS_H */
