#ifndef CPU_ONLY

#ifndef __GPU_MANAGER_H__
#define __GPU_MANAGER_H__

#include <iostream>
#include <vector>

#include <glog/logging.h>

#include <boost/thread.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>

#include <cuda.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#ifdef USE_CUDNN
#include <cudnn.h>
#endif // USE_CUDNN

#include "caffe/common.hpp"
#include "caffe/util/device_alternate.hpp"

// Compute capability-specific settings.
// Maximum number of registers per thread.
#define MAX_REGS_PER_THREAD GetMaxRegsPerThread()
// #if __CUDA_ARCH__ == 300
// #define MAX_REGS_PER_THREAD 63
// #elif __CUDA_ARCH__ > 300
// #define MAX_REGS_PER_THREAD 255
// #endif

// Maximum size of shared memory per block (bytes)
#define MAX_SHARED_MEMORY_PER_BLOCK GetMaxSharedMemPerBlock()
// #if (__CUDA_ARCH__ >= 300) && (__CUDA_ARCH__ <= 620)
// #define MAX_SHARED_MEMORY_PER_BLOCK 49152
// #elif __CUDA_ARCH__ == 700
// #define MAX_SHARED_MEMORY_PER_BLOCK 98304
// #elif __CUDA_ARCH__ == 750
// #define MAX_SHARED_MEMORY_PER_BLOCK 65536
// #endif

// Maximum number of resident thread blocks per multiprocessor
#define MAX_THREAD_BLOCKS_PER_MULTIPROCESSOR GetMaxThreadBlocksPerMultiprocessor()
// #if ((__CUDA_ARCH__ >= 300) && (__CUDA_ARCH__ <= 370)) || (__CUDA_ARCH__ == 750)
// #define MAX_THREAD_BLOCKS_PER_MULTIPROCESSOR 16
// #elif (__CUDA_ARCH__ >= 500) && (__CUDA_ARCH__ <= 700)
// #define MAX_THREAD_BLOCKS_PER_MULTIPROCESSOR 32
// #endif

// Maximum number of warps per multiprocessor
#define MAX_WARPS_PER_MULTIPROCESSOR GetMaxWarpsPerMultiprocessor()
// #if (__CUDA_ARCH__ >= 300) && (__CUDA_ARCH__ <= 700)
// #define MAX_WARPS_PER_MULTIPROCESSOR 64
// #else
// #define MAX_WARPS_PER_MULTIPROCESSOR 32
// #endif

// Maximum number of kernels that can be launched concurrently.
#define MAX_GRIDS_PER_DEVICE GetMaxConcurrentKernels()
// #if ((__CUDA_ARCH__ >=350) && (__CUDA_ARCH__ <= 520)) || (__CUDA_ARCH__ == 610)
// #define MAX_GRIDS_PER_DEVICE 32
// #elif (__CUDA_ARCH__ >= 700) || (__CUDA_ARCH__ == 600)
// #define MAX_GRIDS_PER_DEVICE 128
// #elif (__CUDA_ARCH__ == 300) || (__CUDA_ARCH__ == 530) || (__CUDA_ARCH__ == 620)
// #define MAX_GRIDS_PER_DEVICE 16
// #else
// #define MAX_GRIDS_PER_DEVICE 4
// #endif

#define WARP_REG_ALLOCATION_GRANULARITY 256

namespace caffe {
  
  // Query the maximum number of registers available to a single thread
  // on the current device (backs the MAX_REGS_PER_THREAD macro above).
  int GetMaxRegsPerThread();

  // Query the maximum shared memory per thread block, in bytes
  // (backs the MAX_SHARED_MEMORY_PER_BLOCK macro above).
  int GetMaxSharedMemPerBlock();

  // Query the maximum number of resident thread blocks per multiprocessor
  // (backs the MAX_THREAD_BLOCKS_PER_MULTIPROCESSOR macro above).
  int GetMaxThreadBlocksPerMultiprocessor();

  // Query the maximum number of resident warps per multiprocessor
  // (backs the MAX_WARPS_PER_MULTIPROCESSOR macro above).
  int GetMaxWarpsPerMultiprocessor();

  // Query the maximum number of kernels the device can run concurrently
  // (backs the MAX_GRIDS_PER_DEVICE macro above).
  int GetMaxConcurrentKernels();

  class GpuStreamPool
  {
  public:
    // Default destructor.
    ~GpuStreamPool();

    /**
     * @brief    Object access method.
     *
     * Thread local context for GpuStreamPool. Each device has a unique GpuStreamPool
     * object to manage the connection between host and device.
     *
     * @return    The corresponding GpuStreamPool object.
     */
    static GpuStreamPool& Get();

    /**
     * @brief    Reset the stream pool.
     */
    void Reset();

    /**
     * @brief    Pool size setting function.
     *
     * @param[in] pool_size    The required pool size.
     */
    void SetPoolSize(size_t pool_size = 0);

    /**
     * @brief   Get the size of the stream pool.
     */
    inline int GetPoolSize() {
      return this->streams_.size() ? this->streams_.size() : 1;
    }

    /**
     * @brief    Set the corresponding GPU device.
     *
     * @param[in] device_id    The target GPU device.
     */
    void SetDevice(const int device_id);

    /**
     * @brief Get the number of handles.
     */
    inline unsigned int GetHandleNum() {
      return this->handle_num_;
    }

    /**
     * @brief Get the kernel running CUDA stream.
     *
     * @param[in] stream_id    ID of the selected stream_id that the kernel will be launched.
     */
    inline cudaStream_t CudaStreamGet(int stream_id = -1) {
      if (stream_id == -1 or this->handle_num_ == 0) {
        return default_stream_;
      }
      return this->streams_[stream_id % this->handle_num_].get()->get();
    }

    /**
     * @brief    Get the boost::shared_ptr object of a CudaStream object.
     *
     * @param[in] stream_id   ID of the selected stream_id that the kernel will be launched.
     */
    inline boost::shared_ptr<CudaStream> CudaStreamGetShared(int stream_id = -1) {
      if (stream_id == -1 or this->handle_num_ == 0) {
        return boost::make_shared<CudaStream>(default_stream_);
      }
      return this->streams_[stream_id % this->handle_num_];
    }

    /**
     * @brief Get the cuBLAS kernel handler.
     *
     * @param[in] cublas_id    ID of the selected cuBLAS handler.
     */
    inline cublasHandle_t CuBlasHandleGet(int cublas_id = -1) {
      if (cublas_id == -1 or this->handle_num_ == 0) {
        return this->default_cublas_handle_;
      }
      return this->cublas_handles_[cublas_id % this->handle_num_].get()->get();
    }

#ifdef USE_CUDNN
    /**
     * @brief    Get the cuDNN kernel handler.
     *
     * @param[in] cudnn_id    ID of the selected cuDNN handler.
     */
    inline cudnnHandle_t CuDNNHandleGet(int cudnn_id = -1) {
      if (cudnn_id == -1 or this->handle_num_ == 0) {
        return this->default_cudnn_handle_;
      }
      return this->cudnn_handles_[cudnn_id % this->handle_num_].get()->get();
    }
#endif // USE_CUDNN

  protected:
    int device_id_; /**< The GPU device ID */

    cudaDeviceProp device_prop_; /**< The property of the GPU device. */

    // TODO: Convolution layer-specific parameters.
    int group_; /**< The convolution operations group_. */
    cudaStream_t default_stream_; /**< The default stream. */
    // Manager of streams on all GPU devices.
    std::vector<boost::shared_ptr<CudaStream>> streams_; /**< CudaStream vector, used to manage CUDA streams. */

    unsigned int handle_num_; /**< The number of CUDA streams. */

    // Manager of cuBLAS handler on all GPU devices in all available streams.
    vector<boost::shared_ptr<CuBLASHandle>> cublas_handles_; /**< CuBLASHandle vector. */
    cublasHandle_t default_cublas_handle_; /**< The default cuBLAS handler binding to the default stream. */

#ifdef USE_CUDNN
    // Manager of cuDNN handler on all GPU devices in all available streams.
    vector<boost::shared_ptr<CuDNNHandle>> cudnn_handles_; /**< CuDNNHandle vector. */
    cudnnHandle_t default_cudnn_handle_; /**< The default cuDNN handler binding to the default stream */
#endif // USE_CUDNN

  private:
    // Default constructor.
    GpuStreamPool();

    DISABLE_COPY_MOVE_AND_ASSIGN(GpuStreamPool);
  };
} /* namespace caffe */

#endif /* __GPU_MANAGER_H__ */

#endif /* CPU_ONLY */
