#ifndef CPU_ONLY

#include <cstdlib>
#include <cstring>

#include <boost/thread.hpp>

#include "caffe/util/gpu_manager.hpp"

using std::malloc;
using std::realloc;
using std::free;

namespace caffe {
  // Thread-local singleton storage: each host thread gets its own
  // GpuStreamPool instance, so pooled streams/handles are never shared
  // between threads.  Destroyed automatically on thread exit by boost.
  static boost::thread_specific_ptr<GpuStreamPool> tls_thread_gsp_instance_;
  
  // Returns the per-thread register limit for the current device: compute
  // capability 3.0 allows 63 registers per thread, later capabilities 255.
  int GetMaxRegsPerThread() {
    constexpr int MAX_REGS_PER_THREAD_300 = 63;
    constexpr int MAX_REGS_PER_THREAD_30x = 255;

    const bool is_cc_300 =
        Caffe::device_capability(Caffe::current_device()) == 300;
    return is_cc_300 ? MAX_REGS_PER_THREAD_300 : MAX_REGS_PER_THREAD_30x;
  }

  // Returns the per-block shared-memory limit, in bytes, for the current
  // device's compute capability (encoded as major*100 + minor).
  int GetMaxSharedMemPerBlock() {
    constexpr int MAX_SHARED_MEM_PER_BLOCK_300_602 = 49152;
    constexpr int MAX_SHARED_MEM_PER_BLOCK_700 = 98304;
    constexpr int MAX_SHARED_MEM_PER_BLOCK_705 = 65536;

    const auto cc = Caffe::device_capability(Caffe::current_device());
    if (cc == 700) {
      return MAX_SHARED_MEM_PER_BLOCK_700;
    }
    if (cc >= 300 && cc <= 602) {
      return MAX_SHARED_MEM_PER_BLOCK_300_602;
    }
    return MAX_SHARED_MEM_PER_BLOCK_705;
  }

  // Returns the resident-thread-block limit per multiprocessor for the
  // current device's compute capability.
  int GetMaxThreadBlocksPerMultiprocessor() {
    constexpr int MAX_THREAD_BLOCKS_PER_MULTIPROCESSOR_300_307_705 = 16;
    constexpr int MAX_THREAD_BLOCKS_PER_MULTIPROCESSOR_500_700 = 32;

    const auto cc = Caffe::device_capability(Caffe::current_device());
    const bool limited_to_sixteen = (cc >= 300 && cc <= 307) || cc == 705;
    return limited_to_sixteen
               ? MAX_THREAD_BLOCKS_PER_MULTIPROCESSOR_300_307_705
               : MAX_THREAD_BLOCKS_PER_MULTIPROCESSOR_500_700;
  }

  // Returns the resident-warp limit per multiprocessor for the current
  // device's compute capability.
  int GetMaxWarpsPerMultiprocessor() {
    constexpr int MAX_WARPS_PER_MULTIPROCESSOR_300_700 = 64;
    constexpr int MAX_WARPS_PER_MULTIPROCESSOR_DEFAULT = 32;

    const auto cc = Caffe::device_capability(Caffe::current_device());
    return (cc >= 300 && cc <= 700) ? MAX_WARPS_PER_MULTIPROCESSOR_300_700
                                    : MAX_WARPS_PER_MULTIPROCESSOR_DEFAULT;
  }

  // Returns the maximum number of concurrently resident grids for the
  // current device, i.e. the upper bound on useful concurrent kernel
  // launches, keyed on compute capability (encoded as major*100 + minor).
  //
  // Fix: the local constants are renamed so their names match the branch
  // conditions they belong to — the old names (_305_502, _300_503,
  // _700_600) omitted capabilities the branches actually covered (601 and
  // 602) and were misleading.  Behavior is unchanged.
  //
  // NOTE(review): every capability >= 700 reports 128 here — verify
  // against the CUDA compute-capability tables, which list a smaller
  // concurrent-grid limit for some 7.x embedded parts.
  int GetMaxConcurrentKernels() {
    constexpr int MAX_GRIDS_PER_DEVICE_DEFAULT = 4;
    constexpr int MAX_GRIDS_PER_DEVICE_300_503_602 = 16;
    constexpr int MAX_GRIDS_PER_DEVICE_305_502_601 = 32;
    constexpr int MAX_GRIDS_PER_DEVICE_600_700 = 128;

    auto device_capability = Caffe::device_capability(Caffe::current_device());
    if ((device_capability >= 305 and device_capability <= 502) or
        device_capability == 601) {
      return MAX_GRIDS_PER_DEVICE_305_502_601;
    } else if (device_capability >= 700 or device_capability == 600) {
      return MAX_GRIDS_PER_DEVICE_600_700;
    } else if (device_capability == 300 or device_capability == 503 or
               device_capability == 602) {
      return MAX_GRIDS_PER_DEVICE_300_503_602;
    } else {
      return MAX_GRIDS_PER_DEVICE_DEFAULT;
    }
  }

  // Returns the calling thread's pool, lazily constructing it on first use.
  GpuStreamPool& GpuStreamPool::Get() {
    GpuStreamPool* pool = tls_thread_gsp_instance_.get();
    if (pool == nullptr) {
      pool = new GpuStreamPool();
      tls_thread_gsp_instance_.reset(pool);
    }
    return *pool;
  }

  // Destroys the calling thread's pool; the next Get() rebuilds it.
  void GpuStreamPool::Reset() {
    tls_thread_gsp_instance_.reset();
  }

  // Resizes the pool of CUDA streams (and their matching cuBLAS/cuDNN
  // handles) to pool_size, clamped to the device's concurrent-grid limit.
  //
  // pool_size <= 1 (or no change) is a no-op — the default stream and
  // default handles already cover the single-stream case.  When shrinking,
  // the trailing handles are released before the stream they are bound to,
  // then the vectors are trimmed.  When growing, the vectors are enlarged
  // first and each new slot gets a fresh stream plus handles bound to it.
  void GpuStreamPool::SetPoolSize(size_t pool_size) {
    if (pool_size > MAX_GRIDS_PER_DEVICE) {
      LOG(INFO) << "Maximum concurrent streams between DEVICE " << Caffe::current_device()
                << " and HOST is " << MAX_GRIDS_PER_DEVICE << ". Device capability "
                << Caffe::device_capability(Caffe::current_device()) << ".";
      pool_size = MAX_GRIDS_PER_DEVICE;
    }

    // TODO: Should resize the corresponding vectors to the pool_size.
    if (pool_size <= 1 or this->handle_num_ == pool_size) {
      return;
    } else if (this->handle_num_ > pool_size) {
      // Shrink: drop dependent handles before their stream, then trim.
      for (auto i = pool_size; i < this->handle_num_; ++i) {
        this->cublas_handles_[i].reset();
#ifdef USE_CUDNN
        this->cudnn_handles_[i].reset();
#endif // USE_CUDNN
        this->streams_[i].reset();
      }

      this->streams_.resize(pool_size);
      this->cublas_handles_.resize(pool_size);
#ifdef USE_CUDNN
      this->cudnn_handles_.resize(pool_size);
#endif // USE_CUDNN
    } else {
      auto original_size = this->streams_.size();

      // Grow: resize the stream/handle vectors up front, then fill the
      // newly added slots.
      this->streams_.resize(pool_size);
      this->cublas_handles_.resize(pool_size);
#ifdef USE_CUDNN
      this->cudnn_handles_.resize(pool_size);
#endif // USE_CUDNN

      // Fix: the original loop used `auto i = 0` (deduced int) compared
      // against the size_t difference (pool_size - original_size) — a
      // signed/unsigned mismatch.  Iterate with a size_t index over the
      // new slot range directly instead.
      for (size_t i = original_size; i < pool_size; ++i) {
        this->streams_[i] = CudaStream::create();
        this->cublas_handles_[i] = boost::make_shared<CuBLASHandle>(this->streams_[i]);
#ifdef USE_CUDNN
        this->cudnn_handles_[i] = boost::make_shared<CuDNNHandle>(this->streams_[i]);
#endif // USE_CUDNN
      }
    }

    this->handle_num_ = pool_size;
  }

  // Switches the pool to another CUDA device.  A no-op when device_id is
  // already the bound device.  Otherwise the default cuBLAS/cuDNN handles
  // are destroyed while the OLD device is still current, any pooled
  // handles/streams are released, and fresh default handles are created on
  // the new device.  Note: pooled streams are NOT recreated here — call
  // SetPoolSize() again afterwards to rebuild them.
  void GpuStreamPool::SetDevice(const int device_id) {
    if (device_id == this->device_id_) {
      return ;
    } else {
      // Destroy before cudaSetDevice: these handles belong to the old device.
      CUBLAS_CHECK(cublasDestroy(default_cublas_handle_));
#ifdef USE_CUDNN
      CUDNN_CHECK(cudnnDestroy(default_cudnn_handle_));
#endif // USE_CUDNN

      if (this->handle_num_ != 0) {
        this->cublas_handles_.clear();
#ifdef USE_CUDNN
        this->cudnn_handles_.clear();
#endif // USE_CUDNN
        this->streams_.clear();
      }
    }

    this->device_id_ = device_id;
    CUDA_CHECK(cudaSetDevice(device_id));
    CUDA_CHECK(cudaGetDeviceProperties(&this->device_prop_, this->device_id_));
    // The pool is now empty; handle/group bookkeeping restarts from zero.
    this->handle_num_ = this->group_ = 0;

    // Recreate the default handles on the newly selected device, bound to
    // the legacy default stream (0).
    CUBLAS_CHECK(cublasCreate(&this->default_cublas_handle_));
    CUBLAS_CHECK(cublasSetStream(this->default_cublas_handle_, 0));
#ifdef USE_CUDNN
    CUDNN_CHECK(cudnnCreate(&this->default_cudnn_handle_));
    CUDNN_CHECK(cudnnSetStream(this->default_cudnn_handle_, 0));
#endif // USE_CUDNN
  }

  // Binds the pool to whichever device is current on the constructing
  // thread, then sets up the default stream and default library handles.
  GpuStreamPool::GpuStreamPool(): group_(0), handle_num_(0) {
    CUDA_CHECK(cudaGetDevice(&this->device_id_));
    CUDA_CHECK(cudaGetDeviceProperties(&this->device_prop_, this->device_id_));

    // The legacy default stream is stream 0.
    this->default_stream_ = static_cast<cudaStream_t>(0);

    // Default cuBLAS handle (and cuDNN handle, when enabled) run on the
    // default stream.
    CUBLAS_CHECK(cublasCreate(&this->default_cublas_handle_));
    CUBLAS_CHECK(cublasSetStream(this->default_cublas_handle_, 0));
#ifdef USE_CUDNN
    CUDNN_CHECK(cudnnCreate(&this->default_cudnn_handle_));
    CUDNN_CHECK(cudnnSetStream(this->default_cudnn_handle_, 0));
#endif // USE_CUDNN
  }

  // Releases every pooled handle and stream, then the default library
  // handles, on the device this pool was bound to.
  GpuStreamPool::~GpuStreamPool() {
    // Make the owning device current before touching its resources.
    CUDA_CHECK(cudaSetDevice(this->device_id_));

    // Pooled resources: handles first, then the streams they were bound to.
    this->cublas_handles_.clear();
#ifdef USE_CUDNN
    this->cudnn_handles_.clear();
#endif // USE_CUDNN
    this->streams_.clear();

    // Default handles were created directly, so destroy them explicitly.
    CUBLAS_CHECK(cublasDestroy(this->default_cublas_handle_));
#ifdef USE_CUDNN
    CUDNN_CHECK(cudnnDestroy(this->default_cudnn_handle_));
#endif // USE_CUDNN

    this->handle_num_ = 0;
  }
} /* namespace caffe */

#endif /* CPU_ONLY */
