#ifndef DESCRIPTOR_H
#define DESCRIPTOR_H

#include <torch/extension.h>
#include <ATen/ATen.h>

#include <memory>
#include <mutex>
#include <stdexcept>
#include <unordered_map>
#include <vector>

#include "common.h"

/// Map an ATen tensor's scalar type to the matching cuDNN data type.
/// @param tensor  tensor whose scalar_type() is inspected (values untouched)
/// @return        the corresponding cudnnDataType_t
/// @throws std::runtime_error for scalar types cuDNN has no mapping for
///         (the original fell off the end of the function here — UB).
/// `inline` because this definition lives in a header (ODR).
inline cudnnDataType_t getcudnndatatype(const at::Tensor& tensor) {
  switch (tensor.scalar_type()) {
    case at::kFloat:
      return CUDNN_DATA_FLOAT;
    case at::kDouble:
      return CUDNN_DATA_DOUBLE;
    case at::kHalf:
      return CUDNN_DATA_HALF;
    default:
      throw std::runtime_error(
          "getcudnndatatype: unsupported tensor scalar type");
  }
}


/// unique_ptr deleter for a heap-allocated cuDNN descriptor slot.
/// Destroys the cuDNN object *x, then frees the T* slot itself — the
/// original called dtor(*x) but never delete'd x, leaking every slot.
template <typename T, cudnnStatus_t (*dtor)(T)>
struct Deleter {
  void operator()(T* x) {
    if (x != nullptr) {
      checkCUDNN(dtor(*x));  // release the cuDNN-side object
      delete x;              // release the heap slot holding the handle value
    }
  }
};


/// CRTP-free base for lazily-created cuDNN descriptors.
/// The descriptor is only allocated on first mut_desc() call; the
/// unique_ptr (with Deleter) destroys it when the wrapper dies.
template <typename T, cudnnStatus_t (*ctor)(T*), cudnnStatus_t (*dtor)(T)>
class base {
  public:
    base() = default;
    /// Read access. Precondition: mut_desc() (typically via a set() call)
    /// ran first — otherwise desc_ is null and this dereferences nullptr.
    T desc() const {return *desc_.get();}
    /// Lazily creates the underlying descriptor, then returns it.
    T mut_desc() {init(); return *desc_.get();}
  protected:
    void init(){
      if (desc_ == nullptr) {
        // Value-initialize so the slot never holds an indeterminate handle.
        T* raw_desc = new T{};
        try {
          checkCUDNN(ctor(raw_desc));
        } catch (...) {
          delete raw_desc;  // original leaked the slot if creation threw
          throw;
        }
        desc_.reset(raw_desc);
      }
    }
  private:
    std::unique_ptr<T, Deleter<T, dtor>> desc_;  // null until first mut_desc()
};


template <typename Handle_t,
          cudnnStatus_t (*ctor)(Handle_t*),
          cudnnStatus_t (*dtor)(Handle_t)>
struct handlepool : std::enable_from_this<handlepool<Handle_t, ctor, dtor>> {
  struct Handle{
    Handle_t handle;
    Handle() = default;
    Handle(bool need) : handle(nullptr) {
      if (need) checkCUDNN(ctor(&handle));
    }
    Handle(const Handle &rhs) = delete;//prevent copy
    Handle(Handle &&rhs) : Handle(){
      std::swap(handle, rhs.handle);
    }
    ~Handle() {if (handle) checkCUDNN(dtor(handle));}
  };

  std::mutex mutex;

  std::unordered_map<int, std::vector<Handle_t>> available_handles;
  std::unordered_map<int, std::vector<Handle>> created_handles;

  class poolwindow {
    public:
      poolwindow(std::shared_ptr<handlepool> s_ptr): w_ptr(s_ptr) {};
      ~poolwindow() { release(); }
      Handle_t get(int device) {
        if (my_handles.find(device) != my_handles.end()) {
          return my_handles[device];
        }
        auto parent = w_ptr.lock();
        if (!parent) throw "Cannot create handle";
        std::lock_guard<std::mutex> lck(parent -> mutex);//多线程
        if (parent -> available_handles.size() > 0) {
          my_handles[device] = parent -> available_handles[device].back();
          parent -> available_handles[device].pop_back();
        } else {
          parent -> created_handles[device].emplace_back(true);
          my_handles[device] = parent -> created_handles[device].back();
        }
        return my_handles[device];
      }

      void release() {
        if (my_handles.size() > 0) {
          auto parent = w_ptr.lock();
          if (!parent) return;
          std::lock_guard<std::mutex> lck(parent -> mutex);
          for (auto &[d, h] : my_handles) {
            parent -> available_handles[d].push_back(h);
          }
        }
      }

    private:
      std::weak_ptr<handlepool> w_ptr;
      unordered_map<int ,Handle_t> my_handles;
  }
  poolwindow* newpoolwindow() {
    return new poolwindow(this -> shared_from_ths());
  }
};


// NOTE(review): an out-of-class definition `handlepool<...>::get(int)` used to
// live here, but `handlepool` declares no such member (the per-window lookup
// is `poolwindow::get`). It also referenced members that do not exist
// (`myhandles`, `create_Handle`) and therefore could never compile; the dead
// definition has been removed.

// RAII wrapper for cudnnTensorDescriptor_t; the underlying descriptor is
// created lazily by base::mut_desc() on the first set() call.
class TensorDescriptor
  : public base<cudnnTensorDescriptor_t,
                cudnnCreateTensorDescriptor,
                cudnnDestroyTensorDescriptor> {
public:
  TensorDescriptor() = default;
  // Populate the descriptor from `tensor`'s dtype, sizes and strides.
  void set(const at::Tensor& tensor);
};

/// Describe `tensor` (dtype + shape + strides) to cuDNN.
/// The original wrote into fixed int[4] arrays, overflowing the stack for
/// tensors with more than 4 dimensions; buffers are now sized by dim.
/// `inline` because this definition lives in a header (ODR).
inline void TensorDescriptor::set(const at::Tensor& tensor) {
  const int dim = static_cast<int>(tensor.dim());
  const cudnnDataType_t datatype = getcudnndatatype(tensor);
  // cuDNN wants int arrays; ATen reports int64_t, hence the casts.
  std::vector<int> tensor_shape(dim);
  std::vector<int> tensor_stride(dim);
  for (int i = 0; i < dim; ++i) {
    tensor_shape[i] = static_cast<int>(tensor.size(i));
    tensor_stride[i] = static_cast<int>(tensor.stride(i));
  }
  // cuDNN validates dim itself; checkCUDNN surfaces any BAD_PARAM status.
  checkCUDNN(cudnnSetTensorNdDescriptor(mut_desc(),
                                        datatype,
                                        dim,
                                        tensor_shape.data(),
                                        tensor_stride.data()));
}


// RAII wrapper for cudnnFilterDescriptor_t; the underlying descriptor is
// created lazily by base::mut_desc() on the first set() call.
class FilterDescriptor
  : public base<cudnnFilterDescriptor_t,
                cudnnCreateFilterDescriptor,
                cudnnDestroyFilterDescriptor> {
  public:
    FilterDescriptor() = default;
    // Populate the descriptor from `tensor`'s dtype and sizes (NCHW layout).
    void set(const at::Tensor& tensor);
};

/// Describe the filter/weight tensor `tensor` to cuDNN (NCHW layout).
/// Fixes two issues from the original: the fixed int[4] buffer overflowed
/// for >4-dim filters, and the int64_t->int narrowing was implicit
/// (now an explicit static_cast, matching TensorDescriptor::set).
/// `inline` because this definition lives in a header (ODR).
inline void FilterDescriptor::set(const at::Tensor& tensor) {
  const cudnnDataType_t datatype = getcudnndatatype(tensor);
  const int dim = static_cast<int>(tensor.dim());
  std::vector<int> filter_shape(dim);
  for (int i = 0; i < dim; ++i) {
    filter_shape[i] = static_cast<int>(tensor.size(i));
  }
  checkCUDNN(cudnnSetFilterNdDescriptor(mut_desc(),
                                        datatype,
                                        CUDNN_TENSOR_NCHW,
                                        dim,
                                        filter_shape.data()));
}


// RAII wrapper for cudnnConvolutionDescriptor_t; the underlying descriptor
// is created lazily by base::mut_desc() on the first set() call.
class ConvolutionDescriptor
  : public base<cudnnConvolutionDescriptor_t,
                cudnnCreateConvolutionDescriptor,
                cudnnDestroyConvolutionDescriptor> {
  public:
    ConvolutionDescriptor() = default;
    // Configure an Nd convolution: per-dimension pad/stride/dilation arrays
    // (each of length dim_convolution) and the compute data type.
    void set(int dim_convolution, int* pad, int* stride, int* dilation, cudnnDataType_t dataType);
};

/// Configure an Nd cross-correlation convolution descriptor.
/// @param dim_convolution  number of spatial dimensions
/// @param pad/stride/dilation  arrays of length dim_convolution
/// @param dataType  compute type passed straight to cuDNN
/// `inline` added: non-inline definitions in a header violate the ODR when
/// the header is included from more than one translation unit.
inline void ConvolutionDescriptor::set(int dim_convolution,
                                       int* pad,
                                       int* stride,
                                       int* dilation,
                                       cudnnDataType_t dataType) {
  // CUDNN_CROSS_CORRELATION is what deep-learning frameworks call
  // "convolution" (no kernel flip).
  checkCUDNN(cudnnSetConvolutionNdDescriptor(mut_desc(),
                                             dim_convolution,
                                             pad,
                                             stride,
                                             dilation,
                                             CUDNN_CROSS_CORRELATION,
                                             dataType));
}

#endif