#include <torch/extension.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/ATen.h>

#include <memory>
#include <stdexcept>
#include <vector>

#include "common.h"
#include "descriptor.h"

cudnnHandle_t getcudnnhandle();

// Cached convolution configuration used to build the cuDNN descriptors.
// The fixed array sizes mean this struct supports at most 4-d (NCHW)
// tensors and 2 spatial dimensions (i.e. 2-d convolutions).
struct convolutionparams {
  cudnnDataType_t dataType;   // cuDNN dtype derived from the input tensor
  int input_shape[4]; // input sizes, up to 4-d (NCHW)
  int weight_shape[4]; // filter sizes, up to 4-d
  int padding[2];      // one entry per spatial dimension
  int stride[2];       // one entry per spatial dimension
  int dilation[2];     // one entry per spatial dimension
};

// Scaling factor (alpha/beta) passed to cuDNN calls. The value is stored
// as float when the tensor data type is half or float, and as double
// otherwise, matching the representation cuDNN reads for each data type
// (see the cuDNN scaling-parameter documentation).
union Constant {
  float f;
  double d;
  Constant(cudnnDataType_t dataType, double value) {
    const bool wants_float =
        dataType == CUDNN_DATA_HALF || dataType == CUDNN_DATA_FLOAT;
    if (wants_float) {
      f = static_cast<float>(value);
    } else {
      d = value;
    }
  }
};

// Bundles everything a cuDNN convolution call needs: the cached shape
// parameters, the cuDNN handle, and the tensor/filter/convolution
// descriptors. The tensors are held by reference, so callers must keep
// them alive for the lifetime of this struct (all uses in this file are
// function-local, which satisfies that).
struct convolutionargs {
  convolutionparams params;
  cudnnHandle_t handle;
  TensorDescriptor xdesc, ydesc;
  FilterDescriptor wdesc;
  ConvolutionDescriptor cdesc;
  // BUG FIX: the original `const at::Tensor &input, output, weight;`
  // applied `&` only to `input`, silently declaring `output` and `weight`
  // as const *copies*. Declare each reference explicitly so all three
  // members alias the caller's tensors.
  const at::Tensor& input;
  const at::Tensor& output;
  const at::Tensor& weight;

  convolutionargs(const at::Tensor& input,
                  const at::Tensor& output,
                  const at::Tensor& weight)
      : input(input), output(output), weight(weight) {}
};

at::Tensor conv_forward(const at::Tensor& input,
                        const at::Tensor& weight,
                        std::vector<int> padding, 
                        std::vector<int> stride,
                        std::vector<int> dilation);

void setconvolutionparams(convolutionparams* params,
                          const at::Tensor& input,
                          const at::Tensor& weight,
                          std::vector<int> padding, 
                          std::vector<int> stride,
                          std::vector<int> dilation);

// RAII owner of a device scratch buffer for cuDNN algorithms.
// Movable but not copyable; the destructor frees the allocation.
struct workspace {
  workspace(size_t size) : size(size), data(nullptr) {
    // Some algorithms need no workspace; skip a pointless 0-byte cudaMalloc.
    if (size > 0) {
      CUDA_CHECK(cudaMalloc((void **) &data, size));
    }
  }
  workspace(const workspace&) = delete;
  workspace& operator=(const workspace&) = delete;
  // BUG FIX: the defaulted move operations copied `data` without clearing
  // the source, so both objects would cudaFree the same pointer (double
  // free). Transfer ownership explicitly instead.
  workspace(workspace&& other) noexcept : size(other.size), data(other.data) {
    other.data = nullptr;
    other.size = 0;
  }
  workspace& operator=(workspace&& other) noexcept {
    if (this != &other) {
      if (data) {
        CUDA_CHECK(cudaFree(data));
      }
      size = other.size;
      data = other.data;
      other.data = nullptr;
      other.size = 0;
    }
    return *this;
  }
  ~workspace() {
    if (data) {
      CUDA_CHECK(cudaFree(data));
    }
  }
  size_t size;  // allocation size in bytes
  void* data;   // device pointer (nullptr when size == 0)
};

// Filters the perf results returned by a cudnnGet*Algorithm_v7 query down
// to the entries whose status is CUDNN_STATUS_SUCCESS, preserving order
// (cuDNN returns them fastest-first).
//
// perfresults: array of n_algo performance entries.
// Returns the usable entries.
// Throws std::runtime_error when no algorithm is usable. (BUG FIX: the
// original threw a raw string literal — throwing a `const char*` bypasses
// std::exception-based handlers, so torch/pybind11 cannot translate it
// into a Python error and the process would terminate instead.)
template<typename perf_t>
std::vector<perf_t> getvalidalgorithms(perf_t* perfresults, int n_algo) {
  std::vector<perf_t> result;
  result.reserve(n_algo);
  for (int i = 0; i != n_algo; ++i) {
    if (perfresults[i].status == CUDNN_STATUS_SUCCESS) {
      result.push_back(perfresults[i]);
    }
  }
  if (result.empty()) {
    throw std::runtime_error("no valid algorithms available in cudnn");
  }
  return result;
}

// Primary template, intentionally empty: only the three specializations
// below (forward, backward-data, backward-filter perf types) are valid.
template<typename perf_t>
struct algorithm_search {
};


template<>
struct algorithm_search<cudnnConvolutionFwdAlgoPerf_t> {
  using perf_t = cudnnConvolutionFwdAlgoPerf_t;
  using algo_t = cudnnConvolutionFwdAlgo_t;
  
  static constexpr auto default_algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
  std::vector<perf_t> findAlgorithms(const convolutionargs &args) {
    int num_algos = CUDNN_CONVOLUTION_FWD_ALGO_COUNT;
    std::unique_ptr<perf_t[]> perf_results(new perf_t[num_algos]);
    int perf_count;
    checkCUDNN(
        cudnnGetConvolutionForwardAlgorithm_v7(
            args.handle,
            args.xdesc.desc(),
            args.wdesc.desc(),
            args.cdesc.desc(),
            args.ydesc.desc(),
            num_algos,
            &perf_count,
            perf_results.get())
    );
    return getvalidalgorithms<perf_t>(perf_results.get(), perf_count);
  }
};


template<>
struct algorithm_search<cudnnConvolutionBwdDataAlgoPerf_t>
{
  using perf_t = cudnnConvolutionBwdDataAlgoPerf_t;
  using algo_t = cudnnConvolutionBwdDataAlgo_t;
  
  std::vector<perf_t> findAlgorithms(const convolutionargs& args) {
    int num_algos = CUDNN_CONVOLUTION_BWD_DATA_ALGO_COUNT;
    std::unique_ptr<perf_t[]> perf_results(new perf_t[num_algos]);
    int perf_count;
    checkCUDNN(
        cudnnGetConvolutionBackwardDataAlgorithm_v7(
            args.handle,
            args.wdesc.desc(),
            args.ydesc.desc(),
            args.cdesc.desc(),
            args.xdesc.desc(),
            num_algos,
            &perf_count,
            perf_results.get())
    );
    return getvalidalgorithms<perf_t>(perf_results.get(), perf_count);
  }
};


template<>
struct algorithm_search<cudnnConvolutionBwdFilterAlgoPerf_t>
{
  using perf_t = cudnnConvolutionBwdFilterAlgoPerf_t;
  using algo_t = cudnnConvolutionBwdFilterAlgo_t;
  
  std::vector<perf_t> findAlgorithms(const convolutionargs& args) {
    int num_algos = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_COUNT;
    std::unique_ptr<perf_t[]> perf_results(new perf_t[num_algos]);
    int perf_count;
    checkCUDNN(
        cudnnGetConvolutionBackwardFilterAlgorithm_v7(
            args.handle,
            args.xdesc.desc(),
            args.ydesc.desc(),
            args.cdesc.desc(),
            args.wdesc.desc(),
            num_algos,
            &perf_count,
            perf_results.get())
    );
    return getvalidalgorithms<perf_t>(perf_results.get(), perf_count);
  }
};

// Fills `params` from the tensors and the conv hyper-parameters.
//
// The arrays inside convolutionparams are fixed at 4 shape entries and
// 2 spatial entries; the original loops wrote into them unchecked, which
// is a buffer overflow for >4-d tensors or >2 padding values. Guard the
// bounds explicitly (AT_ASSERT matches the style used elsewhere here).
void setconvolutionparams(convolutionparams* params,
                          const at::Tensor& input,
                          const at::Tensor& weight,
                          std::vector<int> padding, 
                          std::vector<int> stride,
                          std::vector<int> dilation) {
  AT_ASSERT(input.dim() <= 4);
  AT_ASSERT(weight.dim() == input.dim());
  AT_ASSERT(padding.size() <= 2);
  // The loop below indexes stride/dilation with the padding index.
  AT_ASSERT(stride.size() >= padding.size());
  AT_ASSERT(dilation.size() >= padding.size());
  params->dataType = getcudnndatatype(input);
  for (decltype(input.dim()) i = 0; i != input.dim(); ++i) {
    params->input_shape[i] = input.size(i);
    params->weight_shape[i] = weight.size(i);
  }
  for (decltype(padding.size()) i = 0; i != padding.size(); ++i) {
    params->padding[i] = padding[i];
    params->stride[i] = stride[i];
    params->dilation[i] = dilation[i];
  }
}

// Computes the output shape of a convolution:
//   out[d] = (in[d] + 2*pad[d-2] - effective_kernel) / stride[d-2] + 1
// where effective_kernel = dilation*(k-1)+1.
// output[0] is the batch size, output[1] the number of output channels
// (weight.size(0)). `dilation` may be empty, meaning 1 everywhere.
std::vector<int64_t> con_output_size(const at::Tensor& input,
                                     const at::Tensor& weight,
                                     std::vector<int> padding, 
                                     std::vector<int> stride,
                                     std::vector<int> dilation) {
  AT_ASSERT(input.dim() > 2);
  auto dim = input.dim();
  // padding/stride need one entry per spatial dim; the original indexed
  // them unchecked (out-of-bounds UB for short vectors). dilation already
  // had an emptiness guard, so only validate it when supplied.
  AT_ASSERT(padding.size() >= static_cast<size_t>(dim - 2));
  AT_ASSERT(stride.size() >= static_cast<size_t>(dim - 2));
  bool has_dilation = dilation.size() > 0;
  if (has_dilation) {
    AT_ASSERT(dilation.size() >= static_cast<size_t>(dim - 2));
  }
  std::vector<int64_t> output_size(dim);
  output_size[0] = input.size(0);
  output_size[1] = weight.size(0);
  for (decltype(dim) d = 2; d < dim; ++d) {
      auto dilation_ = has_dilation ? dilation[d-2] : 1;
      auto kernel = dilation_ * (weight.size(d) - 1) + 1;
      output_size[d] = (input.size(d) + 2 * padding[d-2] - kernel) / stride[d-2] + 1;
  }
  return output_size;
}

// Returns a cuDNN handle for the current device, bound to the current
// ATen CUDA stream. Handles come from a process-wide pool; each thread
// holds its own window into the pool so lookups reuse the same handle.
cudnnHandle_t getcudnnhandle() {
  using handletype = handlepool<cudnnHandle_t, cudnnCreate, cudnnDestroy>;
  // The shared pool must outlive every thread-local window below.
  static auto pool = std::make_shared<handletype>();
  thread_local std::unique_ptr<handletype::poolwindow> window(pool->newhandlewindow());
  int current_device = -1;
  CUDA_CHECK(cudaGetDevice(&current_device));
  cudnnHandle_t handle = window->get(current_device);
  // Re-bind the stream on every call so later cuDNN work runs on whatever
  // stream the caller is currently using.
  checkCUDNN(cudnnSetStream(handle, at::cuda::getCurrentCUDAStream()));
  return handle;
}

// Runs a cuDNN forward convolution: returns conv(input, weight) as a new
// tensor. padding/stride hold one value per spatial dimension; dilation
// may be empty (treated as 1 by con_output_size).
at::Tensor conv_forward(const at::Tensor& input,
                        const at::Tensor& weight,
                        std::vector<int> padding, 
                        std::vector<int> stride,
                        std::vector<int> dilation) {
  auto out_shape = con_output_size(input, weight, padding, stride, dilation);
  auto output = at::empty(out_shape, input.options());
  auto dtype = getcudnndatatype(input);

  convolutionargs args{input, output, weight};
  setconvolutionparams(&args.params, input, weight, padding, stride, dilation);
  args.handle = getcudnnhandle();
  args.xdesc.set(input);
  args.wdesc.set(weight);
  args.ydesc.set(output);
  args.cdesc.set(input.dim() - 2,
                 args.params.padding,
                 args.params.stride,
                 args.params.dilation,
                 dtype);

  // Entry 0 of the returned list is the fastest algorithm that succeeded;
  // size the scratch buffer for it.
  algorithm_search<cudnnConvolutionFwdAlgoPerf_t> searcher;
  auto algos = searcher.findAlgorithms(args);
  const auto& best = algos[0];
  workspace scratch(best.memory);

  Constant alpha(dtype, 1);
  Constant beta(dtype, 0);
  checkCUDNN(cudnnConvolutionForward(args.handle,
                                     &alpha,
                                     args.xdesc.desc(), input.data_ptr(),
                                     args.wdesc.desc(), weight.data_ptr(),
                                     args.cdesc.desc(), best.algo,
                                     scratch.data, best.memory,
                                     &beta,
                                     args.ydesc.desc(), output.data_ptr()));
  return output;
}

// Computes the gradient of a convolution w.r.t. its input
// (cuDNN "backward data"): returns grad_input of shape `input_size`.
at::Tensor conv_backward_input(std::vector<int> input_size,
                               const at::Tensor& grad_output,
                               const at::Tensor& weight,
                               std::vector<int> padding,
                               std::vector<int> stride,
                               std::vector<int> dilation) {
  auto grad_input = at::empty(input_size, grad_output.options());
  auto datatype = getcudnndatatype(grad_output);
  convolutionargs args {grad_input, grad_output, weight};
  setconvolutionparams(&args.params, grad_input, weight, padding, stride, dilation);
  args.handle = getcudnnhandle();
  args.xdesc.set(grad_input);
  args.wdesc.set(weight);
  args.ydesc.set(grad_output);
  // BUG FIX: the original read `input.dim()` here, but no `input` exists
  // in this function (compile error); the spatial rank comes from
  // grad_input, which has the same shape as the original input.
  args.cdesc.set(grad_input.dim()-2,
                 args.params.padding,
                 args.params.stride,
                 args.params.dilation,
                 datatype);
  algorithm_search<cudnnConvolutionBwdDataAlgoPerf_t> algo_search;
  auto perf_results = algo_search.findAlgorithms(args);
  // Scratch buffer sized for the fastest valid algorithm (entry 0).
  workspace Ws(perf_results[0].memory);

  Constant one(datatype, 1);
  Constant zero(datatype, 0);
  checkCUDNN(
      cudnnConvolutionBackwardData(
          args.handle,
          &one,
          args.wdesc.desc(), 
          weight.data_ptr(),
          args.ydesc.desc(),
          grad_output.data_ptr(),
          args.cdesc.desc(),
          perf_results[0].algo,
          Ws.data,
          perf_results[0].memory,
          &zero,
          args.xdesc.desc(),
          grad_input.data_ptr())
  );
  return grad_input;
}

// Computes the gradient of a convolution w.r.t. its filter
// (cuDNN "backward filter"): returns grad_weight of shape `weight_size`.
// BUG FIX: the original parameter list ended with a stray trailing comma,
// which is a syntax error in C++.
at::Tensor conv_backward_filter(std::vector<int> weight_size,
                                const at::Tensor& grad_output,
                                const at::Tensor& input, 
                                std::vector<int> padding, 
                                std::vector<int> stride,
                                std::vector<int> dilation) {
  auto grad_weight = at::empty(weight_size, grad_output.options());
  auto datatype = getcudnndatatype(grad_output);
  convolutionargs args {input, grad_output, grad_weight};
  // grad_weight is only read for its shape here (same as the weight's).
  setconvolutionparams(&args.params, input, grad_weight, padding, stride, dilation);
  args.handle = getcudnnhandle();
  args.xdesc.set(input);
  args.wdesc.set(grad_weight);
  args.ydesc.set(grad_output);
  args.cdesc.set(input.dim()-2,
                 args.params.padding,
                 args.params.stride,
                 args.params.dilation,
                 datatype);
  algorithm_search<cudnnConvolutionBwdFilterAlgoPerf_t> algo_search;
  auto perf_results = algo_search.findAlgorithms(args);
  // Scratch buffer sized for the fastest valid algorithm (entry 0).
  workspace Ws(perf_results[0].memory);

  Constant one(datatype, 1);
  Constant zero(datatype, 0);
  checkCUDNN(
      cudnnConvolutionBackwardFilter(
          args.handle, &one,
          args.xdesc.desc(),
          input.data_ptr(),
          args.ydesc.desc(),
          grad_output.data_ptr(),
          args.cdesc.desc(),
          perf_results[0].algo,
          Ws.data,
          perf_results[0].memory,
          &zero,
          args.wdesc.desc(),
          grad_weight.data_ptr())
  );
  return grad_weight;
}

// Runs cuDNN spatial batch normalization in training mode. Updates
// running_mean/running_var in place (blend factor `exponential_average`)
// and returns {output, save_mean, save_var}, where the saved statistics
// are the per-batch mean/inv-variance cuDNN produces for the backward pass.
// NOTE(review): `training` is currently unused; inference would require
// cudnnBatchNormalizationForwardInference — confirm intended behavior.
// NOTE(review): save_mean/save_var inherit input.options(); for fp16
// inputs cuDNN expects float scale/bias/stats — verify before using fp16.
std::vector<at::Tensor> cudnn_batchnorm_training(const at::Tensor& input, 
                                                 const at::Tensor& scale,
                                                 const at::Tensor& bias,
                                                 const at::Tensor& running_mean,
                                                 const at::Tensor& running_var,
                                                 double exponential_average,
                                                 double epsilon,
                                                 bool training=true) {
  auto handle = getcudnnhandle();
  auto datatype = getcudnndatatype(input);
  cudnnBatchNormMode_t batchnormmode = CUDNN_BATCHNORM_SPATIAL;
  TensorDescriptor xdesc, ydesc, wdesc;

  auto output = at::empty_like(input, input.options());
  // One saved mean/variance entry per channel (dim 1).
  auto save_mean = at::empty({input.size(1)}, input.options());
  auto save_var = at::empty({input.size(1)}, input.options());

  xdesc.set(input);
  ydesc.set(output);
  wdesc.set(scale);

  Constant one(datatype, 1);
  Constant zero(datatype, 0);

  checkCUDNN(
      cudnnBatchNormalizationForwardTraining(
           handle,
           batchnormmode,
           &one,
           &zero, 
           xdesc.desc(),
           input.data_ptr(),
           ydesc.desc(),
           output.data_ptr(),
           // BUG FIX: the descriptor object itself was passed instead of
           // the underlying cudnnTensorDescriptor_t.
           wdesc.desc(),
           scale.data_ptr(),
           bias.data_ptr(),
           exponential_average,
           running_mean.data_ptr(),
           // BUG FIX: was `runing_var`, an undeclared identifier.
           running_var.data_ptr(),
           epsilon,
           save_mean.data_ptr(),
           save_var.data_ptr())
  );
  return {output, save_mean, save_var};
}

/*
// Draft (never compiled): backward pass for cudnn_batchnorm_training.
// Typos in the original draft are fixed below (getcudnnhandle,
// cudnnTensorDescriptor_t, datatype, cudnnBatchNormalizationBackward).
std::vector<at::Tensor> cudnn_batchnorm_backward(const at::Tensor& grad_output,
                                                 const at::Tensor& input,
                                                 const at::Tensor& weight,
                                                 const at::Tensor& running_mean,
                                                 const at::Tensor& running_var,
                                                 const at::Tensor& save_mean,
                                                 const at::Tensor& save_var,
                                                 double epsilon) {
  auto handle = getcudnnhandle();
  auto datatype = getcudnndatatype(input);
  cudnnBatchNormMode_t batchnormmode = CUDNN_BATCHNORM_SPATIAL;
  cudnnTensorDescriptor_t bnscalebiasmeanvardesc;

  TensorDescriptor xdesc, ydesc, wdesc;

  auto grad_input = at::empty(input.sizes(), input.options());
  auto grad_weight = at::empty(weight.sizes(), weight.options());
  auto grad_bias = at::empty(weight.sizes(), weight.options());

  xdesc.set(input);
  ydesc.set(grad_output);
  wdesc.set(weight);

  Constant one(datatype, 1);
  Constant zero(datatype, 0);

  // TODO: call cudnnBatchNormalizationBackward with the full argument list
  // and return {grad_input, grad_weight, grad_bias}.

}
*/

// Python bindings for the convolution entry points.
// (cudnn_batchnorm_training is not exposed here yet.)
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("convforward", &conv_forward, "conv forward (cudnn)");
  m.def("convbackwardinput", &conv_backward_input, "conv backward input (cudnn)");
  // BUG FIX: the original line was missing its terminating semicolon.
  m.def("convbackwardfilter", &conv_backward_filter, "conv backward filter (cudnn)");
}





