#ifndef CUDNNCU
#define CUDNNCU

#include <cuda.h>
#include <cudnn.h>
#include <cstdlib>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <memory>
#include <vector>
#include "utils.cuh"

#pragma once



 #define cudaCheck(f) {                                                         \
  cudaError_t status = (f);                                                    \
  if (status != cudaSuccess) {                                                 \
    std::cout << #f ": " << status << std::endl;                               \
    std::exit(1);                                                              \
  }                                                                            \
}

// Throw on any cuDNN error status. Unlike cudaCheck (which exits), this
// throws MyException (declared in utils.cuh) so callers can recover.
// do/while(0) makes it statement-safe, cudnnGetErrorString replaces the
// raw status number, and the stray trailing backslash that used to
// continue the macro onto the next (blank) line is removed.
#define cudnnCheck(f) do {                                                     \
  cudnnStatus_t status = (f);                                                  \
  if (status != CUDNN_STATUS_SUCCESS) {                                        \
    std::cerr << #f ": " << cudnnGetErrorString(status) << std::endl;          \
    throw MyException();                                                       \
  }                                                                            \
} while (0)


/*
    Allocate `s` bytes of device memory and hand ownership to a
    std::shared_ptr whose custom deleter calls cudaFree, so the buffer
    is released automatically when the last reference drops.
    A failed cudaMalloc is reported (and the process exits) via cudaCheck.
*/
template <typename T = void>
inline std::shared_ptr<T> MemAlloc(std::size_t s){
  T* raw = nullptr;
  cudaCheck(cudaMalloc(&raw, s));
  auto release = [](T* p) { cudaFree(p); };
  return std::shared_ptr<T>(raw, release);
}
/*
    Kernel that copies N values from `host` into `device`, one element
    per thread — a device-side analogue of
    'cudaMemcpy(dev_b, b, N * sizeof(double), cudaMemcpyHostToDevice);'.
    NOTE(review): the `host` pointer is dereferenced on the GPU, so it
    must actually be device-accessible (pinned/managed memory) — confirm
    at the call sites (currently all commented out).
*/
__global__
void InitData(double* device, double* host ,int N) {
  const int idx = threadIdx.x + blockDim.x * blockIdx.x;
  if (idx >= N) return;
  device[idx] = host[idx];
}

/*
    Full 1-D convolution of A (length N_A) with B (length N_B) via
    cuDNN's 2-D convolution path (all other extents set to 1), using the
    FFT-tiling forward algorithm. Uses CUDNN_CONVOLUTION mode, i.e. the
    filter is flipped (true convolution, not cross-correlation).

    Params:
    ~ std::size_t sign : algorithm selector — currently unused, kept for
                         interface compatibility
    ~ int N_A          : number of elements in input vector A
    ~ int N_B          : number of elements in filter vector B
    ~ Real* h_output_X : host buffer holding A (read)
    ~ Real* h_output_W : host buffer holding B (read)
    ~ Real* h_output_Y : host buffer receiving the N_A + N_B - 1 outputs
*/
void cudnn_1d( std::size_t sign,const int N_A, const int N_B, Real* h_output_X,Real* h_output_W,Real* h_output_Y)
{
  (void)sign;  // algorithm selector is not consulted yet

  // 1-D signals expressed as 4-D NCHW tensors with N = C = H = 1.
  const int xW = N_A;
  const int xH = 1;
  const int xC = 1;
  const int xN = 1;

  const int wW = N_B;
  const int wH = 1;
  const int wC = 1;
  const int wK = 1;

  // Padding the width by N_B - 1 yields the "full" convolution,
  // so the output has N_A + N_B - 1 elements.
  const int padW = N_B - 1;
  const int padH = 0;

  const int strideW = 1;
  const int strideH = 1;

  const int dilationW = 1;
  const int dilationH = 1;

  // Initialization (creates context)
  cudnnHandle_t ctx;
  cudnnCheck(cudnnCreate(&ctx));

  // Input descriptor
  cudnnTensorDescriptor_t xDesc;
  cudnnCheck(cudnnCreateTensorDescriptor(&xDesc));
  cudnnCheck(cudnnSetTensor4dDescriptor(xDesc, CUDNN_TENSOR_NCHW,
    CUDNN_DATA_FLOAT, xN, xC, xH, xW));

  // Filter descriptor
  cudnnFilterDescriptor_t wDesc;
  cudnnCheck(cudnnCreateFilterDescriptor(&wDesc));
  cudnnCheck(cudnnSetFilter4dDescriptor(wDesc, CUDNN_DATA_FLOAT,
    CUDNN_TENSOR_NCHW, wK, wC, wH, wW));

  // Convolution descriptor
  cudnnConvolutionDescriptor_t cDesc;
  cudnnCheck(cudnnCreateConvolutionDescriptor(&cDesc));
  cudnnCheck(cudnnSetConvolution2dDescriptor(cDesc, padH, padW,
    strideH, strideW, dilationH, dilationW, CUDNN_CONVOLUTION,
    CUDNN_DATA_FLOAT));

  // Let cuDNN derive the output shape from the descriptors above.
  int yN, yC, yH, yW;
  cudnnCheck(cudnnGetConvolution2dForwardOutputDim(cDesc, xDesc, wDesc,
    &yN, &yC, &yH, &yW));

  std::cout<< yN << std::endl;
  std::cout<< yC << std::endl;
  std::cout<< yH << std::endl;
  std::cout<< yW << std::endl;

  cudnnTensorDescriptor_t yDesc;
  cudnnCheck(cudnnCreateTensorDescriptor(&yDesc));
  cudnnCheck(cudnnSetTensor4dDescriptor(yDesc, CUDNN_TENSOR_NCHW,
    CUDNN_DATA_FLOAT, yN, yC, yH, yW));

  // Fixed algorithm choice; FFT tiling suits long 1-D filters.
  cudnnConvolutionFwdAlgo_t fwdAlgo = CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING;

  // Workspace size for the chosen algorithm.
  std::size_t fwdWSsize;
  cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(ctx, xDesc, wDesc, cDesc,
    yDesc, fwdAlgo, &fwdWSsize));

  typedef float value_type;
  const std::size_t value_size = sizeof(value_type);
  auto X = MemAlloc<value_type>(xN * xC * xH * xW * value_size);
  auto W = MemAlloc<value_type>(wK * wC * wH * wW * value_size);
  auto Y = MemAlloc<value_type>(yN * yC * yH * yW * value_size);
  auto fwdWS = MemAlloc(fwdWSsize);

  // Upload input and filter (error-checked; the original ignored failures).
  cudaCheck(cudaMemcpy(X.get(), h_output_X, xN * xC * xH * xW * value_size, cudaMemcpyHostToDevice));
  cudaCheck(cudaMemcpy(W.get(), h_output_W, wK * wC * wH * wW * value_size, cudaMemcpyHostToDevice));

  value_type alpha = 1.f;
  value_type beta = 0.f;
  cudnnCheck(cudnnConvolutionForward(ctx, &alpha, xDesc, X.get(), wDesc,
    W.get(), cDesc, fwdAlgo, fwdWS.get(), fwdWSsize, &beta, yDesc, Y.get()));

  // Copy everything back for the debug dump below. std::vector replaces
  // the previous raw new[] arrays, which leaked and over-allocated by a
  // factor of sizeof(float).
  std::vector<value_type> tmp_h_output_X(xN * xC * xH * xW);
  std::vector<value_type> tmp_h_output_W(wK * wC * wH * wW);
  cudaCheck(cudaMemcpy(tmp_h_output_X.data(), X.get(), xN * xC * xH * xW * value_size, cudaMemcpyDeviceToHost));
  cudaCheck(cudaMemcpy(tmp_h_output_W.data(), W.get(), wK * wC * wH * wW * value_size, cudaMemcpyDeviceToHost));
  cudaCheck(cudaMemcpy(h_output_Y, Y.get(), yN * yC * yH * yW * value_size, cudaMemcpyDeviceToHost));

  for (int i = 0 ; i < xN * xC * xH * xW ; ++i){
    std::cout<<"element in X is : "<< tmp_h_output_X[i] <<std::endl;
  }
  for (int i = 0 ; i < wK * wC * wH * wW ; ++i){
    std::cout<<"element in W is : "<< tmp_h_output_W[i] <<std::endl;
  }
  for (int i = 0 ; i < yN * yC * yH * yW ; ++i){
    std::cout<<"element in Y is : "<< h_output_Y[i] <<std::endl;
  }

  cudnnCheck(cudnnDestroyTensorDescriptor(xDesc));
  cudnnCheck(cudnnDestroyTensorDescriptor(yDesc));
  cudnnCheck(cudnnDestroyFilterDescriptor(wDesc));
  cudnnCheck(cudnnDestroyConvolutionDescriptor(cDesc));
  cudnnCheck(cudnnDestroy(ctx));
}


/*
    Forward convolution through cuDNN with FFT-based algorithms.

    Params:
    ~ input_X / in_*  : host input tensor and its N,C,D,H,W extents
    ~ input_W / k_*   : host filter tensor and its K,C,D,H,W extents
    ~ p_d,p_h,p_w     : padding (depth pad only used by the 5-D path)
    ~ s_h,s_w         : strides (used by the 4-D path; the 5-D path
                        currently hard-codes unit strides — see below)
    ~ d_h,d_w         : dilations
    ~ output_Y        : host buffer receiving the result (written)
    ~ datatype        : 1 = float, 2 = double
    ~ dataformat      : 1 = NCHW (4-D), 2 = NCDHW (5-D)
    ~ fft_algorithm   : 1 = FFT, 2 = FFT_TILING
*/
void cudnn_forward(void* input_X, int in_n, int in_c, int in_d, int in_h, int in_w,
                  void* input_W, int k_n, int k_c, int k_d, int k_h, int k_w,
                  int p_d ,int p_h, int p_w,
                  int s_h, int s_w,
                  int d_h, int d_w,
                  void* output_Y,int datatype, int dataformat,int fft_algorithm)
{
  const int xW = in_w;
  const int xH = in_h;
  const int xC = in_c;
  const int xN = in_n;
  const int xD = in_d;

  const int wW = k_w;
  const int wH = k_h;
  const int wC = k_c;
  const int wK = k_n;
  const int wD = k_d;

  const int padW = p_w;
  const int padH = p_h;

  const int strideW = s_w;
  const int strideH = s_h;

  const int dilationW = d_w;
  const int dilationH = d_h;

  // Only NCHW is used; NCDHW input goes through Nd descriptors whose
  // layout is fixed by explicit strides instead. (The original ternary
  // produced NCHW on both branches.)
  const cudnnTensorFormat_t format_t = CUDNN_TENSOR_NCHW;
  const cudnnDataType_t datatype_t = (datatype == 1) ? CUDNN_DATA_FLOAT : CUDNN_DATA_DOUBLE;
  // Element size must track the selected datatype; the original always
  // used sizeof(float), under-allocating every buffer for double data.
  const std::size_t value_size = (datatype == 1) ? sizeof(float) : sizeof(double);

  // Initialization (creates context)
  cudnnHandle_t ctx;
  cudnnCheck(cudnnCreate(&ctx));

  // Input descriptor
  cudnnTensorDescriptor_t xDesc;
  cudnnCheck(cudnnCreateTensorDescriptor(&xDesc));
  if (dataformat == 1){
    cudnnCheck(cudnnSetTensor4dDescriptor(xDesc, format_t,
      datatype_t, xN, xC, xH, xW));
  } else {
    int dimA[5] = {xN,xC,xD,xH,xW};
    int strideA[5] = {xC*xD*xH*xW,xD*xH*xW,xH*xW,xW,1};  // dense NCDHW
    cudnnCheck(cudnnSetTensorNdDescriptor(xDesc,datatype_t,5,dimA,strideA));
  }

  // Filter descriptor
  cudnnFilterDescriptor_t wDesc;
  cudnnCheck(cudnnCreateFilterDescriptor(&wDesc));
  if (dataformat == 1){
    cudnnCheck(cudnnSetFilter4dDescriptor(wDesc,datatype_t,format_t, wK, wC, wH, wW));
  } else {
    int filterDimA[5] = {wK,wC,wD,wH,wW};
    cudnnCheck(cudnnSetFilterNdDescriptor(wDesc,datatype_t,format_t,5,filterDimA));
  }

  // Convolution descriptor (cross-correlation: filter is not flipped)
  cudnnConvolutionDescriptor_t cDesc;
  cudnnCheck(cudnnCreateConvolutionDescriptor(&cDesc));
  if (dataformat == 1){
    cudnnCheck(cudnnSetConvolution2dDescriptor(cDesc, padH, padW,
      strideH, strideW, dilationH, dilationW, CUDNN_CROSS_CORRELATION,
      datatype_t));
  } else {
    int padA[3] = {p_d,p_h,p_w};
    // NOTE(review): the 5-D path hard-codes unit strides and unit depth
    // dilation, ignoring s_h/s_w — confirm this is intentional.
    int filterStrideA[3] = {1,1,1};
    int dilationA[3] = {1,dilationH,dilationW};
    cudnnCheck(cudnnSetConvolutionNdDescriptor(cDesc,3,padA,filterStrideA,dilationA,CUDNN_CROSS_CORRELATION,datatype_t));
  }

  // Output shape derived by cuDNN
  int yN, yC, yH, yW, yD;
  int tensorOuputDimA[5] = {};
  if (dataformat == 1){
    cudnnCheck(cudnnGetConvolution2dForwardOutputDim(cDesc, xDesc, wDesc,
      &yN, &yC, &yH, &yW));
    yD = 1;
  } else {
    cudnnCheck(cudnnGetConvolutionNdForwardOutputDim(cDesc, xDesc, wDesc,
      5,tensorOuputDimA));
    for (int i = 0 ; i < 5 ; i++){
      std::cout <<tensorOuputDimA[i] << std::endl;
    }
    yN = tensorOuputDimA[0];
    yC = tensorOuputDimA[1];
    yD = tensorOuputDimA[2];
    yH = tensorOuputDimA[3];
    yW = tensorOuputDimA[4];
  }

  // Output descriptor
  cudnnTensorDescriptor_t yDesc;
  cudnnCheck(cudnnCreateTensorDescriptor(&yDesc));
  if (dataformat == 1){
    cudnnCheck(cudnnSetTensor4dDescriptor(yDesc, format_t,
      datatype_t, yN, yC, yH, yW));
  } else {
    int strideA[5] = {yC*yD*yH*yW,yD*yH*yW,yH*yW,yW,1};
    cudnnCheck(cudnnSetTensorNdDescriptor(yDesc,datatype_t,5,tensorOuputDimA,strideA));
  }

  // Algorithm selection
  cudnnConvolutionFwdAlgo_t fwdAlgo =
      (fft_algorithm == 1) ? CUDNN_CONVOLUTION_FWD_ALGO_FFT
                           : CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING;

  // Workspace size for the chosen algorithm
  std::size_t fwdWSsize;
  cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(ctx, xDesc, wDesc, cDesc,
    yDesc, fwdAlgo, &fwdWSsize));

  // Device buffers (byte counts use std::size_t to avoid int overflow
  // on large tensors)
  const std::size_t xBytes = (std::size_t)xN * xC * xD * xH * xW * value_size;
  const std::size_t wBytes = (std::size_t)wK * wC * wD * wH * wW * value_size;
  const std::size_t yBytes = (std::size_t)yN * yC * yD * yH * yW * value_size;
  auto X = MemAlloc(xBytes);
  auto W = MemAlloc(wBytes);
  auto Y = MemAlloc(yBytes);
  auto fwdWS = MemAlloc(fwdWSsize);

  cudaCheck(cudaMemcpy(X.get(), input_X, xBytes, cudaMemcpyHostToDevice));
  cudaCheck(cudaMemcpy(W.get(), input_W, wBytes, cudaMemcpyHostToDevice));

  // cuDNN requires the scaling factors to have the tensor datatype:
  // float* for float data, double* for double data. The original always
  // passed float, which is wrong for datatype == 2.
  float alphaF = 1.f, betaF = 0.f;
  double alphaD = 1.0, betaD = 0.0;
  const void* alpha = (datatype == 1) ? static_cast<const void*>(&alphaF)
                                      : static_cast<const void*>(&alphaD);
  const void* beta  = (datatype == 1) ? static_cast<const void*>(&betaF)
                                      : static_cast<const void*>(&betaD);

  clock_t startTime,endTime;
  startTime = clock();
  cudnnCheck(cudnnConvolutionForward(ctx, alpha, xDesc, X.get(), wDesc,
    W.get(), cDesc, fwdAlgo, fwdWS.get(), fwdWSsize, beta, yDesc, Y.get()));
  // The call may return before the GPU finishes; synchronize so the
  // measured interval covers the actual convolution.
  cudaCheck(cudaDeviceSynchronize());
  endTime = clock();
  std::cout << "The cudnnConvolutionForward run time is: " <<(double) 1000 * (endTime - startTime) / CLOCKS_PER_SEC << "ms" << std::endl;

  cudaCheck(cudaMemcpy(output_Y, Y.get(), yBytes, cudaMemcpyDeviceToHost));

  cudnnCheck(cudnnDestroyTensorDescriptor(xDesc));
  cudnnCheck(cudnnDestroyTensorDescriptor(yDesc));
  cudnnCheck(cudnnDestroyFilterDescriptor(wDesc));
  cudnnCheck(cudnnDestroyConvolutionDescriptor(cDesc));
  cudnnCheck(cudnnDestroy(ctx));
}

/*
    Filter-gradient (backward-filter) convolution through cuDNN.

    Given input X and output gradient Y, writes the filter gradient to
    the host buffer `input_W`.

    ~ input_X / in_*  : host input tensor and its N,C,D,H,W extents (read)
    ~ output_Y / out_*: host output-gradient tensor and extents (read)
    ~ p_h,p_w         : padding (only used by the 4-D path — see note)
    ~ s_h,s_w         : strides (only used by the 4-D path)
    ~ d_h,d_w         : dilations
    ~ input_W / k_*   : host filter-gradient buffer and extents (written)
    ~ datatype        : 1 = float, 2 = double
    ~ dataformat      : 1 = NCHW (4-D), 2 = NCDHW (5-D)
    ~ fft_algorithm   : 1 = ALGO_1, 2 = FFT_TILING
*/
void cudnn_backward_filter(void* input_X, int in_n, int in_c, int in_d, int in_h, int in_w,
  void* output_Y, int out_n, int out_c, int out_d, int out_h, int out_w,
  int p_h, int p_w,
  int s_h, int s_w,
  int d_h, int d_w,
  void* input_W, int k_n, int k_c, int k_d, int k_h, int k_w,
  int datatype, int dataformat,int fft_algorithm)
{
  const int xW = in_w;
  const int xH = in_h;
  const int xC = in_c;
  const int xN = in_n;
  const int xD = in_d;

  const int yW = out_w;
  const int yH = out_h;
  const int yC = out_c;
  const int yN = out_n;
  const int yD = out_d;

  const int wW = k_w;
  const int wH = k_h;
  const int wC = k_c;
  const int wK = k_n;
  const int wD = k_d;

  const int padW = p_w;
  const int padH = p_h;

  const int strideW = s_w;
  const int strideH = s_h;

  const int dilationW = d_w;
  const int dilationH = d_h;

  // NCHW only; the NCDHW case uses Nd descriptors with explicit strides.
  const cudnnTensorFormat_t format_t = CUDNN_TENSOR_NCHW;
  const cudnnDataType_t datatype_t = (datatype == 1) ? CUDNN_DATA_FLOAT : CUDNN_DATA_DOUBLE;
  // Element size must follow the selected datatype; the original always
  // used sizeof(float), under-allocating buffers for double data.
  const std::size_t value_size = (datatype == 1) ? sizeof(float) : sizeof(double);

  // Initialization (creates context)
  cudnnHandle_t ctx;
  cudnnCheck(cudnnCreate(&ctx));

  std::cout <<"xW "<< xW << std::endl;
  std::cout <<"xH "<< xH << std::endl;
  std::cout <<"xC "<< xC << std::endl;
  std::cout <<"xN "<< xN << std::endl;
  std::cout <<"xD "<< xD << std::endl;

  std::cout <<"yW "<< yW << std::endl;
  std::cout <<"yH "<< yH << std::endl;
  std::cout <<"yC "<< yC << std::endl;
  std::cout <<"yN "<< yN << std::endl;
  std::cout <<"yD "<< yD << std::endl;

  // Input descriptor
  cudnnTensorDescriptor_t xDesc;
  cudnnCheck(cudnnCreateTensorDescriptor(&xDesc));
  if (dataformat == 1){
    cudnnCheck(cudnnSetTensor4dDescriptor(xDesc, format_t,
      datatype_t, xN, xC, xH, xW));
  } else {
    int dimA[5] = {xN,xC,xD,xH,xW};
    int strideA[5] = {xC*xD*xH*xW,xD*xH*xW,xH*xW,xW,1};  // dense NCDHW
    cudnnCheck(cudnnSetTensorNdDescriptor(xDesc,datatype_t,5,dimA,strideA));
  }
  std::cout <<"cudnnCreateTensorDescriptor  input" << std::endl;

  // Output-gradient descriptor
  cudnnTensorDescriptor_t yDesc;
  cudnnCheck(cudnnCreateTensorDescriptor(&yDesc));
  if (dataformat == 1){
    cudnnCheck(cudnnSetTensor4dDescriptor(yDesc, format_t,
      datatype_t, yN, yC, yH, yW));
  } else {
    int tensorOuputDimA[5] = {yN, yC, yD, yH, yW};
    int strideA[5] = {yC*yD*yH*yW,yD*yH*yW,yH*yW,yW,1};
    cudnnCheck(cudnnSetTensorNdDescriptor(yDesc,datatype_t,5,tensorOuputDimA,strideA));
  }
  std::cout <<"cudnnCreateTensorDescriptor  output" << std::endl;

  // Filter descriptor
  cudnnFilterDescriptor_t wDesc;
  cudnnCheck(cudnnCreateFilterDescriptor(&wDesc));
  if (dataformat == 1){
    cudnnCheck(cudnnSetFilter4dDescriptor(wDesc,datatype_t,format_t, wK, wC, wH, wW));
  } else {
    int filterDimA[5] = {wK,wC,wD,wH,wW};
    cudnnCheck(cudnnSetFilterNdDescriptor(wDesc,datatype_t,format_t,5,filterDimA));
  }
  std::cout <<"cudnnCreateTensorDescriptor  filter" << std::endl;

  // Convolution descriptor
  cudnnConvolutionDescriptor_t cDesc;
  cudnnCheck(cudnnCreateConvolutionDescriptor(&cDesc));
  if (dataformat == 1){
    cudnnCheck(cudnnSetConvolution2dDescriptor(cDesc, padH, padW,
      strideH, strideW, dilationH, dilationW, CUDNN_CONVOLUTION,
      datatype_t));
  } else {
    // NOTE(review): this path hard-codes pad {1,1,1} (ignoring p_h/p_w),
    // unit strides/depth dilation, and CROSS_CORRELATION mode while the
    // 4-D path above uses CONVOLUTION — behavior preserved as-is, but
    // confirm both asymmetries are intentional.
    int padA[3] = {1,1,1};
    int filterStrideA[3] = {1,1,1};
    int dilationA[3] = {1,dilationH,dilationW};
    cudnnCheck(cudnnSetConvolutionNdDescriptor(cDesc,3,padA,filterStrideA,dilationA,CUDNN_CROSS_CORRELATION,datatype_t));
  }
  std::cout <<"cudnnCreateTensorDescriptor  convolution" << std::endl;

  // Algorithm selection: fft_algorithm == 1 deliberately maps to ALGO_1
  // (the plain FFT filter algorithm is commented out in the original).
  cudnnConvolutionBwdFilterAlgo_t bwdWAlgo =
      (fft_algorithm == 1) ? CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1
                           : CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT_TILING;

  // Workspace size for the chosen algorithm
  std::size_t bwdWWSsize;
  cudnnCheck(cudnnGetConvolutionBackwardFilterWorkspaceSize(ctx, xDesc, yDesc, cDesc, wDesc
    , bwdWAlgo, &bwdWWSsize));
  std::cout <<"cudnnGetConvolutionBackwardFilterWorkspaceSize "<< bwdWWSsize << std::endl;

  // Device buffers
  const std::size_t xBytes = (std::size_t)xN * xC * xD * xH * xW * value_size;
  const std::size_t wBytes = (std::size_t)wK * wC * wD * wH * wW * value_size;
  const std::size_t yBytes = (std::size_t)yN * yC * yD * yH * yW * value_size;
  auto X = MemAlloc(xBytes);
  auto W = MemAlloc(wBytes);
  auto Y = MemAlloc(yBytes);
  auto bwdWWS = MemAlloc(bwdWWSsize);

  cudaCheck(cudaMemcpy(X.get(), input_X, xBytes, cudaMemcpyHostToDevice));
  cudaCheck(cudaMemcpy(Y.get(), output_Y, yBytes, cudaMemcpyHostToDevice));

  // Scaling factors must have the tensor datatype (float vs double); the
  // original always passed float, which is wrong for datatype == 2.
  float alphaF = 1.f, betaF = 0.f;
  double alphaD = 1.0, betaD = 0.0;
  const void* alpha = (datatype == 1) ? static_cast<const void*>(&alphaF)
                                      : static_cast<const void*>(&alphaD);
  const void* beta  = (datatype == 1) ? static_cast<const void*>(&betaF)
                                      : static_cast<const void*>(&betaD);

  cudnnCheck(cudnnConvolutionBackwardFilter(ctx, alpha, xDesc, X.get(), yDesc,
    Y.get(), cDesc, bwdWAlgo, bwdWWS.get(), bwdWWSsize, beta, wDesc, W.get()));

  std::cout <<"cudnnConvolutionBackwardFilter " << std::endl;  

  cudaCheck(cudaMemcpy(input_W, W.get(), wBytes, cudaMemcpyDeviceToHost));

  std::cout <<"single input_W " << std::endl;  

  cudnnCheck(cudnnDestroyTensorDescriptor(xDesc));
  cudnnCheck(cudnnDestroyTensorDescriptor(yDesc));
  cudnnCheck(cudnnDestroyFilterDescriptor(wDesc));
  cudnnCheck(cudnnDestroyConvolutionDescriptor(cDesc));
  cudnnCheck(cudnnDestroy(ctx));
}



/*
    Data-gradient (backward-data) convolution through cuDNN.

    Given filter W and output gradient Y, writes the input gradient to
    the host buffer `input_X`.

    ~ input_W / k_*   : host filter tensor and its K,C,D,H,W extents (read)
    ~ output_Y / out_*: host output-gradient tensor and extents (read)
    ~ p_d,p_h,p_w     : padding
    ~ s_d,s_h,s_w     : strides (the 5-D path currently hard-codes unit
                        strides — see note below)
    ~ d_d,d_h,d_w     : dilations
    ~ input_X / in_*  : host input-gradient buffer and extents (written)
    ~ datatype        : 1 = float, 2 = double
    ~ dataformat      : 1 = NCHW (4-D), 2 = NCDHW (5-D)
    ~ fft_algorithm   : 1 = FFT, 2 = FFT_TILING
*/
void cudnn_backward_data(
  void* input_W, int k_n, int k_c, int k_d, int k_h, int k_w,
  void* output_Y, int out_n, int out_c, int out_d, int out_h, int out_w,
 int p_d, int p_h, int p_w,
 int s_d, int s_h, int s_w,
 int d_d, int d_h, int d_w,
  void* input_X, int in_n, int in_c, int in_d, int in_h, int in_w,
  int datatype, int dataformat,int fft_algorithm)
{
  const int xW = in_w;
  const int xH = in_h;
  const int xC = in_c;
  const int xN = in_n;
  const int xD = in_d;

  const int yW = out_w;
  const int yH = out_h;
  const int yC = out_c;
  const int yN = out_n;
  const int yD = out_d;

  const int wW = k_w;
  const int wH = k_h;
  const int wC = k_c;
  const int wK = k_n;
  const int wD = k_d;

  const int padD = p_d;
  const int padW = p_w;
  const int padH = p_h;

  const int dilationD = d_d;
  const int dilationW = d_w;
  const int dilationH = d_h;

  const int strideW = s_w;
  const int strideH = s_h;

  // NCHW only; the NCDHW case uses Nd descriptors with explicit strides.
  const cudnnTensorFormat_t format_t = CUDNN_TENSOR_NCHW;
  const cudnnDataType_t datatype_t = (datatype == 1) ? CUDNN_DATA_FLOAT : CUDNN_DATA_DOUBLE;
  // Element size must follow the selected datatype; the original always
  // used sizeof(float), under-allocating buffers for double data.
  const std::size_t value_size = (datatype == 1) ? sizeof(float) : sizeof(double);

  // Initialization (creates context)
  cudnnHandle_t ctx;
  cudnnCheck(cudnnCreate(&ctx));

  std::cout <<"xW "<< xW << std::endl;
  std::cout <<"xH "<< xH << std::endl;
  std::cout <<"xC "<< xC << std::endl;
  std::cout <<"xN "<< xN << std::endl;
  std::cout <<"xD "<< xD << std::endl;

  std::cout <<"yW "<< yW << std::endl;
  std::cout <<"yH "<< yH << std::endl;
  std::cout <<"yC "<< yC << std::endl;
  std::cout <<"yN "<< yN << std::endl;
  std::cout <<"yD "<< yD << std::endl;

  std::cout <<"wW "<< wW << std::endl;
  std::cout <<"wH "<< wH << std::endl;
  std::cout <<"wC "<< wC << std::endl;
  std::cout <<"wK "<< wK << std::endl;
  std::cout <<"wD "<< wD << std::endl;

  // Input-gradient descriptor
  cudnnTensorDescriptor_t xDesc;
  cudnnCheck(cudnnCreateTensorDescriptor(&xDesc));
  if (dataformat == 1){
    cudnnCheck(cudnnSetTensor4dDescriptor(xDesc, format_t,
      datatype_t, xN, xC, xH, xW));
  } else {
    int dimA[5] = {xN,xC,xD,xH,xW};
    int strideA[5] = {xC*xD*xH*xW,xD*xH*xW,xH*xW,xW,1};  // dense NCDHW
    cudnnCheck(cudnnSetTensorNdDescriptor(xDesc,datatype_t,5,dimA,strideA));
  }
  std::cout <<"cudnnCreateTensorDescriptor  input" << std::endl;

  // Output-gradient descriptor
  cudnnTensorDescriptor_t yDesc;
  cudnnCheck(cudnnCreateTensorDescriptor(&yDesc));
  if (dataformat == 1){
    cudnnCheck(cudnnSetTensor4dDescriptor(yDesc, format_t,
      datatype_t, yN, yC, yH, yW));
  } else {
    int tensorOuputDimA[5] = {yN, yC, yD, yH, yW};
    int strideA[5] = {yC*yD*yH*yW,yD*yH*yW,yH*yW,yW,1};
    cudnnCheck(cudnnSetTensorNdDescriptor(yDesc,datatype_t,5,tensorOuputDimA,strideA));
  }
  std::cout <<"cudnnCreateTensorDescriptor  output" << std::endl;

  // Filter descriptor
  cudnnFilterDescriptor_t wDesc;
  cudnnCheck(cudnnCreateFilterDescriptor(&wDesc));
  if (dataformat == 1){
    cudnnCheck(cudnnSetFilter4dDescriptor(wDesc,datatype_t,format_t, wK, wC, wH, wW));
  } else {
    int filterDimA[5] = {wK,wC,wD,wH,wW};
    cudnnCheck(cudnnSetFilterNdDescriptor(wDesc,datatype_t,format_t,5,filterDimA));
  }
  std::cout <<"cudnnCreateTensorDescriptor  filter" << std::endl;

  // Convolution descriptor
  cudnnConvolutionDescriptor_t cDesc;
  cudnnCheck(cudnnCreateConvolutionDescriptor(&cDesc));
  if (dataformat == 1){
    cudnnCheck(cudnnSetConvolution2dDescriptor(cDesc, padH, padW,
      strideH, strideW, dilationH, dilationW, CUDNN_CONVOLUTION,
      datatype_t));
  } else {
    // Fix: pad was hard-coded {1,1,1}, silently ignoring the p_d/p_h/p_w
    // parameters; now consistent with cudnn_forward's 5-D path.
    int padA[3] = {padD,padH,padW};
    // NOTE(review): strides remain hard-coded at 1 (s_d/s_h/s_w unused),
    // matching the forward path — confirm this is intentional.
    int filterStrideA[3] = {1,1,1};
    int dilationA[3] = {dilationD,dilationH,dilationW};
    cudnnCheck(cudnnSetConvolutionNdDescriptor(cDesc,3,padA,filterStrideA,dilationA,CUDNN_CONVOLUTION,datatype_t));
  }
  std::cout <<"cudnnCreateTensorDescriptor  convolution" << std::endl;

  // Algorithm selection
  cudnnConvolutionBwdDataAlgo_t bwdXAlgo =
      (fft_algorithm == 1) ? CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT
                           : CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING;

  // Workspace size for the chosen algorithm
  std::size_t bwdXWSsize;
  cudnnCheck(cudnnGetConvolutionBackwardDataWorkspaceSize(ctx, wDesc, yDesc, cDesc, xDesc
    , bwdXAlgo, &bwdXWSsize));
  std::cout <<"cudnnGetConvolutionBackwardDataWorkspaceSize "<< bwdXWSsize << std::endl;

  // Device buffers
  const std::size_t xBytes = (std::size_t)xN * xC * xD * xH * xW * value_size;
  const std::size_t wBytes = (std::size_t)wK * wC * wD * wH * wW * value_size;
  const std::size_t yBytes = (std::size_t)yN * yC * yD * yH * yW * value_size;
  auto X = MemAlloc(xBytes);
  auto W = MemAlloc(wBytes);
  auto Y = MemAlloc(yBytes);
  auto bwdXWS = MemAlloc(bwdXWSsize);

  cudaCheck(cudaMemcpy(W.get(), input_W, wBytes, cudaMemcpyHostToDevice));
  cudaCheck(cudaMemcpy(Y.get(), output_Y, yBytes, cudaMemcpyHostToDevice));

  // Scaling factors must have the tensor datatype (float vs double); the
  // original always passed float, which is wrong for datatype == 2.
  float alphaF = 1.f, betaF = 0.f;
  double alphaD = 1.0, betaD = 0.0;
  const void* alpha = (datatype == 1) ? static_cast<const void*>(&alphaF)
                                      : static_cast<const void*>(&alphaD);
  const void* beta  = (datatype == 1) ? static_cast<const void*>(&betaF)
                                      : static_cast<const void*>(&betaD);

  cudnnCheck(cudnnConvolutionBackwardData(ctx, alpha,  wDesc, W.get() , yDesc,
    Y.get(), cDesc, bwdXAlgo, bwdXWS.get(), bwdXWSsize, beta, xDesc, X.get()));

  std::cout <<"cudnnConvolutionBackwardData " << std::endl;  

  cudaCheck(cudaMemcpy(input_X, X.get(), xBytes, cudaMemcpyDeviceToHost));

  cudnnCheck(cudnnDestroyTensorDescriptor(xDesc));
  cudnnCheck(cudnnDestroyTensorDescriptor(yDesc));
  cudnnCheck(cudnnDestroyFilterDescriptor(wDesc));
  cudnnCheck(cudnnDestroyConvolutionDescriptor(cDesc));
  cudnnCheck(cudnnDestroy(ctx));
}

#endif
