#include <cudnn.h>

#include <stdexcept>

#include <common.cuh>


namespace runtime {namespace cuda {

// Runs a 2D (optionally grouped) convolution through cuDNN, with an optional
// per-output-channel bias add.
//
// data_im:       device pointer to the input, NCHW float32,
//                shape (batch_size, conv_in_channels, height, width)
// weight:        device pointer to the filters, NCHW layout,
//                shape (conv_out_channels, conv_in_channels / group, kernel_h, kernel_w)
// bias:          optional device pointer (may be nullptr) to conv_out_channels
//                bias values, broadcast over N/H/W
// conv_output:   device pointer receiving the result; must be large enough for
//                the output shape cuDNN derives from the conv parameters
// cudnn_handle_: caller-owned cuDNN handle; work is enqueued on whatever
//                stream that handle is bound to
//
// Uses the IMPLICIT_PRECOMP_GEMM forward algorithm unconditionally; cuDNN may
// reject it for unusual configurations, which the CHECK_CUDNN_ERR on the
// forward call will surface.
void fast_conv_cudnn(const float* data_im, const float* weight, const float* bias, const int batch_size, const int conv_out_channels, const int conv_in_channels,
    const int height, const int width, const int group, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    float* conv_output, cudnnHandle_t& cudnn_handle_) {

    cudnnTensorFormat_t tensorFMT = CUDNN_TENSOR_NCHW;
    cudnnDataType_t tensorType = CUDNN_DATA_FLOAT;
    cudnnTensorDescriptor_t input_desc;
    cudnnFilterDescriptor_t weight_desc;
    cudnnTensorDescriptor_t bias_desc;
    cudnnTensorDescriptor_t output_desc;
    cudnnConvolutionDescriptor_t conv_desc;

    // Create descriptors.
    // NOTE(review): if a CHECK_* macro throws partway through this function,
    // descriptors/workspace created so far leak. Fine if the macros abort the
    // process; otherwise consider RAII wrappers — confirm macro semantics.
    CHECK_CUDNN_ERR(cudnnCreateTensorDescriptor(&input_desc));
    CHECK_CUDNN_ERR(cudnnCreateFilterDescriptor(&weight_desc));
    if (bias) {
        CHECK_CUDNN_ERR(cudnnCreateTensorDescriptor(&bias_desc));
    }
    CHECK_CUDNN_ERR(cudnnCreateTensorDescriptor(&output_desc));
    CHECK_CUDNN_ERR(cudnnCreateConvolutionDescriptor(&conv_desc));

    // Input: NCHW float tensor.
    CHECK_CUDNN_ERR(cudnnSetTensor4dDescriptor(input_desc, tensorFMT, tensorType, batch_size, conv_in_channels, height, width));

    // Filters: each group sees conv_in_channels / group input channels.
    CHECK_CUDNN_ERR(cudnnSetFilter4dDescriptor(weight_desc, tensorType, tensorFMT, conv_out_channels, conv_in_channels / group, kernel_h, kernel_w));

    // Bias: one value per output channel (1 x C x 1 x 1).
    if (bias) {
        CHECK_CUDNN_ERR(cudnnSetTensor4dDescriptor(bias_desc, tensorFMT, tensorType, 1, conv_out_channels, 1, 1));
    }

    // Convolution: cross-correlation, the usual NN-framework convention.
    CHECK_CUDNN_ERR(cudnnSetConvolution2dDescriptor(conv_desc, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, CUDNN_CROSS_CORRELATION, tensorType));
    // CUDNN_MAJOR is a preprocessor constant, so resolve the version check at
    // compile time instead of carrying a dead runtime branch.
#if CUDNN_MAJOR < 7
    throw std::runtime_error("CUDNN VERSION < 7 DO NOT SUPPORT GROUP CONV, PLEASE UPGRADE!");
#else
    CHECK_CUDNN_ERR(cudnnSetConvolutionGroupCount(conv_desc, group));
#endif

    // Output: let cuDNN derive the output shape from input/filter/conv params.
    int out_n, out_c, out_h, out_w;
    CHECK_CUDNN_ERR(cudnnGetConvolution2dForwardOutputDim(conv_desc, input_desc, weight_desc, &out_n, &out_c, &out_h, &out_w));
    CHECK_CUDNN_ERR(cudnnSetTensor4dDescriptor(output_desc, tensorFMT, tensorType, out_n, out_c, out_h, out_w));

    // Workspace for the chosen algorithm. Some configurations need none; skip
    // the allocation then rather than relying on cudaMalloc(.., 0) behavior.
    cudnnConvolutionFwdAlgo_t algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM;
    size_t ws_size = 0;
    CHECK_CUDNN_ERR(cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle_, input_desc, weight_desc, conv_desc, output_desc, algo, &ws_size));
    void* ws_data = nullptr;
    if (ws_size > 0) {
        CHECK_CUDA_ERR(cudaMalloc(&ws_data, ws_size));
    }

    // Forward: conv_output = 1 * conv(data_im, weight) + 0 * conv_output.
    float alpha = 1.f, beta = 0.f;
    CHECK_CUDNN_ERR(cudnnConvolutionForward(cudnn_handle_, &alpha, input_desc, data_im, weight_desc, weight, conv_desc, algo, ws_data, ws_size, &beta, output_desc, conv_output));

    // Bias add: second scale factor of 1 keeps the conv result in place.
    if (bias) {
        CHECK_CUDNN_ERR(cudnnAddTensor(cudnn_handle_, &alpha, bias_desc, bias, &alpha, output_desc, conv_output));
    }

    // Tear down in reverse creation order.
    if (ws_data) {
        CHECK_CUDA_ERR(cudaFree(ws_data));
    }
    CHECK_CUDNN_ERR(cudnnDestroyTensorDescriptor(output_desc));
    if (bias) {
        CHECK_CUDNN_ERR(cudnnDestroyTensorDescriptor(bias_desc));
    }
    CHECK_CUDNN_ERR(cudnnDestroyConvolutionDescriptor(conv_desc));
    CHECK_CUDNN_ERR(cudnnDestroyFilterDescriptor(weight_desc));
    CHECK_CUDNN_ERR(cudnnDestroyTensorDescriptor(input_desc));
}

}} // end of namespace runtime/cuda
