#include <cublas_v2.h>
#include <cuda_runtime.h>

#include <iostream>
#include <vector>

#include "common.cuh"
#include "gemm.cuh"
#include "im2col.cuh"

namespace runtime{ namespace cuda {


// Batched, grouped 2D convolution via the classic im2col + GEMM lowering.
//
// For each image and each channel group:
//   1. im2col unrolls the input patch into a (kernel_dim x output_image_size)
//      column buffer (skipped for the 1x1/stride-1/no-pad fast path, where the
//      input is already in that layout);
//   2. a single GEMM multiplies the group's weights by the column buffer.
// The optional bias is then broadcast over all spatial positions with a
// rank-1 GEMM against a ones-vector.
//
// data_im      - device pointer, input images, NCHW, batch_size images.
// weight       - device pointer, filter weights,
//                [group][out_ch/group][in_ch/group][kh][kw] layout.
// bias         - device pointer (conv_out_channels entries) or nullptr.
// conv_output  - device pointer, result, batch_size x out_ch x out_h x out_w.
// cublas_handle_ - initialized cuBLAS handle used for all GEMMs.
//
// NOTE(review): each call allocates/frees scratch buffers with cudaMalloc;
// for hot paths consider caching them or using cudaMallocAsync.
void fast_conv(const float* data_im, const float* weight, const float* bias, const int batch_size, const int conv_out_channels, const int conv_in_channels,
    const int height, const int width, const int group, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    float* conv_output, cublasHandle_t& cublas_handle_) {

    // Standard convolution output-size formula (with dilation).
    const int height_col = (height + 2 * pad_h -
        (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
    const int width_col = (width + 2 * pad_w -
        (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
    const int input_image_size = height * width;
    const int output_image_size = height_col * width_col;
    const int kernel_size = kernel_h * kernel_w;
    // Per-group strides into the input (X), output (Y) and weight (W) buffers.
    const int X_offset = conv_in_channels / group * input_image_size;
    const int Y_offset = output_image_size * conv_out_channels / group;
    const int W_offset = kernel_size * conv_in_channels / group * conv_out_channels / group;
    // Rows of the unrolled patch matrix: (in_ch/group) * kh * kw.
    const int kernel_dim = conv_in_channels / group * kernel_size;
    const int col_buffer_size = kernel_dim * output_image_size;

    // A 1x1 kernel with unit stride and no padding leaves the input already
    // in GEMM layout, so im2col (and its column buffer) can be skipped.
    // Dilation is irrelevant for a 1x1 kernel.
    const bool is_1x1_ = kernel_h == 1 && kernel_w == 1
        && stride_h == 1 && stride_w == 1
        && pad_h == 0 && pad_w == 0;

    // ones(1 x output_image_size) on the device: lets the bias be broadcast
    // across spatial positions as bias(M x 1) * ones(1 x N).
    float* bias_mul_d{nullptr};
    if (bias) {
        std::vector<float> bias_multiplier_(output_image_size, 1.0f);
        CHECK_CUDA_ERR(cudaMalloc((void**)(&bias_mul_d), bias_multiplier_.size() * sizeof(float)));
        CHECK_CUDA_ERR(cudaMemcpy(bias_mul_d, bias_multiplier_.data(), bias_multiplier_.size() * sizeof(float), cudaMemcpyHostToDevice));
    }

    // Column buffer is only needed when im2col actually runs; do not pay for
    // a (possibly large) device allocation on the 1x1 fast path.
    float* data_col{nullptr};
    if (!is_1x1_) {
        CHECK_CUDA_ERR(cudaMalloc((void**)(&data_col), col_buffer_size * sizeof(float)));
    }

    const float* Xdata = data_im;
    float* Ydata = conv_output;
    for (int image_id = 0; image_id < batch_size; ++image_id) {
        for (int group_id = 0; group_id < group; ++group_id) {
            // Re-derive per iteration so the 1x1 branch never clobbers the
            // owning data_col pointer across groups/images.
            float* col_buffer_data = data_col;
            if (!is_1x1_) {
                im2col_gpu(Xdata + group_id * X_offset, conv_in_channels / group,
                    height, width, kernel_h, kernel_w, pad_h,
                    pad_w, stride_h, stride_w, dilation_h, dilation_w, col_buffer_data);
            } else {
                // Input slice is already the (kernel_dim x output_image_size)
                // matrix; const_cast only because the GEMM helper takes float*.
                col_buffer_data = const_cast<float*>(Xdata + group_id * X_offset);
            }
            // Y = W * col : (out_ch/group x kernel_dim) * (kernel_dim x N).
            caffe_gpu_gemm<float>(CUBLAS_OP_N, CUBLAS_OP_N, conv_out_channels / group,
                output_image_size, kernel_dim, 1.0f, weight + group_id * W_offset, col_buffer_data,
                0.0f, Ydata + group_id * Y_offset, cublas_handle_);
        }
        if (bias) {
            // bias(M x 1) * ones(1 x N) added onto the full image output
            // (all groups at once); beta = 1 accumulates onto the conv result.
            caffe_gpu_gemm(CUBLAS_OP_N, CUBLAS_OP_N, conv_out_channels, output_image_size, 1, 1.0f, bias, bias_mul_d, 1.f, Ydata, cublas_handle_);
        }

        // Advance to the next image in the batch.
        Xdata += X_offset * group;
        Ydata += Y_offset * group;
    }

    if (bias) {
        CHECK_CUDA_ERR(cudaFree(bias_mul_d));
    }
    if (data_col) {
        CHECK_CUDA_ERR(cudaFree(data_col));
    }
}

}} // end of namespace runtime/cuda