#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cudnn.h>
#include <sys/time.h>

#include "test_conv.cuh"
#include "im2col.cuh"
#include "common.cuh"

namespace runtime {namespace cuda {

// ---- Test problem configuration -------------------------------------------
// Grouped 3x3 convolution: 1 x 32 x 224 x 224 input, 32 output channels,
// 16 groups, pad 1, stride 1, dilation 1.
#define BATCH_SIZE          1
#define IMAGE_HEIGHT        224
#define IMAGE_WIDTH         224
#define IMAGE_CHANNEL       32
// Total input elements: C * H * W (batch is 1).
#define IMAGE_SIZE          (IMAGE_CHANNEL * IMAGE_HEIGHT * IMAGE_WIDTH)
#define GROUP               16
#define KERNEL_SIZE         3
#define OUT_CHANNELS        32
// Weight elements for a grouped conv: OUT_C * (IN_C / GROUP) * KH * KW.
#define KERNEL_ELE_SIZE     (OUT_CHANNELS * IMAGE_CHANNEL / GROUP * KERNEL_SIZE * KERNEL_SIZE)
#define PADS                1
#define STRIDES             1
#define DILATIONS           1
// NOTE(review): IMAGE_SIZE already contains IMAGE_CHANNEL, so this expands to
// OUT_CHANNELS * IMAGE_CHANNEL * H * W — IMAGE_CHANNEL(=32)x larger than the
// spatial-preserving conv output OUT_CHANNELS * H * W. Verify this matches the
// element count of ./data/conv_output.bin, or the buffers are over-allocated.
#define CONV_OUT_ELE_SIZE   (IMAGE_SIZE * OUT_CHANNELS)

// Binary fixture files: input tensor, weights, bias, and the CPU/reference
// ("golden") output used for correctness comparison.
const std::string conv_in_path = "./data/conv_in.bin";
const std::string conv_w_path = "./data/conv_w.bin";
const std::string conv_b_path = "./data/conv_b.bin";
const std::string conv_golden_path = "./data/conv_output.bin";

// Grouped 2-D convolution via im2col + cuBLAS GEMM (defined elsewhere,
// presumably im2col.cu — confirm). All pointers are DEVICE pointers; writes
// the result into conv_output.
void fast_conv(const float* data_im, const float* weight, const float* bias, const int batch_size, const int conv_out_channels, const int channels,
    const int height, const int width, const int group, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    float* conv_output, cublasHandle_t& cublas_handle_);
// Same convolution implemented with cuDNN (defined elsewhere). Same device
// pointer contract as fast_conv; takes a cuDNN handle instead of cuBLAS.
void fast_conv_cudnn(const float* data_im, const float* weight, const float* bias, const int batch_size, const int conv_out_channels, const int conv_in_channels,
    const int height, const int width, const int group, const int kernel_h, const int kernel_w,
    const int pad_h, const int pad_w, const int stride_h,
    const int stride_w, const int dilation_h, const int dilation_w,
    float* conv_output, cudnnHandle_t& cudnn_handle_);

// Loads the conv input/weights/bias fixtures, runs the cuDNN grouped-conv path
// 100 times on the GPU, times the loop, writes the GPU result to disk, and
// reports the max element difference versus the golden reference.
// Returns 0 on completion; throws std::runtime_error if a library handle
// cannot be created or the device fails to synchronize.
//
// Fixes vs. previous revision:
//  - cudnnCreate was called twice (once checked via CHECK_CUDNN_ERR, then again
//    inside a null-check), creating a second handle / leaking the first.
//  - `if (!cublas_handle_)` immediately after `= nullptr` was always true.
//  - TOC fired without synchronizing: kernel launches are asynchronous, so the
//    timing only covered launch overhead, not GPU execution.
int test_conv() {
    std::vector<float> conv_in(IMAGE_SIZE, 0);
    std::vector<float> conv_w(KERNEL_ELE_SIZE, 0);
    std::vector<float> conv_b(OUT_CHANNELS, 0);
    std::vector<float> conv_out(CONV_OUT_ELE_SIZE, 0);
    read_from_bin(conv_in.data(), IMAGE_SIZE, conv_in_path);
    read_from_bin(conv_w.data(), KERNEL_ELE_SIZE, conv_w_path);
    read_from_bin(conv_b.data(), OUT_CHANNELS, conv_b_path);

    // Create each library handle exactly once, checking the status directly.
    cublasHandle_t cublas_handle_ = nullptr;
    if (cublasCreate(&cublas_handle_) != CUBLAS_STATUS_SUCCESS) {
        throw std::runtime_error("Cannot create Cublas handle. Cublas won't be available.");
    }
    cudnnHandle_t cudnn_handle_ = nullptr;
    if (cudnnCreate(&cudnn_handle_) != CUDNN_STATUS_SUCCESS) {
        throw std::runtime_error("Cannot create CUDNN handle. CUDNN won't be available.");
    }

    // Stage the host buffers on the device; manager owns the device memory.
    GpuMemManager manager;
    float* conv_in_d = (float*)manager.mapCpu2Gpu(conv_in.data(), conv_in.size() * sizeof(float));
    float* conv_w_d = (float*)manager.mapCpu2Gpu(conv_w.data(), conv_w.size() * sizeof(float));
    float* conv_b_d = (float*)manager.mapCpu2Gpu(conv_b.data(), conv_b.size() * sizeof(float));
    float* conv_out_d = (float*)manager.mapCpu2Gpu(conv_out.data(), conv_out.size() * sizeof(float));

    TINIT
    TIC
    for (int i = 0; i < 100; i++) {
        // im2col + cuBLAS alternative, kept for reference:
        // fast_conv(conv_in_d, conv_w_d, conv_b_d, BATCH_SIZE, OUT_CHANNELS, IMAGE_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH, GROUP, KERNEL_SIZE, KERNEL_SIZE, PADS, PADS, STRIDES, STRIDES, DILATIONS, DILATIONS, conv_out_d, cublas_handle_);
        fast_conv_cudnn(conv_in_d, conv_w_d, conv_b_d, BATCH_SIZE, OUT_CHANNELS, IMAGE_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH, GROUP, KERNEL_SIZE, KERNEL_SIZE, PADS, PADS, STRIDES, STRIDES, DILATIONS, DILATIONS, conv_out_d, cudnn_handle_);
    }
    // GPU work is asynchronous: block until all 100 iterations have actually
    // executed so the wall-clock number below measures GPU time, not just
    // launch overhead. This also surfaces any deferred kernel error.
    if (cudaDeviceSynchronize() != cudaSuccess) {
        throw std::runtime_error("cudaDeviceSynchronize failed after fast_conv_cudnn loop.");
    }
    TOC(fast_conv_100times)

    manager.mapGpu2Cpu(conv_out.data(), conv_out_d, conv_out.size() * sizeof(float));
    write_to_bin(conv_out.data(), conv_out.size(), "./data/conv_out_gpu.bin");

    // Element-wise comparison against the golden reference: report the worst
    // absolute difference and where it occurred.
    std::vector<float> conv_golden(CONV_OUT_ELE_SIZE, 0);
    read_from_bin(conv_golden.data(), CONV_OUT_ELE_SIZE, conv_golden_path);
    std::pair<float, int64_t> p = compare_two_buff(conv_golden.data(), conv_out.data(), conv_golden.size());
    std::cout << "max_diff_idx = " << p.second  << ", max_diff = " << p.first << ".\nfinish." << std::endl;

    CHECK_CUBLAS_ERR(cublasDestroy(cublas_handle_));
    CHECK_CUDNN_ERR(cudnnDestroy(cudnn_handle_));
    return 0;
}


}} // end of namespace runtime/cuda
