#include <bits/stdc++.h>

#include <iostream>
#include <cuda_runtime.h>
#include <random>
#include <ctime>
#include <sys/time.h>

#include <cudnn.h>
#include <cublas_v2.h>

/**
 * @brief C = A * B
 * 
 */

using namespace std;

// Token-pasting helpers. Without this two-level indirection, writing
// `name##__COUNTER__` pastes the literal token `__COUNTER__` instead of its
// expanded value, so every PERF() in the same scope produced the same
// identifier and two uses in one scope would not compile. The extra level
// forces __COUNTER__ to expand before pasting.
#define PERF_CONCAT_IMPL(a, b) a##b
#define PERF_CONCAT(a, b) PERF_CONCAT_IMPL(a, b)
// Scoped GPU timer: declares a uniquely named Perf local printing `name`.
#define PERF(name) Perf PERF_CONCAT(perf_##name##_, __COUNTER__)(#name)
// Scoped CPU timer: declares a uniquely named PerfCPU local printing `name`.
#define PERF_CPU(name) PerfCPU PERF_CONCAT(perf_CPU_##name##_, __COUNTER__)(#name)

// Scoped wall-clock timer for host-side code. Records the construction time
// and, on destruction, prints "<name> elapse: <ms> ms" to stdout.
class PerfCPU
{
public:
    PerfCPU(const std::string& name) : m_name(name) {
        gettimeofday(&m_start, NULL);
    }

    ~PerfCPU() {
        gettimeofday(&m_end, NULL);
        // Combine the seconds and microseconds deltas into milliseconds.
        const float elapsed_ms = (m_end.tv_sec - m_start.tv_sec) * 1000.0 +
                                 (m_end.tv_usec - m_start.tv_usec) / 1000.0;
        std::cout << m_name << " elapse: " << elapsed_ms << " ms" << std::endl;
    }

private:
    std::string m_name;          // label printed with the measurement
    struct timeval m_start, m_end;
}; // class PerfCPU

// Scoped GPU timer backed by CUDA events. Records a start event on
// construction and, on destruction, records/synchronizes an end event and
// prints "<name> elapse: <ms> ms" to stdout. Events are recorded on the
// default stream, so it times all default-stream work in the scope.
class Perf
{
public:
    Perf(const std::string& name) {
        m_name = name;
        cudaEventCreate(&m_start);
        cudaEventCreate(&m_end);
        cudaEventRecord(m_start);
        cudaEventSynchronize(m_start);
    }

    ~Perf() {
        cudaEventRecord(m_end);
        cudaEventSynchronize(m_end);
        float elapsed_time = 0.0f;
        cudaEventElapsedTime(&elapsed_time, m_start, m_end);
        std::cout << m_name << " elapse: " << elapsed_time << " ms" << std::endl;
        // Fix: the events were previously leaked; release them once read.
        cudaEventDestroy(m_start);
        cudaEventDestroy(m_end);
    }

    // Non-copyable: a copy would destroy the same events twice.
    Perf(const Perf&) = delete;
    Perf& operator=(const Perf&) = delete;

private:
    std::string m_name;          // label printed with the measurement
    cudaEvent_t m_start, m_end;  // owned CUDA events (created/destroyed here)
}; // class Perf

// Thread-block shape for the hand-written kernels: 32 x 8 = 256 threads,
// x spanning columns of C and y spanning rows (see the launch in main()).
const int WIDTH_BLOCK_SIZE = 32;
const int HEIGHT_BLOCK_SIZE = 8;
// Problem size: C(M x N) = A(M x K) * B(K x N). Note K == N here, so a
// mixed-up loop bound between them would not change results.
const int MATRIX_M = 2048;
const int MATRIX_K = 512;
const int MATRIX_N = 512;

/**
 * @brief C = A * B where B is supplied already transposed, so each thread
 *        walks both operands with stride 1 (better locality than the
 *        column-strided B reads in matmul_naive).
 *
 * One thread computes one element of C; expects the same grid/block layout
 * as matmul_naive (x over N columns, y over M rows, bounds-guarded).
 *
 * NOTE(review): the body was an empty stub; this implementation assumes
 * `matrix_b` holds B^T, i.e. an (N * K) row-major buffer — confirm against
 * the caller before relying on results.
 *
 * @param matrix_a A, (M * K) row-major
 * @param matrix_b B transposed, (N * K) row-major
 * @param matrix_c C, (M * N) row-major output
 */
__global__ void matmul_transpose_b(const float* matrix_a, const float* matrix_b, float* matrix_c)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    // Guard the grid tail: the grid may overshoot the matrix extents.
    if (row >= MATRIX_M || col >= MATRIX_N) {
        return;
    }

    float sum = 0.0f;
    for (int k = 0; k < MATRIX_K; ++k) {
        // Row `row` of A times row `col` of B^T (== column `col` of B).
        sum += matrix_a[row * MATRIX_K + k] * matrix_b[col * MATRIX_K + k];
    }
    matrix_c[row * MATRIX_N + col] = sum;
}

/**
 * @brief Naive matmul: one thread per element of C = A * B.
 *
 * Launch with a 2D grid where x spans the N columns and y spans the M rows
 * (as done in main()); out-of-range threads exit via the bounds guard.
 *
 * @param matrix_a A, (M * K) row-major
 * @param matrix_b B, (K * N) row-major
 * @param matrix_c C, (M * N) row-major output
 */
__global__ void matmul_naive(const float* matrix_a, const float* matrix_b, float* matrix_c)
{
    // Map thread coordinates onto the output element this thread owns.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    if (row >= MATRIX_M || col >= MATRIX_N) {
        return;
    }

    float sum = 0.0f;
    // Bug fix: the reduction runs over the inner dimension K, not N. The
    // original `i < MATRIX_N` bound only produced correct results because
    // MATRIX_K == MATRIX_N in this file.
    for (int i = 0; i < MATRIX_K; ++i) {
        sum += matrix_a[row * MATRIX_K + i] * matrix_b[i * MATRIX_N + col];
    }
    matrix_c[row * MATRIX_N + col] = sum;
}

/**
 * @brief Allocates host buffers for A (M*K), B (K*N) and C (M*N) and fills
 *        each with uniform random floats in [0, 1).
 *
 * The engine is default-seeded, so the generated values are the same on
 * every run. The caller owns all three buffers (release with delete[]).
 */
void AllocABCAndInit(float*& matrix_a, float*& matrix_b, float*& matrix_c)
{
    std::default_random_engine engine;
    std::uniform_real_distribution<float> uniform(0.0, 1.0);

    // Allocate `count` floats and fill them with successive random draws.
    auto alloc_filled = [&engine, &uniform](int count) {
        float* buffer = new float[count];
        for (int idx = 0; idx < count; ++idx) {
            buffer[idx] = uniform(engine);
        }
        return buffer;
    };

    // Fill order (A, then B, then C) matches the original draw order, so
    // the produced values are bit-identical to the previous implementation.
    matrix_a = alloc_filled(MATRIX_M * MATRIX_K);
    matrix_b = alloc_filled(MATRIX_K * MATRIX_N);
    matrix_c = alloc_filled(MATRIX_M * MATRIX_N);
}

/**
 * @brief Computes C = A * B on the GPU via cuBLAS and copies C to the host.
 *
 * cuBLAS expects column-major data while these buffers are row-major. The
 * call below uses the identity C^T = B^T * A^T: passing (B, A) with the m/n
 * dimensions swapped and no transposes makes cuBLAS produce C^T in
 * column-major layout, which is byte-identical to row-major C.
 *
 * @param matrix_a device pointer, A (M * K) row-major
 * @param matrix_b device pointer, B (K * N) row-major
 * @param matrix_c device pointer, C (M * N) row-major, overwritten (beta = 0)
 * @return newly allocated host copy of C (M * N floats); caller must delete[].
 */
float* CUBlasSgemm(float* matrix_a, float* matrix_b, float* matrix_c)
{
    cublasHandle_t handle;
    cublasCreate(&handle);

    float alpha = 1.0;
    float beta = 0.0;
    {
        // Perf's destructor records/synchronizes the end event at scope
        // exit, so the async GEMM is fully timed.
        Perf perf("cublasSgemm");
        // Arguments are (n, m, k) with leading dims N, K, N — the row-major
        // swap described in the header comment.
        cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, MATRIX_N, MATRIX_M,
            MATRIX_K, &alpha, matrix_b, MATRIX_N, matrix_a,
            MATRIX_K, &beta, matrix_c, MATRIX_N);
    }
    cublasDestroy(handle);
    float* blas_output = new float[MATRIX_M * MATRIX_N];
    // Blocking copy: also guarantees the GEMM has finished before return.
    cudaMemcpy(blas_output, matrix_c, MATRIX_M * MATRIX_N * sizeof(float), cudaMemcpyDeviceToHost);
    return blas_output;
}



/**
 * @brief Benchmarks the hand-written naive matmul kernel against cuBLAS and
 *        spot-checks the first elements of the two results for agreement.
 */
int main(int argc, char* argv[])
{
    float* matrix_a, * matrix_b, * matrix_c;
    AllocABCAndInit(matrix_a, matrix_b, matrix_c);

    float* d_matrix_a, * d_matrix_b, * d_matrix_c;
    cudaMalloc(&d_matrix_a, MATRIX_M * MATRIX_K * sizeof(float));
    cudaMalloc(&d_matrix_b, MATRIX_K * MATRIX_N * sizeof(float));
    cudaMalloc(&d_matrix_c, MATRIX_M * MATRIX_N * sizeof(float));

    // Separate output buffer for cuBLAS so both results can be compared.
    float* d_cublas_matrix_c;
    cudaMalloc(&d_cublas_matrix_c, MATRIX_M * MATRIX_N * sizeof(float));

    cudaMemcpy(d_matrix_a, matrix_a, MATRIX_M * MATRIX_K * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_matrix_b, matrix_b, MATRIX_K * MATRIX_N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_matrix_c, matrix_c, MATRIX_M * MATRIX_N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_cublas_matrix_c, matrix_c, MATRIX_M * MATRIX_N * sizeof(float), cudaMemcpyHostToDevice);

    {
        Perf perf("matmul_naive");

        // x covers the N columns, y covers the M rows; ceil-div so the grid
        // covers the whole matrix even if the dims weren't multiples.
        dim3 block_size(WIDTH_BLOCK_SIZE, HEIGHT_BLOCK_SIZE);
        dim3 grid_size((MATRIX_N + block_size.x - 1) / block_size.x, (MATRIX_M + block_size.y - 1) / block_size.y);
        matmul_naive<<<grid_size, block_size>>>(d_matrix_a, d_matrix_b, d_matrix_c);
        // Fix: surface launch-configuration errors, which are otherwise silent.
        cudaError_t launch_err = cudaGetLastError();
        if (launch_err != cudaSuccess) {
            std::cerr << "matmul_naive launch failed: " << cudaGetErrorString(launch_err) << std::endl;
        }
        cudaDeviceSynchronize();
        cudaMemcpy(matrix_c, d_matrix_c, MATRIX_M * MATRIX_N * sizeof(float), cudaMemcpyDeviceToHost);
    }

    float* cublas_output = CUBlasSgemm(d_matrix_a, d_matrix_b, d_cublas_matrix_c);

    // Spot-check only the first print_count elements; tolerance 10e-3 (0.01)
    // is loose because each output is a K=512-term float accumulation.
    int print_count = 64;
    for (int i = 0; i < MATRIX_M * MATRIX_N && i < print_count; ++i) {
        if (std::abs(cublas_output[i] - matrix_c[i]) > 10e-3) {
            std::cout << "Error: cublas_output[" << i << "] = " << cublas_output[i] << ", matrix_c[" << i << "] = " << matrix_c[i]  <<
                ", diff: " << cublas_output[i] - matrix_c[i] << std::endl;
        }
    }

    // Fix: release device and host memory (everything was previously leaked).
    cudaFree(d_matrix_a);
    cudaFree(d_matrix_b);
    cudaFree(d_matrix_c);
    cudaFree(d_cublas_matrix_c);
    delete[] cublas_output;
    delete[] matrix_a;
    delete[] matrix_b;
    delete[] matrix_c;
    return 0;
}