#include <cstdio>
#include <cstring>
#include <math.h>
#include <stdlib.h>

#include <cuda.h>
#include <cuda_runtime.h>

// Row-major element accessors for the host helpers below; Y is the column
// count (row stride) captured from the enclosing scope. Arguments are
// parenthesized so expressions like A(i + 1, j) expand with the intended
// precedence (the original `i * Y + j` form would mis-expand them).
#define A(i, j) A[(i) * Y + (j)]
#define B(i, j) B[(i) * Y + (j)]

// Fill the X-by-Y row-major matrix A with uniform random values in [-1, 1).
// Uses drand48(), so results are reproducible via srand48(). The original
// mixed int/size_t loop counters and used double literals (2.0 / 1.0), which
// promoted every element computation to double; this stays in float.
void random_matrix(float *A, const int X, const int Y)
{
    for (int i = 0; i < X; i++)
        for (int j = 0; j < Y; j++)
            A[i * Y + j] = 2.0f * (float)drand48() - 1.0f;
}

// Reference row-major GEMM on the host: C[M x N] = A[M x K] * B[K x N].
// A, B and C must not alias; C is fully overwritten.
void cpu_sgemm(float *A, float *B, float *C, const int M, const int K, const int N)
{
    for (int row = 0; row < M; row++)
    {
        const float *a_row = A + (size_t)row * K;
        float *c_row = C + (size_t)row * N;
        for (int col = 0; col < N; col++)
        {
            float acc = 0.f;
            for (int kk = 0; kk < K; kk++)
                acc += a_row[kk] * B[(size_t)kk * N + col];
            c_row[col] = acc;
        }
    }
}

// Return the maximum elementwise |A - B| over two X-by-Y row-major matrices,
// printing (once) the first element at which the running max exceeds 0.5.
// Fix: the original called `abs`, which binds to the C integer abs in plain C
// (and is implementation-dependent in C++ via <math.h>), truncating every
// fractional difference below 1.0 to zero; fabsf is the correct float call.
float cal_diff(float *A, float *B, int X, int Y)
{
    float max_diff = 0.0f;
    int printed = 0;
    for (int i = 0; i < X; i++)
    {
        for (int j = 0; j < Y; j++)
        {
            float diff = fabsf(A[i * Y + j] - B[i * Y + j]);
            if (diff > max_diff)
                max_diff = diff;
            // Report only the first offending element to avoid log spam.
            if (printed == 0 && max_diff > 0.5f)
            {
                printf("error: i %d j %d diff %f got %f expect %f\n", i, j, max_diff, A[i * Y + j], B[i * Y + j]);
                printed = 1;
            }
        }
    }
    return max_diff;
}

// Reinterpret the given float lvalue as a float4 so a single 16-byte vector
// load/store is issued; `pointer` must be 16-byte aligned or the access is UB.
#define FETCH_FLOAT4(pointer) (reinterpret_cast<float4 *>(&(pointer))[0])

// Tiled shared-memory SGEMM: each block computes one M_NUM_PER_BLOCK x
// N_NUM_PER_BLOCK tile of C = A * B, with A (M x K), B (K x N), C (M x N)
// all row-major. Each thread produces NUM_PER_THREAD adjacent elements of a
// single output row, using 16-byte float4 loads/stores.
//
// Launch contract (not checked): blockDim = (N_NUM_PER_BLOCK / NUM_PER_THREAD,
// M_NUM_PER_BLOCK); blockIdx.x tiles the N dimension and blockIdx.y the M
// dimension; M, N, K must be multiples of the per-block tile sizes;
// NUM_PER_THREAD must be 4 to match the float4 width; A, B, C must be
// 16-byte aligned. The float4 A-tile load also assumes the block's tx range
// spans K_NUM_PER_BLOCK columns (i.e. blockDim.x * NUM_PER_THREAD ==
// K_NUM_PER_BLOCK) — TODO confirm when tiles are not all equal.
template <unsigned int M_NUM_PER_BLOCK,
          unsigned int K_NUM_PER_BLOCK,
          unsigned int N_NUM_PER_BLOCK,
          unsigned int NUM_PER_THREAD>
__global__ void cuda_sgemm(float *A, float *B, float *C, const int M, const int K, const int N)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Per-block staging tiles for the current A panel and B panel.
    __shared__ float A_block[M_NUM_PER_BLOCK][K_NUM_PER_BLOCK];
    __shared__ float B_block[K_NUM_PER_BLOCK][N_NUM_PER_BLOCK];
    // Top-left corners of this block's row panel of A, column panel of B,
    // and output tile of C.
    float *A_begin = A + K * blockIdx.y * M_NUM_PER_BLOCK;
    float *B_begin = B + blockIdx.x * N_NUM_PER_BLOCK;
    float *C_begin = C + N * blockIdx.y * M_NUM_PER_BLOCK + blockIdx.x * N_NUM_PER_BLOCK;

    // Register accumulators for this thread's NUM_PER_THREAD output columns.
    float tmp[NUM_PER_THREAD] = {0.f};

    // Walk the K dimension one K_NUM_PER_BLOCK-wide tile at a time.
    for (size_t k = 0; k < K; k += K_NUM_PER_BLOCK)
    {
        // Each thread vector-loads 4 consecutive floats of row ty into shared.
        FETCH_FLOAT4(A_block[ty][tx * NUM_PER_THREAD]) = FETCH_FLOAT4(A_begin[K * ty + tx * NUM_PER_THREAD + k]);
        FETCH_FLOAT4(B_block[ty][tx * NUM_PER_THREAD]) = FETCH_FLOAT4(B_begin[N * (ty + k) + tx * NUM_PER_THREAD]);
        __syncthreads(); // both tiles fully written before anyone reads them

        for (size_t i = 0; i < NUM_PER_THREAD; i++)
        {
            for (size_t s = 0; s < K_NUM_PER_BLOCK; s++)
            {
                tmp[i] += A_block[ty][s] * B_block[s][tx * NUM_PER_THREAD + i];
            }
        }
        __syncthreads(); // all reads done before the next iteration overwrites
    }

    // Write the finished NUM_PER_THREAD-wide sliver of row ty back to C.
    for (size_t i = 0; i < NUM_PER_THREAD; i++)
    {
        C_begin[N * ty + tx * NUM_PER_THREAD + i] = tmp[i];
    }
}

// Correctness harness: run the same M x K x N SGEMM on CPU and GPU, then
// compare elementwise and report pass/fail.
int main(int argc, char const *argv[])
{
    constexpr int M = 128;
    constexpr int N = 128;
    constexpr int K = 128;

    // Abort on any CUDA runtime error. Kernel launches return no status, so
    // they are checked via cudaGetLastError() + cudaDeviceSynchronize().
    auto cuda_check = [](cudaError_t err, const char *what) {
        if (err != cudaSuccess)
        {
            fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
            exit(-1);
        }
    };

    size_t size_A = sizeof(float) * M * K;
    size_t size_B = sizeof(float) * N * K;
    size_t size_C = sizeof(float) * M * N;

    float *matrix_A_host = (float *)malloc(size_A);
    float *matrix_B_host = (float *)malloc(size_B);
    float *matrix_C_host_cpu = (float *)malloc(size_C);
    float *matrix_C_host_gpu = (float *)malloc(size_C);
    if (matrix_A_host == NULL || matrix_B_host == NULL ||
        matrix_C_host_cpu == NULL || matrix_C_host_gpu == NULL)
    {
        fprintf(stderr, "host allocation failed\n");
        exit(-1);
    }

    random_matrix(matrix_A_host, M, K);
    random_matrix(matrix_B_host, K, N);
    // memset fills bytes and takes an int: pass 0, not the float literal 0.f
    // the original used (which only worked via implicit conversion).
    memset(matrix_C_host_cpu, 0, size_C);
    memset(matrix_C_host_gpu, 0, size_C);

    float *matrix_A_device;
    float *matrix_B_device;
    float *matrix_C_device;

    cuda_check(cudaMalloc((void **)&matrix_A_device, size_A), "cudaMalloc A");
    cuda_check(cudaMalloc((void **)&matrix_B_device, size_B), "cudaMalloc B");
    cuda_check(cudaMalloc((void **)&matrix_C_device, size_C), "cudaMalloc C");

    cuda_check(cudaMemcpy(matrix_A_device, matrix_A_host, size_A, cudaMemcpyHostToDevice), "copy A H2D");
    cuda_check(cudaMemcpy(matrix_B_device, matrix_B_host, size_B, cudaMemcpyHostToDevice), "copy B H2D");

    // CPU reference result (runs while nothing depends on the GPU yet).
    cpu_sgemm(matrix_A_host, matrix_B_host, matrix_C_host_cpu, M, K, N);

    constexpr int M_NUM_PER_BLOCK = 32;
    constexpr int K_NUM_PER_BLOCK = 32;
    constexpr int N_NUM_PER_BLOCK = 32;
    constexpr int NUM_PER_THREAD = 4;

    // blockDim.x covers one N tile in float4-wide steps; blockDim.y covers
    // the M tile one row per thread: (32/4, 32) = (8, 32).
    dim3 block(N_NUM_PER_BLOCK / NUM_PER_THREAD, M_NUM_PER_BLOCK);
    // The kernel uses blockIdx.x for the N dimension and blockIdx.y for M;
    // the original grid had the two swapped (masked only because M == N).
    dim3 grid(N / N_NUM_PER_BLOCK, M / M_NUM_PER_BLOCK);

    cuda_sgemm<M_NUM_PER_BLOCK, K_NUM_PER_BLOCK, N_NUM_PER_BLOCK, NUM_PER_THREAD><<<grid, block>>>(matrix_A_device, matrix_B_device, matrix_C_device, M, K, N);
    cuda_check(cudaGetLastError(), "kernel launch");
    cuda_check(cudaDeviceSynchronize(), "kernel execution");
    cuda_check(cudaMemcpy(matrix_C_host_gpu, matrix_C_device, size_C, cudaMemcpyDeviceToHost), "copy C D2H");

    float diff = cal_diff(matrix_C_host_gpu, matrix_C_host_cpu, M, N);

    if (diff > 0.5f)
    {
        printf("ans is wrong\n");
        exit(-1);
    }
    else
    {
        printf("ans is right\n");
    }

    free(matrix_A_host);
    free(matrix_B_host);
    free(matrix_C_host_cpu);
    free(matrix_C_host_gpu);

    cuda_check(cudaFree(matrix_A_device), "cudaFree A");
    cuda_check(cudaFree(matrix_B_device), "cudaFree B");
    cuda_check(cudaFree(matrix_C_device), "cudaFree C");

    return 0;
}