#include <assert.h>
#include <stdlib.h>

// CUDA runtime
#include <cublas_v2.h>
#include <cuda_runtime.h>

#include "helper.h"



// Naive SGEMM: C = A * B with A (m x k), B (k x n), C (m x n),
// all row-major and device-resident. One thread computes one element of C.
//
// Expected launch: 2-D grid/block with gridDim.x * blockDim.x >= m and
// gridDim.y * blockDim.y >= n (see SGEMM_GPU). Overhanging threads exit
// through the bounds check, so sizes that don't divide the block are safe.
//
// "Naive" because adjacent threadIdx.x values load a[] at stride k
// (uncoalesced); a/b are marked const __restrict__ so the compiler can
// route their loads through the read-only data cache.
__global__ void sgemm_0_naive(int m, int n, int k,
        const float *__restrict__ a, const float *__restrict__ b,
        float *__restrict__ c) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;  // row of C, in [0, m)
  int y = blockIdx.y * blockDim.y + threadIdx.y;  // column of C, in [0, n)
  if (x < m && y < n) {
    float sum = 0.f;
    for (int i = 0; i < k; ++i) {
      sum += a[x * k + i] * b[i * n + y];
    }
    c[x * n + y] = sum;
  }
}

// Launches the hand-written naive SGEMM kernel: C = A * B with A (m x k),
// B (k x n), C (m x n), row-major, all pointers device-resident.
// The launch goes to the default stream; no synchronization is performed here.
//
// NOTE(review): `handle` is not used by this path — presumably the signature
// is shared with a cuBLAS-backed variant; confirm against the callers.
void SGEMM_GPU(cublasHandle_t handle, int m, int n, int k, float *d_A, float *d_B, float *d_C) {
  (void)handle;  // unused by the hand-written kernel; silence the warning

  constexpr int BLOCK = 16;
  dim3 block(BLOCK, BLOCK);
  // Ceil-division so partial tiles at the edges are still covered; the
  // kernel's bounds check discards the overhanging threads.
  dim3 grid((m + BLOCK - 1) / BLOCK, (n + BLOCK - 1) / BLOCK);

  // Example: m = n = 128 gives an 8x8 grid (64 blocks) of 16x16 = 256
  // threads each, i.e. 128*128 = 16384 threads — one per element of C.
  sgemm_0_naive<<<grid, block>>>(m, n, k, d_A, d_B, d_C);

  // Kernel launches don't return a status; a bad configuration (e.g. grid
  // dimension 0 from m or n == 0) only surfaces via cudaGetLastError().
  cudaError_t err = cudaGetLastError();
  assert(err == cudaSuccess);
  (void)err;  // keep NDEBUG (release) builds warning-free
}