#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <stdio.h>

#include <cmath>    // fabs (verification)
#include <cstdlib>  // malloc/free/exit
#include <cstring>  // memset
#include <random>
#include <string>   // std::string (measureSgemv)

// GEMV: General Matrix-Vector Multiplication
// C = A * B, (kM x kK) * (kK x 1) -> (kM x 1)
// A: kM x kK matrix, row-major (BLAS_T)
// B: kK x 1 vector
// C: kM x 1 vector

constexpr int kM = 8192;
constexpr int kK = 4096;

// constexpr int kWarpSize = 32;
// constexpr int kBlockSize = 256;
// constexpr uint32_t kFullMask = 0xffffffff;

constexpr int kWarpSize = 64;
constexpr int kBlockSize = 256;
constexpr uint32_t kFullMask = 0xffffffff;



// Test the performance of GEMV on GPU
constexpr int kIter = 16;
constexpr bool kVerify_CPU = false; // whether or not verify the result against CPU
constexpr bool kVerify_BLAS = true; // whether or not verify the result and compare speedup against cublas


// load global memory in 64-bit manner
#define LOAD_GLOBAL_64(ptr) (reinterpret_cast<float2*>(ptr)[0])
#define LOAD_GLOBAL_128(ptr) (reinterpret_cast<float4*>(ptr)[0])

#define ROW_MAJOR(row, col, ld) ((row) * (ld) + (col))
#define CEIL_DIV(a, b) ((a) + (b) - 1) / (b)

template <unsigned int blockSize>
__device__ __forceinline__ float warpReduceSum(float sum){
    if(blockSize >= 32) sum += __shfl_xor_sync(0xffffffff,sum,16,kWarpSize);
    if(blockSize >= 16) sum += __shfl_xor_sync(0xffffffff,sum,8, kWarpSize);
    if(blockSize >=  8) sum += __shfl_xor_sync(0xffffffff,sum,4, kWarpSize);
    if(blockSize >=  4) sum += __shfl_xor_sync(0xffffffff,sum,2, kWarpSize);
    if(blockSize >=  2) sum += __shfl_xor_sync(0xffffffff,sum,1, kWarpSize);
    return sum;
}

// This will output the proper CUDA error strings in the event
// that a CUDA host call returns an error
#define checkGPUErrors(val) __check((val), #val, __FILE__, __LINE__)

void __check(cudaError_t result, char const *const func, const char *const file,
           int const line) {
  if (result) {
    fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", file, line,
            static_cast<unsigned int>(result), cudaGetErrorName(result), func);
    exit(EXIT_FAILURE);
  }
}



// #if kVerify_BLAS
#include <cublas_v2.h>
// #endif

// Gemv_0: naive implementation
// Each thread computes one element of C
template <int ProblemM, int ProblemK>
__global__ void gemv_0(float *A, float *B, float *C) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < ProblemM) {
    float sum = 0.0f;
    for (int j = 0; j < ProblemK; j++) {
      sum += A[i * ProblemK + j] * B[j];
    }
    C[i] = sum;
  }
}


// Gemv_1: each warp computes 1 elements of C. Load global with 128-bit load. Prefers kK >= kWarpSize
// kM and kK should be align4 
// Shape:  [kM, (kK/(4*kWarpSize), kWarpSize, 4)]
// Stride: [kK, ((4*kWarpSize),    4,         1)]
//         wid     col            lid
template <int ProblemM, int ProblemK>
__global__ void gemv_1(float *A, float *B, float *C) {
  int wid = (blockIdx.x * blockDim.x + threadIdx.x) / kWarpSize;
  int lid = threadIdx.x % kWarpSize;
  int row = wid;  // each warp process 1 row of A
  // int col = threadIdx.x % kWarpSize * 4;
  if (row < ProblemM) {
    float sum = 0.0f;
    for (int col = lid * 4; col < ProblemK; col += kWarpSize * 4) {
      int A_index = ROW_MAJOR(row, col, ProblemK);
      float4 A4 = LOAD_GLOBAL_128(A + A_index);
      float4 B4 = LOAD_GLOBAL_128(B + col);
      sum += A4.x * B4.x + A4.y * B4.y + A4.z * B4.z + A4.w * B4.w;
    }
    sum = warpReduceSum<kWarpSize>(sum);
    if (lid == 0) {
      C[row] = sum;
    }
  }
}

// Gemv_1_1: each warp computes 1 elements of C. Load global with 64-bit load. Prefers kK >> kWarpSize
// kM and kK should be align2 
template <int ProblemM, int ProblemK>
__global__ void gemv_1_1(float *A, float *B, float *C) {
  int wid = (blockIdx.x * blockDim.x + threadIdx.x) / kWarpSize;
  int row = wid;  // each warp process 1 row of A
  if (row < ProblemM) {
    float sum = 0.0f;
    for (int col = threadIdx.x % kWarpSize * 2; col < ProblemK; col += kWarpSize * 2) {
      int A_index = ROW_MAJOR(row, col, ProblemK);
      float2 A2 = LOAD_GLOBAL_64(A + A_index);
      float2 B2 = LOAD_GLOBAL_64(B + col);
      sum += A2.x * B2.x + A2.y * B2.y;
    }
    sum = warpReduceSum<kWarpSize>(sum);
    if (threadIdx.x % kWarpSize == 0) {
      C[row] = sum;
    }
  }
}

// Gemv_8: each warp computes 8 elements of C. Load global with 128-bit load.
// kM and kK should be align4
// Shape:  [(kM/8,     8),       (kK/(kWarpSize/2), kWarpSize/8, 4)]
// Stride: [(8*kK, kWarpSize/2), ((4*kWarpSize),    4,           1)]
//           wid    lid_m            col+            lid_k
template <int ProblemM, int ProblemK>
__global__ void gemv_8(float *A, float *B, float *C) {
  int wid = (blockIdx.x * blockDim.x + threadIdx.x) / kWarpSize;
  int lid_m = (threadIdx.x % kWarpSize) / (kWarpSize / 8);
  int lid_k = threadIdx.x % (kWarpSize / 8);
  int row = wid * 8 + lid_m;  // each warp process 8 row of A.
  if (row < ProblemM) {
    float sum = 0.0f;
    for (int col = lid_k * 4; col < ProblemK; col += kWarpSize/2) {
      int A_index = ROW_MAJOR(row, col, ProblemK);
      float4 A4 = LOAD_GLOBAL_128(A + A_index);
      float4 B4 = LOAD_GLOBAL_128(B + col);   // TODO change to shared memory in case of L1 cache not used on C500
      sum += A4.x * B4.x + A4.y * B4.y + A4.z * B4.z + A4.w * B4.w;
    }
    sum = warpReduceSum<kWarpSize / 8>(sum);
    if (lid_k == 0) {
      C[row] = sum;
    }
  }
}

using SgemmKernel = void (*)(float*, float*, float*);

template <int ProblemM, int ProblemK>
void measureSgemv(std::string kernel_name, SgemmKernel func, int blockSize, int gridSize, float *deviceA, float *deviceB, float *deviceC) {
  cudaEvent_t start, stop;
  checkGPUErrors(cudaEventCreate(&start));
  checkGPUErrors(cudaEventCreate(&stop));
  checkGPUErrors(cudaEventRecord(start));
  for(int i = 0; i < kIter; i++)
    func<<<gridSize, blockSize>>>(deviceA, deviceB, deviceC);
  checkGPUErrors(cudaEventRecord(stop));
  checkGPUErrors(cudaEventSynchronize(stop));
  float milliseconds = 0;
  checkGPUErrors(cudaEventElapsedTime(&milliseconds, start, stop));
  float exe_time = milliseconds / kIter;
  float dram_througput = (ProblemM * ProblemK * sizeof(float) + ProblemK * sizeof(float)) / exe_time / 1e6;
  printf("%d, %d, %s, %f, %f\n", ProblemM, ProblemK, kernel_name.c_str(), exe_time, dram_througput);
}


int main() {
  if constexpr (kM % 4 != 0) {
    printf("kM should be multiple of 4\n");
    exit(1);
  }
  cudaDeviceProp deviceProp;
  checkGPUErrors(cudaGetDeviceProperties(&deviceProp, 0));
  printf("Using device %s\n", deviceProp.name);
  
  // Allocate memory
  float *hostA = (float *)malloc(kM * kK * sizeof(float));
  float *hostB = (float *)malloc(kK * sizeof(float));
  float *hostC = (float *)malloc(kM * sizeof(float));
  float *deviceA, *deviceB, *deviceC;
  checkGPUErrors(cudaMalloc((void **)&deviceA, kM * kK * sizeof(float)));
  checkGPUErrors(cudaMalloc((void **)&deviceB, kK * sizeof(float)));
  checkGPUErrors(cudaMalloc((void **)&deviceC, kM * sizeof(float)));
  std::random_device rd; 
  std::mt19937 gen(rd());
  std::uniform_real_distribution<> dis(1.0, 10.0);
  // for (int i = 0; i < kM; i++) {
  //   for (int j = 0; j < kK; j++) {
  //     hostA[ROW_MAJOR(i, j, kK)] = (float)i;
  //   }
  // }
  for (int i = 0; i < kM * kK; i++) {
    hostA[i] = dis(gen);
  }
  for (int i = 0; i < kK; i++) {
    // hostB[i] = 1.0f;
    hostB[i] = dis(gen);
  }
  memset(hostC, 0, kM * sizeof(float));
  checkGPUErrors(cudaMemcpy(deviceA, hostA, kM * kK * sizeof(float), cudaMemcpyHostToDevice));
  checkGPUErrors(cudaMemcpy(deviceB, hostB, kK * sizeof(float), cudaMemcpyHostToDevice));
  checkGPUErrors(cudaMemset(deviceC, 0, kM * sizeof(float)));

  printf("M, K, Kernel, Time(ms), DRAM_throughput(GB/s)\n");
  // Launch Gemm_0 kernel
  // {
  //   int blockSize = kBlockSize;
  //   int gridSize = CEIL_DIV(kM, blockSize);
  //   measureSgemv<kM, kK>("gemv_0", gemv_0<kM, kK>, blockSize, gridSize, deviceA, deviceB, deviceC);
  // }
  
    // Launch Gemm_1 kernel
  {
    int blockSize = kBlockSize;
    int gridSize = CEIL_DIV(kM, blockSize/kWarpSize);
    measureSgemv<kM, kK>("gemv_1", gemv_1<kM, kK>, blockSize, gridSize, deviceA, deviceB, deviceC);
  }
  {
    int blockSize = kBlockSize;
    int gridSize = CEIL_DIV(kM, blockSize/kWarpSize);
    measureSgemv<kM, kK>("gemv_1_1", gemv_1_1<kM, kK>, blockSize, gridSize, deviceA, deviceB, deviceC);
  }
  {
    int blockSize = kBlockSize;
    int gridSize = CEIL_DIV(kM, blockSize/kWarpSize*8);
    measureSgemv<kM, kK>("gemv_8", gemv_8<kM, kK>, blockSize, gridSize, deviceA, deviceB, deviceC);
  }


  if(kVerify_BLAS || kVerify_CPU)
    // For verification
    checkGPUErrors(cudaMemcpy(hostC, deviceC, kM * sizeof(float), cudaMemcpyDeviceToHost));

  ////////////////////////////////////
  // Verify the result againt CPU
  if (kVerify_CPU) {
    float *hostC2 = (float *)malloc(kM * sizeof(float));
    for (int i = 0; i < kM; i++) {
      hostC2[i] = 0;
      for (int j = 0; j < kK; j++) {
        hostC2[i] += hostA[i * kK + j] * hostB[j];
      }
    }
    bool passed = true;
    double eps = 1e-6;
    for (int i = 0; i < kM; i++) {
      double abs_err = fabs(hostC[i] - hostC2[i]);
      double dot_length = kK;
      double abs_val = fabs(hostC2[i]);
      double rel_err = abs_err / abs_val / dot_length;
      if (rel_err > eps) {
        printf("Mismatch with CPU: hostC[%d] = %f, hostC2[%d] = %f\n", i, hostC[i], i, hostC2[i]);
        passed = false;
        break;
      }
    }
    if (passed) {
      printf("Verification againt CPU passed!\n");
    }
    free(hostC2);
  }

  ////////////////////////////////////
  // Verify the result againt cublas
  if (kVerify_BLAS)  {
    cudaEvent_t start, stop;
    checkGPUErrors(cudaEventCreate(&start));
    checkGPUErrors(cudaEventCreate(&stop));
    cublasHandle_t handle;
    cublasCreate(&handle);
    float *hostC2 = (float *)malloc(kM * sizeof(float));
    float alpha = 1.0f;
    float beta = 0.0f;
    float *deviceC2;
    checkGPUErrors(cudaMalloc((void **)&deviceC2, kM * sizeof(float)));
    checkGPUErrors(cudaMemset(deviceC2, 0, kM * sizeof(float)));
    checkGPUErrors(cudaEventRecord(start));
    for(int i = 0; i < kIter; i++) {
      cublasSgemv(handle, CUBLAS_OP_T, 
      kK, kM, &alpha, 
      deviceA, kK, deviceB, 1, &beta, deviceC2, 1);
    }
    checkGPUErrors(cudaEventRecord(stop));
    checkGPUErrors(cudaEventSynchronize(stop));
    float milliseconds = 0;
    checkGPUErrors(cudaEventElapsedTime(&milliseconds, start, stop));
    float exe_time = milliseconds / kIter;
    float dram_througput = (kM * kK * sizeof(float) + kK * sizeof(float)) / exe_time / 1e6;
    printf("%d, %d, %s, %f, %f\n", kM, kK, "cublasSgemv", exe_time, dram_througput);
    checkGPUErrors(cudaMemcpy(hostC2, deviceC2, kM * sizeof(float), cudaMemcpyDeviceToHost));
    bool passed = true;
    double eps = 1e-6;
    // printf(" hostC[%d] = %f, hostC2[%d] = %f\n", 0, hostC[0], 0, hostC2[0]);
    for (int i = 0; i < kM; i++) {
      double abs_err = fabs(hostC[i] - hostC2[i]);
      double dot_length = kK;
      double abs_val = fabs(hostC2[i]);
      double rel_err = abs_err / abs_val / dot_length;
      if (rel_err > eps) {
        printf("Mismatch with cublas: hostC[%d] = %f, hostC2[%d] = %f\n", i, hostC[i], i, hostC2[i]);
        passed = false;
        break;
      }
    }
    if (passed) {
      printf("Verification againt cublas passed!\n");
    }
    free(hostC2);
    checkGPUErrors(cudaFree(deviceC2));
    cublasDestroy(handle);
  }
  

  // Free memory
  free(hostA);
  free(hostB);
  free(hostC);
  checkGPUErrors(cudaFree(deviceA));
  checkGPUErrors(cudaFree(deviceB));
  checkGPUErrors(cudaFree(deviceC));


  return 0;
}