#ifndef CUDA_GEMM_HPP
#define CUDA_GEMM_HPP

/*
Header declaring all GEMM entry points: a CPU reference implementation
and the CUDA kernel launch wrappers (v0 .. v6).
*/

#include <cstddef>        // size_t — do not rely on cuda_runtime.h pulling it in transitively
#include <cuda_runtime.h> // cudaStream_t

// CPU reference GEMM. Presumably computes C = alpha * A*B + beta * C with
// A (m x k), B (k x n), C (m x n) — TODO confirm layout (row- vs column-major)
// against the definition; only the signature is visible here.
void cpu_gemm(size_t m, size_t n, size_t k, float *A, float *B, float *C, float alpha, float beta);

// v0: naive GEMM kernel (one output element per thread, presumably).
// All pointers are device pointers; the launch is enqueued on `stream`.
void cuda_gemm_v0(size_t m, size_t n, size_t k, float *A, float *B, float *C, float alpha, float beta, cudaStream_t stream);

// v1: global-memory-access variant.
// NOTE(review): original author's question — "why does this count as global
// memory?" — verify the access pattern against the .cu implementation.
void cuda_gemm_v1(size_t m, size_t n, size_t k, float *A, float *B, float *C, float alpha, float beta, cudaStream_t stream);

// v2: block tiling with vectorized loads.
void cuda_gemm_v2(size_t m, size_t n, size_t k, float* A, float* B, float* C, float alpha, float beta, cudaStream_t stream);

// v3: block tiling in shared memory + 1D register tiling with vectorized loads.
void cuda_gemm_v3(size_t m, size_t n, size_t k, float* A, float* B, float* C, float alpha, float beta, cudaStream_t stream);

// v4: same description as v3 in the original — presumably a tuning variant
// (tile sizes / loop structure); TODO document the actual difference from v3.
void cuda_gemm_v4(size_t m, size_t n, size_t k, float* A, float* B, float* C, float alpha, float beta, cudaStream_t stream);

// v5: same description as v3/v4 in the original — presumably another tuning
// variant; TODO document the actual difference.
void cuda_gemm_v5(size_t m, size_t n, size_t k, float* A, float* B, float* C, float alpha, float beta, cudaStream_t stream);

// v6: adds warp-level tiling on top of the block/register tiling.
void cuda_gemm_v6(size_t m, size_t n, size_t k, float* A, float* B, float* C, float alpha, float beta, cudaStream_t stream);

// v7 (disabled): experiment using float2 vectorized accesses.
// void cuda_gemm_v7(size_t m, size_t n, size_t k, float* A, float* B, float* C, float alpha, float beta, cudaStream_t stream);
#endif