#include <stdio.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <cublas_v2.h>
#include <time.h>
#include <pthread.h>
#include <string>

/*
  Compile:
    nvcc demo_gemm_nvcc.cu -o v100_demo_gemm -lcublas -lcurand -lpthread
  (add -I$ROCM_PATH/cuda/include -L$ROCM_PATH/cuda/lib64 only when the CUDA
  headers/libraries live under a ROCm-style install prefix)
*/

// Compare the custom-kernel result with the cublasDgemm result.
// C1, C2: column-major N x N host buffers.
// Prints and returns the maximum absolute element-wise difference, so
// callers may additionally check it against a tolerance. (Returning the
// value generalizes the old void interface; existing callers that ignore
// the return value are unaffected.)
double compare_matrices(int N, double* C1, double* C2)
{
  double max_error = 0.0;
  for (int j = 0; j < N; j++) {
    for (int i = 0; i < N; i++) {
      double error = fabs(C1[i + j * N] - C2[i + j * N]);
      if (error > max_error) {
        max_error = error;
      }
    }
  }
  printf("Max error: %.4e\n", max_error);
  return max_error;
}

// Print (at most) the 5x5 top-left corner of the column-major N x N
// matrix M, appending ellipses when the matrix exceeds the printed window.
void print_matrix(int N, double* M)
{
  const int max_size = 5;  // largest dimension to print
  const int limit = (N < max_size) ? N : max_size;
  for (int row = 0; row < limit; row++) {
    for (int col = 0; col < limit; col++) {
      printf(" %13.6f", M[row + col * N]);
    }
    if (N > max_size) {
      printf("...");  // columns were truncated
    }
    printf("\n");
  }
  if (N > max_size) {
    printf(".\n.\n.\n");  // rows were truncated
  }
}


// Tiling parameters for the shared-memory GEMM kernel.
#define Nsub 128       // edge of the C tile computed by one thread block
#define Nsub_ 16       // depth of the inner (k) tile staged in shared memory
#define BLOCKDIM_X 16  // threads per block, x
#define BLOCKDIM_Y 16  // threads per block, y
// Per-thread work: each thread computes Nx * Ny elements of a C tile.
// Parenthesized so the division expands safely inside larger expressions
// (the unparenthesized form is a classic macro hazard, e.g. under `x / Nx`).
#define Nx (Nsub / BLOCKDIM_X)
#define Ny (Nsub / BLOCKDIM_Y)


// Cooperatively copy the (i, j) tile of the column-major N x N matrix M
// into M_ij (leading dimension LD). The tile is nrows x ncols and its
// top-left element in M is (i * nrows, j * ncols). Loads are unguarded:
// the tile must lie fully inside M.
template <int nrows, int ncols>
__device__
void read_matrix(double* M, int N, int i, int j,
        double* M_ij, int LD)
{
  // Each thread copies Rrow * Rcol elements, strided by the block dims.
  constexpr int Rrow = nrows / BLOCKDIM_X;
  constexpr int Rcol = ncols / BLOCKDIM_Y;
  for (int J = 0; J < Rcol; J++) {
    for (int I = 0; I < Rrow; I++) {
      // element position inside the tile
      const int row = threadIdx.x + I * BLOCKDIM_X;
      const int col = threadIdx.y + J * BLOCKDIM_Y;
      // element position inside the full matrix
      const int Row = i * nrows + row;
      const int Col = j * ncols + col;
      // global -> shared copy; addresses are contiguous along threadIdx.x
      // since both layouts are column-major
      M_ij[row + col * LD] = M[Row + Col * N];
    } // I
  } // J
}

// Accumulate the product of the shared-memory tiles A_sub (Nsub x Nsub_)
// and B_sub (Nsub_ x Nsub) into the per-thread register accumulator c
// (Nx * Ny values). The caller must __syncthreads() before (tiles loaded)
// and after (tiles still being read) this call.
__device__
void multiply_submatrices(double* A_sub, double* B_sub,
             double* c)
{
  for (int k = 0; k < Nsub_; k++) {
    for (int J = 0; J < Ny; J++) {
      const int col = threadIdx.y + J * BLOCKDIM_Y;
      // B element is invariant over the inner I loop
      const double b_kj = B_sub[k + col * Nsub_];
      for (int I = 0; I < Nx; I++) {
        const int row = threadIdx.x + I * BLOCKDIM_X;
        c[I + J * Nx] += A_sub[row + k * Nsub] * b_kj;
      } // I
    } // J
  } // k
}


// Write the per-thread accumulator c into tile (i, j) of the column-major
// N x N matrix C. Stores are bounds-checked, so the trailing tile may
// extend past the matrix edge without corrupting memory.
__device__
void write_matrix(double* C, int N, int i, int j, double* c)
{
  for (int J = 0; J < Ny; J++) {
    for (int I = 0; I < Nx; I++) {
      const int Row = i * Nsub + threadIdx.x + I * BLOCKDIM_X;
      const int Col = j * Nsub + threadIdx.y + J * BLOCKDIM_Y;
      // guard: skip elements that fall outside the matrix
      if (Row < N && Col < N) {
        C[Row + Col * N] = c[I + J * Nx];
      }
    }
  }
}

// Tiled GEMM kernel: C = A * B for column-major N x N matrices.
// Launch config: blockDim = (BLOCKDIM_X, BLOCKDIM_Y); the grid covers
// Nblk x Nblk tiles of Nsub x Nsub via tile-stride loops; dynamic shared
// memory must be 2 * Nsub * Nsub_ * sizeof(double) bytes.
// NOTE(review): the loads in read_matrix are unguarded, so N must be a
// multiple of Nsub (and Nsub_) even though the store path is guarded.
__global__ __launch_bounds__(BLOCKDIM_X * BLOCKDIM_Y)
void matrix_multiply(int N, int Nblk, double * A, double * B,
          double * C)
{ 
 // Shared-memory tiles A_sub, B_sub hold sub-matrices of A and B.
 // (Static alternative kept for reference:)
 // __shared__ double A_sub[Nsub * Nsub_];
 // __shared__ double B_sub[Nsub_ * Nsub];
 // Carve the dynamically sized shared-memory buffer into the two tiles.
 extern __shared__ double shmem[];
 double* A_sub = (double*) shmem;
 double* B_sub = (double*) &shmem[Nsub*Nsub_];
  // Each thread block computes sub-matrix C_ij (tile-stride loops let a
  // smaller grid still cover all Nblk x Nblk tiles).
  for (int i = blockIdx.x; i < Nblk; i += gridDim.x) {
  for (int j = blockIdx.y; j < Nblk; j += gridDim.y) {
    // Each thread accumulates Nx * Ny elements of C_ij in registers.
    double c[Nx * Ny];
    for (int k = 0; k < Nx * Ny; k++) {
      c[k] = 0.0;
    }
    // March along the k tiles; assumes Nsub_ divides N.
    for (int k = 0; k < N / Nsub_; k++) {
      // stage tile A_ik from global into shared memory
      read_matrix<Nsub, Nsub_>(A, N, i, k, A_sub, Nsub);
      // stage tile B_kj from global into shared memory
      read_matrix<Nsub_, Nsub>(B, N, k, j, B_sub, Nsub_);
      __syncthreads(); // tiles fully loaded before any thread reads them
      // multiply A_ik * B_kj into the register accumulators
      multiply_submatrices(A_sub, B_sub, c);
      __syncthreads(); // all reads done before the next load overwrites
    } // k
    // commit the accumulators to C (bounds-checked stores)
    write_matrix(C, N, i, j, c);
  } // j
  } // i
}


// Per-worker arguments handed to gemm_compute via pthread_create.
typedef struct
{
  int N;   // matrix dimension
  int id;  // worker id (used to select a GPU)
} threaddata;

// Worker body for one pthread: binds to a GPU, generates two random
// N x N matrices on the device, multiplies them with cublasDgemm and
// with the custom matrix_multiply kernel, and prints the timing of each
// plus the maximum element-wise difference between the two results.
// args: heap-allocated threaddata; ownership is taken and freed here.
// Returns NULL.
void* gemm_compute(void * args)
{
  threaddata* td = (threaddata*)args;
  const int N = td->N;
  const int id = td->id;
  free(td);  // fix: the argument struct was previously leaked
  printf("N %d \n", N);
  printf("id %d \n", id);
  // Bind this thread to one of 4 devices. cudaSetDevice is the correct
  // per-thread mechanism; the old setenv("CUDA_VISIBLE_DEVICES", ...) is
  // process-wide, racy across threads, and ignored once any thread has
  // created a CUDA context.
  int deviceid = id % 4;
  if (deviceid < 0) deviceid += 4;  // fix: id % 4 can be negative in C
  cudaError_t err = cudaSetDevice(deviceid);
  if (err != cudaSuccess) {
    fprintf(stderr, "cudaSetDevice(%d): %s\n", deviceid,
            cudaGetErrorString(err));
    return NULL;
  }
  printf("device %d \n", deviceid);
  // cuRAND generator for filling A and B on the device
  curandGenerator_t gen;
  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
  curandSetPseudoRandomGeneratorSeed(gen, time(NULL));
  // events for timing
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  float dt;
  // host/device buffers (the old unused host buffer C has been dropped)
  const size_t bytes = (size_t)N * N * sizeof(double);
  double* C_kernel  = (double*)malloc(bytes);
  double* C_cudablas = (double*)malloc(bytes);
  double* A = (double*)malloc(bytes);
  double* B = (double*)malloc(bytes);
  double *dA, *dB, *dC;
  cudaMalloc(&dA, bytes);
  cudaMalloc(&dB, bytes);
  cudaMalloc(&dC, bytes);
  // generate random A and B
  cudaEventRecord(start);
  curandGenerateUniformDouble(gen, dA, (size_t)N * N);
  curandGenerateUniformDouble(gen, dB, (size_t)N * N);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&dt, start, stop);
  printf("Generating Matrices took %8.3f ms. \n", dt);
  // show the corners of A and B
  cudaMemcpy(A, dA, bytes, cudaMemcpyDeviceToHost);
  cudaMemcpy(B, dB, bytes, cudaMemcpyDeviceToHost);
  printf("Matrix A: \n");
  print_matrix(N, A);
  printf("\n");
  printf("Matrix B: \n");
  print_matrix(N, B);
  printf("\n");
  // cuBLAS reference: C = A * B (column-major, no transpose)
  cublasHandle_t handle;
  cublasCreate(&handle);
  double alpha = 1.0;
  double beta = 0.0;
  // warm-up launch so the timed run excludes one-time init cost
  cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, 2, 2, 2,
        &alpha, dA, N, dB, N, &beta, dC, N);
  // timed cuBLAS run
  cudaEventRecord(start);
  cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, N, N,
        &alpha, dA, N, dB, N, &beta, dC, N);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&dt, start, stop);
  printf("Matrix multiplication (cublasDgemm)"
     "took %8.3f ms.\n ", dt);
  // keep the cuBLAS result on the host for comparison
  cudaMemcpy(C_cudablas, dC, bytes, cudaMemcpyDeviceToHost);
  // clear dC so stale cuBLAS output cannot mask kernel bugs
  cudaMemset(dC, 0, bytes);
  // timed custom-kernel run
  dim3 blockSize(BLOCKDIM_X, BLOCKDIM_Y, 1);
  int Nblk = (N + Nsub - 1) / Nsub;
  dim3 numBlocks(Nblk, Nblk, 1);
  int shmemSize = 2 * Nsub * Nsub_ * sizeof(double);
  cudaEventRecord(start);
  matrix_multiply<<<numBlocks, blockSize, shmemSize>>>
          (N, Nblk, dA, dB, dC);
  err = cudaGetLastError();  // fix: launch errors were silently dropped
  if (err != cudaSuccess) {
    fprintf(stderr, "matrix_multiply launch: %s\n",
            cudaGetErrorString(err));
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&dt, start, stop);
  printf("Matrix multiplication (kernel) took % 8.3f ms.\n", dt);
  // copy the kernel result back and compare with cuBLAS
  cudaMemcpy(C_kernel, dC, bytes, cudaMemcpyDeviceToHost);
  compare_matrices(N, C_kernel, C_cudablas);
  // cleanup (fix: C_kernel and C_cudablas were previously leaked)
  cudaFree(dA);
  cudaFree(dB);
  cudaFree(dC);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  curandDestroyGenerator(gen);
  cublasDestroy(handle);
  free(A);
  free(B);
  free(C_kernel);
  free(C_cudablas);
  return 0;
}
 

// Entry point: reads N from argv, spawns NUM_WORKERS threads, each
// running a full GEMM benchmark. Worker ids 0..NUM_WORKERS-1 spread the
// work across devices (gemm_compute maps id to a GPU via id % 4).
int main(int argc, char * * argv)
{
  if (argc != 2) {
    fprintf(stderr, "Usage: %s N \n", argv[0]);
    return 1;
  }
  int N = atoi(argv[1]);
  if (N < 2) {
    fprintf(stderr, "N must be at least 2.\n");
    return 1;
  }
  enum { NUM_WORKERS = 10 };
  pthread_t thread[NUM_WORKERS];
  int started = 0;
  for (int i = 0; i < NUM_WORKERS; i++) {
    threaddata* args = (threaddata*)malloc(sizeof(threaddata));
    if (args == NULL) break;
    args->N = N;
    // fix: id was N/1024 - 1, which is identical for every thread (and
    // negative for N < 1024), so all workers targeted the same -- possibly
    // invalid -- device. The loop index spreads workers across devices.
    args->id = i;
    // args is intentionally not freed here; the worker thread reads it
    // after this loop iteration ends.
    if (pthread_create(&thread[started], NULL, gemm_compute, args) != 0) {
      // fix: creation failures were ignored, then the dead slot was joined
      fprintf(stderr, "pthread_create failed for worker %d\n", i);
      free(args);
      break;
    }
    started++;
  }
  // join only the threads that were actually created
  for (int i = 0; i < started; i++) {
    pthread_join(thread[i], NULL);
  }
  return 0;
}
