#include <cstdio>
#include <cstdlib>
#include <sys/time.h>

#include <cuda.h>

#include "include/utils.h"
using namespace std;


// Tiled dense matrix multiply: C = A * B for row-major N x N doubles.
//
// Expected launch layout:
//   block = (BLOCK_SIZE_CUDA, BLOCK_SIZE_CUDA, 1)
//   grid  = (N/BLOCK_SIZE_CUDA, N/BLOCK_SIZE_CUDA, 1)
// Precondition: N is a multiple of BLOCK_SIZE_CUDA (there are no edge guards).
// `val` is accepted for interface compatibility but is currently unused.
__global__ void multiply_gpu_kernel(double *A, double *B, double *C, const int N,const double val) {
    const int tx = threadIdx.x;  // fast-varying lane index -> column direction
    const int ty = threadIdx.y;

    // Map threadIdx.x (which varies across a warp) onto the contiguous
    // matrix dimension so that global loads/stores are coalesced: adjacent
    // lanes read adjacent doubles. The grid is square (N/BS x N/BS), so this
    // x/y remapping covers the same tiles and computes the identical product.
    const int row = blockIdx.y * BLOCK_SIZE_CUDA + ty;
    const int col = blockIdx.x * BLOCK_SIZE_CUDA + tx;

    __shared__ double subA[BLOCK_SIZE_CUDA][BLOCK_SIZE_CUDA];
    __shared__ double subB[BLOCK_SIZE_CUDA][BLOCK_SIZE_CUDA];

    double acc = 0.0;
    // Walk the shared K dimension one tile at a time.
    for (int b = 0; b < N; b += BLOCK_SIZE_CUDA) {
        // Stage one tile of A and one of B; tx indexes the contiguous
        // dimension of both loads (coalesced).
        subA[ty][tx] = A[row * N + (b + tx)];
        subB[ty][tx] = B[(b + ty) * N + col];
        __syncthreads();  // tiles fully written before any lane reads them

        for (int k = 0; k < BLOCK_SIZE_CUDA; ++k) {
            acc += subA[ty][k] * subB[k][tx];
        }
        __syncthreads();  // all reads done before the next iteration overwrites
    }
    C[row * N + col] = acc;
}

// Abort with a diagnostic if a CUDA runtime call failed. Kernel-launch
// configuration errors and asynchronous execution errors surface here via
// cudaGetLastError()/cudaDeviceSynchronize() in the caller below.
static void checkCuda(cudaError_t err, const char *what) {
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Host wrapper: uploads a and b (row-major N x N doubles), runs the tiled
// multiply kernel, and downloads the product into c. Prints the wall-clock
// time spent in the kernel plus the device-to-host copy.
// Precondition: N is a multiple of BLOCK_SIZE_CUDA (grid math truncates).
void multiply_gpu(const int N, double * a, double * b, double * c){
    const size_t bytes = sizeof(double) * (size_t)N * (size_t)N;

    double *deviceA = NULL;
    double *deviceB = NULL;
    double *deviceC = NULL;
    checkCuda(cudaMalloc((void **)&deviceA, bytes), "cudaMalloc A");
    checkCuda(cudaMalloc((void **)&deviceB, bytes), "cudaMalloc B");
    checkCuda(cudaMalloc((void **)&deviceC, bytes), "cudaMalloc C");
    checkCuda(cudaMemcpy(deviceA, a, bytes, cudaMemcpyHostToDevice), "copy A to device");
    checkCuda(cudaMemcpy(deviceB, b, bytes, cudaMemcpyHostToDevice), "copy B to device");

    dim3 dimBlock(BLOCK_SIZE_CUDA, BLOCK_SIZE_CUDA, 1);
    dim3 dimGrid(N/BLOCK_SIZE_CUDA, N/BLOCK_SIZE_CUDA, 1);

    timeval start, end;
    gettimeofday(&start, 0);
    multiply_gpu_kernel<<<dimGrid,dimBlock>>>(deviceA,deviceB,deviceC,N,12);
    // Launch-configuration errors only show up via cudaGetLastError();
    // in-kernel faults surface at the synchronize.
    checkCuda(cudaGetLastError(), "kernel launch");
    checkCuda(cudaDeviceSynchronize(), "kernel execution");
    // cudaMemcpy is blocking, so no further synchronization is needed
    // before reading c on the host.
    checkCuda(cudaMemcpy(c, deviceC, bytes, cudaMemcpyDeviceToHost), "copy C to host");
    gettimeofday(&end, 0);
    printf("%.5lfsec consumed\n",end.tv_sec-start.tv_sec+(end.tv_usec-start.tv_usec)*1e-6);

    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
}
