/****************************************************************************
*	MatrixMul:
*				0:   c0 = a0 * b0
*				1:   c1 = c0 * b1
*				2:   c2 = c1 * b2
*				3:   c3 = c2 * b3
*				4:   c4 = c3 * b4
*				5:   c5 = c4 * b5
*
****************************************************************************/


#include<stdio.h>
#include<cuda_runtime.h>
#include<cutil.h>
#include<cutil_inline.h>
#include<stdlib.h>
#include<string.h>
#include<time.h>
#define TILE_WIDTH 16
#define N 63
#define T 6
#define BLOCKNUM ( N*N )
const int width = TILE_WIDTH * N;
unsigned int timer = 0;
__device__ int2 map[BLOCKNUM*T];
#define WARP 32
#define SYNC 
#define TYPE char
__device__ void matrixMul ( float* a, float* b, float* c, int bid, float* share ) {
    /***************************************************************************
     *    Task : Compute one TILE_WIDTH x TILE_WIDTH tile of c = a * b.
     *
     *    a, b, c : width x width row-major matrices in global memory
     *              (width is the file-scope constant TILE_WIDTH * N).
     *    bid     : logical tile id, decomposed into (bx, by) so callers may
     *              remap CUDA blocks to tiles arbitrarily (see the map table).
     *    share   : caller-provided shared buffer holding at least
     *              2 * TILE_WIDTH * TILE_WIDTH floats (a-tile then b-tile).
     *
     *    Precondition: blockDim == (TILE_WIDTH, TILE_WIDTH); width is a
     *    multiple of TILE_WIDTH (no bounds checks are performed).
     ***************************************************************************/
    int bx = bid / ( width / TILE_WIDTH ), by = bid - bx * ( width / TILE_WIDTH );
    int tx = threadIdx.x, ty = threadIdx.y;
    float* sa = share, *sb = share + TILE_WIDTH * TILE_WIDTH;
    int row = by * TILE_WIDTH + ty, col = bx * TILE_WIDTH + tx;
    float val = 0.0f;   /* float literal: avoid accidental double arithmetic */
    for ( int x = 0;x < width / TILE_WIDTH;++x ) {
        /* Stage one tile of a and one tile of b into shared memory. */
        sa[ty*TILE_WIDTH+tx] = a[row*width+ ( x*TILE_WIDTH+tx ) ];
        sb[ty*TILE_WIDTH+tx] = b[ ( x*TILE_WIDTH+ty ) *width+col];
        __syncthreads();    /* tiles fully written before anyone reads them  */
        for ( int y = 0;y < TILE_WIDTH;++y ) val += sa[ty*TILE_WIDTH+y] * sb[y*TILE_WIDTH+tx];
        __syncthreads();    /* everyone done reading before next overwrite   */
    }
    c[row*width+col] = val;
    return;
}
__global__ void singleMatrixMulKernel ( float* a, float* b, float* c, int width ) {
    /***************************************************************************
     *    Task : Compute c = a * b (width x width, row major), one output
     *    tile per block.
     *
     *    Launch contract: gridDim.x == (width/TILE_WIDTH)^2 (1-D grid),
     *    blockDim == (TILE_WIDTH, TILE_WIDTH); width must be a multiple of
     *    TILE_WIDTH — there are no bounds checks.
     ***************************************************************************/
    int bid = blockIdx.x;
    int bx = bid / ( width / TILE_WIDTH ), by = bid - bx * ( width / TILE_WIDTH );
    int tx = threadIdx.x, ty = threadIdx.y;
    int row = by * TILE_WIDTH + ty, col = bx * TILE_WIDTH + tx;
    float val = 0.0f;   /* float literal: avoid accidental double arithmetic */
    __shared__ float sa[TILE_WIDTH][TILE_WIDTH], sb[TILE_WIDTH][TILE_WIDTH];
    for ( int x = 0;x < width / TILE_WIDTH;++x ) {
        /* Stage the next pair of tiles; both loads are coalesced. */
        sa[ty][tx] = a[row*width+ ( x*TILE_WIDTH+tx ) ];
        sb[ty][tx] = b[ ( x*TILE_WIDTH+ty ) *width+col];
        __syncthreads();    /* tiles fully written before anyone reads them  */
        for ( int y = 0;y < TILE_WIDTH;++y ) val += sa[ty][y] * sb[y][tx];
        __syncthreads();    /* everyone done reading before next overwrite   */
    }
    c[row*width+col] = val;
    return;
}
__global__ void multiMatrixMulKernel ( float* a0, float* b0, float* c0, volatile TYPE* out0,
                                       float* a1, float* b1, float* c1, volatile TYPE* out1,
                                       float* a2, float* b2, float* c2, volatile TYPE* out2,
                                       float* a3, float* b3, float* c3, volatile TYPE* out3,
                                       float* a4, float* b4, float* c4, volatile TYPE* out4,
                                       float* a5, float* b5, float* c5, volatile TYPE* out5,
                                       int width, volatile TYPE* tag ) {
    /***************************************************************************
     * Run the whole six-multiply chain inside ONE kernel launch:
     *     c0 = a0*b0, c1 = c0*b1, c2 = c1*b2, c3 = c2*b3, c4 = c3*b4, c5 = c4*b5
     *
     * The host fills the __device__ map table so each CUDA block knows which
     * task (0..T-1) it belongs to and which tile (bid) it computes.  Ordering
     * between tasks is enforced with spin-waits on device flags:
     *   - out<k>[bid] == 0 marks tile bid of task k finished (host preset -1);
     *   - tag[k]      == 1 marks task k completely finished.
     * The grid must supply exactly BLOCKNUM blocks per task, and this scheme
     * requires all blocks to be resident (or fairly scheduled) — it is a
     * classic ad-hoc inter-block sync, not a cooperative launch.
     *
     * Fix vs. original: __threadfence() is issued before publishing any flag,
     * so a consumer block can never observe a flag before the tile data is
     * visible device-wide.  The per-task comments were also wrong (they said
     * c2 = a2*b2 etc.).
     ***************************************************************************/
    __shared__ float share[TILE_WIDTH*TILE_WIDTH*2];
    int block_id = blockIdx.x + gridDim.x * blockIdx.y;
    int bid = map[ block_id ].x, taskid = map[ block_id ].y;
    int tid = threadIdx.x + threadIdx.y * blockDim.x;
    if ( taskid < 0 || taskid >= T ) return;    /* guard against a bad map entry */
    /* Operand table: task k computes dst[k] = src[k] * B[k].  Tasks 1..5 read
     * the previous task's output, hence the wait on tag[taskid-1] below. */
    float* src[T]          = { a0, c0, c1, c2, c3, c4 };
    float* B[T]            = { b0, b1, b2, b3, b4, b5 };
    float* dst[T]          = { c0, c1, c2, c3, c4, c5 };
    volatile TYPE* done[T] = { out0, out1, out2, out3, out4, out5 };
    if ( taskid > 0 ) {
        /* One thread spins until the producer task is complete, then the
         * block-wide barrier releases everyone. */
        if ( tid == 0 ) while ( tag[taskid-1] == 0 );
        __syncthreads();
    }
    matrixMul ( src[taskid], B[taskid], dst[taskid], bid, share );
    /* Make this block's tile of dst visible device-wide BEFORE publishing
     * completion; without this fence a consumer could read a stale tile. */
    __threadfence();
    if ( tid == 0 ) done[taskid][bid] = 0;
    if ( bid == BLOCKNUM - 1 ) {
        /* The designated last block waits for every tile of this task, then
         * raises the task-level tag that unblocks the next task. */
        if ( tid < WARP )
            for ( int i = tid; i < BLOCKNUM; i += WARP ) while ( done[taskid][i] != 0 );
        SYNC;
        __threadfence();    /* order the flag reads/any stores before tag set */
        if ( tid == 0 ) tag[taskid] = 1;
    }
    return;
}

void initMatrix ( float*a, int width ) {
    /* Fill a width*width matrix with pseudo-random values in [0, 1).
     *
     * Fix vs. original: the RNG is seeded exactly once per process.  The old
     * code called srand(rand()*time(NULL)) on EVERY invocation — rand() was
     * consumed before any deliberate seed, the product can overflow, and
     * repeated re-seeding within the same second degrades the sequence. */
    static bool seeded = false;
    if ( !seeded ) {
        srand ( ( unsigned ) time ( NULL ) );
        seeded = true;
    }
    for ( int i = 0;i < width*width;++i )  a[i] = ( rand() % 10000 ) / 10000.0f;
    return;
}

void checkResult ( float* a, float* b, float*c, int width ) {
    /* CPU spot-check of c against a*b on a stride-2 grid of entries (every
     * other row/column) so the O(width^3) verification stays affordable.
     * An entry is accepted when it passes EITHER the absolute OR the
     * relative 1e-6 tolerance; the first miss aborts with a message. */
    for ( int row = 0; row < width; row += 2 ) {
        for ( int col = 0; col < width; col += 2 ) {
            float expect = 0.0;
            for ( int k = 0; k < width; ++k )
                expect += a[row*width+k] * b[k*width+col];
            float got = c[row*width+col];
            bool absOk = fabs ( expect - got ) <= 1e-6;
            bool relOk = fabs ( got / expect - 1.0 ) <= 1e-6;
            if ( !absOk && !relOk ) {
                printf ( "Wrong !!!: %.3f -> %.3f\n", expect, got );
                return;
            }
        }
    }
    printf ( "Right!   " );
    return;
}
void singleMatrixMul ( int width ) {
    /* Baseline benchmark: run the six-stage multiply chain as six separate
     * kernel launches (c0 = a0*b0, then c[t] = c[t-1]*b[t]), time the whole
     * sequence with the CUTIL timer, and verify on the CPU for small N. */
    printf ( "#####################  Test Begin : Single Task  #####################\n" );
    const int bytes = width * width * sizeof ( float );
    float *h_a[T], *h_b[T], *h_c[T];
    float *d_a[T], *d_b[T], *d_c[T];
    for ( int t = 0; t < T; ++t ) {
        h_a[t] = ( float* ) malloc ( bytes );
        h_b[t] = ( float* ) malloc ( bytes );
        h_c[t] = ( float* ) malloc ( bytes );
        cudaMalloc ( ( void** ) &d_a[t], bytes );
        cudaMalloc ( ( void** ) &d_b[t], bytes );
        cudaMalloc ( ( void** ) &d_c[t], bytes );
        initMatrix ( h_a[t], width );
        initMatrix ( h_b[t], width );
        cudaMemcpy ( d_a[t], h_a[t], bytes, cudaMemcpyHostToDevice );
        cudaMemcpy ( d_b[t], h_b[t], bytes, cudaMemcpyHostToDevice );
        cudaMemset ( d_c[t], 0, bytes );
    }
    const int tiles = width / TILE_WIDTH;
    dim3 dimGrid ( tiles * tiles, 1 );
    dim3 dimBlock ( TILE_WIDTH, TILE_WIDTH );
    printf ( "Total Block Number:\t\t%9d\n", dimGrid.x*dimGrid.y );
    cutResetTimer ( timer );
    cutStartTimer ( timer );
    /* Stage 0 reads a0; every later stage reads the previous stage's output. */
    singleMatrixMulKernel <<< dimGrid, dimBlock>>> ( d_a[0], d_b[0], d_c[0], width );
    for ( int t = 1; t < T; ++t )
        singleMatrixMulKernel <<< dimGrid, dimBlock>>> ( d_c[t-1], d_b[t], d_c[t], width );
    cutilCheckMsg ( "Fail in kernel execution...\n" );
    cudaThreadSynchronize();
    cutStopTimer ( timer );
    printf ( "Total GPU time:\t\t\t\t%9.3f ms\n\n", cutGetTimerValue ( timer ) );
    for ( int t = 0; t < T; ++t )
        cudaMemcpy ( h_c[t], d_c[t], width*width*sizeof ( float ), cudaMemcpyDeviceToHost );
    if ( N <= 64 ) {
        /* CPU verification mirrors the launch chain stage by stage. */
        checkResult ( h_a[0], h_b[0], h_c[0], width );
        for ( int t = 1; t < T; ++t )
            checkResult ( h_c[t-1], h_b[t], h_c[t], width );
        printf ( "\n" );
    }
    for ( int t = 0; t < T; ++t ) {
        cudaFree ( d_a[t] );
        cudaFree ( d_b[t] );
        cudaFree ( d_c[t] );
        free ( h_a[t] );
        free ( h_b[t] );
        free ( h_c[t] );
    }
    printf ( "#####################       Test Finished        #####################\n\n\n" );
    return;
}

void swap ( int& a, int & b ) {
    /* Plain temporary swap.  The original XOR-swap zeroes the value when
     * both references alias the same object (a^=a leaves 0). */
    int t = a;
    a = b;
    b = t;
}
void swap ( unsigned short& a, unsigned short & b ) {
    /* Plain temporary swap.  The original XOR-swap zeroes the value when
     * both references alias the same object (a^=a leaves 0). */
    unsigned short t = a;
    a = b;
    b = t;
}
void multiMatrixMul ( int width ) {
    /***************************************************************************
     * Fused benchmark: all six chained multiplies run inside ONE kernel
     * launch.  The host uploads a (tile id, task id) pair per CUDA block to
     * the __device__ map table; per-block completion flags (d_out, preset to
     * -1) and per-task tags let the kernel enforce task ordering on device.
     *
     * Fixes vs. original:
     *  - cudaMemset cleared (T+1)*sizeof(TYPE) bytes of the tag array while
     *    only T*sizeof(TYPE) were allocated — a one-element device-heap
     *    overflow; now exactly the allocation is cleared.
     *  - the unused pinned `status` buffer (allocated, zeroed, freed, never
     *    read) was removed.
     ***************************************************************************/
    printf ( "##################### Test Begin : Multiple Task #####################\n" );
    float *d_a[T], *d_b[T], *d_c[T];
    float *c[T], *a[T], *b[T];
    int2 *h_map;
    TYPE *tag, *d_out[T];
    float globalMemory = 0.0, pinnedMemory = 0.0;   /* bookkeeping only */
    int blockNumber[T], totalBlockNumber = 0;
    int mem_size = width * width * sizeof ( float );
    cudaMalloc ( ( void** ) &tag, T*sizeof ( TYPE ) );
    globalMemory += T * sizeof ( TYPE );
    cudaMemset ( tag, 0, T*sizeof ( TYPE ) );   /* clear exactly what we own */
    for ( int i = 0;i < T;++i ) {
        a[i] = ( float* ) malloc ( mem_size );
        b[i] = ( float* ) malloc ( mem_size );
        cudaMallocHost ( ( void** ) &c[i], mem_size );  /* pinned result buffer */
        cudaMalloc ( ( void** ) &d_a[i], mem_size );
        cudaMalloc ( ( void** ) &d_b[i], mem_size );
        cudaMalloc ( ( void** ) &d_c[i], mem_size );
        globalMemory += mem_size * 3.0;
        pinnedMemory += mem_size;
        initMatrix ( a[i], width ), initMatrix ( b[i], width );
        cudaMemcpy ( d_a[i], a[i], mem_size, cudaMemcpyHostToDevice );
        cudaMemcpy ( d_b[i], b[i], mem_size, cudaMemcpyHostToDevice );
        cudaMemset ( d_c[i], 0, mem_size );
        blockNumber[i] = width / TILE_WIDTH * ( width / TILE_WIDTH );
        totalBlockNumber += blockNumber[i];
        /* Per-block "done" flags; 0xFF bytes == -1 == not finished yet. */
        cudaMalloc ( ( void** ) &d_out[i], blockNumber[i]*sizeof ( int ) );
        globalMemory += blockNumber[i] * sizeof ( int );
        cudaMemset ( d_out[i], -1, blockNumber[i]*sizeof ( int ) );
    }
    printf ( "Matrix Size = %d*%d\n", N*TILE_WIDTH, N*TILE_WIDTH );
    printf ( "Total Block Number:\t\t%9d\n", totalBlockNumber );
    /* Flatten (task, tile) pairs into the device-side map in task order. */
    h_map = ( int2* ) malloc ( totalBlockNumber * sizeof ( int2 ) );
    for ( int i = 0, k = 0;i < T;++i ) {
        for ( int j = 0;j < blockNumber[i];++j ) {
            h_map[k].y = i, h_map[k].x = j, ++k;
        }
    }
    cudaMemcpyToSymbol ( map, h_map, totalBlockNumber*sizeof ( int2 ) );
    dim3 dimGrid ( totalBlockNumber / T , T ), dimBlock ( TILE_WIDTH, TILE_WIDTH );
    printf ( "\nKernel is starting...\n" );
    cutResetTimer ( timer ), cutStartTimer ( timer );
    multiMatrixMulKernel <<< dimGrid, dimBlock>>> (
        d_a[0], d_b[0], d_c[0], d_out[0],
        d_a[1], d_b[1], d_c[1], d_out[1],
        d_a[2], d_b[2], d_c[2], d_out[2],
        d_a[3], d_b[3], d_c[3], d_out[3],
        d_a[4], d_b[4], d_c[4], d_out[4],
        d_a[5], d_b[5], d_c[5], d_out[5],
        width, tag );
    cudaThreadSynchronize();
    cutStopTimer ( timer );
    printf ( "Total GPU time:\t\t\t\t%9.3f ms\n\n", cutGetTimerValue ( timer ) );
    for ( int i = 0;i < T;++i )
        cudaMemcpy ( c[i], d_c[i], width*width*sizeof ( float ), cudaMemcpyDeviceToHost );
    if ( N <= 64 ) {
        /* CPU verification mirrors the on-device chain stage by stage. */
        checkResult ( a[0], b[0], c[0], width );
        checkResult ( c[0], b[1], c[1], width );
        checkResult ( c[1], b[2], c[2], width );
        checkResult ( c[2], b[3], c[3], width );
        checkResult ( c[3], b[4], c[4], width );
        checkResult ( c[4], b[5], c[5], width );
        printf ( "\n" );
    }
    for ( int i = 0;i < T;++i ) {
        cudaFree ( d_a[i] );
        cudaFree ( d_b[i] );
        cudaFree ( d_c[i] );
        cudaFree ( d_out[i] );
        free ( a[i] );
        free ( b[i] );
        cudaFreeHost ( c[i] );
    }
    cudaFree ( tag );
    free ( h_map );
    printf ( "#####################       Test Finished        #####################\n\n\n" );
    return;
}


void chooseDevice ( int k ) {
    /* Select CUDA device k and print its key properties.
     *
     * Fix vs. original: cudaDeviceProp::totalGlobalMem is in BYTES, so the
     * "GB" figure needs three divisions by 1024 (the old code printed MB
     * labelled as GB).  The manual "+0.5" rounding was dropped: %.f already
     * rounds to nearest, so adding 0.5 double-rounded the value. */
    cudaDeviceProp devProp;
    cudaSetDevice ( k );
    cudaGetDeviceProperties ( &devProp, k );
    printf ( "Running on %s, CUDA Version %d\nGlobal Memory %.fGB, Shared Memory %.fKB, Warp size %d\n",
             devProp.name,
             CUDART_VERSION,
             devProp.totalGlobalMem / 1024.0 / 1024.0 / 1024.0,
             devProp.sharedMemPerBlock / 1024.0,
             devProp.warpSize );
    return;
}

int main() {
    /* Prefer device 1 (the original test machine's second GPU) but fall back
     * to device 0 so the benchmark still runs on single-GPU systems — the
     * old hard-coded chooseDevice(1) failed there. */
    int deviceCount = 0;
    cudaGetDeviceCount ( &deviceCount );
    chooseDevice ( deviceCount > 1 ? 1 : 0 );
    cutCreateTimer ( &timer );
    /* width == TILE_WIDTH * N (file-scope constant). */
    singleMatrixMul ( width );
    multiMatrixMul ( width );
    return 0;
}