#pragma comment ( lib, "cuda.lib" )
#pragma comment ( lib, "cudart.lib" )

#include "matrix.h"
#include <cstdio>

#define TILE_WIDTH 16

//----------------------------------------------------------------------------
//
// multiply two matrices: A * B = [x * y] * [y * z] = [x * z]
// the reversed (end-aligned) traversal keeps the product valid when a
// column of ones is removed from or added to matrix A
//
//----------------------------------------------------------------------------
__global__ void MatrixMulMatrix( float* dIn1, float* dIn2, float* dOut, int numRows1, int numCols1, int numRows2, int numCols2 ) {

	// Tiled product dOut = dIn1 * dIn2, one thread per output element.
	// Expected launch: blockDim = (TILE_WIDTH, TILE_WIDTH),
	// gridDim = (ceil(numCols2/TILE_WIDTH), ceil(numRows1/TILE_WIDTH)).
	//
	// The inner dimension is traversed from its END backwards (tem1/tem2 count
	// down), out-of-range tiles of dIn1 are padded with 1 and those of dIn2
	// with 0. For pair index k both paddings fall on the same k when
	// numCols1 == numRows2, so they contribute 1*0 = 0. When the inner dims
	// differ by one, the padding realizes the "remove or add a column of ones
	// in matrix A" trick described by the original (Vietnamese) header:
	//  - numCols1 == numRows2 - 1: the 1-padding acts as an implicit extra
	//    leading ones-column in dIn1;
	//  - numCols1 == numRows2 + 1: the leading column of dIn1 meets the
	//    0-padding and is effectively dropped.

	__shared__ float dsIn1 [TILE_WIDTH][TILE_WIDTH];
	__shared__ float dsIn2 [TILE_WIDTH][TILE_WIDTH];

	// Global row/column of the output element this thread produces.
	int row = blockIdx.y*TILE_WIDTH + threadIdx.y;
	int col = blockIdx.x*TILE_WIDTH + threadIdx.x;

	int tx = threadIdx.x;
	int ty = threadIdx.y;

	// Number of TILE_WIDTH steps needed to cover the inner dimension.
	int loop = ( numRows2 + TILE_WIDTH - 1 )/TILE_WIDTH;
	float s = 0;
	
	for( int p = 1; p <= loop; ++p ) {

		// Column of dIn1 for this thread's tile element, counted from the end.
		int tem1 = numCols1 - p*TILE_WIDTH + tx;
		if( row < numRows1 && tem1 >= 0  ) {

			dsIn1[ty][tx] = dIn1[ row*numCols1 + tem1 ];// r: row; c: tem1
		}
		else { dsIn1[ty][tx] = 1; }	// 1-padding (see header note)

		// Row of dIn2 for this thread's tile element, counted from the end.
		int tem2 = numRows2 - p*TILE_WIDTH + ty;
		if( tem2 >= 0 && col < numCols2 ) {

			dsIn2[ty][tx] = dIn2[ tem2*numCols2 + col ];// r: tem2; c: col
		}
		else { dsIn2[ty][tx] = 0; }	// 0-padding neutralizes the 1-padding
		
		// make sure all threads finished loading the tile
		__syncthreads();

		// accumulate the partial dot product for this tile
		for( int k = 0; k < TILE_WIDTH; ++k ) {

			s += dsIn1[ty][k] * dsIn2[k][tx];
		}

		// make sure all threads are done reading before the tile is reused
		__syncthreads();
	}

	if( row < numRows1 && col < numCols2 ) {

		dOut[ row*numCols2 + col ] = s;
	}
}

extern "C"
void resultMatrixMulMatrix( float* dIn1, float* dIn2, float* dOut, int numRows1, int numCols1, int numRows2, int numCols2 ) {

	// One thread per output element of the numRows1 x numCols2 result,
	// tiled into TILE_WIDTH x TILE_WIDTH blocks.
	const int gx = ( numCols2 + TILE_WIDTH - 1 )/TILE_WIDTH;
	const int gy = ( numRows1 + TILE_WIDTH - 1 )/TILE_WIDTH;

	MatrixMulMatrix <<< dim3( gx, gy ), dim3( TILE_WIDTH, TILE_WIDTH ) >>>( dIn1, dIn2, dOut, numRows1, numCols1, numRows2, numCols2 );
}
//----------------------------------------------------------------------------
//
// multiply two matrices: A * B' = [x * y] * [z * y]' = [x * z]
// the reversed (end-aligned) traversal keeps the product valid when a
// column of ones is removed from or added to matrix A
//
//----------------------------------------------------------------------------
__global__ void MatrixMulMatrixT( float* dIn1, float* dIn2, float* dOut, int numRows1, int numCols1, int numCols2, int numRows2 ) {

	// Tiled product dOut = dIn1 * dIn2', one thread per output element.
	// dIn1 is numRows1 x numCols1, dIn2 is numRows2 x numCols2; the result is
	// numRows1 x numRows2 (row1 indexes output rows, col2 output columns).
	// NOTE the parameter order here is (..., numCols2, numRows2), matching
	// the launch in resultMatrixMulMatrixT.
	// The inner dimension (numCols1 / numCols2) is traversed from the end,
	// with dIn1 tiles padded with 1 and dIn2 tiles padded with 0 — the same
	// "remove or add a column of ones in matrix A" trick as MatrixMulMatrix.

	__shared__ float dsIn1 [TILE_WIDTH][TILE_WIDTH];
	__shared__ float dsIn2 [TILE_WIDTH][TILE_WIDTH];

	int row1 = blockIdx.y*TILE_WIDTH + threadIdx.y;	// output row
	int row2 = blockIdx.x*TILE_WIDTH + threadIdx.y;	// row of dIn2 loaded by this thread
	int col2 = blockIdx.x*TILE_WIDTH + threadIdx.x;	// output column

	int tx = threadIdx.x;
	int ty = threadIdx.y;

	// Number of TILE_WIDTH steps needed to cover the inner dimension.
	int loop = ( numCols2 + TILE_WIDTH - 1 )/TILE_WIDTH;
	float s = 0;
	
	for( int p = 1; p <= loop; ++p ) {

		// Column of dIn1, counted from the end of the inner dimension.
		int tem1 = numCols1 - p*TILE_WIDTH + tx;
		if( row1 < numRows1 && tem1 >= 0 ) {

			dsIn1[ty][tx] = dIn1[ row1*numCols1 + tem1 ];// r: row1; c: tem1
		}
		else { dsIn1[ty][tx] = 1; }	// 1-padding (see header note)

		// Column of dIn2, counted from the end of the inner dimension.
		int tem2 = numCols2 - p*TILE_WIDTH + tx;
		if( tem2 >= 0 && row2 < numRows2 ) {

			dsIn2[ty][tx] = dIn2[ row2*numCols2 + tem2 ];// r: row2; c: tem2
		}
		else { dsIn2[ty][tx] = 0; }	// 0-padding neutralizes the 1-padding
		
		// make sure all threads finished loading the tile
		__syncthreads();

		// dsIn2 is read transposed ([tx][k]) to realize the B' product
		for( int k = 0; k < TILE_WIDTH; ++k ) {

			s += dsIn1[ty][k] * dsIn2[tx][k];
		}

		// make sure all threads are done reading before the tile is reused
		__syncthreads();
	}
	
	if( row1 < numRows1 && col2 < numRows2 ) {

		dOut[ row1*numRows2 + col2 ] = s;
	}
}

extern "C"
void resultMatrixMulMatrixT( float* dIn1, float* dIn2, float* dOut, int numRows1, int numCols1, int numRows2, int numCols2 ) {

	// One thread per output element of the numRows1 x numRows2 result,
	// tiled into TILE_WIDTH x TILE_WIDTH blocks. Note the kernel's trailing
	// parameters are (numCols2, numRows2), so the last two args are swapped.
	const int gx = ( numRows2 + TILE_WIDTH - 1 )/TILE_WIDTH;
	const int gy = ( numRows1 + TILE_WIDTH - 1 )/TILE_WIDTH;

	MatrixMulMatrixT <<< dim3( gx, gy ), dim3( TILE_WIDTH, TILE_WIDTH ) >>>( dIn1, dIn2, dOut, numRows1, numCols1, numCols2, numRows2 );
}
//----------------------------------------------------------------------------
//
// multiply two matrices: A' * B = [y * x]' * [y * z] = [x * z]
// the reversed (end-aligned) traversal keeps the product valid when a
// column of ones is removed from or added to matrix A
//
//----------------------------------------------------------------------------
__global__ void MatrixTMulMatrix( float* dIn1, float* dIn2, float* dOut, int numCols1, int numRows1, int numRows2, int numCols2 ) {

	// Tiled product dOut = dIn1' * dIn2, one thread per output element.
	// dIn1 is numRows1 x numCols1, so dIn1' is numCols1 x numRows1 and the
	// result is numCols1 x numCols2 (row1 ranges over numCols1). NOTE the
	// parameter order here is (..., numCols1, numRows1, ...), matching the
	// launch in resultMatrixTMulMatrix.
	// The inner dimension (numRows1 / numRows2) is traversed from the end,
	// with dIn1 tiles padded with 1 and dIn2 tiles padded with 0 — the same
	// "remove or add a column of ones in matrix A" trick as MatrixMulMatrix.

  	__shared__ float dsIn1 [TILE_WIDTH][TILE_WIDTH];
	__shared__ float dsIn2 [TILE_WIDTH][TILE_WIDTH];

	int row1 = blockIdx.y*TILE_WIDTH + threadIdx.y;	// output row (a column of dIn1)
	int col1 = blockIdx.y*TILE_WIDTH + threadIdx.x;	// column of dIn1 loaded by this thread
	int col2 = blockIdx.x*TILE_WIDTH + threadIdx.x;	// output column

	int tx = threadIdx.x;
	int ty = threadIdx.y;

	// Number of TILE_WIDTH steps needed to cover the inner dimension.
	int loop = ( numRows2 + TILE_WIDTH - 1 )/TILE_WIDTH;
	float s = 0;
	
	for( int p = 1; p <= loop; ++p ) {

		// Row of dIn1, counted from the end of the inner dimension.
		int tem1 = numRows1 - p*TILE_WIDTH + ty;
		if( col1 < numCols1 && tem1 >= 0 ) {

			dsIn1[ty][tx] = dIn1[ tem1*numCols1 + col1 ];// r: tem1; c: col1
		}
		else { dsIn1[ty][tx] = 1; }	// 1-padding (see header note)

		// Row of dIn2, counted from the end of the inner dimension.
		int tem2 = numRows2 - p*TILE_WIDTH + ty;
		if( tem2 >= 0 && col2 < numCols2 ) {

			dsIn2[ty][tx] = dIn2[ tem2*numCols2 + col2 ];// r: tem2; c: col2
		}
		else { dsIn2[ty][tx] = 0; }	// 0-padding neutralizes the 1-padding
		
		// make sure all threads finished loading the tile
		__syncthreads();

		// dsIn1 is read transposed ([k][ty]) to realize the A' product
		for( int k = 0; k < TILE_WIDTH; ++k ) {

			s += dsIn1[k][ty] * dsIn2[k][tx];
		}

		// make sure all threads are done reading before the tile is reused
		__syncthreads();
	}

	if( row1 < numCols1 && col2 < numCols2 ) {

		dOut[ row1*numCols2 + col2 ] = s;
	}
}

extern "C"
void resultMatrixTMulMatrix( float* dIn1, float* dIn2, float* dOut, int numRows1, int numCols1, int numRows2, int numCols2 ) {

	// One thread per output element of the numCols1 x numCols2 result,
	// tiled into TILE_WIDTH x TILE_WIDTH blocks. The kernel expects its
	// dimension arguments as (numCols1, numRows1, numRows2, numCols2).
	const int gx = ( numCols2 + TILE_WIDTH - 1 )/TILE_WIDTH;
	const int gy = ( numCols1 + TILE_WIDTH - 1 )/TILE_WIDTH;

	MatrixTMulMatrix <<< dim3( gx, gy ), dim3( TILE_WIDTH, TILE_WIDTH ) >>>( dIn1, dIn2, dOut, numCols1, numRows1, numRows2, numCols2 );
}

//----------------------------------------------------------------------------
//
// empty kernel
// used to warm up / initialize the GPU
//
//----------------------------------------------------------------------------
// Does nothing; per the banner above it is launched once to warm up the GPU
// (paying the CUDA context-creation cost before real kernels run).
__global__ void empty(  ){  }

//----------------------------------------------------------------------------
//
// addColum kernel
// prepends a column holding the value f on the left of the matrix
//
//----------------------------------------------------------------------------
__global__ void addColum( int nrows, int ncols, float* arr1, float* arr2, int f ){

	// Build arr2 (nrows x ncols) from arr1 (nrows x (ncols-1)) by prepending
	// a column filled with f: arr2[r][0] = f, arr2[r][c] = arr1[r][c-1].
	// Launch: 1D grid, one thread per element of arr2.
	//
	// Fix: the source index was written as the PRODUCT (row+1)*(col-1),
	// which scrambles every row past the first; the correct row-major index
	// into the (ncols-1)-wide source is row*(ncols-1) + (col-1).
	int id = blockIdx.x*blockDim.x + threadIdx.x;
	int numElements = nrows*ncols;

	int row = id/ncols;
	int col = id%ncols;

	if( id < numElements ) {
	
		if( col != 0 ) {

			// arr1 has one column fewer; shift the source one column left
			arr2[id] = arr1[ row*( ncols - 1 ) + ( col - 1 ) ];
		}
		else { arr2[id] = f; }	// new leftmost column
	}
} 

extern "C"
void addNewColum( int nrows, int ncols, float* arr1, float* arr2, int f ) {

	// Launch one thread per element of the nrows x ncols destination matrix.
	const int total = nrows*ncols;
	const int blocks = ( total + MATRIX::CTA_SIZE - 1 )/MATRIX::CTA_SIZE;

	addColum<<< blocks, MATRIX::CTA_SIZE >>>( nrows, ncols, arr1, arr2, f );
}

//----------------------------------------------------------------------------
//
// subColum kernel
// removes the leftmost column of the matrix
//
//----------------------------------------------------------------------------
__global__ void sudColum( int nrows, int ncols, float* arr1, float* arr2 ){

	// Build arr2 (nrows x (ncols-1)) from arr1 (nrows x ncols) by dropping
	// the leftmost column: arr2[r][c-1] = arr1[r][c] for c >= 1.
	// Launch: 1D grid, one thread per element of arr1.
	//
	// Fix: the destination index was written as the PRODUCT (row+1)*(col-1);
	// the correct row-major index into the (ncols-1)-wide destination is
	// row*(ncols-1) + (col-1).
	int id = blockIdx.x*blockDim.x + threadIdx.x;
	int numElements = nrows*ncols;

	int row = id/ncols;
	int col = id%ncols;

	if( id < numElements ) {
	
		// threads on column 0 simply drop their element
		if( col != 0 ) {

			arr2[ row*( ncols - 1 ) + ( col - 1 ) ] = arr1[id];
		}
	}
} 

extern "C"
void SubColum( int nrows, int ncols, float* arr1, float* arr2 ) {

	// Launch one thread per element of the nrows x ncols source matrix.
	const int total = nrows*ncols;
	const int blocks = ( total + MATRIX::CTA_SIZE - 1 )/MATRIX::CTA_SIZE;

	sudColum<<< blocks, MATRIX::CTA_SIZE >>>( nrows, ncols, arr1, arr2 );
}

//----------------------------------------------------------------------------
//
// sigmoid kernel
// computes the sigmoid of a matrix; float4 vectors are used so global
// memory reads are as wide as possible.
//
//----------------------------------------------------------------------------
__global__ void sigmoid( int numElements, float4* arr ){

	// In-place logistic sigmoid 1/(1+e^-x), one float4 (4 floats) per thread.
	// numElements counts FLOATS and need not be a multiple of 4; the last
	// partial vector is handled lane by lane.
	// NOTE(review): arr[id] is read as a 16-byte vector, so the allocation
	// must be padded to a whole float4 when numElements % 4 != 0 — confirm.
	//
	// Fix: the tail-lane conditions were inverted (d >= k instead of d <= k),
	// skipping valid lanes and touching out-of-range ones; the dead `d >= 4`
	// branch is removed. Also uses expf/1.0f to stay in single precision.
	int id = blockIdx.x*blockDim.x + threadIdx.x;
	int ld = (id+1) << 2;	// one past this thread's last float index

	if( ld <= numElements ) {
	
		// fully valid vector: one load, transform, one store
		float4 t = arr[id];

		t.x = 1.0f/( 1.0f + expf( -t.x ) );
		t.y = 1.0f/( 1.0f + expf( -t.y ) );
		t.z = 1.0f/( 1.0f + expf( -t.z ) );
		t.w = 1.0f/( 1.0f + expf( -t.w ) );

		arr[id] = t;
	}
	else if( ld - numElements < 4 ) {
	
		// d lanes (1..3) hang past the end; only the first 4-d lanes are valid
		int d = ld - numElements;
		
		arr[id].x = 1.0f/( 1.0f + expf( -arr[id].x ) );	// always valid (d <= 3)
		if( d <= 2 ) { arr[id].y = 1.0f/( 1.0f + expf( -arr[id].y ) ); }
		if( d <= 1 ) { arr[id].z = 1.0f/( 1.0f + expf( -arr[id].z ) ); }
		// .w is never valid in this branch
	}
} 

extern "C"
void Sigmoid( int nsize, float* arr ) {

	// The kernel processes 4 floats (one float4) per thread, so size the
	// grid by float4 count; the original launched ~4x the needed threads.
	int numVec = ( nsize + 3 )/4;
	sigmoid<<< ( numVec + MATRIX::CTA_SIZE - 1 )/MATRIX::CTA_SIZE, MATRIX::CTA_SIZE >>>( nsize, (float4*)arr );
}

//----------------------------------------------------------------------------
//
// logarit kernel
// computes the natural logarithm of a matrix; float4 vectors are used so
// global memory reads are as wide as possible.
//
//----------------------------------------------------------------------------
__global__ void logarit( int numElements, float4* arr ) {

	// In-place natural logarithm, one float4 (4 floats) per thread.
	// numElements counts FLOATS; the last partial vector is handled lane by
	// lane. NOTE(review): requires float4-padded allocation when
	// numElements % 4 != 0 — confirm.
	//
	// Fix: tail-lane conditions were inverted (d >= k instead of d <= k);
	// dead `d >= 4` branch removed; logf keeps the math in single precision.
	int id = blockIdx.x*blockDim.x + threadIdx.x;
	int ld = (id+1) << 2;	// one past this thread's last float index

	if( ld <= numElements ) {
	
		float4 t = arr[id];

		t.x = logf( t.x );
		t.y = logf( t.y );
		t.z = logf( t.z );
		t.w = logf( t.w );

		arr[id] = t;
	}
	else if( ld - numElements < 4 ) {
	
		// d lanes (1..3) hang past the end; only the first 4-d lanes are valid
		int d = ld - numElements;

		arr[id].x = logf( arr[id].x );	// always valid (d <= 3)
		if( d <= 2 ) { arr[id].y = logf( arr[id].y ); }
		if( d <= 1 ) { arr[id].z = logf( arr[id].z ); }
		// .w is never valid in this branch
	}
}

extern "C"
void Logarit( int nsize, float* arr ) {

	// Size the grid by float4 count (kernel handles 4 floats per thread) and
	// use MATRIX::CTA_SIZE like the sibling wrappers instead of a hard-coded
	// 256 with a non-ceil-div block count.
	int numVec = ( nsize + 3 )/4;
	logarit<<< ( numVec + MATRIX::CTA_SIZE - 1 )/MATRIX::CTA_SIZE, MATRIX::CTA_SIZE >>>( nsize, (float4*)arr );
}

//----------------------------------------------------------------------------
//
// add kernel
// computes the element-wise sum of two matrices; float4 vectors are used so
// global memory reads are as wide as possible.
//
//----------------------------------------------------------------------------
__global__ void add( int numElements, float4* arr1, float4* arr2 ){

	// arr1 += arr2 element-wise, one float4 (4 floats) per thread.
	// numElements counts FLOATS; the last partial vector is handled lane by
	// lane. Fix: tail-lane conditions were inverted (d >= k instead of
	// d <= k); dead `d >= 4` branch removed.
	int id = blockIdx.x*blockDim.x + threadIdx.x;
	int ld = (id+1) << 2;	// one past this thread's last float index

	if( ld <= numElements ) {
	
		float4 t1 = arr1[id];
		float4 t2 = arr2[id];
	
		t1.x += t2.x;
		t1.y += t2.y;
		t1.z += t2.z;
		t1.w += t2.w;

		arr1[id] = t1;
	}
	else if( ld - numElements < 4 ) {
	
		// d lanes (1..3) hang past the end; only the first 4-d lanes are valid
		int d = ld - numElements;
		
		arr1[id].x += arr2[id].x;	// always valid (d <= 3)
		if( d <= 2 ) { arr1[id].y += arr2[id].y; }
		if( d <= 1 ) { arr1[id].z += arr2[id].z; }
		// .w is never valid in this branch
	}
}

extern "C"
void AddMatrix( int nsize, float* arr1, float* arr2 ) {

	// Size the grid by float4 count (kernel handles 4 floats per thread);
	// the original launched ~4x the needed threads.
	int numVec = ( nsize + 3 )/4;
	add<<< ( numVec + MATRIX::CTA_SIZE - 1 )/MATRIX::CTA_SIZE, MATRIX::CTA_SIZE >>>( nsize, (float4*)arr1, (float4*)arr2 );
}

//----------------------------------------------------------------------------
//
// sub kernel
// computes the element-wise difference of two matrices; float4 vectors are
// used so global memory reads are as wide as possible.
//
//----------------------------------------------------------------------------
__global__ void sub( int numElements, float4* arr1, float4* arr2 ){

	// arr1 -= arr2 element-wise, one float4 (4 floats) per thread.
	// numElements counts FLOATS; the last partial vector is handled lane by
	// lane. Fix: tail-lane conditions were inverted (d >= k instead of
	// d <= k); dead `d >= 4` branch removed.
	int id = blockIdx.x*blockDim.x + threadIdx.x;
	int ld = (id+1) << 2;	// one past this thread's last float index

	if( ld <= numElements ) {
	
		float4 t1 = arr1[id];
		float4 t2 = arr2[id];
	
		t1.x -= t2.x;
		t1.y -= t2.y;
		t1.z -= t2.z;
		t1.w -= t2.w;

		arr1[id] = t1;
	}
	else if( ld - numElements < 4 ) {
	
		// d lanes (1..3) hang past the end; only the first 4-d lanes are valid
		int d = ld - numElements;
		
		arr1[id].x -= arr2[id].x;	// always valid (d <= 3)
		if( d <= 2 ) { arr1[id].y -= arr2[id].y; }
		if( d <= 1 ) { arr1[id].z -= arr2[id].z; }
		// .w is never valid in this branch
	}
}

extern "C"
void SubMatrix( int nsize, float* arr1, float* arr2 ) {

	// Size the grid by float4 count (kernel handles 4 floats per thread);
	// the original launched ~4x the needed threads.
	int numVec = ( nsize + 3 )/4;
	sub<<< ( numVec + MATRIX::CTA_SIZE - 1 )/MATRIX::CTA_SIZE, MATRIX::CTA_SIZE >>>( nsize, (float4*)arr1, (float4*)arr2 );
}

//----------------------------------------------------------------------------
//
// mul kernel
// multiplies a matrix by a scalar; float4 vectors are used so global memory
// reads are as wide as possible.
//
//----------------------------------------------------------------------------
__global__ void mul( int numElements, float4* arr, float lamda ){

	// arr *= lamda element-wise, one float4 (4 floats) per thread.
	// numElements counts FLOATS; the last partial vector is handled lane by
	// lane. Fix: tail-lane conditions were inverted (d >= k instead of
	// d <= k); dead `d >= 4` branch removed.
	int id = blockIdx.x*blockDim.x + threadIdx.x;
	int ld = (id+1) << 2;	// one past this thread's last float index

	if( ld <= numElements ) {

		float4 t = arr[id];
	
		t.x *= lamda;
		t.y *= lamda;
		t.z *= lamda;
		t.w *= lamda;

		arr[id] = t;
	}
	else if( ld - numElements < 4 ) {
	
		// d lanes (1..3) hang past the end; only the first 4-d lanes are valid
		int d = ld - numElements;
		
		arr[id].x *= lamda;	// always valid (d <= 3)
		if( d <= 2 ) { arr[id].y *= lamda; }
		if( d <= 1 ) { arr[id].z *= lamda; }
		// .w is never valid in this branch
	}
}

extern "C"
void MulNum( float lamda, int nsize, float* arr ) {

	// Size the grid by float4 count (kernel handles 4 floats per thread);
	// the original launched ~4x the needed threads.
	int numVec = ( nsize + 3 )/4;
	mul<<< ( numVec + MATRIX::CTA_SIZE - 1 )/MATRIX::CTA_SIZE, MATRIX::CTA_SIZE >>>( nsize, (float4*)arr, lamda );
}

//----------------------------------------------------------------------------
//
// muldirect kernel
// multiplies two matrices element by element (Hadamard product); float4
// vectors are used so global memory reads are as wide as possible.
//
//----------------------------------------------------------------------------
__global__ void mulDirect( int numElements, float4* arr1, float4* arr2 ) {

	// arr1 *= arr2 element-wise (Hadamard product), one float4 (4 floats)
	// per thread. numElements counts FLOATS; the last partial vector is
	// handled lane by lane. Fix: tail-lane conditions were inverted
	// (d >= k instead of d <= k); dead `d >= 4` branch removed.
	int id = blockIdx.x*blockDim.x + threadIdx.x;
	int ld = (id+1) << 2;	// one past this thread's last float index

	if( ld <= numElements ) {
	
		float4 t1 = arr1[id];
		float4 t2 = arr2[id];
	
		t1.x *= t2.x;
		t1.y *= t2.y;
		t1.z *= t2.z;
		t1.w *= t2.w;

		arr1[id] = t1;
	}
	else if( ld - numElements < 4 ) {
	
		// d lanes (1..3) hang past the end; only the first 4-d lanes are valid
		int d = ld - numElements;
		
		arr1[id].x *= arr2[id].x;	// always valid (d <= 3)
		if( d <= 2 ) { arr1[id].y *= arr2[id].y; }
		if( d <= 1 ) { arr1[id].z *= arr2[id].z; }
		// .w is never valid in this branch
	}
}

extern "C"
void MulDirect( int nsize, float* arr1, float* arr2 ) {

	// Size the grid by float4 count (kernel handles 4 floats per thread);
	// the original launched ~4x the needed threads.
	int numVec = ( nsize + 3 )/4;
	mulDirect<<< ( numVec + MATRIX::CTA_SIZE - 1 )/MATRIX::CTA_SIZE, MATRIX::CTA_SIZE >>>( nsize, (float4*)arr1, (float4*)arr2 );
}

//----------------------------------------------------------------------------
//
// sigmoidgra kernel
// computes the sigmoid derivative s*(1-s) for each element; float4 vectors
// are used so global memory reads are as wide as possible.
//
//----------------------------------------------------------------------------
__global__ void resulSigmoidGra( int numElements, float4* arr1, float4* arr2 ) {

	// arr2 = arr1 * (1 - arr1) element-wise (sigmoid derivative, assuming
	// arr1 already holds sigmoid outputs), one float4 (4 floats) per thread.
	// numElements counts FLOATS; the last partial vector is handled lane by
	// lane. Fix: tail-lane conditions were inverted (d >= k instead of
	// d <= k); dead `d >= 4` branch removed; 1.0f keeps single precision.
	int id = blockIdx.x*blockDim.x + threadIdx.x;
	int ld = (id+1) << 2;	// one past this thread's last float index

	if( ld <= numElements ) {

		float4 t = arr1[id];
	
		t.x *= ( 1.0f - t.x );
		t.y *= ( 1.0f - t.y );
		t.z *= ( 1.0f - t.z );
		t.w *= ( 1.0f - t.w );

		arr2[id] = t;
	}
	else if( ld - numElements < 4 ) {
	
		// d lanes (1..3) hang past the end; only the first 4-d lanes are valid
		int d = ld - numElements;
		
		arr2[id].x = arr1[id].x*( 1.0f - arr1[id].x );	// always valid (d <= 3)
		if( d <= 2 ) { arr2[id].y = arr1[id].y*( 1.0f - arr1[id].y ); }
		if( d <= 1 ) { arr2[id].z = arr1[id].z*( 1.0f - arr1[id].z ); }
		// .w is never valid in this branch
	}
}

extern "C"
void ResulSigmoidGra( int nsize, float* arr1, float* arr2 ) {

	// Size the grid by float4 count (kernel handles 4 floats per thread);
	// the original launched ~4x the needed threads.
	int numVec = ( nsize + 3 )/4;
	resulSigmoidGra<<< ( numVec + MATRIX::CTA_SIZE - 1 )/MATRIX::CTA_SIZE, MATRIX::CTA_SIZE >>>( nsize, (float4*)arr1, (float4*)arr2 );
}

//----------------------------------------------------------------------------
//
// exp kernel
// computes e^x for each element; float4 vectors are used so global memory
// reads are as wide as possible.
//
//----------------------------------------------------------------------------
__global__ void resulExp( int numElements, float4* arr ) {

	// In-place element-wise e^x, one float4 (4 floats) per thread.
	// numElements counts FLOATS; the last partial vector is handled lane by
	// lane. Fix: tail-lane conditions were inverted (d >= k instead of
	// d <= k); dead `d >= 4` branch removed; expf keeps single precision.
	int id = blockIdx.x*blockDim.x + threadIdx.x;
	int ld = (id+1) << 2;	// one past this thread's last float index

	if( ld <= numElements ) {

		float4 t = arr[id];
	
		t.x = expf( t.x );
		t.y = expf( t.y );
		t.z = expf( t.z );
		t.w = expf( t.w );

		arr[id] = t;
	}
	else if( ld - numElements < 4 ) {
	
		// d lanes (1..3) hang past the end; only the first 4-d lanes are valid
		int d = ld - numElements;
		
		arr[id].x = expf( arr[id].x );	// always valid (d <= 3)
		if( d <= 2 ) { arr[id].y = expf( arr[id].y ); }
		if( d <= 1 ) { arr[id].z = expf( arr[id].z ); }
		// .w is never valid in this branch
	}
}

extern "C"
void ResulExp( int nsize, float* arr ) {

	// Size the grid by float4 count (kernel handles 4 floats per thread);
	// the original launched ~4x the needed threads.
	int numVec = ( nsize + 3 )/4;
	resulExp<<< ( numVec + MATRIX::CTA_SIZE - 1 )/MATRIX::CTA_SIZE, MATRIX::CTA_SIZE >>>( nsize, (float4*)arr );
}

//----------------------------------------------------------------------------
//
// block_sum kernel
// sums an array, producing one partial sum per block.
// http://code.google.com/p/stanford-cs193g-sp2010/source/browse/trunk/tutorials/sum_reduction.cu
//
//----------------------------------------------------------------------------
__global__ void block_sum( float *input, float *per_block_results, const size_t n) {

	// Shared-memory tree reduction: each block writes the sum of its segment
	// of `input` to per_block_results[blockIdx.x].
	// Assumes blockDim.x == MATRIX::CTA_SIZE and is a power of two.
	__shared__ float partial[MATRIX::CTA_SIZE];

	const unsigned int tid = threadIdx.x;
	const unsigned int gid = blockIdx.x * blockDim.x + tid;

	// Each thread stages one element; out-of-range threads contribute zero.
	partial[tid] = ( gid < n ) ? input[gid] : 0;
	__syncthreads();

	// Halve the active range each step, folding the upper half into the lower.
	for( unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1 ) {

		if( tid < stride ) {

			partial[tid] += partial[tid + stride];
		}

		// barrier outside the branch: every thread must reach it
		__syncthreads();
	}

	// lane 0 publishes this block's total
	if( tid == 0 ) {

		per_block_results[blockIdx.x] = partial[0];
	}
}

extern "C"
float SumArray( int nsize, float* arr ) {

	// Sum the nsize floats in device array arr by repeated block reduction.
	//
	// Fixes over the original:
	//  - the result was declared `int` but filled via a float memcpy, so the
	//    raw float bit pattern was returned through an integer;
	//  - every reduction pass re-read `arr` instead of the previous pass's
	//    partial sums, so any input larger than one block was summed wrong;
	//  - reading and writing the same buffer in one pass raced between
	//    blocks, so passes now ping-pong between two scratch buffers;
	//  - for nsize == 1 the result was copied from an uninitialized buffer.
	const size_t block_size = MATRIX::CTA_SIZE;
	float resul = 0;

	if( nsize < 1 ) { return resul; }

	int num_blocks = (int)( ( nsize + block_size - 1 )/block_size );

	// Scratch buffers sized for the first and second passes; later passes
	// only shrink, so the two buffers can be reused alternately.
	float* d_partial[2] = { 0, 0 };
	cudaMalloc( (void**)&d_partial[0], sizeof(float)*num_blocks );
	cudaMalloc( (void**)&d_partial[1], sizeof(float)*( ( num_blocks + block_size - 1 )/block_size ) );

	float* d_in = arr;	// input of the current pass
	int cur = 0;		// scratch buffer receiving the current pass's output

	while( nsize > 1 ) {

		num_blocks = (int)( ( nsize + block_size - 1 )/block_size );

		// one partial sum per block
		block_sum <<<num_blocks,block_size>>>( d_in, d_partial[cur], nsize );

		d_in = d_partial[cur];
		cur ^= 1;
		nsize = num_blocks;
	}

	// d_in now holds a single element: the total (arr itself when nsize == 1).
	cudaMemcpy( &resul, d_in, sizeof(float), cudaMemcpyDeviceToHost );

	cudaFree( d_partial[0] );
	cudaFree( d_partial[1] );

	return resul;
}
//----------------------------------------------------------------------------
//
// copy data into a MATRIX device buffer.
//
//----------------------------------------------------------------------------
extern "C"
void CopyData( int size, float* &des, float* arr, MemoryDevice mem ) {

	// Copy `size` BYTES from arr into the device buffer des. The source is
	// host memory when mem == RamMemory, otherwise another device buffer.
	const cudaMemcpyKind kind =
		( mem == RamMemory ) ? cudaMemcpyHostToDevice : cudaMemcpyDeviceToDevice;

	cudaMemcpy( des, arr, size, kind );
}

//----------------------------------------------------------------------------
//
// allocate (and optionally fill) device storage for a MATRIX.
//
//----------------------------------------------------------------------------
extern "C"
void initialize( int nsize, float* &des, float* arr, MemoryDevice mem ) {

	// Allocate a device buffer of nsize floats into des; when a source
	// array is supplied, also copy its contents (host or device, per mem).
	const int bytes = nsize*( int )sizeof( float );

	cudaMalloc( ( void** )&des, bytes );

	if( arr ) { CopyData( bytes, des, arr, mem ); }
}

//----------------------------------------------------------------------------
//
// free global device memory.
//
//----------------------------------------------------------------------------
extern "C"
void finalize( float* mtr ) {

	// Release a device buffer previously allocated by initialize();
	// a null pointer is ignored.
	if( mtr ) { cudaFree( mtr ); }
}

//----------------------------------------------------------------------------
//
// copy device data out to host memory.
//
//----------------------------------------------------------------------------
extern "C"
void copToHost( int nsize, float* mtr, float* arr ) {

	// Download nsize floats from the device buffer mtr into host array arr.
	const size_t bytes = ( size_t )nsize*sizeof( float );

	cudaMemcpy( arr, mtr, bytes, cudaMemcpyDeviceToHost );
}