//
/*   CUDA kernel code for Matrix * Vector operation program
*    Copyright (C) 2009 Goffredo Marocchi
*
*    This program is free software; you can redistribute it and/or modify
*    it under the terms of the GNU General Public License as published by
*    the Free Software Foundation; either version 2 of the License, or
*    (at your option) any later version.
*
*    This program is distributed in the hope that it will be useful,
*    but WITHOUT ANY WARRANTY; without even the implied warranty of
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*    GNU General Public License for more details.
*
*    You should have received a copy of the GNU General Public License along
*    with this program; if not, write to the Free Software Foundation, Inc.,
*    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/


//#include <cutil_inline.h>
//#include <cutil_math.h>
//#include <vector_types.h>

//#include "IGGS_MatTest_defines.h"
//#include <nvMatrix.h>

using namespace std;

/**\brief Fast Integer Multiplication Macro
*
*On G80-class hardware 24-bit multiplication takes 4 clocks per warp
*(the same as for floating point  multiplication and\n addition),
*whereas full 32-bit multiplication takes 16 clocks per warp.
*So if integer multiplication operands are guaranteed to\n fit into 24 bits
*(i.e. always lie within the [-8M, 8M - 1] range in the signed case),
*explicit 24-bit multiplication is preferred for performance.
*
*/
#define IMUL(a, b) __mul24(a, b)

#if SHARED_MEM == 0

/** \brief CUDA kernel code (plain global-memory variant).\n
 * Computes the M*V product d_C = d_A * d_B, one thread per output row.
 * Assumes the launch configuration covers exactly ROWS threads in total
 * (gridDim.x * blockDim.x == ROWS) — no bounds check is performed.
 *
 *	\param d_C reference to the result vector (device memory),
 *			float*, length ROWS.
 *	\param d_A reference to the source matrix (device memory),
 *			float*, addressed via indexC(row, col, ROWS) — presumably
 *			column-major; defined elsewhere, TODO confirm.
 *	\param d_B reference to the source vector (device memory),
 *			float*, length COLS.
 *	\return void
 */
__global__ void MatTest(
		float *d_C,
		float *d_A,
		float *d_B
){

	int bx = blockIdx.x ;
	int tx = threadIdx.x; // Thread index

	// Block/thread indices always fit in 24 bits, so the file's IMUL
	// (__mul24) macro applies here — see its documentation above.
	int block_id = IMUL(bx, blockDim.x);
	int i = tx + block_id ;

	// Accumulate the dot product in a register instead of performing a
	// global-memory read-modify-write on d_C[i] every iteration; the
	// result is stored to global memory exactly once, at the end.
	float acc = 0.0f;

	for (int c = 0; c < COLS; c++) {
		acc += d_A[indexC(i,c,ROWS)] * d_B[c];
	}

	d_C[i] = acc;
}

//elif is the Pre-Processor command for "else if"
#elif SHARED_MEM == 1

/** \brief CUDA kernel code (texture + software-pipelined variant).\n
 * The purpose of this function is to execute the M*V operation using custom written CUDA code.
 * One thread computes one output row. The vector operand is read through
 * the texture reference texVecB (bound by host code, not visible here);
 * the loop is software-pipelined: each iteration consumes the matrix and
 * vector elements fetched in the previous iteration while issuing the
 * fetches for the next one.
 *
 * NOTE(review): mA is declared __shared__, but every thread only ever
 * touches its own slot mA[tx] — no inter-thread communication occurs,
 * which is why the commented-out __syncthreads() barriers below are
 * unnecessary.
 *
 *	\param d_A reference to the source matrix (device memory),
 *			float*, addressed via MATC(d_A, row, col, ROWS) —
 *			defined elsewhere, TODO confirm layout.
 *	\param d_B reference to the source vector (device memory),
 *			float*. Unused in this variant: the vector is read via
 *			the texVecB texture instead.
 *	\param d_C reference to the result vector (device memory),
 *			float*.
 *	\return void
 */
__global__ void MatTest(
		float *d_C,
		float *d_A,
		float *d_B
){

	int bx = blockIdx.x ;
	int tx = threadIdx.x; // Thread index

	int block_id = bx* blockDim.x;
	int i = tx + block_id ;	// global row index; assumes grid covers exactly ROWS rows — TODO confirm

	//printf ("tx: %d\n", tx); printf ("bx: %d\n", bx); printf ("i: %d\n", i); //DEBUG line to be active in DEVICE EMU mode

	__shared__ float mA[TBLOCK];	// one private accumulator slot per thread in the block

	float temp = tex1Dfetch(texVecB, 0);	// prefetch the first vector element
	float tempM = 0.0f;
	int idx = 0;

	tempM = MATC(d_A, i, 0, ROWS);	// prefetch the first matrix element of this row
	mA[tx] = 0.0f;
	//__syncthreads(); //safe to comment out for the same reasons as below...

	#pragma unroll
	for (idx = 1; idx < COLS; idx++) {

		// Pipeline body: multiply the pair fetched last iteration, then
		// immediately issue the fetches for the next iteration so the
		// loads overlap with the accumulate below.
		tempM *= temp;
		temp = tex1Dfetch(texVecB, idx);
		mA[tx] += tempM;
		tempM = MATC(d_A, i, idx, ROWS);

		//__syncthreads();   //safe to do here, syncing threads in the Thread block with this barrier is really pointless
		//								  here because each thread only has to wait that its own work from the previous loop
		//								  iteration is finished and not on the work of other threads...
		//								  ... all threads proceed in lockstep as before and
		//                               we gain about 1 ms in reduced execution time.
	}

	// Drain the pipeline: fold in the last fetched matrix/vector pair.
	tempM *= temp;
	mA[tx] += tempM;

	d_C[i] = mA[tx];	// single global-memory store of the finished dot product
	//d_C[i] = 100.0f;
}

// (C/C++ version of the algorithm used above)
//	for (int j = 0; j < C; j++) {
//		temp = b[j];
//		for (int i = 0; i < R; i++) {
//			if ( j == 0) c[i] = 0;
//			c[i] += MATC(a,i,j,R) * temp;

#endif
