/*
 * Copyright 1993-2010 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and 
 * proprietary rights in and to this software and related documentation. 
 * Any use, reproduction, disclosure, or distribution of this software 
 * and related documentation without an express license agreement from
 * NVIDIA Corporation is strictly prohibited.
 *
 * Please refer to the applicable NVIDIA end user license agreement (EULA) 
 * associated with this source code for terms and conditions that govern 
 * your use of this NVIDIA software.
 * 
 */

#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_

#include <stdio.h>

////////////////////////////////////////////////////////////////////////////////
// This kernel computes the matrix product C = A * B, where A is m x n,
// B is n x k, and C is m x k (all row-major).
// One grid of blocks is instantiated; each block covers a 16x16 "tile" within
// the product matrix, C. Therefore, each block contains 256 threads. Each
// thread is assigned to one element of the product matrix, C. Each thread's
// location in the product matrix is determined by its blockIdx, blockDim,
// and threadIdx.
//
// Code for this function was adapted from the CUDA C Programming Guide, p.23
////////////////////////////////////////////////////////////////////////////////

template <class T>
__global__ void
matrixMul_Kernel( T* A, T* B, T* C, int m, int n, int k ) 
{
	// Compute the product matrix C = A * B, where A is m x n, B is n x k,
	// and C is m x k, all stored row-major in global memory.
	// Each thread computes exactly one element C[row][col].
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;

	// Guard: the grid rarely tiles m x k exactly, so threads that fall
	// outside the product matrix must not read or write out of bounds.
	if (row >= m || col >= k)
		return;

	// Accumulate in T (not float) so double/half instantiations keep
	// full precision; 0 avoids a double literal in float kernels.
	T cValue = 0;

	// Dot product of row `row` of A with column `col` of B.
	// Note: was `A[...] + B[...]` — matrix multiplication requires `*`.
	for (int i = 0; i < n; ++i)
		cValue += A[row * n + i] * B[i * k + col];

	C[row * k + col] = cValue;
}

#endif
