// gpu (device) based matrix/matrix gpu code
//-------------------------------------------------------------------------
// Included CUDA libraries
//-------------------------------------------------------------------------
#include <cutil.h>
#include "cutil_inline.h"

#define TILE_WIDTH 16 // block x and y dimensions

// Tiled matrix-matrix multiply: Pd = Md * Nd, all Width x Width, row-major.
// Expected launch config: gridDim = (Width/TILE_WIDTH, Width/TILE_WIDTH),
// blockDim = (TILE_WIDTH, TILE_WIDTH). Width must be a multiple of
// TILE_WIDTH (no bounds checks are performed).
__global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int Width) {
   // ==================================================================
   // Solution part 4
   // Each thread computes one element of Pd: the dot product of one row
   // of Md and one column of Nd, accumulated tile by tile through
   // shared memory.
   // ==================================================================

   // Shared-memory tiles of Md and Nd, reloaded once per tile iteration.
   __shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
   __shared__ float Nds[TILE_WIDTH][TILE_WIDTH];

   float Pvalue = 0.0f;

   int bx = blockIdx.x;
   int by = blockIdx.y;
   int tx = threadIdx.x;
   int ty = threadIdx.y;

   // Global row and column of the Pd element this thread computes.
   int row = by * TILE_WIDTH + ty;
   int col = bx * TILE_WIDTH + tx;

   // Loop over the Md and Nd tiles required to compute the Pd element.
   for (int m = 0; m < Width / TILE_WIDTH; ++m)
   {
	   // BUG FIX: the tiles were stored transposed (Mds[tx][ty],
	   // Nds[tx][ty]) while the inner loop reads Mds[ty][k] and
	   // Nds[k][tx], which scrambled the product. Storing as [ty][tx]
	   // matches the reads, and indexing global memory by tx keeps the
	   // loads coalesced across the warp.
	   Mds[ty][tx] = Md[row * Width + (m * TILE_WIDTH + tx)];
	   Nds[ty][tx] = Nd[(m * TILE_WIDTH + ty) * Width + col];
	   __syncthreads();  // tiles fully loaded before any thread reads them

	   for (int k = 0; k < TILE_WIDTH; ++k)
	   {
		   Pvalue += Mds[ty][k] * Nds[k][tx];
	   }
	   __syncthreads();  // all reads done before the tiles are overwritten
   }

   // BUG FIX: write the result once, after all tiles have been
   // accumulated, instead of issuing a redundant global store on every
   // iteration of the tile loop.
   Pd[row * Width + col] = Pvalue;
   // End of solution part 4 ===========================================
}

// Host wrapper: computes P = M * N for Width x Width row-major matrices
// stored in host memory. Width must be a multiple of TILE_WIDTH (the
// kernel performs no bounds checks).
void MatrixMultiplication(float *M, float *N, float *P, int Width) {
   // size_t avoids overflow of Width*Width*sizeof(float) for large Width.
   size_t size = (size_t)Width * Width * sizeof(float);
   float *Md, *Nd, *Pd;

   // ===================================================================
   // Solution part 1: Copy Input Data from Host to Device
   //    Create device buffers for the two input matrices, copy the host
   //    data into them, and check every CUDA API call for errors
   //    (cutilSafeCall, from the included cutil_inline.h, reports the
   //    failing call and aborts — previously these errors were ignored).
   // ===================================================================

   // Allocate device memory and transfer host arrays M and N.
   cutilSafeCall(cudaMalloc((void **)&Md, size));
   cutilSafeCall(cudaMalloc((void **)&Nd, size));

   cutilSafeCall(cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice));
   cutilSafeCall(cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice));

   // Allocate device memory for the result matrix P.
   cutilSafeCall(cudaMalloc((void **)&Pd, size));

   // End of solution Part 1 ============================================

   // ===================================================================
   // Solution part 2
   //    A. Launch configuration: one thread per Pd element, one
   //       TILE_WIDTH x TILE_WIDTH block per output tile. Each matrix
   //       dimension is assumed to be a multiple of TILE_WIDTH.
   //    B. Launch the kernel and check for launch failures.
   // ===================================================================

   // Stage A: setup the kernel execution configuration parameters.
   dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
   dim3 dimGrid(Width / TILE_WIDTH, Width / TILE_WIDTH);

   // Clear any error state left by earlier API calls.
   cutilCheckMsg("Error before Kernel kickoff\n");

   // Stage B: launch the kernel.
   MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width);

   // Kernel launches are asynchronous and return no status; check here.
   cutilCheckMsg("Error: Kernel Launch Failure\n");

   // End of solution Part 2 ============================================

   // ===================================================================
   // Solution part 3
   // Copy results from device back to host. cudaMemcpy blocks until the
   // kernel has finished, so no explicit synchronize is needed first.
   // ===================================================================

   cutilSafeCall(cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost));

   // End of solution Part 3 ============================================

   // Free device matrices.
   cutilSafeCall(cudaFree(Md));
   cutilSafeCall(cudaFree(Nd));
   cutilSafeCall(cudaFree(Pd));
}
