/**
   main.cu - Created by Timothy Morey on 7/27/11

   This file defines a simple program that will perform a matrix multiplication
   on a GPU device.
*/

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/**
   Fills the matrix A with random floating point values between 0.0 and 10.0.

   @param A A one-dimensional array of floats, which is to be interpreted as a
   matrix in row-major order.
   @param rows The number of rows in A.
   @param cols The number of columns in A.
 */
__host__ void fillMatrix(float *A, int rows, int cols);

/**
   Defines a kernel that multiplies the matrix A by the matrix B and stores the
   result in C (AxB=C).

   @param A A one-dimensional array of floats, which is to be interpreted as an
   m-by-n matrix in row-major order.
   @param B A one-dimensional array of floats, which is to be interpreted as an
   n-by-p matrix in row-major order.
   @param C A one-dimensional array of floats, which is to be interpreted as a
   m-by-p matrix in row-major order.  This matrix will store the output of
   the calculation.
   @param m The number of rows in A and C.
   @param n The number of columns in A and the number of rows in B.
   @param p The number of columns in B and C.
 */
__global__ void matrixMult(float *A, float *B, float *C, int m, int n, int p);

/**
   Multiplies the matrix A by B and stores the result in C (AxB=C), using a 
   traditional CPU-based algorithm.

   @param A A one-dimensional array of floats, which is to be interpreted as an
   m-by-n matrix in row-major order.
   @param B A one-dimensional array of floats, which is to be interpreted as an
   n-by-p matrix in row-major order.
   @param C A one-dimensional array of floats, which is to be interpreted as a
   m-by-p matrix in row-major order.  This matrix will store the output of
   the calculation.
   @param m The number of rows in A and C.
   @param n The number of columns in A and the number of rows in B.
   @param p The number of columns in B and C.
 */
__host__ void matrixMultTraditional(float *A, float*B, float*C, int m, int n, 
				    int p);

/**
   Prints the contents of the matrix A to standard output.

   @param A A one-dimensional array of floats, which is to be interpreted as an
   m-by-n matrix in row-major order.
   @param m The number of rows in A.
   @param n The number of columns in A.
   @param label A label that will be printed out to identify the matrix.
 */
__host__ void printMatrix(float *A, int m, int n, char *label);

/**
   Aborts the program with a diagnostic message if a CUDA runtime call failed.

   @param err The status code returned by a CUDA runtime API call.
   @param what A short description of the operation, used in the error message.
 */
static void checkCuda(cudaError_t err, const char *what)
{
  if(err != cudaSuccess)
  {
    fprintf(stderr, "CUDA error during %s: %s\n", what, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
}

/**
   Program entry point: fills two matrices with random values, multiplies them
   on the GPU, then repeats the calculation on the CPU so the two results can
   be compared by eye.

   @return 0 on success; exits with EXIT_FAILURE on any allocation or CUDA
   runtime error.
 */
int main()
{
  int m, n, p;                  // matrix dimensions
  size_t sizeA, sizeB, sizeC;   // matrix sizes, in bytes
  float *hostA, *hostB, *hostC; // host copies of the matrices
  float *devA, *devB, *devC;    // device copies of the matrices

  // seed the random generator so each run multiplies different matrices
  // (<time.h> was included for this but rand() was never seeded)
  srand((unsigned)time(NULL));

  // yes, it's tacky to hard-code these like this...
  // nothing to see here, move along.
  m = 3;
  n = 4;
  p = 3;

  // calculate the sizes of our matrices
  sizeA = m * n * sizeof(float);
  sizeB = n * p * sizeof(float);
  sizeC = m * p * sizeof(float);

  // allocate memory on the host (CPU) for the matrices
  hostA = (float*)malloc(sizeA);
  hostB = (float*)malloc(sizeB);
  hostC = (float*)malloc(sizeC);
  if(!hostA || !hostB || !hostC)
  {
    fprintf(stderr, "Host memory allocation failed\n");
    exit(EXIT_FAILURE);
  }

  // fill A and B with random values
  fillMatrix(hostA, m, n);
  printMatrix(hostA, m, n, (char*)"A");

  fillMatrix(hostB, n, p);
  printMatrix(hostB, n, p, (char*)"B");

  // allocate memory on the device (GPU) for the matrices, checking each call
  checkCuda(cudaMalloc((void**)&devA, sizeA), "cudaMalloc devA");
  checkCuda(cudaMalloc((void**)&devB, sizeB), "cudaMalloc devB");
  checkCuda(cudaMalloc((void**)&devC, sizeC), "cudaMalloc devC");

  // copy the contents of the matrices to the device
  checkCuda(cudaMemcpy(devA, hostA, sizeA, cudaMemcpyHostToDevice),
            "copying A to device");
  checkCuda(cudaMemcpy(devB, hostB, sizeB, cudaMemcpyHostToDevice),
            "copying B to device");

  // invoke the kernel: one thread per element of C
  matrixMult<<<1, m * p>>>(devA, devB, devC, m, n, p);
  // launch-configuration errors are only reported through cudaGetLastError
  checkCuda(cudaGetLastError(), "matrixMult kernel launch");

  // copy the calculated contents of C back to the host; this blocking
  // cudaMemcpy also synchronizes with the kernel, surfacing any execution
  // error through its return code
  checkCuda(cudaMemcpy(hostC, devC, sizeC, cudaMemcpyDeviceToHost),
            "copying C to host");
  printMatrix(hostC, m, p, (char*)"C (GPU)");

  // for comparison, also do the calculation with the traditional function
  matrixMultTraditional(hostA, hostB, hostC, m, n, p);
  printMatrix(hostC, m, p, (char*)"C (CPU)");

  // release memory on device (GPU)
  cudaFree(devA);
  cudaFree(devB);
  cudaFree(devC);

  // release memory on host (CPU)
  free(hostA);
  free(hostB);
  free(hostC);

  return 0;
}

/**
   Populates the rows-by-cols matrix A (row-major) with pseudo-random floats
   in the range [0.0, 10.0].

   @param A A one-dimensional array of floats, interpreted as a matrix in
   row-major order; must hold at least rows * cols elements.
   @param rows The number of rows in A.
   @param cols The number of columns in A.
 */
__host__ void fillMatrix(float *A, int rows, int cols)
{
  // Row-major storage is contiguous, so a single linear pass visits the
  // elements in exactly the same order as a row/column double loop would,
  // consuming the same rand() sequence.
  int count = rows * cols;

  for(int i = 0; i < count; i++)
  {
    // rand() / RAND_MAX lies in [0, 1]; scaling by 10.0 maps it to [0, 10].
    A[i] = rand() * 10.0 / RAND_MAX;
  }
}

/**
   Kernel computing AxB=C with one thread per element of C.

   Expects a 1D grid of 1D blocks supplying at least m * p threads in total;
   extra threads are guarded out, so any launch configuration that covers the
   output is valid (the original version only worked for a single block).

   @param A m-by-n input matrix, row-major, in device global memory.
   @param B n-by-p input matrix, row-major, in device global memory.
   @param C m-by-p output matrix, row-major, in device global memory.
   @param m The number of rows in A and C.
   @param n The number of columns in A and the number of rows in B.
   @param p The number of columns in B and C.
 */
__global__ void matrixMult(float *A, float *B, float *C,
			   int m, int n, int p)
{
  // Global flat index: using blockIdx as well as threadIdx keeps the kernel
  // correct for multi-block launches, not just <<<1, m * p>>>.
  int i = blockIdx.x * blockDim.x + threadIdx.x;

  // Guard against excess threads when the grid does not divide m * p evenly.
  if(i < m * p)
  {
    int row = i / p;
    int col = i % p;

    // Accumulate the dot product in a register and write C exactly once,
    // instead of read-modify-writing global memory on every iteration.
    float sum = 0.0f;
    for(int offset = 0; offset < n; offset++)
    {
      sum += A[row * n + offset] * B[offset * p + col];
    }

    C[i] = sum;
  }
}

/**
   CPU reference implementation of the matrix product AxB=C.

   @param A m-by-n input matrix, row-major.
   @param B n-by-p input matrix, row-major.
   @param C m-by-p output matrix, row-major; overwritten with the result.
   @param m The number of rows in A and C.
   @param n The number of columns in A and the number of rows in B.
   @param p The number of columns in B and C.
 */
__host__ void matrixMultTraditional(float *A, float *B, float *C,
			   int m, int n, int p)
{
  // Each output entry C[r][c] is the dot product of row r of A with
  // column c of B.
  for(int r = 0; r < m; r++)
  {
    for(int c = 0; c < p; c++)
    {
      float sum = 0.0f;

      for(int k = 0; k < n; k++)
      {
	sum += A[r * n + k] * B[k * p + c];
      }

      C[r * p + c] = sum;
    }
  }
}

/**
   Writes the m-by-n matrix A (row-major) to standard output, one bracketed
   row per line, preceded by a label.

   @param A A one-dimensional array of floats, interpreted as an m-by-n
   matrix in row-major order.
   @param m The number of rows in A.
   @param n The number of columns in A.
   @param label A label printed above the matrix to identify it.
 */
__host__ void printMatrix(float *A, int m, int n, char* label)
{
  printf("\n%s:\n", label);

  for(int r = 0; r < m; r++)
  {
    // Render one row: leading bar, fixed-width entries, trailing bar.
    printf("| ");

    for(int c = 0; c < n; c++)
      printf("%8.4f ", A[r * n + c]);

    printf("|\n");
  }
}
