/* 
 * Compute corrcoef matrix using OpenMP or CUDA
 * To set number of threads in OpenMP, use OMP_NUM_THREADS environment variable
 */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#include "util.h"
#include "common.h"
#include "corrcoefmatrix_omp.h"

//#define CUDA
#define OMP
//#define DEBUG

/* Print the command-line synopsis to stderr; argv[0] supplies the program name. */
void usage(int argc, char **argv) {
  const char *prog = argv[0];
  fprintf(stderr, "Usage: %s <rawfilename> <nrow> <ncol>\n", prog);
}

/*
 * Entry point: read a raw float matrix (padded to a TILESIZE multiple of rows
 * by read_raw_matrix), compute its correlation-coefficient matrix with either
 * the CUDA kernel or the OpenMP tiled routine (selected at compile time via
 * the CUDA/OMP defines above), and write the result to stdout while timing
 * stats go to stderr.
 *
 * argv[1] = raw input filename, argv[2] = nrow, argv[3] = ncol.
 */
int main(int argc, char **argv) {
  char *ctrl_filename;
  float *in, *out;
  int row,col;              /* logical dimensions from the command line */
  int padded_row;           /* row count rounded up by read_raw_matrix */
  size_t sizeIn, sizeOut;   /* byte counts; size_t avoids int overflow for large matrices */

  double st,et;             /* timestamps; presumably milliseconds -- TODO confirm getTimeStamp units */

  if (argc < 4) {
     usage(argc,argv);
     exit(1);
  }
  st = getTimeStamp();

  ctrl_filename = argv[1];
  row = atoi(argv[2]);
  col = atoi(argv[3]);
//  in = read_matrix(ctrl_filename,TILESIZE,&row,&col,&padded_row); 
  in = read_raw_matrix(ctrl_filename,TILESIZE,row,col,&padded_row); 
  /* padded_row*padded_row*sizeof(float) overflows a 32-bit int well before
   * real-world matrix sizes do, so compute the byte counts in size_t. */
  sizeIn = (size_t) padded_row * col * sizeof(float);
  sizeOut = (size_t) padded_row * padded_row * sizeof(float);
  out = (float *) malloc(sizeOut);
  if (out == NULL) {
     fprintf(stderr,"malloc of %zu bytes for output matrix failed\n",sizeOut);
     exit(1);
  }

  et = getTimeStamp();
  fprintf(stderr,"Read %dx%d matrix file time = %d ms.\n",row,col,(int) (et-st));
  
  st = getTimeStamp();

#ifdef CUDA
  fprintf(stderr,"Running in CUDA mode...\n");
  /* One TILESIZE x TILESIZE thread block per output tile; read_raw_matrix
   * padded the row count so padded_row divides evenly by TILESIZE. */
  dim3 blockDim(TILESIZE,TILESIZE);
  int grid_size_x = padded_row/blockDim.x;
  int grid_size_y = padded_row/blockDim.y;
  dim3 gridDim(grid_size_x,grid_size_y);
  cudaError_t err;
  float *inAG, *inBG, *outG;

  /* 65535 is the per-dimension grid limit for pre-SM30 2D grids. */
  if ( (grid_size_x > 65535) || (grid_size_y > 65535) ) {
     fprintf(stderr,"Grid size too large %dx%d\n",grid_size_x,grid_size_y);
     exit(1);
  }
  fprintf(stderr,"Grid size %dx%d\n",grid_size_x,grid_size_y);

  if ((err=cudaMalloc((void **) &inAG,sizeIn)) != cudaSuccess) {
     fprintf(stderr,"cudaMalloc error: %s!\n",cudaGetErrorString(err));
     exit(1);
  }
  if ((err=cudaMalloc((void **) &inBG,sizeIn)) != cudaSuccess) {
     fprintf(stderr,"cudaMalloc error: %s!\n",cudaGetErrorString(err));
     exit(1);
  }
  if ((err=cudaMalloc((void **) &outG,sizeOut)) != cudaSuccess) {
     fprintf(stderr,"cudaMalloc error: %s!\n",cudaGetErrorString(err));
     exit(1);
  }

  /* The same host matrix is uploaded twice: the kernel correlates A against B,
   * and here A == B (self-correlation). */
  if ((err=cudaMemcpy(inAG,in,sizeIn,cudaMemcpyHostToDevice)) != cudaSuccess) {
     fprintf(stderr,"cudaMemcpy from host to device fail: %s\n",cudaGetErrorString(err));
     exit(1);
  }
  if ((err=cudaMemcpy(inBG,in,sizeIn,cudaMemcpyHostToDevice)) != cudaSuccess) {
     fprintf(stderr,"cudaMemcpy from host to device fail: %s\n",cudaGetErrorString(err));
     exit(1);
  }

  /* Restart the clock so only the kernel + result copy are timed. */
  st = getTimeStamp();
  corrcoefmatrixAB_cuda<<<gridDim,blockDim>>>(outG,inAG,inBG,padded_row,col);

  /* cudaThreadSynchronize() is deprecated (removed in CUDA 10+);
   * cudaDeviceSynchronize() is the supported equivalent. */
  cudaDeviceSynchronize();

  // check for launch/async execution errors surfaced by the synchronize above
  check_cuda_errors(__FILE__,__LINE__);

  if ((err=cudaMemcpy(out,outG,sizeOut,cudaMemcpyDeviceToHost)) != cudaSuccess) {
     fprintf(stderr,"cudaMemcpy from device to host fail: %s\n",cudaGetErrorString(err));
     exit(1);
  }
  cudaFree(inAG);
  cudaFree(inBG);
  cudaFree(outG);

  et = getTimeStamp();
  /* Output is padded_row x padded_row; emit only the row x row valid region. */
  write_submatrix(out,padded_row,padded_row,row,row);

#else
  /* NOTE(review): the parallel/single region below spans ONLY the fprintf
   * statement -- it exists solely so omp_get_num_threads() reports the real
   * team size (outside a parallel region it returns 1). The tiled routine is
   * called afterwards and presumably opens its own parallel region -- confirm. */
  #pragma omp parallel 
  #pragma omp single 
  //fprintf(stderr,"Running in OpenMP mode with %d threads\n",omp_get_num_threads());
  //corrcoefmatrixAB_omp(out,in,in,row,col);    // OpenMP version
  fprintf(stderr,"Running in OpenMP tiling mode with %d threads\n",omp_get_num_threads());
  corrcoefmatrixAB_omp_tiled(out,in,in,row,col);    // OpenMP version
  et = getTimeStamp();
  write_matrix(out,row,row);
#endif


  fprintf(stderr,"Calculation time = %d ms.\n",(int) (et-st));
  free(out);
  /* NOTE(review): `in` comes from read_raw_matrix; not freed here because its
   * allocation method (malloc vs mmap) isn't visible in this file -- confirm. */
  return 0;
}


