#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include <ga.h>
#include <mpi.h>
#include <math.h>
#include <macdecls.h>

#include <CL/opencl.h>
#include "clError.h"
#include "ini.h"

#include "../util.h"
#include "../common.h"
#include "helper.h"

#define NDIM 2
//#define N 128
//#define M 50
//#define BLOCKSIZE 16
#define N 20480
#define M 100
#define BLOCKSIZE 1024
#define REDUCED_BLOCKSIZE 128

#define INI "oclconfig.ini"
/* Runtime configuration parsed from oclconfig.ini by ini_handler(). */
typedef struct {
   cl_device_type device_type; /* OpenCL device class to target (CPU or GPU) */
} Configuration;

/*
 * inih callback: store recognized key/value pairs into the Configuration
 * passed through `data`.
 *
 * Recognized keys (case-insensitive, any section):
 *   DEVICE_TYPE = CL_DEVICE_TYPE_CPU | CL_DEVICE_TYPE_GPU
 *
 * Returns 1 ("handled") for every entry, including unknown keys, so that
 * unrecognized settings are silently ignored instead of aborting the parse.
 * BUG FIX: the original fell off the end without returning a value, which is
 * undefined behavior because ini_parse() checks the handler's return (0 means
 * error) and could report spurious parse failures.
 */
int ini_handler(void *data, const char *section, const char *name, const char *value) {
   Configuration *config = (Configuration *) data;
   if (strcasecmp("DEVICE_TYPE",name) == 0) {
      if (strcasecmp("CL_DEVICE_TYPE_CPU",value) == 0) {
         config->device_type = CL_DEVICE_TYPE_CPU;
      } else if (strcasecmp("CL_DEVICE_TYPE_GPU",value) == 0) {
         config->device_type = CL_DEVICE_TYPE_GPU;
      }
   }
   return 1; /* handled (unknown keys are intentionally ignored) */
}

/* a short function to print a message and exit */
/*
 * Print `msg` verbatim to stderr, shut down MPI cleanly, and terminate
 * the process with a failure status.  Never returns.
 */
void error_exit(char msg[]) {
    fputs(msg, stderr);
    MPI_Finalize();
    exit(EXIT_FAILURE);
}

float *calculation(int task_id, cl_command_queue queue, cl_kernel kernel, cl_mem inAG, cl_mem inBG, cl_mem outCG, float *inA, float *inB, float *outC, int row, int col, size_t tileSize);

/*
 * Driver: computes an N x N Pearson correlation matrix C from two N x M
 * input matrices A and B, distributed with Global Arrays and accelerated
 * with an OpenCL kernel ("corrcoefmatrixAB_opencl" from pearson.cl).
 *
 * Work distribution: the C matrix is divided into num_blocks output tiles;
 * each MPI rank repeatedly grabs the next tile index via an atomic
 * NGA_Read_inc on a 1-element counter array (dynamic self-scheduling).
 */
int main(int argc, char **argv) {
   double start_time0, start_time, end_time;

   /* NOTE(review): these are automatic (stack) arrays.  With N=20480, M=100,
    * a and b are ~8 MB each and c is N*N*4 bytes ~= 1.6 GB — almost certainly
    * a stack overflow on default limits.  c is only written by the zeroing
    * loop below (its other uses are commented out); consider heap allocation
    * or removal.  TODO confirm intended sizes. */
   float a[N][M], b[N][M], c[N][N];
   int me, nProcs, num_rows, num_cols, num_blocks;
   //int heap=3000000, stack=3000000;

   int dims[NDIM], chunk[NDIM], ld[NDIM], count_dims[1], count_chunk[1];
   int g_a, g_b, g_c, i, j, icount, jcount, ncols;
   long g_count, count;
   
   int count_lo[1], count_hi[1], count_ld[1];
   int adims[2], achunk[2], alo[2], ahi[2], ald[2];
   int bdims[2], bchunk[2], blo[2], bhi[2], bld[2];
   int cdims[2], cchunk[2], clo[2], chi[2], cld[2];

   MPI_Init(&argc, &argv);
   GA_Initialize();
   me = GA_Nodeid(); nProcs=GA_Nnodes();

   /* Report which host each rank landed on (useful for multi-node debugging). */
   char hostname[128];
   int hostname_len;
   MPI_Get_processor_name(hostname,&hostname_len);
   printf("Rank %d is running on %s\n",me,hostname);

   /* Tiling requires N to divide evenly into BLOCKSIZE-wide tiles. */
   if ( (N % BLOCKSIZE) > 0) {
      fprintf(stderr,"N must be a multiple of BLOCKSIZE\n");
      MPI_Abort(MPI_COMM_WORLD,1);
      exit(1);
   }

   // Calculate output blocks of C matrix
   num_rows = N / BLOCKSIZE;
   num_cols = N / BLOCKSIZE;
   num_blocks = num_rows*num_cols;

   /* create a global array g_a ,g_b, g_c */

   adims[0] = N;
   adims[1] = M;
   achunk[0] = N / nProcs;  /* distribute A by rows across ranks */
   achunk[1] = M;

   g_a = NGA_Create(C_FLOAT, NDIM, adims, "array A", achunk);
   if (!g_a) GA_Error("create failed: A", NDIM);
   if (me==0) printf(" Create Array A\n");
   
   g_b = GA_Duplicate(g_a, "array B");
   if (!g_b) GA_Error("duplicate failed", NDIM);
   if (me==0) printf(" Create Array B\n");

   cdims[0] = N;
   cdims[1] = N;
   cchunk[0] = N / nProcs;  /* distribute C by rows as well */
   cchunk[1] = N;

   g_c = NGA_Create(C_FLOAT, NDIM, cdims, "array C", cchunk);

   if (!g_c) GA_Error("create failed: C", NDIM);
   if (me==0) printf(" Create Array C\n");

   /* initialize data in matrices a and b (every rank fills its local copy,
    * but only rank 0 pushes it into the global arrays below) */

   for (i=0; i < N; i++){
      for (j=0; j < M; j++){
         a[i][j] = i+j;
         b[i][j] = i+j;
      }
   }

   /* copy data to global arrays g_a and g_b */
   alo[0] = 0; blo[0] = 0;
   alo[1] = 0; blo[1] = 0;
   ahi[0] = N-1; bhi[0] = N-1;
   ahi[1] = M-1; bhi[1] = M-1;
   ald[0] = M; bld[0] = M;   /* leading dimension (row stride) of the host buffers */
   ald[1] = -1; bld[1] = -1; // unused
      
   if (me == 0){
      NGA_Put(g_a, alo, ahi, a , ald);
      NGA_Put(g_b, blo, bhi, b , bld);
   }

   //if (me==0) GA_Print(g_b);

   /* 1-element global counter used as the shared task index. */
   count_dims[0]=1; count_chunk[0]=1;

   g_count = NGA_Create(C_LONG, 1, count_dims, "Array Count", count_chunk);

   count_lo[0]=0; count_hi[0]=0; count_ld[0]=0;
   count = 0;    

   if (me==0) NGA_Put(g_count, count_lo , count_hi, &count, count_ld);

   ncols = N / BLOCKSIZE;  /* tiles per row of C; used to decode linear task ids */

   /* set zero value in matrices a, b and c*/
   for (i=0; i< N; i++){
      for (j=0;j < M; j++){
         a[i][j] = 0.0;
         b[i][j] = 0.0;
      }
   }
   for (i=0; i< N; i++){
      for(j=0; j < N;j++){
         c[i][j] = 0.0;   
      }
   }

   start_time0 = MPI_Wtime();
   start_time = MPI_Wtime();

   cl_device_id device_id;
   cl_context context;
   cl_command_queue queue;
   cl_kernel kernel;
   cl_int err;

   // Read ini configuration file: try ./oclconfig.ini first, then $HOME/oclconfig.ini
   /* NOTE(review): config.device_type is never default-initialized; if the ini
    * file parses but lacks DEVICE_TYPE, an indeterminate value is passed to
    * opencl_get_device_id().  Also getenv("HOME") may return NULL, making the
    * strlen/strcpy below UB — verify both. */
   Configuration config;
   char *home = getenv("HOME");
   if (ini_parse(INI,ini_handler,&config) < 0) {
      char* filename = (char *) malloc((strlen(home) + strlen(INI) + 2)*sizeof(char));
      strcpy(filename,home);
      strcat(filename,"/"INI);
      if (ini_parse(filename,ini_handler,&config) < 0) {
         fprintf(stderr,"Can't load config file\n");
         exit(1);
      }
      printf("Read oclconfig.ini from %s\n",home);
      free(filename);
   } else {
      printf("Read oclconfig.ini from .\n");
   }

   // Get a device 
   device_id = opencl_get_device_id(config.device_type);

   // Create a context
   context = clCreateContext(0,1,&device_id,NULL,NULL,&err);
   check_cl_error(err);

   // Create a command queue
   queue = clCreateCommandQueue(context,device_id,0,&err);
   check_cl_error(err);

   // Load and compile a kernel from source file 
   const char* sources[2] = {"pearson.cl","../common.h"};
   kernel = opencl_load_kernel_from_file(context,device_id,2,sources,"corrcoefmatrixAB_opencl");

   // Find appropriate tile size: shrink TILESIZE until tileSize^2 fits a work group
   /* NOTE(review): wavefrontSize/workGroupSize/tileSize are size_t but printed
    * with %d — should be %zu (UB per the printf contract). */
   size_t wavefrontSize = opencl_get_wavefront_size(kernel,device_id);
   printf("Device wavefront size = %d\n",wavefrontSize);
   //cl_device_id device_id = opencl_query_device_id(queue);
   size_t workGroupSize = opencl_get_max_work_group_size(device_id);
   printf("Max work group size = %d\n",workGroupSize);
   size_t tileSize = TILESIZE;
   while (tileSize*tileSize > workGroupSize) {
      tileSize >>= 1;
   }
   printf("Tile size = %d\n",tileSize);
   if ( (BLOCKSIZE % tileSize) > 0) {
      fprintf(stderr,"BLOCKSIZE must be multiple of %d\n",tileSize);
      MPI_Abort(MPI_COMM_WORLD,1);
      exit(1);
   }

   /* Host staging buffers and their device-side counterparts, reused for
    * every tile to avoid per-task allocation. */
   cl_mem inAG, inBG, outCG;
   float *inA, *inB, *outC;
   int sizeIn, sizeOut;

   sizeIn = BLOCKSIZE*M*sizeof(float);
   sizeOut = BLOCKSIZE*BLOCKSIZE*sizeof(float);
   
   inA = (float *) malloc(sizeIn);
   inB = (float *) malloc(sizeIn);
   outC = (float *) malloc(sizeOut);

   inAG = clCreateBuffer(context,CL_MEM_READ_ONLY,sizeIn,NULL,&err);
   if (inAG == NULL) {
      fprintf(stderr,"device memory allocation error: %s!\n",clErrorString(err));
      MPI_Abort(MPI_COMM_WORLD,1);
   }
   inBG = clCreateBuffer(context,CL_MEM_READ_ONLY,sizeIn,NULL,&err);
   if (inBG == NULL) {
      fprintf(stderr,"device memory allocation error: %s!\n",clErrorString(err));
      MPI_Abort(MPI_COMM_WORLD,1);
   }
   outCG = clCreateBuffer(context,CL_MEM_WRITE_ONLY,sizeOut,NULL,&err);
   if (outCG == NULL) {
      fprintf(stderr,"device memory allocation error: %s!\n",clErrorString(err));
      MPI_Abort(MPI_COMM_WORLD,1);
   }

   end_time = MPI_Wtime();
   fprintf(stderr,"(process %d) spent %g in device and kernel initialization\n", me, (end_time-start_time));

   int process_num_tasks = 0;

   GA_Sync();

   /* Dynamic task loop: atomically claim the next tile index until all
    * num_blocks tiles are done. */
   do {
      count = NGA_Read_inc(g_count, count_lo, 1);
   
      if(count >= num_blocks) break;

      /* Decode linear task id into a (row, col) tile position. */
      icount = count / ncols;
      jcount = count % ncols;

/*
      alo[0] = icount*BLOCKSIZE; alo[1] = 0;
      ahi[0] = (icount+1)*BLOCKSIZE-1; ahi[1] = M-1;

      blo[0] = jcount*BLOCKSIZE; blo[1] = 0;
      bhi[0] = (jcount+1)*BLOCKSIZE-1; bhi[1] = M-1;
*/
      /* NOTE(review): the extents here use REDUCED_BLOCKSIZE (128) while
       * calculation() below is invoked with row=BLOCKSIZE (1024), so only a
       * REDUCED_BLOCKSIZE x M slice of inA/inB is fetched but the kernel
       * reads/writes full BLOCKSIZE-sized buffers, and the tile grid itself
       * (num_blocks/ncols) is still based on BLOCKSIZE.  This looks like a
       * partially applied experiment — confirm which block size is intended. */
      alo[0] = icount*REDUCED_BLOCKSIZE; alo[1] = 0;
      ahi[0] = (icount+1)*REDUCED_BLOCKSIZE-1; ahi[1] = M-1;

      blo[0] = jcount*REDUCED_BLOCKSIZE; blo[1] = 0;
      bhi[0] = (jcount+1)*REDUCED_BLOCKSIZE-1; bhi[1] = M-1;

      /* NOTE(review): count is long but printed with %d — should be %ld. */
      printf("Process %d calculates block %d of C, icount=%d, jcount=%d, %d %d %d %d\n", me, count, icount, jcount, alo[0], alo[1], ahi[0], ahi[1]);
      
      // Get input data 
      start_time = MPI_Wtime();
      NGA_Get(g_a, alo, ahi, inA, ald);
      NGA_Get(g_b, blo, bhi, inB, bld);
      end_time = MPI_Wtime();
      fprintf(stderr,"(process %d) spent %g in NGA_Get()\n", me, (end_time-start_time));

      /*put result to global array c*/
      start_time = end_time;
      outC = calculation(count,queue,kernel,inAG,inBG,outCG,inA,inB,outC,BLOCKSIZE,M,tileSize);
      end_time = MPI_Wtime();
      fprintf(stderr,"(process %d) finished task = %d in %g\n", me, count,(end_time-start_time));

/*
      clo[0] = icount*BLOCKSIZE; clo[1] = jcount*BLOCKSIZE;
      chi[0] = (icount*BLOCKSIZE)+BLOCKSIZE-1; chi[1] = (jcount*BLOCKSIZE)+BLOCKSIZE-1;
      cld[0] = BLOCKSIZE; cld[1] = -1;
*/
      clo[0] = icount*REDUCED_BLOCKSIZE; clo[1] = jcount*REDUCED_BLOCKSIZE;
      chi[0] = (icount*REDUCED_BLOCKSIZE)+REDUCED_BLOCKSIZE-1; chi[1] = (jcount*REDUCED_BLOCKSIZE)+REDUCED_BLOCKSIZE-1;
      cld[0] = REDUCED_BLOCKSIZE; cld[1] = -1;

      start_time = end_time;
      NGA_Put(g_c, clo, chi, outC, cld);   
      end_time = MPI_Wtime();
      fprintf(stderr,"(process %d) spent %g in NGA_Put()\n", me, (end_time-start_time));
      process_num_tasks = process_num_tasks + 1;

      /*if(icount==6 && jcount==6){   
         for (i=0; i < N; i++){
            for (j=0;j <M;j++){
               printf("%.2f ", b[i][j]);
            }
            printf("\n");
         }   
      }*/
      //break;
   }
   while(count < num_blocks);

   fprintf(stderr,"process %d number of tasks = %d\n", me,
              process_num_tasks);

   /* Release OpenCL resources before the final barrier. */
   clReleaseKernel(kernel);
   clReleaseMemObject(inAG);
   clReleaseMemObject(inBG);
   clReleaseMemObject(outCG);
   clReleaseCommandQueue(queue);
   clReleaseContext(context);

   free(inA);
   free(inB);
   free(outC);

   GA_Sync();

   end_time = MPI_Wtime();
   fprintf(stderr,"(process %d) finished all tasks = %d in %g\n", me, count,(end_time-start_time0));

   //if (me==0) GA_Print(g_c);

/*
   if (me==0) { // Print final output
      clo[0] = 0; clo[1] = 0;
      chi[0] = N-1; chi[1] = N-1;
      cld[0] = N; cld[1] = -1;

      NGA_Get(g_c, clo, chi, c, cld);

      for (i=0; i < N; i++) {
         for (j=0; j < N; j++) {
            printf(" %f",c[i][j]);
         }
         printf("\n");
      }

   }
*/
      
   GA_Destroy(g_a);
   GA_Destroy(g_b);
   GA_Destroy(g_c);

   GA_Terminate();
   MPI_Finalize();
   return 0;
}

/*
 * Execute one row x row output tile of the correlation kernel on the device.
 *
 * Copies the host tiles inA and inB (each row x col floats) into the
 * pre-allocated device buffers inAG/inBG, sets the kernel arguments, runs
 * the kernel over a (row x row) global range with (tileSize x tileSize)
 * work groups, and reads the result back into outC.
 *
 * task_id is currently unused (kept for the caller's interface / logging).
 *
 * Returns outC on success, or NULL if any OpenCL call fails (an error
 * message is printed to stderr; the caller does not currently check this —
 * NOTE(review): a NULL return leaks into NGA_Put in main).
 */
float *calculation(int task_id, cl_command_queue queue, cl_kernel kernel, cl_mem inAG, cl_mem inBG, cl_mem outCG, float *inA, float *inB, float *outC, int row, int col, size_t tileSize) {
  cl_int err;

  /* Byte sizes as size_t to avoid int overflow for large tiles. */
  size_t sizeIn = (size_t)row * (size_t)col * sizeof(float);
  size_t sizeOut = (size_t)row * (size_t)row * sizeof(float);

  /* Blocking writes (CL_TRUE): the host buffers may be reused immediately. */
  if ((err=clEnqueueWriteBuffer(queue,inAG,CL_TRUE,0,sizeIn,inA,0,NULL,NULL)) != CL_SUCCESS) {
     fprintf(stderr,"memcpy from host to device fail: %s\n",clErrorString(err));
     return NULL;
  }
  if ((err=clEnqueueWriteBuffer(queue,inBG,CL_TRUE,0,sizeIn,inB,0,NULL,NULL)) != CL_SUCCESS) {
     fprintf(stderr,"memcpy from host to device fail: %s\n",clErrorString(err));
     return NULL;
  }

  /* BUG FIX: argument 5 is declared with sizeof(int) but the original passed
   * &tileSize, a size_t* — only the low 4 bytes were read, which happens to
   * work on little-endian hosts and silently breaks on big-endian ones.
   * Copy into a genuine int so the size and the pointee type agree. */
  int tile = (int) tileSize;

  err = clSetKernelArg(kernel,0,sizeof(cl_mem),&outCG);
  err |= clSetKernelArg(kernel,1,sizeof(cl_mem),&inAG);
  err |= clSetKernelArg(kernel,2,sizeof(cl_mem),&inBG);
  err |= clSetKernelArg(kernel,3,sizeof(int),&row);
  err |= clSetKernelArg(kernel,4,sizeof(int),&col);
  err |= clSetKernelArg(kernel,5,sizeof(int),&tile);
  if (err != CL_SUCCESS) {
     fprintf(stderr,"set kernel arguments fails: %s\n",clErrorString(err));
     return NULL;
  }

  /* Global range must be a multiple of the local range; main() guarantees
   * BLOCKSIZE % tileSize == 0 before calling us. */
  size_t localSize[2] = {tileSize,tileSize};
  size_t globalSize[2] = {(size_t)row,(size_t)row};

  err = clEnqueueNDRangeKernel(queue,kernel,2,NULL,globalSize,localSize,0,NULL,NULL);
  if (err != CL_SUCCESS) {
     fprintf(stderr,"execute the kernel fails: %s\n",clErrorString(err));
     return NULL;
  }

  /* Surface asynchronous launch/runtime errors before reading results. */
  if ((err=clFinish(queue)) != CL_SUCCESS) {
     fprintf(stderr,"kernel execution fails: %s\n",clErrorString(err));
     return NULL;
  }

  if ((err=clEnqueueReadBuffer(queue,outCG,CL_TRUE,0,sizeOut,outC,0,NULL,NULL)) != CL_SUCCESS) {
     fprintf(stderr,"memcpy from device to host fail: %s\n",clErrorString(err));
     return NULL;
  }

  return outC;
}


