#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <complex.h>
#include <cuComplex.h>
#include <limits.h>
#include <omp.h>
#include <string.h>

#include <cube.h>

#define TILE_WIDTH 8
#define TILE_HEIGHT 8
#define NPOL 2
#define NSTATION 512
#define NTIME 10
#define NFREQUENCY 12
#define TOL 1e-3
#define NBASELINE ((NSTATION+1)*(NSTATION/2))

// Flag passed as the final argument to every kernel launch below; nonzero
// presumably makes the kernels write results to device memory (confirm in
// the kernel headers).  NOTE(review): the name shadows POSIX write(2).
int write = 1;

#include <correlator.h>

// whether to texture cache or not
//#define USE_TEX
// note that textures are currently broken

#ifdef USE_TEX
texture<float2, 1, cudaReadModeElementType> tex1d;
texture<float2, 2, cudaReadModeElementType> tex2d;
texture<short2, 2, cudaReadModeNormalizedFloat> tex2dshort;
#endif

#include <1x1_simple.h>
#include <1x1_shared_float.h>
#include <1x1_shared_float4.h>
#include <2x2_simple.h>
#include <2x2_shared_float.h>
#include <2x2_shared_float2.h>
#include <2x2_shared_float4.h>

// Report the most recent CUDA runtime error (if any) with file/line context.
// cudaGetLastError() also clears the sticky error state.  This macro only
// prints -- execution continues after a failure.
#define checkCudaError() do {                           \
 cudaError_t error = cudaGetLastError();               \
 if (error != cudaSuccess) {                            \
   printf("(CUDA) %s", cudaGetErrorString(error)); \
   printf(" (" __FILE__ ":%d)\n", __LINE__);  \
  }\
} while (0)

// Fill random_num with `length` complex samples.  Real and imaginary parts
// are each drawn from roughly [-1, 1] and then offset by +10 so no sample
// sits near zero (keeps the relative-error comparison in checkResult well
// conditioned).
// NOTE(review): the [-1, 1] pre-offset range assumes RAND_MAX == INT_MAX;
// on platforms with a smaller RAND_MAX the range shrinks accordingly.
void random_complex(float complex* random_num, int length) {
  const float half_range = (float)(INT_MAX/2);
  for (int n = 0; n < length; n++) {
    float re = (rand() - INT_MAX/2) / half_range + 10.0f;
    float im = (rand() - INT_MAX/2) / half_range + 10.0f;
    random_num[n] = re + I*im;
  }
}

// CPU reference for the correlator: for every frequency channel and every
// station pair (station1 >= station2) accumulate the four polarization
// cross-products over NTIME samples.
//
//   array_h   - input samples, laid out [freq][time][station][pol]
//   compare_h - output visibilities, laid out [freq][baseline][XX,XY,YX,YY]
//
// Parallelized over freq*baseline with OpenMP; each iteration writes a
// disjoint slice of compare_h, so no synchronization is needed.
void outerProduct(float complex *compare_h, float complex *array_h) {
  int num_procs = omp_get_num_procs();
#pragma omp parallel num_threads(num_procs)
  {
#pragma omp for schedule(dynamic)
    for (int idx = 0; idx < NFREQUENCY*NBASELINE; idx++) {
      int freq = idx / NBASELINE;
      int baseline = idx - freq*NBASELINE;
      // Invert the triangular index: baseline = s1*(s1+1)/2 + s2 with
      // s2 <= s1 (double-precision sqrt, truncated to int).
      int s1 = -0.5 + sqrt(0.25 + 2*baseline);
      int s2 = baseline - (s1*(s1+1))/2;
      float complex xx = 0, xy = 0, yx = 0, yy = 0;
      for (int t = 0; t < NTIME; t++) {
        int base = (freq*NTIME + t)*NSTATION;
        float complex rowX = array_h[(base + s1)*NPOL];
        float complex rowY = array_h[(base + s1)*NPOL + 1];
        float complex colX = array_h[(base + s2)*NPOL];
        float complex colY = array_h[(base + s2)*NPOL + 1];
        // Multiply by the conjugate of the column station's samples.
        xx += rowX * conjf(colX);
        xy += rowX * conjf(colY);
        yx += rowY * conjf(colX);
        yy += rowY * conjf(colY);
      }
      compare_h[4*idx]     = xx;
      compare_h[4*idx + 1] = xy;
      compare_h[4*idx + 2] = yx;
      compare_h[4*idx + 3] = yy;
    } //end parallel for loop
  }  //end parallel segment
}

//check that GPU calculation matches the CPU one for increased matrix size
// Verify the GPU result against the CPU reference, element by element,
// using a relative-error bound of TOL.
//
//   gpu    - device-computed visibilities (matLength entries)
//   cpu    - host reference produced by outerProduct()
//   kernel - human-readable kernel name for the progress messages
//            (const: callers pass string literals)
//
// Aborts via assert() on the first element whose relative error reaches TOL.
void checkResult(float complex *gpu, float complex *cpu, const char *kernel) {

  printf("Checking %s kernel...\n", kernel); fflush(stdout);

  for (int f=0; f<NFREQUENCY; f++) {
    for (int i=0; i<NSTATION; i++) {
      for (int j=0; j<=i; j++) {
        // Lower-triangular baseline index within this frequency channel.
        int k = f*(NSTATION+1)*(NSTATION/2) + i*(i+1)/2 + j;
        for (int pol1=0; pol1<NPOL; pol1++) {
          for (int pol2=0; pol2<NPOL; pol2++) {
            int index = (k*NPOL+pol1)*NPOL+pol2;
            //printf("%d %d %d %d %d %d %d\n",f,i,j,k,pol1,pol2,index);
            //printf("%f  %f\n", __real__ cpu[index], __imag__ cpu[index]);
            //printf("%f  %f\n", __real__ gpu[index], __imag__ gpu[index]);
            // Multiplied-through form of |cpu-gpu|/|cpu| < TOL: avoids the
            // NaN/Inf a straight division would produce if a reference
            // element happened to be exactly zero.
            assert(cabsf(cpu[index] - gpu[index]) < TOL * cabsf(cpu[index]));
          }
        }
      }
    }
  }

  //if program makes it this far, then memory was copied successfully
  printf("Outer product operation successful for %s\n\n", kernel);
}

// Test driver: generate random input, compute the CPU reference, then run
// the shared2x2float2 correlator kernel on each available GPU (one OpenMP
// thread per GPU) and check the result against the reference.
int main(int argc, char** argv) {

  CUBE_INIT();

  //  cudaSetDevice(0);

  //allocate memory pointers on host machine
  float complex *array_h;
  float complex *product_h;
  float complex *compare_h;
  //device pointers; NULL so the trailing cudaFree is a safe no-op (the
  //multi-GPU section below uses its own thread-local device buffers)
  float complex *array_d = NULL;
  float complex *product_d = NULL;

  int vecLength = NFREQUENCY * NTIME * NSTATION * NPOL;
  int matLength = NFREQUENCY * ((NSTATION+1)*(NSTATION/2)*NPOL*NPOL);
  int Nblock = NSTATION/TILE_WIDTH;

  //perform memory allocation
  array_h = (float complex *) malloc(vecLength*sizeof(float complex));
  product_h = (float complex *) malloc(matLength*sizeof(float complex));
  compare_h = (float complex *) malloc(matLength*sizeof(float complex));
  /*  
  cudaMalloc((void **) &array_d, vecLength*sizeof(float complex));
  cudaMalloc((void **) &product_d, matLength*sizeof(float complex));
  checkCudaError();
  */
  dim3 dimBlock(TILE_WIDTH,TILE_HEIGHT,1);
  //allocated exactly as many thread blocks as are needed
  dim3 dimGrid1(((Nblock+1)*Nblock)/2, NFREQUENCY); 
  dim3 dimGrid2(((Nblock/2+1)*(Nblock/2))/2, NFREQUENCY);

  random_complex(array_h, vecLength);

  outerProduct(compare_h, array_h);

  //move memory from host to device
  //  cudaMemcpy(array_d, array_h, vecLength*sizeof(float complex), cudaMemcpyHostToDevice);
  //  checkCudaError();
  /*
  // setting the L1 cache to 48K is faster for Fermi
  cudaFuncSetCacheConfig(simple1x1, cudaFuncCachePreferL1);

  // 1x1 tile
  cudaMemset(product_d, '0', matLength*sizeof(float complex));
  CUBE_KERNEL_CALL(simple1x1, dimGrid1, dimBlock, 0, (cuFloatComplex*)array_d, 
		   (cuFloatComplex*)product_d, write);
  checkCudaError();
  //  cudaThreadExit();
  cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
  checkCudaError();
  checkResult(product_h, compare_h, "simple1x1");


  if (TILE_WIDTH == 8 && TILE_HEIGHT == 8) {
    // 1x1 tile using shared memory float loads
    cudaMemset(product_d, '0', matLength*sizeof(float complex));
    CUBE_KERNEL_CALL(shared1x1float, dimGrid1, dimBlock, 0, (cuFloatComplex*)array_d, 
		     (cuFloatComplex*)product_d, write);
    checkCudaError();
    //  cudaThreadExit();
    cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
    checkCudaError();
    checkResult(product_h, compare_h, "shared1x1float");
  }

  // 1x1 tile using shared memory float4 loads
  cudaMemset(product_d, '0', matLength*sizeof(float complex));
  CUBE_KERNEL_CALL(shared1x1float4, dimGrid1, dimBlock, 0, (cuFloatComplex*)array_d, 
		   (cuFloatComplex*)product_d, write);
  checkCudaError();
  //  cudaThreadExit();
  cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
  checkCudaError();
  checkResult(product_h, compare_h, "shared1x1float4");

  // 2x2 tile
  cudaMemset(product_d, '0', matLength*sizeof(float complex));
  CUBE_KERNEL_CALL(simple2x2, dimGrid2, dimBlock, 0, (cuFloatComplex*)array_d, 
		   (cuFloatComplex*)product_d, write);
  checkCudaError();
  //  cudaThreadExit();
  cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
  checkCudaError();
  checkResult(product_h, compare_h, "simple2x2");

  if (TILE_WIDTH == 16 && TILE_HEIGHT == 16) {
    // 2x2 tile using shared memory float loads
    CUBE_KERNEL_CALL(shared2x2float, dimGrid2, dimBlock, 0, (cuFloatComplex*)array_d, 
		     (cuFloatComplex*)product_d, write);
    checkCudaError();
    //  cudaThreadExit();
    cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
    checkCudaError();
    checkResult(product_h, compare_h, "shared2x2float");
  }

  if (TILE_WIDTH == 8 && TILE_HEIGHT == 8) {
    // 2x2 tile using shared memory float2 loads
    cudaMemset(product_d, '0', matLength*sizeof(float complex));
#ifdef USE_TEX
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float2>();
    cudaBindTexture2D(0, tex2d, array_d, channelDesc, NSTATION*NPOL, NTIME, NSTATION*NPOL*8);
    CUBE_KERNEL_CALL(shared2x2float2, dimGrid2, dimBlock, 0, (cuFloatComplex*)product_d,
		     write);
#else
    CUBE_KERNEL_CALL(shared2x2float2, dimGrid2, dimBlock, 0, (cuFloatComplex*)array_d, 
		     (cuFloatComplex*)product_d, write);
#endif
    checkCudaError();
    //  cudaThreadExit();
    cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
    checkCudaError();
    checkResult(product_h, compare_h, "shared2x2float2");
  }

  // 2x2 tile using shared memory float4 loads
  cudaMemset(product_d, '0', matLength*sizeof(float complex));
  CUBE_KERNEL_CALL(shared2x2float4, dimGrid2, dimBlock, 0, (cuFloatComplex*)array_d, 
		   (cuFloatComplex*)product_d, write);
  checkCudaError();
  //  cudaThreadExit();
  cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
  checkCudaError();
  checkResult(product_h, compare_h, "shared2x2float4");

  cudaFree(array_d);
  cudaFree(product_d);
  memset(product_h, '0', matLength*sizeof(float complex));
  */
  //begin multi-gpu portion
  //query system to determine OpenMP parameters
  int num_gpus = 1;
  //  cudaGetDeviceCount(&num_gpus);
  int num_procs = omp_get_num_procs();
  printf("Number of processors: %d\n", num_procs);
  printf("Number of GPUs: %d\n", num_gpus);

  //begin OpenMP section
#pragma omp parallel firstprivate(num_gpus) num_threads(num_gpus)
  {

    unsigned int tid = omp_get_thread_num();

    //assign each CPU thread to a GPU
    int gpu_id = -1;
    cudaSetDevice(tid);
    // cudaGetDevice() returns a cudaError_t, NOT the device ordinal -- the
    // ordinal comes back through the out-parameter.
    cudaError_t dev_err = cudaGetDevice(&gpu_id);
    checkCudaError();
    if(dev_err != cudaSuccess || (int)tid != gpu_id){
      printf("Error in CPU-GPU assignment\n");
      printf("Returning...\n");
     }
    else {

      // Thread-local device buffers: the shared array_d/product_d pointers
      // would race if several threads cudaMalloc'ed into them concurrently.
      float complex *array_td = NULL;
      float complex *product_td = NULL;
      cudaMalloc((void **) &array_td, vecLength*sizeof(float complex));
      cudaMalloc((void **) &product_td, matLength*sizeof(float complex));
      checkCudaError();

      //clear out any previous values (fill with real zero bytes, not the
      //character '0' == 0x30)
      cudaMemset(array_td, 0, vecLength*sizeof(float complex)/num_gpus);
      cudaMemset(product_td, 0, matLength*sizeof(float complex)/num_gpus);
      checkCudaError();

      //move relevant data down into the array; offset by a thread id's worth of vecLength
      //NOTE(review): with num_gpus > 1 this copies a full vecLength starting
      //at an offset, over-reading array_h -- the partitioning is only correct
      //for the num_gpus == 1 value hard-coded above; confirm before enabling
      //cudaGetDeviceCount
      cudaMemcpy(array_td, &array_h[tid*vecLength/num_gpus], vecLength*sizeof(float complex), cudaMemcpyHostToDevice);
      checkCudaError();

      //call kernel to perform the calculation
      CUBE_KERNEL_CALL(shared2x2float2, dimGrid2, dimBlock, 0, (cuFloatComplex*)array_td, (cuFloatComplex*)product_td, write);
      checkCudaError();

      //copy the data back, employing a similar strategy as above
      cudaMemcpy(&product_h[tid*matLength/num_gpus], product_td, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
      checkCudaError();

      //release this thread's device buffers
      cudaFree(array_td);
      cudaFree(product_td);

    }

  } //close parallel segment

  //check that multi-gpu process yielded the correct answer
  checkResult(product_h, compare_h, "multi-shared2x2float2");


  //free device memory (no-ops: both pointers are still NULL here)
  cudaFree(array_d);
  cudaFree(product_d);

  //free host memory
  free(array_h);
  free(product_h);
  free(compare_h);

  CUBE_WRITE();

  return 0;
}
