#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <math.h>
#include <complex.h>
#include <cuComplex.h>
#include <limits.h>
#include <omp.h>
#include <time.h>

#include <cube.h>

#define TILE_WIDTH 8
#define TILE_HEIGHT 8
#define NPOL 2
#define NSTATION 512
#define NTIME 500
#define NFREQUENCY 12
#define TOL 1e-3
#define NBASELINE ((NSTATION+1)*(NSTATION/2))
#define NGPU 2

int write = 1;

#include <correlator.h>

// whether to use multi-GPU or not
//#define MULTI_GPU
// NFREQUENCY and NSTATION must be divisible by the number of GPUs

// whether to use texture cache or not
//#define USE_TEX

#ifdef USE_TEX
texture<float2, 1, cudaReadModeElementType> tex1d;
texture<float2, 2, cudaReadModeElementType> tex2d;
texture<short2, 2, cudaReadModeNormalizedFloat> tex2dshort;
#endif

// uncomment to skip outer product CPU computation and not check GPU answers
#define SPEEDY

// uncomment to time CPU execution
//#define CLOCK_CPU

#include <1x1_simple.h>
#include <1x1_shared_float.h>
#include <1x1_shared_float4.h>
#include <2x2_simple.h>
#include <2x2_shared_float.h>
#include <2x2_shared_float2.h>
#include <2x2_shared_float4.h>
#include <2x1_simple.h>
#include <2x1_shared_float.h>
#include <1x2_simple.h>
#include <1x2_shared_float.h>

// Report (but do not abort on) the most recent CUDA error, tagged with the
// file and line of the check site.  Note: cudaGetLastError() also CLEARS the
// sticky error state, so a later check will not re-report the same error.
// Comments must stay outside the macro: a // comment before a backslash
// continuation would break the macro definition.
#define checkCudaError() do {				\
    cudaError_t error = cudaGetLastError();		\
    if (error != cudaSuccess) {				\
      printf("(CUDA) %s", cudaGetErrorString(error));	\
      printf(" (" __FILE__ ":%d)\n", __LINE__);		\
    }							\
} while (0)

// Fill random_num with `length` complex samples whose real and imaginary
// parts are drawn uniformly from roughly [9, 11] (unit-amplitude noise
// offset by +10).  Uses rand(); call srand() first for reproducibility.
void random_complex(float complex* random_num, int length) {
  float a,b;
  for(int i=0; i<length; i++){
    // Map rand()'s range [0, RAND_MAX] onto [-1, 1].  The original divided by
    // INT_MAX/2, which is only correct where RAND_MAX == INT_MAX (glibc);
    // RAND_MAX is the portable bound on rand() and may be as small as 32767.
    a = ((rand()-RAND_MAX/2) / (float)(RAND_MAX/2)) + 10.0f;
    b = ((rand()-RAND_MAX/2) / (float)(RAND_MAX/2)) + 10.0f;
    random_num[i] = a + I*b;
  }
}

// CPU reference implementation of the correlator: for every frequency channel
// and every station pair (lower triangle, autos included), accumulate the 2x2
// polarization outer product input[station1] * conj(input[station2]) over all
// NTIME samples.
//
// Output layout matches the GPU kernels: for baseline index
// k = station1*(station1+1)/2 + station2 within frequency f,
// compare_h[4*(f*NBASELINE + k) + {0,1,2,3}] = {XX, XY, YX, YY}.
// Parallelized over the combined (frequency, baseline) index with OpenMP.
void outerProduct(float complex *compare_h, float complex *array_h) {
  int num_procs = omp_get_num_procs();
#pragma omp parallel num_threads(num_procs)
  {
#pragma omp for schedule(dynamic)
    for(int i=0; i<NFREQUENCY*NBASELINE; i++){
      int f = i/NBASELINE;
      int k = i - f*NBASELINE;
      // Invert the triangular baseline index to recover the station pair.
      int station1 = -0.5 + sqrt(0.25 + 2*k);
      int station2 = k - ((station1+1)*station1)/2;
      float complex sumXX = 0.0 + I*0.0;
      float complex sumXY = 0.0 + I*0.0;
      float complex sumYX = 0.0 + I*0.0;
      float complex sumYY = 0.0 + I*0.0;
      float complex inputRowX, inputRowY, inputColX, inputColY;
      for(int t=0; t<NTIME; t++){
	// Input layout: [time][frequency][station][polarization].
	inputRowX = array_h[((t*NFREQUENCY + f)*NSTATION + station1)*NPOL];
	inputRowY = array_h[((t*NFREQUENCY + f)*NSTATION + station1)*NPOL + 1];
	inputColX = array_h[((t*NFREQUENCY + f)*NSTATION + station2)*NPOL];
	inputColY = array_h[((t*NFREQUENCY + f)*NSTATION + station2)*NPOL + 1];
	// conjf() is the standard C complex conjugate; the original used the
	// GNU-only `~` operator on complex operands, which is not portable.
	sumXX += inputRowX * conjf(inputColX);
	sumXY += inputRowX * conjf(inputColY);
	sumYX += inputRowY * conjf(inputColX);
	sumYY += inputRowY * conjf(inputColY);
      }
      compare_h[4*i] = sumXX;
      compare_h[4*i + 1] = sumXY;
      compare_h[4*i + 2] = sumYX;
      compare_h[4*i + 3] = sumYY;
    } //end parallel for loop
  }  //end parallel segment
}

// Check that the GPU calculation matches the CPU reference for every
// frequency, baseline, and polarization combination.  When SPEEDY is defined
// the per-element comparison is compiled out and this only walks the indices,
// so the final "successful" message then only confirms the copies completed.
// `kernel` is a label for log messages only (const: callers pass literals).
void checkResult(float complex *gpu, float complex *cpu, const char *kernel) {

  printf("Checking %s kernel...\n", kernel); fflush(stdout);

  for(int f=0; f<NFREQUENCY; f++){
    for(int i=0; i<NSTATION; i++){
      for (int j=0; j<=i; j++) {
	int k = f*(NSTATION+1)*(NSTATION/2) + i*(i+1)/2 + j;
        for (int pol1=0; pol1<NPOL; pol1++) {
	  for (int pol2=0; pol2<NPOL; pol2++) {
	    int index = (k*NPOL+pol1)*NPOL+pol2;
#ifndef SPEEDY
	    // Relative error against the CPU value; fall back to an absolute
	    // tolerance when the reference is zero to avoid dividing by zero.
	    float denom = cabsf(cpu[index]);
	    if (denom > 0.0f)
	      assert(cabsf(cpu[index] - gpu[index]) / denom < TOL);
	    else
	      assert(cabsf(gpu[index]) < TOL);
#endif
	  }
	}
      }
    }
  }

  //if program makes it this far, then memory was copied successfully
  printf("Outer product operation successful for %s\n\n", kernel);
}

// Driver: generate random input, optionally compute the CPU reference
// (skipped under SPEEDY), then run each correlator kernel variant whose tile
// configuration matches TILE_WIDTH/TILE_HEIGHT and compare its output against
// the reference.  With MULTI_GPU defined, the shared2x2float2 kernel is run
// across NGPU devices instead (one OpenMP thread per GPU).
int main(int argc, char** argv) {

  CUBE_INIT();
#ifndef MULTI_GPU
  cudaSetDevice(0);
#endif
  // Host-side buffers: input samples, GPU result, CPU reference.
  float complex *array_h;
  float complex *product_h;
  float complex *compare_h;
#ifdef MULTI_GPU
  // Input re-ordered so each GPU receives a contiguous block of frequencies.
  float complex *reorder_h;
#endif
  // Device-side buffers.
  float complex *array_d;
  float complex *product_d;

  int vecLength = NFREQUENCY * NTIME * NSTATION * NPOL;               // input element count
  int matLength = NFREQUENCY * ((NSTATION+1)*(NSTATION/2)*NPOL*NPOL); // output element count
  int Nblock = NSTATION/min(TILE_HEIGHT,TILE_WIDTH);

  //perform memory allocation
  array_h = (float complex *) malloc(vecLength*sizeof(float complex));
  product_h = (float complex *) malloc(matLength*sizeof(float complex));
  compare_h = (float complex *) malloc(matLength*sizeof(float complex));
#ifdef MULTI_GPU
  reorder_h = (float complex *) malloc(vecLength*sizeof(float complex));
#endif
#ifndef MULTI_GPU
  cudaMalloc((void **) &array_d, vecLength*sizeof(float complex));
  cudaMalloc((void **) &product_d, matLength*sizeof(float complex));
  checkCudaError();
#endif
  dim3 dimBlock(TILE_WIDTH,TILE_HEIGHT,1);
  // Allocate exactly as many thread blocks as are needed: a triangular grid
  // of station tiles in x by frequency channels in y (dimGrid2 for the
  // 2x-tiled kernels, which cover twice the stations per block).
  dim3 dimGrid1(((Nblock+1)*Nblock)/2, NFREQUENCY); 
  dim3 dimGrid2(((Nblock/2+1)*(Nblock/2))/2, NFREQUENCY);

  random_complex(array_h, vecLength);

#ifdef CLOCK_CPU
  // Time the CPU reference and report its throughput.  `unsigned long long`
  // (the original's non-standard `ulonglong` compiles on no common toolchain)
  // is required: the flop count (~788e9) overflows 32 bits.
  unsigned long long nflops = (unsigned long long)NFREQUENCY*(unsigned long long)NBASELINE*(unsigned long long)NTIME*(unsigned long long)32;
  float time = 0;
  clock_t start, end;
  for(int i=0; i<CUBE_NCALL; i++){
    start = clock();
    outerProduct(compare_h, array_h);
    end = clock();
    time += (float)(end - start) / CLOCKS_PER_SEC;
  }
  time *= 1./CUBE_NCALL;
  // clock() accumulates CPU time across all OpenMP threads; divide by the
  // processor count to approximate wall-clock time.
  time *= 1./omp_get_num_procs();
  float avg = nflops/time;
  printf("Gflops: %4.3f  Time: %4.3f  Gflops/s: %3.4f\n", nflops*1e-9, time, avg*1e-9);
#endif

#ifndef SPEEDY
  outerProduct(compare_h, array_h);
#endif

#ifndef MULTI_GPU
  //move memory from host to device
  cudaMemcpy(array_d, array_h, vecLength*sizeof(float complex), cudaMemcpyHostToDevice);
  checkCudaError();

  // setting the L1 cache to 48K is faster for Fermi
  cudaFuncSetCacheConfig(simple1x1, cudaFuncCachePreferL1);

  if (TILE_WIDTH == TILE_HEIGHT){
    // 1x1 tile
    // Zero the output.  Note: 0, not '0' -- the original passed the character
    // '0' (0x30), which fills every byte with 0x30 rather than zeroing.
    cudaMemset(product_d, 0, matLength*sizeof(float complex));
    CUBE_KERNEL_CALL(simple1x1, dimGrid1, dimBlock, 0, (cuFloatComplex*)array_d, 
		     (cuFloatComplex*)product_d, write);
    checkCudaError();
    cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
    checkCudaError();
    checkResult(product_h, compare_h, "simple1x1");
  }

  if (TILE_WIDTH == 8 && TILE_HEIGHT == 8) {
    // 1x1 tile using shared memory float loads
    cudaMemset(product_d, 0, matLength*sizeof(float complex));
    CUBE_KERNEL_CALL(shared1x1float, dimGrid1, dimBlock, 0, (cuFloatComplex*)array_d, 
		     (cuFloatComplex*)product_d, write);
    checkCudaError();
    cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
    checkCudaError();
    checkResult(product_h, compare_h, "shared1x1float");
  }

  if (TILE_WIDTH == TILE_HEIGHT){
    // 1x1 tile using shared memory float4 loads
    cudaMemset(product_d, 0, matLength*sizeof(float complex));
    CUBE_KERNEL_CALL(shared1x1float4, dimGrid1, dimBlock, 0, (cuFloatComplex*)array_d, 
		     (cuFloatComplex*)product_d, write);
    checkCudaError();
    cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
    checkCudaError();
    checkResult(product_h, compare_h, "shared1x1float4");
  }

  if (TILE_WIDTH == TILE_HEIGHT){
    // 2x2 tile
    cudaMemset(product_d, 0, matLength*sizeof(float complex));
    CUBE_KERNEL_CALL(simple2x2, dimGrid2, dimBlock, 0, (cuFloatComplex*)array_d, 
		     (cuFloatComplex*)product_d, write);
    checkCudaError();
    cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
    checkCudaError();
    checkResult(product_h, compare_h, "simple2x2");
  }

  if (TILE_WIDTH == 16 && TILE_HEIGHT == 16 && NTIME%4 == 0) {
    // 2x2 tile using shared memory float loads
    // (memset added for consistency with the other branches)
    cudaMemset(product_d, 0, matLength*sizeof(float complex));
    CUBE_KERNEL_CALL(shared2x2float, dimGrid2, dimBlock, 0, (cuFloatComplex*)array_d, 
		     (cuFloatComplex*)product_d, write);
    checkCudaError();
    cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
    checkCudaError();
    checkResult(product_h, compare_h, "shared2x2float");
  }

  if (TILE_WIDTH == 8 && TILE_HEIGHT == 8) {
    // 2x2 tile using shared memory float2 loads
    cudaMemset(product_d, 0, matLength*sizeof(float complex));
#ifdef USE_TEX
    // Texture path: the kernel reads the input through tex2d instead of a
    // pointer argument.
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float2>();
    cudaBindTexture2D(0, tex2d, array_d, channelDesc, NFREQUENCY*NSTATION*NPOL, NTIME, NFREQUENCY*NSTATION*NPOL*8);
    CUBE_KERNEL_CALL(shared2x2float2, dimGrid2, dimBlock, 0, (cuFloatComplex*)product_d,
		     write);
#else
    CUBE_KERNEL_CALL(shared2x2float2, dimGrid2, dimBlock, 0, (cuFloatComplex*)array_d, 
		     (cuFloatComplex*)product_d, write);
#endif
    checkCudaError();
    cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
    checkCudaError();
    checkResult(product_h, compare_h, "shared2x2float2");
  }

  if (TILE_WIDTH == TILE_HEIGHT) {
    // 2x2 tile using shared memory float4 loads
    cudaMemset(product_d, 0, matLength*sizeof(float complex));
    CUBE_KERNEL_CALL(shared2x2float4, dimGrid2, dimBlock, 0, (cuFloatComplex*)array_d, 
		     (cuFloatComplex*)product_d, write);
    checkCudaError();
    cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
    checkCudaError();
    checkResult(product_h, compare_h, "shared2x2float4");
  }

  if (TILE_WIDTH == 16 && TILE_HEIGHT == 8) {
    // 2x1 tile
    cudaMemset(product_d, 0, matLength*sizeof(float complex));
    CUBE_KERNEL_CALL(simple2x1, dimGrid2, dimBlock, 0, (cuFloatComplex*)array_d,
		     (cuFloatComplex*)product_d, write);
    checkCudaError();
    cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
    checkCudaError();
    checkResult(product_h, compare_h, "simple2x1");
  }

  if (TILE_WIDTH == 16 && TILE_HEIGHT == 8) {
    // 2x1 tile using shared memory float loads
    cudaMemset(product_d, 0, matLength*sizeof(float complex));
    CUBE_KERNEL_CALL(shared2x1float, dimGrid2, dimBlock, 0, (cuFloatComplex*)array_d,
		     (cuFloatComplex*)product_d, write);
    checkCudaError();
    cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
    checkCudaError();
    checkResult(product_h, compare_h, "shared2x1float");
  }

  if (TILE_WIDTH == 8 && TILE_HEIGHT == 16) {
    // 1x2 tile
    cudaMemset(product_d, 0, matLength*sizeof(float complex));
    CUBE_KERNEL_CALL(simple1x2, dimGrid2, dimBlock, 0, (cuFloatComplex*)array_d,
		     (cuFloatComplex*)product_d, write);
    checkCudaError();
    cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
    checkCudaError();
    checkResult(product_h, compare_h, "simple1x2");
  }

  if (TILE_WIDTH == 8 && TILE_HEIGHT == 16) {
    // 1x2 tile using shared memory float loads
    cudaMemset(product_d, 0, matLength*sizeof(float complex));
    CUBE_KERNEL_CALL(shared1x2float, dimGrid2, dimBlock, 0, (cuFloatComplex*)array_d,
		     (cuFloatComplex*)product_d, write);
    checkCudaError();
    cudaMemcpy(product_h, product_d, matLength*sizeof(float complex), cudaMemcpyDeviceToHost);
    checkCudaError();
    checkResult(product_h, compare_h, "shared1x2float");
    }
#endif

#ifdef MULTI_GPU
  //begin multi-gpu portion
  //query system to determine OpenMP parameters
  int num_gpus = 0;
  cudaGetDeviceCount(&num_gpus);
  int num_procs = omp_get_num_procs();
  printf("Number of processors: %d\n", num_procs);
  printf("Number of available GPUs: %d\n", num_gpus);
  printf("Number of requested GPUs: %d\n", NGPU);
  if(num_gpus < NGPU){
    printf("Not enough GPUs available; check number defined\n");
    // Release host buffers before bailing out (the original leaked them here).
    free(array_h);
    free(product_h);
    free(compare_h);
    free(reorder_h);
    return 1;
  }
  // Each GPU handles NFREQUENCY/NGPU frequency channels.
  dim3 dimGrid3(((Nblock/2+1)*(Nblock/2))/2, NFREQUENCY/NGPU);

#define F_PER_GPU (NFREQUENCY/NGPU)

  // Re-order the input so each GPU's frequencies are contiguous:
  // [gpu][time][freq-within-gpu][station][pol].
  for(int f=0; f<NFREQUENCY; f++){
    for(int t=0; t<NTIME; t++){
      for(int s=0; s<NSTATION; s++){
	for(int pol=0; pol<NPOL; pol++){
	  reorder_h[(f/F_PER_GPU)*(NTIME*NSTATION*NPOL*F_PER_GPU) + 
		    ((t*F_PER_GPU + f%F_PER_GPU)*NSTATION + s)*NPOL + pol] = 
	    array_h[((t*NFREQUENCY + f)*NSTATION + s)*NPOL + pol];
	}
      }
    }
  }

  //begin OpenMP section: one host thread drives each GPU
#pragma omp parallel num_threads(NGPU)
  {

    unsigned int tid = omp_get_thread_num();

    //assign each CPU thread to a GPU
    int gpu_id = -1;
    cudaSetDevice(tid);
    cudaGetDevice(&gpu_id);
    checkCudaError();
    if(tid != gpu_id){
      printf("Error in CPU-GPU assignment\n");
    }
    else if (TILE_HEIGHT == 8 && TILE_WIDTH == 8){

      //allocate memory on device (per-GPU slices of the full problem)
      cudaMalloc((void **) &array_d, vecLength*sizeof(float complex)/NGPU);
      cudaMalloc((void **) &product_d, matLength*sizeof(float complex)/NGPU);
      checkCudaError();

      // Zero both buffers (0, not '0': the character would fill with 0x30).
      cudaMemset(array_d, 0, vecLength*sizeof(float complex)/NGPU);
      cudaMemset(product_d, 0, matLength*sizeof(float complex)/NGPU);
      checkCudaError();

      //move relevant data down into the array; offset by a thread id's worth of vecLength
      cudaMemcpy(array_d, &reorder_h[tid*(vecLength/NGPU)], vecLength*sizeof(float complex)/NGPU,
		 cudaMemcpyHostToDevice);
      checkCudaError();

      //call kernel to perform the calculation
#ifdef USE_TEX
      cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float2>();
      cudaBindTexture2D(0, tex2d, array_d, channelDesc, F_PER_GPU*NSTATION*NPOL, NTIME, F_PER_GPU*NSTATION*NPOL*8);
      CUBE_KERNEL_CALL(shared2x2float2, dimGrid3, dimBlock, 0, (cuFloatComplex*)product_d,
		       write);
#else
      CUBE_KERNEL_CALL(shared2x2float2, dimGrid3, dimBlock, 0, (cuFloatComplex*)array_d,
		       (cuFloatComplex*)product_d, write);
#endif
      checkCudaError();

      //copy the data back, employing a similar strategy as above
      cudaMemcpy(&product_h[tid*(matLength/NGPU)], product_d, matLength*sizeof(float complex)/NGPU,
		 cudaMemcpyDeviceToHost);
      checkCudaError();

      cudaFree(array_d);
      cudaFree(product_d);
    }
  } //close parallel segment

  //check that multi-gpu process yielded the correct answer
  checkResult(product_h, compare_h, "multi-shared2x2float2");

  // Release the re-ordered staging buffer (the original never freed it).
  free(reorder_h);
#endif

  //free device memory
#ifndef MULTI_GPU
  cudaFree(array_d);
  cudaFree(product_d);
#endif
  //free host memory
  free(array_h);
  free(product_h);
  free(compare_h);

  CUBE_WRITE();

  return 0;
}
