#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "util.h"
#include "common.h"

/*
 *  CUDA version for two vector arrays A and B.
 *
 *  Computes out[i*n+j] = Pearson correlation coefficient between row i of A
 *  and row j of B, where A and B are n x m row-major matrices.
 *
 *  Expected launch configuration (set up by the host wrapper):
 *    blockDim = (TILESIZE, TILESIZE), gridDim = (n/TILESIZE, n/TILESIZE),
 *    with n a multiple of TILESIZE. m may be arbitrary: tile columns past m
 *    are zero-padded on load, so no out-of-bounds global reads occur and the
 *    partial sums are unaffected (a zero element contributes nothing).
 */
__global__ void corrcoefmatrixAB_cuda(float *out, float *inA, float *inB, int n, int m) {
  // +1 padding on the inner dimension avoids shared-memory bank conflicts
  // for the column-style access inBS[threadIdx.x][k] in the inner loop.
  __shared__ float inAS[TILESIZE][TILESIZE+1];
  __shared__ float inBS[TILESIZE][TILESIZE+1];

  int i,j,k;
  int tileBegin, col;
  float a_x,b_x;
  float sum_a, sum_b, sum_a2, sum_b2, sum_ab, corrcoef;

  i = blockIdx.y*blockDim.y + threadIdx.y;  // row of A / row of out
  j = blockIdx.x*blockDim.x + threadIdx.x;  // row of B / column of out

  sum_a = sum_a2 = sum_b = sum_b2 = sum_ab = 0.0f;
  for (tileBegin=0; tileBegin < m; tileBegin += blockDim.x) {
     // Load one tile of A rows and one tile of B rows into shared memory.
     // Guard the global reads: the last tile may extend past column m-1
     // when m is not a multiple of TILESIZE; pad those slots with zero.
     col = tileBegin + threadIdx.x;
     inAS[threadIdx.y][threadIdx.x] =
        (col < m) ? inA[(blockIdx.y*blockDim.y + threadIdx.y)*m + col] : 0.0f;
     inBS[threadIdx.y][threadIdx.x] =
        (col < m) ? inB[(blockIdx.x*blockDim.x + threadIdx.y)*m + col] : 0.0f;
     __syncthreads();

     // Partial sums over this tile; zero-padded slots add nothing, so no
     // per-element bounds test is needed here.
     for (k=0; k < blockDim.x; k++) {
        a_x = inAS[threadIdx.y][k];
        b_x = inBS[threadIdx.x][k];
        sum_a += a_x;
        sum_a2 += a_x*a_x;
        sum_b += b_x;
        sum_b2 += b_x*b_x;
        sum_ab += a_x*b_x;
     }
     __syncthreads();
  }
  // Single-pass Pearson formula from the accumulated sums.
  corrcoef = (m*sum_ab - sum_a*sum_b)/sqrtf((m*sum_a2-sum_a*sum_a)*(m*sum_b2-sum_b*sum_b));
  out[i*n+j] = corrcoef;
}

#if (__GNUC__ >= 4 && __GNUC_MINOR__ < 4)
extern "C"
#endif
/*
 * Host wrapper: allocates device buffers, copies the row x col row-major
 * matrices A and B to the GPU, launches corrcoefmatrixAB_cuda on a
 * (row/TILESIZE)^2 grid of TILESIZE x TILESIZE blocks, and copies back the
 * row x row correlation matrix.
 *
 * Returns a malloc'd row*row float array the caller must free().
 * Exits on allocation/launch failure; returns NULL on copy failure
 * (after releasing device memory). row must be a multiple of TILESIZE.
 */
float *calculation_cuda(float *inA, float *inB, int row, int col) {
   float *inAG, *inBG, *outG;
   float *out;
   size_t sizeIn, sizeOut;   // size_t: avoid int overflow for large matrices
   cudaError_t err;

   cudaDeviceProp deviceProp;
   cudaGetDeviceProperties(&deviceProp,0);
   printf("Running on %s\n",deviceProp.name);

   // The kernel grid assumes row is an exact multiple of TILESIZE.
   if ((row % TILESIZE) != 0) {
      fprintf(stderr,"Number of rows is not a multiple of TILESIZE\n");
      exit(1);
   }

   sizeIn = (size_t)row*col*sizeof(float);
   sizeOut = (size_t)row*row*sizeof(float);
   out = (float *) malloc(sizeOut);
   if (out == NULL) {
      fprintf(stderr,"malloc of host output buffer failed\n");
      exit(1);
   }

   if ((err=cudaMalloc((void **) &inAG,sizeIn)) != cudaSuccess) {
      fprintf(stderr,"cudaMalloc error: %s!\n",cudaGetErrorString(err));
      exit(1);
   }
   if ((err=cudaMalloc((void **) &inBG,sizeIn)) != cudaSuccess) {
      fprintf(stderr,"cudaMalloc error: %s!\n",cudaGetErrorString(err));
      exit(1);
   }
   if ((err=cudaMalloc((void **) &outG,sizeOut)) != cudaSuccess) {
      fprintf(stderr,"cudaMalloc error: %s!\n",cudaGetErrorString(err));
      exit(1);   // previously fell through and used an invalid pointer
   }

   dim3 blockDim(TILESIZE,TILESIZE);
   dim3 gridDim(row/TILESIZE,row/TILESIZE);

   if ((err=cudaMemcpy(inAG,inA,sizeIn,cudaMemcpyHostToDevice)) != cudaSuccess) {
      fprintf(stderr,"cudaMemcpy from host to device fail: %s\n",cudaGetErrorString(err));
      cudaFree(inAG); cudaFree(inBG); cudaFree(outG);
      free(out);
      return NULL;
   }
   if ((err=cudaMemcpy(inBG,inB,sizeIn,cudaMemcpyHostToDevice)) != cudaSuccess) {
      fprintf(stderr,"cudaMemcpy from host to device fail: %s\n",cudaGetErrorString(err));
      cudaFree(inAG); cudaFree(inBG); cudaFree(outG);
      free(out);
      return NULL;
   }

   corrcoefmatrixAB_cuda<<<gridDim,blockDim>>>(outG,inAG,inBG,row,col);

   // cudaThreadSynchronize is deprecated (removed in CUDA 12).
   cudaDeviceSynchronize();

   // Catch both launch-configuration and asynchronous execution errors.
   err = cudaGetLastError();
   if(err != cudaSuccess)
   {
     fprintf(stderr,"CUDA error at %s:%i: %s\n", __FILE__, __LINE__, cudaGetErrorString(err));
     exit(1);
   }

   if ((err=cudaMemcpy(out,outG,sizeOut,cudaMemcpyDeviceToHost)) != cudaSuccess) {
      fprintf(stderr,"cudaMemcpy from device to host fail: %s\n",cudaGetErrorString(err));
      cudaFree(inAG); cudaFree(inBG); cudaFree(outG);
      free(out);
      return NULL;
   }

   cudaFree(inAG);
   cudaFree(inBG);
   cudaFree(outG);

   return out;
}


