#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#define LEN (1024*1024*8)
#define NUMTHREADS 256

__global__ void add1Kernel(float *gM, int num);
__global__ void sumKernelV1(float *gM, int num);
__global__ void sumKernelV2(float *gM, int num);
__global__ void readWriteKernel(float *gM, int num);

double getTimeStamp();

/*
 * Sums LEN floats (all 1.0f) with a single-block GPU reduction, then
 * repeats the sum on the CPU, printing the elapsed time of each phase.
 * Returns 0 on success; exits with status 1 on any CUDA/host failure.
 */
int main(int argc, char **argv) {
   float *M;            // host buffer
   float *gM;           // device buffer
   int i;
   int num = LEN;       // element count
   size_t size;         // buffer size in bytes
   double st, et;
   cudaError_t err;

   size = (size_t)num * sizeof(float);

   M = (float *) malloc(size);
   if (M == NULL) {
      fprintf(stderr, "Host malloc error!\n");
      exit(1);
   }

   // prepare input: all ones, so the expected total is num
   for (i = 0; i < num; i++) {
      M[i] = 1.0f;
   }

   if ((err = cudaMalloc((void**) &gM, size)) != cudaSuccess) {
      fprintf(stderr, "Malloc error: %s\n", cudaGetErrorString(err));
      exit(1);
   }
   if ((err = cudaMemcpy(gM, M, size, cudaMemcpyHostToDevice)) != cudaSuccess) {
      fprintf(stderr, "Copy host to device error: %s\n", cudaGetErrorString(err));
      exit(1);
   }

   st = getTimeStamp();
   dim3 blockDim(NUMTHREADS, 1);
   dim3 gridDim(num / blockDim.x, 1);
   printf("grid size = %d < 65535\n", num / blockDim.x);

   //add1Kernel<<<gridDim,blockDim>>>(gM,num);

   //for (unsigned int n = num; n > 1; n = n >> 1) {
   //   sumKernelV1<<<gridDim,blockDim>>>(gM,n);
   //}

   // single-block shared-memory reduction; result lands in gM[0]
   sumKernelV2<<<1, NUMTHREADS>>>(gM, num);

   //readWriteKernel<<<gridDim,blockDim>>>(gM,num);

   // kernel launches don't return errors directly: check for launch-config
   // failures, then block until the kernel finishes so the timing is real
   if ((err = cudaGetLastError()) != cudaSuccess) {
      fprintf(stderr, "Kernel launch error: %s\n", cudaGetErrorString(err));
      exit(1);
   }
   cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated
   et = getTimeStamp();

   if ((err = cudaMemcpy(M, gM, size, cudaMemcpyDeviceToHost)) != cudaSuccess) {
      fprintf(stderr, "Copy device to host error: %s\n", cudaGetErrorString(err));
      exit(1);
   }
   cudaFree(gM);

   printf("Calculation time = %d ms.\n", (int) (et - st));

   // CPU reference: sequential sum over the copied-back data, for comparison
   st = getTimeStamp();
   float x = 0.0f;
   for (i = 0; i < num; i++) {
      x = x + M[i];
   }
   et = getTimeStamp();
   printf("Calculation time = %d ms.\n", (int) (et - st));

   free(M);
   return 0;
}

/*
 * Adds 1.0f to every element of gM[0..num).
 * Launch with a 1-D grid covering at least num threads; threads past
 * the end of the array return immediately (grid-tail guard).
 */
__global__ void add1Kernel(float *gM, int num) {
   int idx = blockDim.x * blockIdx.x + threadIdx.x;
   if (idx >= num)
      return;
   gM[idx] += 1.0f;
}

/*
 * V1 use reduction on global memory
 */
/*
 * V1: one step of a tree reduction done entirely in global memory.
 * Folds the upper half of gM[0..num) into the lower half; the host
 * re-launches this with num halving each time until one element remains.
 * Launch with at least num/2 threads. Assumes num is even.
 */
__global__ void sumKernelV1(float *gM, int num) {
   int half = num / 2;
   int tid = blockIdx.x * blockDim.x + threadIdx.x;

   // writer range [0, half) and reader range [half, num) are disjoint,
   // so no synchronization is needed within a single launch
   if (tid < half) {
      gM[tid] += gM[tid + half];
   }
}

/*
 * Faster than V1 due to less global memory access
 */
/*
 * V2: single-block sum of gM[0..num) using a shared-memory reduction.
 * Launch as <<<1, NUMTHREADS>>>; the total is written to gM[0].
 * Faster than V1 because each input element is read from global memory
 * exactly once and the tree reduction runs in shared memory.
 */
__global__ void sumKernelV2(float *gM, int num) {
   __shared__ float sPartialSums[NUMTHREADS];

   // shared memory is block-local: index it by threadIdx.x, not the
   // global thread id — the old code went out of bounds for any launch
   // with blockIdx.x > 0
   int tid = threadIdx.x;
   int offset;

   // each thread accumulates a blockDim.x-strided slice of the input;
   // the per-thread bound (offset + tid < num) also handles num that is
   // not a multiple of blockDim.x, which previously read past the end
   sPartialSums[tid] = 0.0f;
   for (offset = 0; offset + tid < num; offset += blockDim.x) {
      sPartialSums[tid] += gM[offset + tid];
   }

   __syncthreads();   // all partials written before the reduction reads them

   for (unsigned int stride = NUMTHREADS / 2; stride > 0; stride >>= 1) {
      if (tid < stride) {
         sPartialSums[tid] += sPartialSums[tid + stride];
      }
      __syncthreads();   // outside the if: every thread must reach the barrier
   }

   // only sPartialSums[0] holds the total after the reduction; the old
   // unconditional write clobbered gM[1..blockDim.x) with stale partials
   if (tid == 0) {
      gM[0] = sPartialSums[0];
   }
}

/*
 * Memory-bandwidth micro-benchmark: each thread reads one element and
 * writes it back incremented by 1.0f.
 * Launch with a 1-D grid covering at least num threads.
 */
__global__ void readWriteKernel(float *gM, int num) {
   int i = blockIdx.x * blockDim.x + threadIdx.x;

   // grid-tail guard: the original ignored num entirely, so any launch
   // with gridDim.x * blockDim.x > num accessed past the end of gM
   if (i < num) {
      float f = gM[i];
      gM[i] = f + 1.0f;
   }
}

/*
 * @return timestamp in millisecond
 */
/*
 * @return wall-clock timestamp in milliseconds, with sub-millisecond
 *         resolution preserved in the fractional part
 */
double getTimeStamp() {
   struct timeval tv;

   // the timezone argument of gettimeofday is obsolete; pass NULL
   gettimeofday(&tv, NULL);

   // divide tv_usec by 1000.0, not 1000: the integer division in the
   // original truncated away the fractional-millisecond precision the
   // double return type is meant to carry
   return ((double) tv.tv_sec) * 1000.0 + tv.tv_usec / 1000.0;
}

