#include <stdio.h> 
#include <stdint.h>

#include "repeat.h"

const int page_size = 4;	// Scale stride and arrays by page size.
// Clocks per second, used to convert measured clocks to wall time.
// NOTE(review): 10e6 == 1e7, so this value is 10x the nominal 2400.195 MHz
// clock. The extra factor of 10 cancels against the 10e9 (== 1e10) factor
// used in parametric_measure_global's ns conversion, so the printed
// nanoseconds come out correct -- fix both together or neither.
const double cps = 2400.195*10e6;

/* Pointer-chase kernel for measuring global memory load latency.
 *
 * Builds a cyclic pointer chain inside my_array (element i points back to
 * element i - stride, element 0 closes the cycle), then times
 * iterations * 256 dependent loads through the chain with clock().
 *
 * my_array:          device array of array_length + 2 unsigned ints; the two
 *                    extra slots receive the base pointer and raw clock sum
 * array_length:      number of chain elements
 * iterations:        timed iterations (256 chained loads each)
 * ignore_iterations: warm-up iterations whose time is discarded
 * duration:          out: average clocks per load
 * stride:            distance (in elements) between chained accesses
 * step:              unused here (host-side initialization optimization)
 * result:            out: accumulated dependency value (defeats DCE)
 *
 * NOTE(review): pointers are stored into 4-byte array slots, so this assumes
 * 32-bit device pointers (GT200-era) -- confirm before using on newer GPUs.
 * Intended for a single-thread launch; there is no per-thread indexing.
 */
__global__ void global_latency (unsigned int * my_array, int array_length, int iterations, int ignore_iterations, unsigned long long * duration,int stride,int step, long long * result) {

	unsigned int start_time, end_time;
	unsigned int *j = (my_array);	// base pointer, echoed back for verification
	volatile unsigned long long sum_time = 0;

	// Rebuild the cyclic chain in device memory:
	// my_array[i] <- &my_array[i - stride], and my_array[0] closes the cycle
	// by pointing at the last initialized element.
	int i = 0;
	for (i = stride; i < array_length; i += stride) {
	  *(unsigned int **)&my_array[i] = (unsigned int*)&my_array[i - stride];
	}
	*(unsigned int **)&my_array[0] = (unsigned int*)&my_array[i - stride];
	unsigned int ** p = (unsigned int**)&my_array[0];

	__syncthreads();

	long long depend = 0;
	// Negative k values are warm-up passes; honor the ignore_iterations
	// parameter instead of the previous hard-coded single warm-up pass
	// (the only caller passes 1, so current behavior is unchanged).
	for (int k = -ignore_iterations; k < iterations; k++)
	{
		if (k==0)
		{
			sum_time = 0; // discard warm-up passes: cold cache/icache misses
		}
		start_time = clock();
		depend += (long long)*p;	// consume *p so the chase cannot be elided

		repeat256(p = (unsigned int **)*p;);	// 256 dependent pointer chases

		end_time = clock();

		sum_time += (end_time - start_time);
	}

	// Write results back. Storing depend and j makes the chase observable,
	// preventing the compiler from removing it as dead code.
	((unsigned int*)my_array)[array_length] = (unsigned int)(uintptr_t)j;
	((unsigned int*)my_array)[array_length+1] = (unsigned int) sum_time;
	duration[0] = sum_time/(iterations*256);	// average clocks per load
	result[0] = depend;
}

/* Host-side variant of the pointer-chase loop.
 *
 * Builds the same cyclic chain as the kernel inside the caller-supplied
 * array and walks it for `iterations` steps, timing each hop with clock().
 * The measured time is currently discarded; the only observable effects are
 * the chain pointers written into addr and duration[0] being set to 0.
 * ignore_iterations is accepted for signature parity but unused. */
void global_latency2 (unsigned int * addr,int stride, int N, int iterations, int ignore_iterations, unsigned long long * duration) {

	unsigned int t0, t1;
	volatile unsigned long long elapsed = 0;

	duration[0] = 0;

	/* Chain element idx back to element idx - stride; element 0 closes
	   the cycle by pointing at the last initialized element. */
	int idx = stride;
	while (idx < N) {
		*(unsigned int **)&addr[idx] = (unsigned int *)&addr[idx - stride];
		idx += stride;
	}
	*(unsigned int **)&addr[0] = (unsigned int *)&addr[idx - stride];

	unsigned int **chase = (unsigned int **)&addr[0];

	int k = 0;
	while (k < iterations) {
		if (k == 0)
			elapsed = 0; /* drop cold-cache warm-up */

		t0 = clock();
		chase = (unsigned int **)*chase;
		t1 = clock();

		elapsed += (t1 - t0);
		++k;
	}
}
/* Greatest common factor of a and b via Euclid's algorithm.
   gcf(0, b) == b. */
int gcf(int a, int b)
{
	while (a != 0) {
		int rem = b % a;
		b = a;
		a = rem;
	}
	return b;
}

/* Construct an array of N unsigned ints, with array elements initialized
   so the kernel will make stride accesses to the array. Then launch the
   kernel 10 times, each making iterations*256 global memory accesses, and
   print the average latency in clocks and in nanoseconds.

   N:                 number of chain elements (N+2 are allocated; the two
                      extra slots are result slots written by the kernel)
   iterations:        timed iterations per kernel launch
   ignore_iterations: warm-up iterations discarded inside the kernel
   stride:            distance (in elements) between chained accesses */
void parametric_measure_global(int N, int iterations, int ignore_iterations, int stride) {

	int i;
	unsigned int * h_a;
	unsigned int * d_a;
	unsigned long long * duration;
	unsigned long long * latency;
	unsigned long long latency_sum = 0;
	long long * r;

	// NOTE(review): grid and block are both (BLOCKDIM_X, BLOCKDIM_Y); the
	// kernel chases a single chain with no per-thread indexing, so a 1x1
	// launch is presumably intended -- confirm BLOCKDIM_X/Y in repeat.h.
	dim3 threads(BLOCKDIM_X, BLOCKDIM_Y);
	dim3 grid(BLOCKDIM_X, BLOCKDIM_Y, 1);

	/* allocate arrays on CPU; bail out on failure instead of continuing
	   into a guaranteed crash */
	h_a = (unsigned int *)malloc(sizeof(unsigned int) * (N+2));
	if (h_a==0)
	{
		printf("h_a BOOM!");
		return;
	}

	latency = (unsigned long long *)malloc(sizeof(unsigned long long));
	if (latency==0)
	{
		printf("latency BOOM!");
		free(h_a);
		return;
	}

	/* allocate arrays on GPU */
	cudaMalloc ((void **) &d_a, sizeof(unsigned int) * (N+2));
	checkCUDAError("Can't allocate d_a array on GPU");
	cudaMalloc ((void **) &duration, sizeof(unsigned long long));
	checkCUDAError("Can't allocate duration array on GPU");
	cudaMalloc ((void **) &r, sizeof(long long));
	checkCUDAError("Can't allocate r array on GPU");

	/* initialize array elements on CPU with pointers into d_a. */
	int step = gcf (stride, N);	// Optimization: initialize only elements the chase touches.
	for (i = 0; i < N; i += step) {
		// Device pointers are 32-bit on GT200.
		h_a[i] = ((unsigned int)(uintptr_t)d_a) + ((i + stride) % N)*sizeof(unsigned int);
	}
	checkCUDAError("Can't access d_a array on GPU");

	h_a[N] = 0;	// result slots overwritten by the kernel
	h_a[N+1] = 0;

	cudaThreadSynchronize ();

	/* copy array elements from CPU to GPU; copy all N+2 elements so the
	   initialized result slots reach the device too */
	cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned int) * (N+2), cudaMemcpyHostToDevice);
	checkCUDAError("Error copying h_a to d_a array on GPU");

	cudaThreadSynchronize ();

	/* Launch 10 iterations of the same kernel and take the average to
	   eliminate interconnect (TPC) effects */
	for (int l=0; l <10; l++) {

		global_latency <<<grid, threads>>>(d_a, N, iterations, ignore_iterations, duration,stride,step,r);
		checkCUDAError("Error while running kernel on GPU");

		cudaThreadSynchronize ();

		cudaError_t error_id = cudaGetLastError();
		if (error_id != cudaSuccess) {
			printf("Error is %s\n", cudaGetErrorString(error_id));
		}

		/* copy results from GPU to CPU */
		cudaThreadSynchronize ();
		cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
		cudaThreadSynchronize ();

		latency_sum += latency[0];
	}

	/* free memory on GPU (r was previously leaked) */
	cudaFree(d_a);
	cudaFree(duration);
	cudaFree(r);
	cudaThreadSynchronize ();

	/* free memory on CPU */
	free(h_a);
	free(latency);

	// Average over the 10 launches in floating point (the previous integer
	// division truncated before converting). The 10x-too-large cps constant
	// cancels against the 10e9 (== 1e10) factor here, yielding nanoseconds.
	double avg_clocks = (double)latency_sum / 10.0;
	printf("%f, %5.12f\n", avg_clocks, (avg_clocks / cps) * 10e9);

}

/* Sweep array sizes and strides, measuring global memory latency for each
   combination. N (in elements) doubles from 1024 up to 256M; for each size,
   the stride is swept downward by factors of 4, skipping strides larger
   than the array. */
void measure_global() {

	int N, iterations, stride;

	printf("\nGlobal memory latency\n");

	iterations = 100;

	for (N = 1024; N <= (256*1024*1024); N = N*2)
	{
		// N*4 is the array size in bytes (4-byte elements); divide by 1024
		// so the printed value actually is KB as the label claims.
		printf("[Start Run] Array Size in KB: %d\n", (N*4)/1024);
		printf("Stride, Latency (clocks), Latency (nanoseconds)\n");
		for (stride = 16*1024*1024; stride >= 4; stride = stride/4)
		{
			if (stride <= N) {
				printf ("%5d, ", stride*4);	// stride in bytes
				parametric_measure_global(N, iterations, 1, stride);
			}
		}
		printf("[End Run] Array Size in KB: %d\n", (N*4)/1024);
	}
}
