
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>

/* 
 *	This program demonstrates the usage of shared memory with a specific block size (as Width)
 *  to capture the row data (only one column)
 *
 *	Example:
 *  --------
 *  BlockSize  = 2
 *	Input Data[]= {1,2,3,4,5,6,7,8,9,10}
 *  OutputSize = InputSize / BlockSize
 *
 *  Output[j] = Data[i] + Data[i+1]
 *  
 */

typedef unsigned long long uint64_t;

// This define as the Column Width
#define BLOCKSIZE		2	

cudaError_t testSharedMemReduction(uint64_t *c, uint64_t *a, size_t size, size_t size_output);

// Pairwise-sum kernel: each 2-thread block stages two consecutive input
// elements into shared memory and the last thread of the block writes their
// sum to the output slot for that pair (c[k] = a[2k] + a[2k+1]).
//
// Expected launch: blockDim.x == 2 (must match sharedData capacity / the
// BLOCKSIZE define). Uses a grid-stride loop, so any grid size is valid;
// c must have room for totalelement / 2 sums.
__global__ void testSharedMemReductionKernel(uint64_t *c, 
											 uint64_t*a,
											 uint64_t totalelement)
{
	// Capacity must equal blockDim.x (i.e. BLOCKSIZE). Shared memory is
	// uninitialized until written below.
	__shared__ uint64_t sharedData[2];

	// Block-uniform base index: every thread of the block executes the same
	// number of loop iterations, keeping __syncthreads() non-divergent even
	// when totalelement is odd.
	uint64_t base = (uint64_t)blockIdx.x * blockDim.x;
	const uint64_t stride = (uint64_t)blockDim.x * gridDim.x;

	while (base < totalelement)
	{
		uint64_t offset = base + threadIdx.x;

		if (offset < totalelement)
			sharedData[threadIdx.x] = a[offset];

		// Barrier between the shared-memory write above and the cross-thread
		// read below; without it thread 1 can read a stale sharedData[0].
		__syncthreads();

		// Reset the partial sum every iteration: a grid-stride pass must not
		// carry the previous pair's total into the next output slot.
		uint64_t tmpval = 0;
		for (uint64_t i = 0; i < threadIdx.x + 1; i++)
			tmpval += sharedData[i];

		// Last thread of the pair holds the full sum. Index by the pair
		// number (offset / blockDim.x) so later grid-stride iterations land
		// in their own slot instead of overwriting c[blockIdx.x].
		if (threadIdx.x == blockDim.x - 1 && offset < totalelement)
			c[offset / blockDim.x] = tmpval;

		// Barrier before the next iteration overwrites sharedData.
		__syncthreads();

		base += stride;
	}
}

// Reference two-thread pairwise sum (int version): for block b, thread 1
// writes a[2b] + a[2b+1] into c[b]. Assumes blockDim.x == 2 and that every
// launched thread maps to a valid element of `a` (no bounds guard).
__global__ void testSharedMemReductionKernel_Orig(int *c, const int *a)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;

	__shared__ unsigned int sharedData[2];

	// Stage this thread's element into the block-shared tile.
	sharedData[threadIdx.x] = a[gid];

	printf("[%d,%d,%d][%d]\n",blockIdx.x, blockDim.x, threadIdx.x, sharedData[threadIdx.x]);

	// Publish the staged values to the other thread of the block.
	__syncthreads();

	// Thread t accumulates sharedData[0..t]; thread 1 ends with the pair sum.
	int partial = 0;
	int i = 0;
	while (i < threadIdx.x + 1) {
		partial += sharedData[i];
		++i;
	}
	__syncthreads();

	// Only the last thread of the pair publishes the result.
	if (threadIdx.x == 1)
		c[blockIdx.x] = partial;
}

// Helper function for using CUDA to add vectors in parallel.
// Host driver: copies `a` (size elements) to the device, launches the
// pairwise-sum kernel with 2-thread blocks (one block per output pair), and
// copies size_output sums back into `c`.
//
// Returns cudaSuccess on success or the first CUDA error encountered; device
// buffers are always released before returning (goto-cleanup pattern).
cudaError_t testSharedMemReduction(uint64_t *c, 
								   uint64_t *a, 
								   uint64_t size, 
								   uint64_t size_output)
{
    uint64_t *dev_a = 0;
    uint64_t *dev_c = 0;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Output buffer: one sum per pair of input elements.
    cudaStatus = cudaMalloc((void**)&dev_c, size_output * sizeof(uint64_t));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(uint64_t));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Copy input vector from host memory to the GPU buffer.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(uint64_t), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // One block per output pair (2 threads each). The kernel's grid-stride
    // loop tolerates any grid size, so size_output blocks is sufficient —
    // the original launched `size` blocks, half of which did no work.
    testSharedMemReductionKernel<<<size_output, 2>>>(dev_c, dev_a, size);

    // Kernel launches do not return errors directly; pick up any
    // launch-configuration error here.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "testSharedMemReductionKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during execution.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching testSharedMemReductionKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size_output * sizeof(uint64_t), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    // cudaFree(NULL) is a no-op, so this is safe on every exit path.
    cudaFree(dev_c);
    cudaFree(dev_a);
    
    return cudaStatus;
}

// Number of uint64_t elements that fit in `megabytes` MiB.
uint64_t getTotalElementInMB(uint64_t megabytes)
{
	const uint64_t totalBytes = megabytes * 1024ULL * 1024ULL;

	return totalBytes / sizeof(uint64_t);
}

// Number of uint64_t elements that fit in `gigabytes` GiB.
//
// Computes the byte count in double precision: float's 24-bit mantissa
// cannot represent integers above ~16M exactly, so the original float
// arithmetic could be off by whole elements for realistic sizes (1 GiB is
// already 2^30 bytes). The truncating cast matches the original behavior.
uint64_t getTotalElementInGB(float gigabytes)
{
	double totalBytes = (double)gigabytes * 1024.0 * 1024.0 * 1024.0;

	return (uint64_t)(totalBytes / sizeof(uint64_t));
}

// Entry point: builds a 100 MiB input of consecutive integers (1..N),
// reduces adjacent pairs on the GPU, and verifies every output element on
// the host (c[i] must equal a[2i] + a[2i+1]).
// Returns 0 on success, 1 on any allocation, CUDA, or verification failure.
int main()
{
    uint64_t arrayInputSize = getTotalElementInMB(100);
	uint64_t arrayOutputSize = arrayInputSize / 2;

	uint64_t *a = (uint64_t*)malloc(arrayInputSize * sizeof(uint64_t));
	uint64_t *c = (uint64_t*)malloc(arrayOutputSize * sizeof(uint64_t));

	// malloc of ~150 MiB total can fail; don't dereference NULL below.
	if (a == NULL || c == NULL) {
		fprintf(stderr, "host allocation failed!\n");
		free(a);
		free(c);
		return 1;
	}

	// Input is 1..N so each expected pair sum is trivial to recompute below.
	for (uint64_t i = 0; i < arrayInputSize; i++)
		a[i] = i + 1;

    // Reduce adjacent pairs on the GPU.
    cudaError_t cudaStatus = testSharedMemReduction(c, a, arrayInputSize, arrayOutputSize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "testSharedMemReduction failed!");
        free(a);
        free(c);
        return 1;
    }

	printf("Input size	: %f(GB)\n", ((float)arrayInputSize * (float)sizeof(uint64_t)) / 1024 / 1024 / 1024);
	printf("Output size	: %f(GB)\n", ((float)arrayOutputSize * (float)sizeof(uint64_t)) / 1024 / 1024 / 1024);

	// Host-side verification of every pair. %llu matches the unsigned long
	// long typedef of uint64_t (the original used %d, a varargs mismatch,
	// and passed the pointer `a` itself instead of the addends).
	int result = 0;
	for (uint64_t i = 0; i < arrayOutputSize; i++) {
		uint64_t expected = a[i*2] + a[i*2+1];

		if (expected != c[i]) {
			printf("Not match at i=%llu, a=%llu+%llu, c=%llu\n",
				   i, a[i*2], a[i*2+1], c[i]);
			result = 1;
		}
	}

	if (result == 0)
		printf("Successfully match total of %llu elements!\n", arrayInputSize);

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        free(a);
        free(c);
        return 1;
    }

	// The original leaked both host buffers (frees were commented out).
	free(a);
	free(c);

#ifdef _WIN32
	system("pause");	// keep the console window open on Windows only
#endif

	// Propagate verification failure to the exit code.
    return result;
}
