#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <cuda.h>
#include "hash.h"

#define BI 1000000000

/*
 * Check the result of a CUDA runtime call and abort with a file/line
 * diagnostic on failure.  Does NOT synchronize, so errors from earlier
 * asynchronous work (kernel launches) may surface at a later call.
 */
#  define CUDA_SAFE_CALL_NO_SYNC( call) do {                                \
    cudaError_t err = call;                                                  \
    if( cudaSuccess != err) {                                                \
        fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",        \
                __FILE__, __LINE__, cudaGetErrorString( err) );              \
        exit(EXIT_FAILURE);                                                  \
    } } while (0)
/*
 * Check the result of a CUDA runtime call, then synchronize the device so
 * that errors from previously launched asynchronous work are caught here
 * as well.  cudaDeviceSynchronize replaces the deprecated (and, since
 * CUDA 10, removed) cudaThreadSynchronize.
 */
#  define CUDA_SAFE_CALL( call) do {                                        \
    CUDA_SAFE_CALL_NO_SYNC(call);                                            \
    cudaError_t err = cudaDeviceSynchronize();                               \
    if( cudaSuccess != err) {                                                \
        fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",        \
                __FILE__, __LINE__, cudaGetErrorString( err) );              \
        exit(EXIT_FAILURE);                                                  \
    } } while (0)

/*
 * Kernel: histogram the dimension-table keys into HSIZE hash buckets.
 * Uses a grid-stride loop, so any <<<grid, block>>> configuration works.
 * num[] must be zeroed before launch; slots are bumped with atomicAdd.
 * NOTE(review): key % HSIZE assumes keys are non-negative — confirm.
 */
__global__ void count_hash_num(struct tuple * dim, int * dimNum,int *num){
	const int total   = *dimNum;
	const int nthread = blockDim.x * gridDim.x;

	for(int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < total; idx += nthread){
		struct tuple t = dim[idx];
		int h = t.key % HSIZE;	/* bucket this tuple hashes to */
		atomicAdd(num + h, 1);
	}
}

/*
 * Kernel: scatter the dimension tuples into their hash buckets.
 * psum[] holds the starting offset of each bucket inside bucket[] and is
 * consumed destructively: atomicAdd claims the next free slot per bucket,
 * so pass a scratch copy (the caller keeps a pristine copy for probing).
 */
__global__ void build_hash_table(struct tuple *dim, int* dimNum, int *psum,struct tuple * bucket){
	const int total   = *dimNum;
	const int nthread = blockDim.x * gridDim.x;

	for(int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < total; idx += nthread){
		struct tuple t = dim[idx];
		int h = t.key % HSIZE;
		int slot = atomicAdd(psum + h, 1);	/* claim next free slot in bucket h */
		bucket[slot] = t;
	}
}


/*
 * Kernel: first join pass — for each fact tuple, probe the hash table and
 * count how many dimension tuples match fact[i].value.  Every match is
 * counted (no early exit), which must agree with how many tuples the
 * second pass (join) emits.
 *
 * num[h]   number of entries in hash bucket h
 * psum[h]  starting offset of bucket h inside bucket[]
 * count    one slot per launched thread (gridDim.x * blockDim.x entries);
 *          the host prefix-sums it to size and partition the result.
 */
__global__ void count_join_result(int * num, int * psum, struct tuple * bucket, struct tuple * fact, int *factNum, int * count){
	int lcount = 0;
	int fNum = *factNum;
	int stride = blockDim.x * gridDim.x;
	int offset = blockIdx.x*blockDim.x + threadIdx.x;

	for(int i=offset;i<fNum;i+=stride){
		int skey = fact[i].value;
		int hkey = skey % HSIZE;
		int keyNum = num[hkey];
		int start = psum[hkey];

		/*
		 *	probe hash table: scan every entry of the target bucket
		 */
		for(int j=0;j<keyNum;j++){
			if(bucket[start+j].key == skey){
				lcount ++;
			}
		}
	}
	/* lcount is thread-private and no shared memory is used, so no
	 * barrier is needed before publishing the per-thread count. */
	count[offset] = lcount;
}

/*
 * Kernel: second join pass — materialize the join result.  Each thread
 * writes its matches starting at resPsum[thread id], the exclusive prefix
 * sum of the per-thread counts produced by count_join_result.
 *
 * Every matching bucket entry is emitted (no early break) so the number
 * of tuples written per thread matches what count_join_result counted;
 * breaking after the first match would leave trailing result slots
 * uninitialized whenever a key occurs more than once in the dimension
 * table.
 */
__global__ void join(int *num,int *resPsum, int * psum, struct tuple * bucket, struct tuple * fact, int *factNum, struct tuple * result){

	int fNum = *factNum;
	int stride = blockDim.x * gridDim.x;
	int startIndex = blockIdx.x*blockDim.x + threadIdx.x;
	int offset = resPsum[startIndex];	/* start of this thread's slice of result[] */

	for(int i=startIndex;i<fNum;i+=stride){
		int skey = fact[i].value;
		int hkey = skey%HSIZE;
		int keyNum = num[hkey];
		int start = psum[hkey];

		/*
		 *	probe hash table
		 */
		for(int j=0;j<keyNum;j++){
			if(bucket[start+j].key == skey){
				struct tuple tmp;
				tmp.key = skey;
				tmp.value = bucket[start+j].value;
				result[offset] = tmp;
				offset ++;
			}
		}
	}

}

/*
 * Return the total global memory (in bytes) of the given CUDA device,
 * or 0 if the device cannot be queried (preserving the old stub's
 * fallback value).  The previous implementation was fully commented out
 * and always returned 0.
 */
static size_t getGlobalMem(int deviceID){
	struct cudaDeviceProp pro;

	if (cudaGetDeviceProperties(&pro, deviceID) != cudaSuccess)
		return 0;
	return pro.totalGlobalMem;
}

/*
 * Hash-join driver.
 *
 * usage: prog factTable dimTable
 *
 * Both files are flat binary arrays of struct tuple.  The dimension table
 * is hashed into HSIZE buckets on the GPU, then the fact table is probed
 * twice: once to size the result (count_join_result) and once to
 * materialize it (join).
 *
 * Fixes over the previous version: gpu_count was freed twice (the second
 * cudaFree made CUDA_SAFE_CALL abort during cleanup), gpu_resPsum was
 * never freed, open/mmap results were unchecked, kernel launches were not
 * error-checked, and events/host buffers/mappings were leaked.
 */
int main(int argc, char * argv[]){
	int fact,dim;
	int dimSize, factSize;
	int factNum, dimNum;
	char * factTable, *dimTable;
	int i;

	struct tuple * host_result;
	int * host_psum, *cpu_count, *resPsum;
	int count = 0;

	int * gpu_hashNum;
	struct tuple * gpu_result;
	struct tuple *gpu_bucket, *gpu_fact, * gpu_dim;
	int * gpu_count, *gpu_factNum,  *gpu_psum, *gpu_resPsum, *gpu_dimNum;

	struct timespec start,end;
	float gpuTime;

	if(argc!=3){
		printf("error: factTable dimTable\n");
		exit(-1);
	}

	clock_gettime(CLOCK_REALTIME,&start);

	fact = open(argv[1],O_RDONLY);
	dim = open(argv[2],O_RDONLY);
	if(fact == -1 || dim == -1){
		perror("open");
		exit(-1);
	}

	dimSize = lseek(dim,0,SEEK_END);
	factSize = lseek(fact,0,SEEK_END);
	factNum = factSize/sizeof(struct tuple);
	dimNum = dimSize/sizeof(struct tuple);

	dimTable = (char *)mmap(0,dimSize,PROT_READ,MAP_SHARED,dim,0);
	factTable = (char *)mmap(0,factSize,PROT_READ,MAP_SHARED,fact,0);
	if(dimTable == MAP_FAILED || factTable == MAP_FAILED){
		perror("mmap");
		exit(-1);
	}

	dim3 grid(1024);
	dim3 block(16);

/*
 * 	build hash table on GPU
 */

	cudaEvent_t startGPU, stopGPU;
	cudaEventCreate(&startGPU);
	cudaEventCreate(&stopGPU);
	cudaEventRecord(startGPU,0);
	int * hashCount;
	int *gpu_psum1;

	hashCount = (int *) malloc(sizeof(int)*HSIZE);

	CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_dim,dimSize));
	CUDA_SAFE_CALL(cudaMemcpy(gpu_dim,dimTable,dimSize,cudaMemcpyHostToDevice));
	CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_dimNum,sizeof(int)));
	CUDA_SAFE_CALL(cudaMemcpy(gpu_dimNum,&dimNum,sizeof(int),cudaMemcpyHostToDevice));
	CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_hashNum,sizeof(int)*HSIZE));
	CUDA_SAFE_CALL(cudaMemset(gpu_hashNum,0,sizeof(int)*HSIZE));

	/* pass 1: per-bucket occupancy of the dimension table */
	count_hash_num<<<grid,block>>>(gpu_dim,gpu_dimNum,gpu_hashNum);
	CUDA_SAFE_CALL(cudaGetLastError());
	CUDA_SAFE_CALL(cudaMemcpy(hashCount,gpu_hashNum,sizeof(int)*HSIZE,cudaMemcpyDeviceToHost));

	cudaEventRecord(stopGPU,0);
	cudaEventSynchronize(stopGPU);
	cudaEventElapsedTime(&gpuTime,startGPU,stopGPU);
	printf("GPU count hash result time:%lf\n",gpuTime);

	CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_psum,HSIZE*sizeof(int)));
	CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_psum1,HSIZE*sizeof(int)));
	host_psum = (int *)malloc(HSIZE* sizeof(int));

	/* exclusive prefix sum over the bucket counts = bucket start offsets */
	host_psum[0] = 0;
	for(i=1;i<HSIZE;i++)
		host_psum[i] = hashCount[i-1] + host_psum[i-1];

	free(hashCount);
	CUDA_SAFE_CALL(cudaMemcpy(gpu_psum,host_psum,sizeof(int)*HSIZE,cudaMemcpyHostToDevice));
	/* gpu_psum1 is a scratch copy: build_hash_table consumes it, gpu_psum
	 * stays pristine for the probe kernels */
	CUDA_SAFE_CALL(cudaMemcpy(gpu_psum1,gpu_psum,sizeof(int)*HSIZE,cudaMemcpyDeviceToDevice));
	free(host_psum);

	CUDA_SAFE_CALL(cudaMalloc((void **)&gpu_bucket,dimSize));

	build_hash_table<<<grid,block>>>(gpu_dim,gpu_dimNum,gpu_psum1,gpu_bucket);
	CUDA_SAFE_CALL(cudaGetLastError());

	CUDA_SAFE_CALL(cudaFree(gpu_dimNum));
	CUDA_SAFE_CALL(cudaFree(gpu_dim));
	CUDA_SAFE_CALL(cudaFree(gpu_psum1));
	CUDA_SAFE_CALL(cudaEventRecord(stopGPU,0));
	cudaEventSynchronize(stopGPU);
	cudaEventElapsedTime(&gpuTime,startGPU,stopGPU);
	printf("GPU build hash table time:%lf\n",gpuTime);

/*
 *	join on GPU
*/

	CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_fact,factSize));
	CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_factNum,sizeof(int)));

	int threadNum = grid.x * block.x;

	CUDA_SAFE_CALL(cudaMemcpy(gpu_factNum,(void *)&factNum,sizeof(int),cudaMemcpyHostToDevice));
	CUDA_SAFE_CALL(cudaMemcpy(gpu_fact,factTable,factSize,cudaMemcpyHostToDevice));
	CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_count,sizeof(int)*threadNum));
	CUDA_SAFE_CALL(cudaMemset(gpu_count,0,sizeof(int)*threadNum));

	/* pass 2: per-thread match counts over the fact table */
	count_join_result<<<grid,block>>>(gpu_hashNum, gpu_psum, gpu_bucket, gpu_fact, gpu_factNum,gpu_count);
	CUDA_SAFE_CALL(cudaGetLastError());
	cpu_count = (int *) malloc(sizeof(int)*threadNum);
	CUDA_SAFE_CALL(cudaMemcpy(cpu_count,gpu_count,sizeof(int)*threadNum,cudaMemcpyDeviceToHost));
	CUDA_SAFE_CALL(cudaFree(gpu_count));	/* freed exactly once (was double-freed) */
	cudaEventRecord(stopGPU,0);
	cudaEventSynchronize(stopGPU);
	cudaEventElapsedTime(&gpuTime,startGPU,stopGPU);
	printf("GPU count join result time:%lf\n",gpuTime);


	resPsum = (int *) malloc(sizeof(int)*threadNum);
	CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_resPsum,sizeof(int)*threadNum));

	/* exclusive prefix sum of per-thread counts = per-thread write offsets */
	resPsum[0] = 0;
	for(i=1;i<threadNum;i++)
		resPsum[i] = cpu_count[i-1] + resPsum[i-1];

	count = resPsum[threadNum-1] + cpu_count[threadNum-1];
	free(cpu_count);

	CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_result,count * sizeof(struct tuple)));
	host_result = (struct tuple*) malloc(count * sizeof(struct tuple));

	CUDA_SAFE_CALL(cudaMemcpy(gpu_resPsum,resPsum,sizeof(int)*threadNum,cudaMemcpyHostToDevice));
	free(resPsum);

	/* pass 3: materialize the join result */
	join<<<grid,block>>>(gpu_hashNum,gpu_resPsum,gpu_psum,gpu_bucket,gpu_fact, gpu_factNum,gpu_result);
	CUDA_SAFE_CALL(cudaGetLastError());
	CUDA_SAFE_CALL(cudaMemcpy(host_result,(void*)gpu_result,count*sizeof(struct tuple),cudaMemcpyDeviceToHost));


	CUDA_SAFE_CALL(cudaFree(gpu_fact));
	CUDA_SAFE_CALL(cudaFree(gpu_factNum));
	CUDA_SAFE_CALL(cudaFree(gpu_hashNum));
	CUDA_SAFE_CALL(cudaFree(gpu_psum));
	CUDA_SAFE_CALL(cudaFree(gpu_resPsum));
	CUDA_SAFE_CALL(cudaFree(gpu_result));
	CUDA_SAFE_CALL(cudaFree(gpu_bucket));
	munmap(dimTable,dimSize);
	munmap(factTable,factSize);
	close(dim);
	close(fact);

	cudaEventRecord(stopGPU,0);
	cudaEventSynchronize(stopGPU);
	cudaEventElapsedTime(&gpuTime,startGPU,stopGPU);
	printf("GPU total time:%lf\n",gpuTime);
	cudaEventDestroy(startGPU);
	cudaEventDestroy(stopGPU);

	free(host_result);

	clock_gettime(CLOCK_REALTIME,&end);
	double timeE = (end.tv_sec -  start.tv_sec)* BI + end.tv_nsec - start.tv_nsec;
	printf("CPU time: %lf\n", timeE);
	return 0;
}
