#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <string.h>
#include <cuda.h>
#include "../include/inviJoin.h"
#include "../include/gpuCudaLib.h"
#include "../include/cpuCudaLib.h"
#include "scanImpl.cu"



/*
 * Build the join filter for an UNCOMPRESSED fact foreign-key column.
 * Grid-stride loop over the fNum keys; a key inside [min,max] is recorded
 * into factFilter[i] (the key value itself, later used as a 1-based dim id
 * by joinDim_*); non-matching slots are left at their cudaMemset(0) value.
 *
 * Fix: index and stride were int while fNum is long — truncated/overflowed
 * for fact tables with more than INT_MAX tuples.
 */
__global__ static void count_join_result(char* fact,int min,int max, long fNum, int * factFilter){
	long stride = (long)blockDim.x * gridDim.x;
	long offset = (long)blockIdx.x*blockDim.x + threadIdx.x;

	for(long i=offset;i<fNum;i+=stride){
		int fKey = ((int *)fact)[i];
		if(fKey >=min && fKey<=max)
			factFilter[i] = fKey;
	}

}

/*
 * Dictionary variant of count_join_result: tests each dictionary entry
 * (dheader->hash[i]) against [min,max] and records matching key values in
 * factFilter. Here fNum is the number of dictionary entries, so factFilter
 * is a per-dictionary-entry filter (expanded per-tuple by
 * transform_dict_filter afterwards).
 *
 * Fix: index and stride were int while fNum is long.
 */
__global__ static void count_join_result_dict(struct dictHeader * dheader,int min,int max, long fNum, int * factFilter){
	long stride = (long)blockDim.x * gridDim.x;
	long offset = (long)blockIdx.x*blockDim.x + threadIdx.x;

	for(long i=offset; i<fNum;i+=stride){
		int fKey = dheader->hash[i];
		if(fKey >=min && fKey<=max)
			factFilter[i] = fKey;
	}
}

/*
 * Decompress an RLE-encoded column into a plain int array.
 * The payload after the rleHeader is laid out as three parallel int arrays
 * of length dNum: values, run lengths, and run start positions.
 * Each thread expands whole runs; tupleNum is unused (kept for interface).
 */
__global__ void static unpack_rle(char * fact, char * rle, long tupleNum,int dNum){

        int idx = blockIdx.x*blockDim.x + threadIdx.x;
        int step = blockDim.x * gridDim.x;

        int *runs = (int *)(fact + sizeof(struct rleHeader));
        int *out = (int *)rle;

        for(int r=idx; r<dNum; r+=step){

                int value = runs[r];
                int length = runs[r + dNum];
                int start = runs[r + 2*dNum];

                for(int k=0; k<length; k++)
                        out[start + k] = value;
        }
}


/*
 * Expand a per-dictionary-entry filter into a per-tuple filter for a
 * dictionary-compressed fact column. The compressed keys (byteNum bytes
 * each) are read one 32-bit word at a time and each key is looked up in
 * dictFilter; the result lands in filter[tuple]. dNum is unused (kept for
 * interface).
 *
 * Fixes: numInt was computed into an int (overflow for large tables), and
 * the final packed word could decode entries past tupleNum, writing
 * filter[] out of bounds — now guarded.
 */
__global__ static void transform_dict_filter(int * dictFilter, char *fact, long tupleNum, int dNum,  int * filter){

        int stride = blockDim.x * gridDim.x;
        int offset = blockIdx.x*blockDim.x + threadIdx.x;

        struct dictHeader *dheader;
        dheader = (struct dictHeader *) fact;

        int byteNum = dheader->bitNum/8;
        /* number of 32-bit words that hold the packed keys */
        long numInt = (tupleNum * byteNum +sizeof(int) - 1) / sizeof(int)  ;

        for(long i=offset; i<numInt; i += stride){
                int tmp = ((int *)(fact + sizeof(struct dictHeader)))[i];

                for(int j=0; j< sizeof(int)/byteNum; j++){
                        int fkey = 0;
                        memcpy(&fkey, ((char *)&tmp) + j*byteNum, byteNum);

                        /* same index arithmetic as before, but bounds-checked:
                         * the last word may contain padding past tupleNum */
                        long pos = i* sizeof(int)/byteNum + j;
                        if(pos < tupleNum)
                                filter[pos] = dictFilter[fkey];
                }
        }
}


/*
 * Build the join filter for an RLE-compressed fact column.
 * The payload after the rleHeader holds three parallel int arrays of length
 * dNum: run value, run length, run start. Every tuple covered by a run gets
 * a 0/1 flag depending on whether the run's key lies in [min,max].
 * fNum is unused (kept for interface).
 */
__global__ static void count_join_result_rle(char* col,int min,int max, long fNum, int * factFilter){

	int step = blockDim.x * gridDim.x;
        int idx = blockIdx.x * blockDim.x + threadIdx.x;

        struct rleHeader *rheader = (struct rleHeader *) col;
        int dNum = rheader->dictNum;
        int *runs = (int *)(col+sizeof(struct rleHeader));

        for(int r = idx; r<dNum; r += step){
                int key = runs[r];
                int length = runs[r + dNum];
                int start = runs[r + 2*dNum];

                int match = (key >=min) && (key <=max);

                for(int k=0; k<length; k++)
                        factFilter[start+k] = match;
        }

}

/*
 * Materialize a non-int, dictionary-compressed fact column for tuples that
 * passed the join filter. resPsum[tid] is this thread's exclusive prefix sum
 * of matches, so each thread writes its own contiguous slice of result.
 * Keys are byteNum-byte packed values decoded via dheader->hash.
 */
__global__ void static joinFact_dict_other(int *resPsum, char * fact,  struct dictHeader *dheader, int byteNum,int attrSize, long  num, int * filter, char * result){

        int tid = blockIdx.x*blockDim.x + threadIdx.x;
        int step = blockDim.x * gridDim.x;
        long outOffset = resPsum[tid] * attrSize;

        for(long i=tid; i<num; i+=step){
                if(filter[i] == 0)
                        continue;
                int key = 0;
                memcpy(&key, fact + sizeof(struct dictHeader) + i* byteNum, byteNum);
                memcpy(result + outOffset, &dheader->hash[key], attrSize);
                outOffset += attrSize;
        }
}

/*
 * Materialize an int-typed, dictionary-compressed fact column for tuples
 * that passed the join filter. resPsum[tid] gives this thread's starting
 * slot in result; byteNum-byte packed keys are decoded via dheader->hash.
 */
__global__ void static joinFact_dict_int(int *resPsum, char * fact, struct dictHeader *dheader, int byteNum, int attrSize, long  num, int * filter, char * result){

        int tid = blockIdx.x*blockDim.x + threadIdx.x;
        int step = blockDim.x * gridDim.x;
        long outPos = resPsum[tid];

        for(long i=tid; i<num; i+=step){
                if(filter[i] == 0)
                        continue;
                int key = 0;
                memcpy(&key, fact + sizeof(struct dictHeader) + i* byteNum, byteNum);
                ((int*)result)[outPos] = dheader->hash[key];
                outPos++;
        }
}


/*
 * Materialize a non-int, uncompressed fact column for tuples that passed
 * the join filter. Each thread copies attrSize bytes per surviving tuple
 * into its own contiguous region of result, starting at resPsum[tid].
 */
__global__ void static joinFact_other(int *resPsum, char * fact,  int attrSize, long  num, int * filter, char * result){

        int tid = blockIdx.x*blockDim.x + threadIdx.x;
        int step = blockDim.x * gridDim.x;
        long outOffset = resPsum[tid] * attrSize;

        for(long i=tid; i<num; i+=step){
                if(filter[i] == 0)
                        continue;
                memcpy(result + outOffset, fact + i*attrSize, attrSize);
                outOffset += attrSize;
        }
}

/*
 * Materialize an int-typed, uncompressed fact column for tuples that passed
 * the join filter. resPsum[tid] is this thread's starting slot in result;
 * attrSize is unused here (kept for interface symmetry with joinFact_other).
 */
__global__ void static joinFact_int(int *resPsum, char * fact,  int attrSize, long  num, int * filter, char * result){

        int tid = blockIdx.x*blockDim.x + threadIdx.x;
        int step = blockDim.x * gridDim.x;
        long outPos = resPsum[tid];

        for(long i=tid; i<num; i+=step){
                if(filter[i] == 0)
                        continue;
                ((int*)result)[outPos] = ((int *)fact)[i];
                outPos++;
        }
}

/*
 * Materialize an int-typed dimension column for fact tuples that passed the
 * merged filter. factF holds, per fact tuple, the matched dimension key
 * (1-based, hence the -1 when indexing dim). resPsum[tid] is this thread's
 * starting slot; attrSize is unused (kept for interface).
 */
__global__ void static joinDim_int(int *resPsum, char * dim, int attrSize, long num, int *factF,int * filter, char * result){

        int tid = blockIdx.x*blockDim.x + threadIdx.x;
        int step = blockDim.x * gridDim.x;
        long outPos = resPsum[tid];

        for(long i=tid; i<num; i+=step){
                if(filter[i] == 0)
                        continue;
                int dimId = factF[i];
                ((int*)result)[outPos] = ((int*)dim)[dimId-1];
                outPos++;
        }
}

/*
 * Materialize a non-int dimension column for fact tuples that passed the
 * merged filter. factF holds the matched 1-based dimension key per fact
 * tuple; attrSize bytes are copied per surviving tuple into this thread's
 * region of result starting at resPsum[tid] * attrSize.
 */
__global__ void static joinDim_other(int *resPsum, char * dim, int attrSize, long num, int* factF,int * filter, char * result){

        int tid = blockIdx.x*blockDim.x + threadIdx.x;
        int step = blockDim.x * gridDim.x;
        long outOffset = resPsum[tid] * attrSize;

        for(long i=tid; i<num; i+=step){
                if(filter[i] == 0)
                        continue;
                int dimId = factF[i];
                memcpy(result + outOffset, dim + (dimId-1)* attrSize, attrSize);
                outOffset += attrSize;
        }
}



/*
 * AND together the per-dimension filters: a fact tuple survives only if all
 * dNum filters are non-zero for it. result[i] gets the 0/1 outcome, each
 * thread records its personal match count in count[tid] (consumed by the
 * prefix-sum scan), and the grand total is accumulated atomically.
 */
__global__ void static merge(int ** filter, long fNum, int dNum,int * result, int *count, int * totalCount){

	int tid = blockIdx.x*blockDim.x + threadIdx.x;
	int step = blockDim.x * gridDim.x;
	int matched = 0;

	for(long i=tid; i<fNum; i+=step){
		int pass = 1;
		for(int j=0; j<dNum; j++){
			if(filter[j][i] == 0){
				pass = 0;
				break;
			}
		}
		matched += pass;
		result[i] = pass;
	}
	count[tid] = matched;
	atomicAdd(totalCount,matched);
}


/*
 * inviJoin: invisible join between the fact table and all dimension tables
 * of a star-schema join node.
 *
 * Phase 1: for each dimension, stage the fact foreign-key column on the GPU
 *          and build a per-tuple filter (handles UNCOMPRESSED, DICT, RLE).
 * Phase 2: AND the per-dimension filters (merge kernel), count survivors and
 *          compute per-thread prefix sums (scanImpl) for result placement.
 * Phase 3: materialize the requested fact and dimension output columns.
 *
 * Returns a newly malloc'd tableNode; pp->total is incremented by the
 * elapsed wall time in milliseconds.
 *
 * Fixes: filterSize widened from int to long (tupleNum * sizeof(int) can
 * exceed INT_MAX); the device buffers gpuCount and gpuFilterAddr and the
 * host arrays gpuFilter, gpu_fact, factInGpu, gpuResult, attrSize and
 * attrType are now released (all were leaked).
 */
struct tableNode * inviJoin(struct joinNode *jNode, struct statistic *pp){

	struct tableNode * res = NULL;
	char ** gpu_fact;
	int ** gpuFilter;

	struct timespec start,end;
	clock_gettime(CLOCK_REALTIME,&start);

	dim3 grid(1024);
	dim3 block(256);

	/* shrink the grid when the fact table is small */
	int blockNum = jNode->factTable->tupleNum / block.x + 1;
	if(blockNum < 1024)
		grid = blockNum;

	int threadNum = grid.x * block.x;

	/* result-table descriptor: metadata copied from the join node */
	res = (struct tableNode *) malloc(sizeof(struct tableNode));
	res->tupleSize = jNode->tupleSize;
	res->totalAttr = jNode->totalAttr;
	res->attrType = (int *) malloc(sizeof(int) * res->totalAttr);
	res->attrSize = (int *) malloc(sizeof(int) * res->totalAttr);
	res->dataPos = (int *) malloc(sizeof(int) * res->totalAttr);
	res->attrTotalSize = (int *) malloc(sizeof(int) * res->totalAttr);
	res->dataFormat = (int *) malloc(res->totalAttr * sizeof(int));

	res->content = (char **) malloc(sizeof(char *) * jNode->totalAttr );

	for(int i=0;i<res->totalAttr;i++){
		res->attrType[i] = jNode->attrType[i];
		res->attrSize[i] = jNode->attrSize[i];
		if(jNode->keepInGpu[i] == 1){
			res->dataPos[i] = GPU;
		}else{
			res->dataPos[i] = MEM;
		}
		res->dataFormat[i] = UNCOMPRESSED;
	}

	/* long, not int: one int per fact tuple can exceed 2GB */
	long filterSize = jNode->factTable->tupleNum * sizeof(int);
	int *factInGpu = (int *) malloc(sizeof(int) * jNode->dimNum);

	gpuFilter = (int **) malloc(sizeof(int *) * jNode->dimNum);
	gpu_fact = (char **) malloc(sizeof(char *) * jNode->dimNum);

	/* stage each foreign-key column on the GPU (or reuse an existing device
	 * copy) and allocate a zeroed filter per dimension */
	for(int k=0;k<jNode->dimNum;k++){
		int index = jNode->factIndex[k];
		long totalSize = jNode->factTable->attrTotalSize[index];

		if(jNode->factTable->dataPos[index] == MEM || jNode->factTable->dataPos[index] == PINNED){
			factInGpu[k] = 0;
			CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&(gpu_fact[k]), totalSize));
			CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpu_fact[k],jNode->factTable->content[index], totalSize, cudaMemcpyHostToDevice));

		}else if(jNode->factTable->dataPos[index] == GPU || jNode->factTable->dataPos[index] == UVA){
			factInGpu[k] = 1;
			gpu_fact[k] = jNode->factTable->content[index];
		}

		CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&(gpuFilter[k]), filterSize));
		CUDA_SAFE_CALL_NO_SYNC(cudaMemset(gpuFilter[k],0,filterSize));
	}


	/* FIXME: hard-coded key ranges for a specific (SSB-style) benchmark
	 * query; only 3 dimensions are covered, so dimNum > 3 reads past these
	 * arrays. The ranges should be derived from the join node instead. */
	int min[3],max[3];
	min[0] = 119345;
	max[0] = 179879;
	min[1]  = 7944;
	max[1] = 11944;
	min[2] = 1;
	max[2] = 2192;

	/* phase 1: build one per-tuple filter per dimension, per column format */
	for(int k=0;k<jNode->dimNum;k++){
		int index = jNode->factIndex[k];
		int format = jNode->factTable->dataFormat[index];
		int dataPos = jNode->factTable->dataPos[index];

		if(format == UNCOMPRESSED)
			count_join_result<<<grid,block>>>(gpu_fact[k],min[k],max[k],jNode->factTable->tupleNum, gpuFilter[k]);

		else if(format == DICT){
			struct dictHeader * gpuDictHeader;
			struct dictHeader * dheader;
			int dNum;

			CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuDictHeader, sizeof(struct dictHeader)));

			/* read the dictionary header from wherever the column lives */
			if(dataPos == MEM || dataPos == UVA){
				dheader = (struct dictHeader *) jNode->factTable->content[index];
				dNum = dheader->dictNum;
				CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuDictHeader,dheader,sizeof(struct dictHeader),cudaMemcpyHostToDevice));
			}else{
				dheader = (struct dictHeader *) malloc(sizeof(struct dictHeader));
				CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(dheader,jNode->factTable->content[index],sizeof(struct dictHeader), cudaMemcpyDeviceToHost));
				dNum = dheader->dictNum;
				CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuDictHeader,dheader,sizeof(struct dictHeader),cudaMemcpyHostToDevice));
				free(dheader);
			}

			/* filter the dictionary entries, then expand per tuple */
			int * gpuDictFilter;
			CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuDictFilter, sizeof(int)*dNum));
			CUDA_SAFE_CALL_NO_SYNC(cudaMemset(gpuDictFilter,0,sizeof(int)*dNum));

			count_join_result_dict<<<grid,block>>>(gpuDictHeader,min[k],max[k],dNum, gpuDictFilter);

			transform_dict_filter<<<grid,block>>>(gpuDictFilter,gpu_fact[k],jNode->factTable->tupleNum,dNum, gpuFilter[k]);

			CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuDictHeader));
			CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuDictFilter));

		}else if(format == RLE){
			count_join_result_rle<<<grid,block>>>(gpu_fact[k],min[k],max[k],jNode->factTable->tupleNum, gpuFilter[k]);

		}

	}

	/* release staged copies of the foreign-key columns */
	for(int k=0;k<jNode->dimNum;k++){
		if(factInGpu[k] == 0)
			CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpu_fact[k]));
	}

	free(gpu_fact);
	free(factInGpu);

	/* phase 2: merge the filters and size the result */
	int * gpuFinalFilter;
	int * gpuCount, *gpuTotalCount;
	int * gpuResPsum;
	int ** gpuFilterAddr;

	CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **) &gpuFilterAddr, sizeof(int *) * jNode->dimNum));

	for(int k=0;k<jNode->dimNum;k++){
		CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&(gpuFilterAddr[k]), &(gpuFilter[k]), sizeof(int *), cudaMemcpyHostToDevice));
	}

	CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuFinalFilter,sizeof(int) * jNode->factTable->tupleNum));
	CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuCount,sizeof(int) *  threadNum));
	CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuTotalCount,sizeof(int)));
	CUDA_SAFE_CALL_NO_SYNC(cudaMemset(gpuTotalCount,0,sizeof(int)));
	CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuResPsum,sizeof(int) * threadNum));

	merge<<<grid,block>>>(gpuFilterAddr,jNode->factTable->tupleNum,jNode->dimNum,gpuFinalFilter, gpuCount,gpuTotalCount);

	int totalCount = 0;

	/* this D2H copy synchronizes with the merge kernel */
	CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&totalCount,gpuTotalCount,sizeof(int),cudaMemcpyDeviceToHost));
	CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuTotalCount));
	CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuFilterAddr));	/* was leaked */

	res->tupleNum = totalCount;
	printf("%d\n",totalCount);

	for(int i=0;i<res->totalAttr;i++){
		res->attrTotalSize[i] = totalCount * res->attrSize[i];
	}

	/* per-thread exclusive prefix sums over the match counts */
	scanImpl(gpuCount,threadNum,gpuResPsum, pp);

	CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuCount));	/* was leaked */

	/* phase 3: stage output columns and materialize the result */
	gpu_fact = (char **) malloc(sizeof(char *) * jNode->totalAttr);
	factInGpu = (int *) malloc(sizeof(int) * jNode->totalAttr);
	char **gpuResult = (char **) malloc(sizeof(char *) * jNode->totalAttr);
	int *attrSize = (int *) malloc(sizeof(int) * jNode->totalAttr);
	int *attrType = (int *) malloc(sizeof(int) * jNode->totalAttr);

	for(int i=0; i< jNode->factOutputNum;i++){
		int index = jNode->factOutputIndex[i];
		int aSize = jNode->factTable->attrSize[index];
		int size = jNode->factTable->attrTotalSize[index];
		attrSize[i] = aSize;
		attrType[i] = jNode->factTable->attrType[index];
		CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&(gpuResult[i]),aSize * totalCount));
		if(jNode->factTable->dataPos[index] == MEM || jNode->factTable->dataPos[index] == PINNED){
			factInGpu[i] = 0;
			CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&(gpu_fact[i]),size));
			CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpu_fact[i], jNode->factTable->content[index],size, cudaMemcpyHostToDevice));
		}else if(jNode->factTable->dataPos[index] == GPU || jNode->factTable->dataPos[index] == UVA){
			factInGpu[i] = 1;
			gpu_fact[i] = jNode->factTable->content[index];
		}
	}

	/* dimension output columns follow the fact output columns in gpu_fact */
	int k = jNode->factOutputNum;
	for(int i=0;i<jNode->dimNum;i++){
		for(int j=0;j<jNode->dimOutputNum[i]; j++){
			int index = jNode->dimOutputIndex[i][j];
			int aSize = jNode->dimTable[i]->attrSize[index];
			attrSize[k] = aSize;
			attrType[k] = jNode->dimTable[i]->attrType[index];
			CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&(gpuResult[k]),aSize * totalCount));
			if(jNode->dimTable[i]->dataPos[index] == MEM || jNode->dimTable[i]->dataPos[index] == PINNED){
				factInGpu[k] = 0;
				CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&(gpu_fact[k]),aSize*jNode->dimTable[i]->tupleNum));
				CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpu_fact[k],jNode->dimTable[i]->content[index], aSize*jNode->dimTable[i]->tupleNum,cudaMemcpyHostToDevice));
			}else if (jNode->dimTable[i]->dataPos[index] ==GPU ||jNode->dimTable[i]->dataPos[index] == UVA){
				factInGpu[k] = 1;
				gpu_fact[k] = jNode->dimTable[i]->content[index];
			}
			k++;
		}
	}

	for(int i=0;i<jNode->factOutputNum;i++){
		int index = jNode->factOutputIndex[i];
		int format = jNode->factTable->dataFormat[index];

		if(format == UNCOMPRESSED){
			if(attrType[i] != STRING)
				joinFact_int<<<grid,block>>>(gpuResPsum,gpu_fact[i],attrSize[i],jNode->factTable->tupleNum,gpuFinalFilter,gpuResult[i]);
			else
				joinFact_other<<<grid,block>>>(gpuResPsum,gpu_fact[i],attrSize[i],jNode->factTable->tupleNum,gpuFinalFilter,gpuResult[i]);

		}else if(format == DICT){

			struct dictHeader * dheader;
			int byteNum;
			struct dictHeader * gpuDictHeader;

			/* NOTE(review): assumes the column content is host-accessible
			 * here (MEM/UVA); a GPU-resident DICT column would need a
			 * D2H header copy as in phase 1 — confirm with callers */
			dheader = (struct dictHeader *)jNode->factTable->content[index];
			byteNum = dheader->bitNum/8;
			CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuDictHeader,sizeof(struct dictHeader)));
			CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuDictHeader,dheader,sizeof(struct dictHeader),cudaMemcpyHostToDevice));

			if (attrType[i] != STRING)
				joinFact_dict_int<<<grid,block>>>(gpuResPsum,gpu_fact[i], gpuDictHeader,byteNum,attrSize[i], jNode->factTable->tupleNum,gpuFinalFilter,gpuResult[i]);
			else
				joinFact_dict_other<<<grid,block>>>(gpuResPsum,gpu_fact[i], gpuDictHeader,byteNum,attrSize[i], jNode->factTable->tupleNum,gpuFinalFilter,gpuResult[i]);

			CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuDictHeader));


		}else if (format == RLE){
			struct rleHeader* rheader;

			rheader = (struct rleHeader*)jNode->factTable->content[index];

			int dNum = rheader->dictNum;

			/* decompress, then gather from the unpacked copy */
			char * gpuRle;
			CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuRle, jNode->factTable->tupleNum * sizeof(int)));

			unpack_rle<<<grid,block>>>(gpu_fact[i], gpuRle,jNode->factTable->tupleNum, dNum);

			joinFact_int<<<grid,block>>>(gpuResPsum,gpuRle, attrSize[i], jNode->factTable->tupleNum,gpuFinalFilter,gpuResult[i]);

			CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuRle));

		}

	}

	k = jNode->factOutputNum;
	for(int i=0;i<jNode->dimNum;i++){

		for(int j=0;j<jNode->dimOutputNum[i];j++){
			if (attrType[k] != STRING){
				joinDim_int<<<grid,block>>>(gpuResPsum,gpu_fact[k],attrSize[k],jNode->factTable->tupleNum,gpuFilter[i],gpuFinalFilter,gpuResult[k]);
			}else
				joinDim_other<<<grid,block>>>(gpuResPsum,gpu_fact[k],attrSize[k],jNode->factTable->tupleNum,gpuFilter[i],gpuFinalFilter,gpuResult[k]);
			k++;
		}
	}

	for(int i=0;i<jNode->dimNum;i++){
		CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuFilter[i]));
	}
	free(gpuFilter);	/* was leaked */

	for(int i=0;i<jNode->totalAttr;i++){
		if(factInGpu[i] == 0)
			CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpu_fact[i]));
	}
	free(gpu_fact);		/* was leaked */
	free(factInGpu);	/* was leaked */

	/* hand the materialized columns to the result table (copy to host or
	 * keep on the GPU, per the keepInGpu flags) */
	for(int i=0;i<jNode->factOutputNum;i++){
		int pos = jNode->factOutputPos[i];
		if(res->dataPos[pos] == MEM){
			res->content[pos] = (char *) malloc(res->tupleNum * res->attrSize[pos]);
			CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(res->content[pos], gpuResult[i], res->tupleNum * res->attrSize[pos],cudaMemcpyDeviceToHost));
			CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuResult[i]));
		}else if(res->dataPos[pos] == GPU){
			res->content[pos] = gpuResult[i];
		}
	}
	for(int i=0;i<jNode->dimOutputTotal;i++){
		int pos = jNode->dimOutputPos[i];
		if(res->dataPos[pos] == MEM){
			res->content[pos] = (char *) malloc(res->tupleNum * res->attrSize[pos]);
			CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(res->content[pos], gpuResult[i+jNode->factOutputNum], res->tupleNum * res->attrSize[pos],cudaMemcpyDeviceToHost));
			CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuResult[i+jNode->factOutputNum]));
		}else if(res->dataPos[pos] == GPU){
			res->content[pos] = gpuResult[i+jNode->factOutputNum];
		}
	}

	free(gpuResult);	/* was leaked */
	free(attrSize);		/* was leaked */
	free(attrType);		/* was leaked */

	CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuFinalFilter));
	CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuResPsum));

	clock_gettime(CLOCK_REALTIME,&end);
	double timeE = (end.tv_sec -  start.tv_sec)* BILLION + end.tv_nsec - start.tv_nsec;
	pp->total += timeE / (1000 * 1000);
	return res;

}
