#include "head.cuh"

#define MULTI_VIEW 2
#define SINGL_VIEW 1

#ifdef _IS_MULTI_VIEW_ 
#define VIEW MULTI_VIEW
#else
#define VIEW SINGL_VIEW
#endif

__global__ void quantize(float *d_B, const int r, const int n){
	int row = threadIdx.y + blockDim.y * blockIdx.y;
	int col = threadIdx.x + blockDim.x * blockIdx.x;
	if (row < r&&col < n){
		if (d_B[row + col*r] <= 0)
			d_B[row + col*r] = 0.0f;
		else
			d_B[row + col*r] = 1.0f;
	}
}


int main(void){
	cudaError_t cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, 
			"cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
		system("pause"); exit(1);
	}
	printf("Now: %s\n", UIviewFileName);
	clock_t start_of_main = clock();


	float *UIview = NULL;
	float *UUview = NULL;
	float split_rate = 0.8f;
	float *UIview_test = NULL;
	std::stringstream ss;

	UIview = (float*)malloc(nUsers*nItems*sizeof(float));
	CHECK_PARAM(UIview != NULL, "allocate memory space for UIview failed.");
	UIview_test = (float*)malloc(nUsers*nItems*sizeof(float));
	CHECK_PARAM(UIview_test != NULL, "allocate memory space for UIview_test failed.");
	CHECK_PARAM(read_data_2_matrix(UIview, UIview_test, split_rate), 
		"read data set failed.");
	ss.str("");
	ss << nUsers << "_" << nItems << "_" << "UIview_test.txt";
	write_test_data(UIview_test, nUsers, nItems, ss.str().c_str());
	free(UIview_test); UIview_test = NULL;
	
	float *d_X; /* nUsers X d */
	float *d_UIview; /* nUsers X nItems */
	int d;
	if (VIEW == SINGL_VIEW){
		d = nItems;
		CHECK_CUDA(cudaMalloc(&d_X, nUsers*d*sizeof(float)));
		CHECK_CUDA(cudaMemcpy(d_X, UIview, nUsers*d*sizeof(float),
			cudaMemcpyHostToDevice));
		free(UIview); UIview = NULL;
		d_UIview = d_X;
	}
	else if (VIEW == MULTI_VIEW){
		d = nItems + nUsers;
		UUview = (float*)malloc(nUsers*nUsers*sizeof(float));
		CHECK_PARAM(UUview != NULL, "allocate memory space for UUview failed.");
		CHECK_PARAM(read_UUview(UUview),"read UUview failed.");
		CHECK_CUDA(cudaMalloc(&d_X, nUsers*d*sizeof(float)));
		CHECK_CUDA(cudaMemcpy(d_X, UIview, nUsers*nItems*sizeof(float),
			cudaMemcpyHostToDevice));
		CHECK_CUDA(cudaMemcpy(d_X + nUsers*nItems, UUview, nUsers*nUsers*sizeof(float),
			cudaMemcpyHostToDevice));
		free(UUview); UUview = NULL;
		free(UIview); UIview = NULL;
		CHECK_CUDA(cudaMalloc(&d_UIview, nUsers*nItems*sizeof(float)));
		CHECK_CUDA(cudaMemcpy(d_UIview, d_X, nUsers*nItems*sizeof(float),
			cudaMemcpyDeviceToDevice));
	}
	

	clock_t end_of_read_dataset = clock();
	printf("read data set done. cost time:%.2f s\n", 
		(double)(end_of_read_dataset - start_of_main) / CLOCKS_PER_SEC);

	int x[5] = { 8, 16, 32, 64, 128 };
	for (int round = 0; round < 5; round++){
		clock_t start_of_round = clock();
		printf("===================================\nround:%d %d bits\n", 
			round, x[round]);

		int r = x[round];
		unsigned long long seed = clock() % 1000;
		float mean_of_uniform = 0.0f;
		float standard_deviation = 1.0f;
		float alpha = 1.0f;
		float beta = 0.0f;
		cublasHandle_t cublashandle;
		CHECK_CUDA_STATUS(cublasCreate(&cublashandle));
		curandGenerator_t gen;
		CHECK_CURAND(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
		CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(gen, seed));

		float *d_W;
		CHECK_CUDA(cudaMalloc(&d_W, r*d*sizeof(float)));
		CHECK_CURAND(curandGenerateNormal(gen, d_W, r*d, 
			mean_of_uniform, standard_deviation));
		float *d_B;
		CHECK_CUDA(cudaMalloc(&d_B, r*nUsers*sizeof(float)));
		CHECK_CUDA_STATUS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_T,
			r, nUsers, d, &alpha, d_W, r, d_X, nUsers, &beta, d_B, r));
		CHECK_CUDA(cudaFree(d_W));
		dim3 szBlock(128, 8, 1);
		dim3 szGrid((nUsers + szBlock.x - 1) / szBlock.x, (r + szBlock.y - 1) / szBlock.y, 1);
		quantize << <szGrid, szBlock >> >(d_B, r, nUsers);

		float *B = (float *)malloc(r*nUsers*sizeof(float));
		CHECK_PARAM(B!=NULL,"allocate space for B failed.");
		CHECK_CUDA(cudaMemcpy(B, d_B, r*nUsers*sizeof(float), cudaMemcpyDeviceToHost));
		CHECK_CUDA(cudaFree(d_B)); d_B = NULL;

		ss.str("");
		ss << nUsers << "_" << nItems << "_" << r << "_" << "B_code.txt";
		write_into_file(B, r, nUsers, ss.str().c_str());
		free(B); B = NULL;

		CHECK_CUDA(cudaMalloc(&d_W, r*nUsers*sizeof(float)));
		CHECK_CURAND(curandGenerateNormal(gen, d_W, r*nUsers,
			mean_of_uniform, standard_deviation));
		float *d_D;
		CHECK_CUDA(cudaMalloc(&d_D, r*nItems*sizeof(float)));
		CHECK_CUDA_STATUS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_N,
			r, nItems, nUsers, &alpha, d_W, r, d_UIview, nUsers, &beta, d_D, r));
		CHECK_CUDA(cudaFree(d_W));
		szGrid.x = (nItems + szBlock.x - 1) / szBlock.x;
		quantize << <szGrid, szBlock >> >(d_D, r, nItems);


		float *D = (float *)malloc(r*nItems*sizeof(float));
		CHECK_PARAM(D != NULL, "allocate space for D failed.");
		CHECK_CUDA(cudaMemcpy(D, d_D, r*nItems*sizeof(float), cudaMemcpyDeviceToHost));
		CHECK_CUDA(cudaFree(d_D)); d_D = NULL;

		ss.str("");
		ss << nUsers << "_" << nItems << "_" << r << "_" << "D_code.txt";
		write_into_file(D, r, nItems, ss.str().c_str());
		free(D); D=NULL;
		
		CHECK_CURAND(curandDestroyGenerator(gen));
		CHECK_CUDA_STATUS(cublasDestroy(cublashandle));
		clock_t end_of_round = clock();
		printf("LSH cost time %.2f\n", (double)(end_of_round - start_of_round) / CLOCKS_PER_SEC);

	}
	clock_t end_of_main = clock();
	printf("total cost time %.2f\n", (double)(end_of_main - start_of_main) / CLOCKS_PER_SEC);
	
	if (VIEW == SINGL_VIEW){
		CHECK_CUDA(cudaFree(d_UIview));
	}
	else if (VIEW == MULTI_VIEW){
		CHECK_CUDA(cudaFree(d_X));
		CHECK_CUDA(cudaFree(d_UIview));
	}

	CHECK_CUDA(cudaDeviceReset());
	system("pause");
	return EXIT_SUCCESS; 
}