#include "head.cuh"

/* Binarize the r-by-n column-major matrix d_B in place: an entry becomes
 * 0 when it is at or below its row's mean (d_sum_of_B[row] / n), else 1.
 * Expects a 2-D launch whose grid covers n columns in x and r rows in y;
 * d_sum_of_B[row] must already hold the per-row sum. */
__global__ void quantize(float *d_B, const int r, const int n, const float *d_sum_of_B){
	const int i = blockDim.y * blockIdx.y + threadIdx.y; /* row, [0, r) */
	const int j = blockDim.x * blockIdx.x + threadIdx.x; /* col, [0, n) */
	if (i >= r || j >= n) return; /* guard the grid tail */
	const int idx = i + j * r;
	const float row_mean = d_sum_of_B[i] / n;
	d_B[idx] = (d_B[idx] <= row_mean) ? 0.0f : 1.0f;
}

/* Gradient of the objective w.r.t. the user factor matrix F (r x nu,
 * column-major). Launch contract: one block per user (gridDim.x == nu)
 * and one thread per latent dimension (blockDim.x == r). Only observed
 * entries (d_UIview > 0) contribute; d_sum_of_F feeds the balance
 * regularizer scaled by lambda. */
__global__ void get_grad_of_F(float *d_grad_of_F, const int r, const int nu, 
	const float *d_UIview, const float *d_F, const float *d_H, const int ni, 
	const float lambda, const float *d_sum_of_F){
	const int k = threadIdx.x; /* latent dimension, [0, r) */
	const int u = blockIdx.x;  /* user index, [0, nu) */

	float acc = 0.0f;
	for (int item = 0; item < ni; item++){
		const float view = d_UIview[u + item*nu];
		if (view <= 0) continue; /* unobserved entry: skip */
		/* inner product of user u's factors with item's factors */
		float dot = 0.0f;
		for (int d = 0; d < r; d++){
			dot += d_F[d + u*r] * d_H[d + item*r];
		}
		acc += (view - 0.5f - 0.5f / r*dot)*d_H[k + item*r];
	}
	d_grad_of_F[k + u*r] = -1.0f*acc/r + 2 * lambda * d_sum_of_F[k];
}

/* Gradient of the objective w.r.t. the item factor matrix H (r x ni,
 * column-major); mirror image of get_grad_of_F. Launch contract: one
 * block per item (gridDim.x == ni) and one thread per latent dimension
 * (blockDim.x == r). Only observed entries (d_UIview > 0) contribute;
 * d_sum_of_H feeds the balance regularizer scaled by lambda. */
__global__ void get_grad_of_H(float *d_grad_of_H, const int r, const int ni, 
	const float *d_UIview, const float *d_F, const float *d_H, const int nu, 
	const float lambda, const float *d_sum_of_H){
	const int k = threadIdx.x; /* latent dimension, [0, r) */
	const int it = blockIdx.x; /* item index, [0, ni) */

	float acc = 0.0f;
	for (int user = 0; user < nu; user++){
		const float view = d_UIview[user + it*nu];
		if (view <= 0) continue; /* unobserved entry: skip */
		/* inner product of the user's factors with item it's factors */
		float dot = 0.0f;
		for (int d = 0; d < r; d++){
			dot += d_F[d + user*r] * d_H[d + it*r];
		}
		acc += (view - 0.5f - 0.5f / r*dot)*d_F[k + user*r];
	}
	d_grad_of_H[k + it*r] = -1.0f*acc / r + 2 * lambda*d_sum_of_H[k];
}

/* Per-block partial sums of the squared residual over observed entries:
 * each thread owns one (user, item) cell; thread 0 of the block folds the
 * block's contributions into d_temp[blockId]. Launch contract: 2-D blocks
 * with at most 1024 threads (the shared buffer size); grid covers ni
 * columns in x and nu rows in y. */
__global__ void get_obj_part1(const float *d_UIview, const float *d_F, const float *d_H,
	const int r, const int nu, const int ni,
	float *d_temp){
	const int u = threadIdx.y + blockDim.y*blockIdx.y;  /* user (row) */
	const int it = threadIdx.x + blockDim.x*blockIdx.x; /* item (col) */
	const int localId = threadIdx.x + threadIdx.y*blockDim.x;
	const int blockId = blockIdx.x + blockIdx.y*gridDim.x;

	__shared__ float partial[1024];
	partial[localId] = 0.0f; /* zero-fill so idle/out-of-range slots add nothing */
	__syncthreads();

	if (u < nu && it < ni && d_UIview[u + it*nu] > 0){
		float dot = 0;
		for (int d = 0; d < r; d++){
			dot += d_F[d + u*r] * d_H[d + it*r];
		}
		const float err = d_UIview[u + it*nu] - 0.5f - 0.5f / r*dot;
		partial[localId] = err*err;
	}
	__syncthreads();
	/* sequential fold by thread 0 keeps the summation order deterministic */
	if (localId == 0){
		float block_sum = 0.0f;
		for (int s = 0; s < 1024; s++){
			block_sum += partial[s];
		}
		d_temp[blockId] = block_sum;
	}
}

/* BCCF-style binary-code learning: for each code length r in {8,...,128},
 * run gradient descent on real-valued factors F (r x nUsers) and
 * H (r x nItems), then quantize each to {0,1} against its row means and
 * write the codes to disk. nUsers/nItems/UIviewFileName and the CHECK_*
 * macros come from head.cuh. */
int main(void){
	cudaError_t cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, 
			"cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
		system("pause"); exit(1);
	}
	printf("Now: %s\n", UIviewFileName);
	clock_t start_of_main = clock();

	/* Load the user-item matrix, split off a test portion, dump it to disk. */
	float *UIview = NULL;
	float split_rate = 0.8f;
	float *UIview_test = NULL;
	std::stringstream ss;

	UIview = (float*)malloc(nUsers*nItems*sizeof(float));
	CHECK_PARAM(UIview != NULL, "allocate memory space for UIview failed.");
	UIview_test = (float*)malloc(nUsers*nItems*sizeof(float));
	CHECK_PARAM(UIview_test != NULL, "allocate memory space for UIview_test failed.");
	CHECK_PARAM(read_data_2_matrix(UIview, UIview_test, split_rate), "read data set failed.");
	ss.str("");
	ss << nUsers << "_" << nItems << "_" << "UIview_test.txt";
	write_test_data(UIview_test, nUsers, nItems, ss.str().c_str());
	free(UIview_test); UIview_test = NULL;
	
	clock_t end_of_read_dataset = clock();
	printf("read data set done. cost time:%.2f s\n", 
		(double)(end_of_read_dataset - start_of_main) / CLOCKS_PER_SEC);

	float *d_UIview;
	CHECK_CUDA(cudaMalloc(&d_UIview, nUsers*nItems*sizeof(float)));
	CHECK_CUDA(cudaMemcpy(d_UIview, UIview, nUsers*nItems*sizeof(float), cudaMemcpyHostToDevice));
	/* The device holds the only copy we use from here on; release the host
	 * buffer now (it was previously leaked). */
	free(UIview); UIview = NULL;

	int x[5] = { 8, 16, 32, 64, 128 };
	for (int round = 0; round < 5; round++){
		clock_t start_of_round = clock();
		printf("===================================\nround:%d %d bits\n", round, x[round]);

		int r = x[round];
		unsigned long long seed = clock() % 1000;
		float mean = 0.0f;               /* mean of the normal initializer */
		float standard_deviation = 1.0f;
		float alpha = 1.0f;              /* cuBLAS scalar, set per call */
		float step_size = 0.0001f;       /* float literals: avoid double->float truncation warnings */
		float lambda = 0.001f;
		
		/* Random normal initialization of F and H. */
		curandGenerator_t gen;
		CHECK_CURAND(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
		CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(gen, seed));
		float *d_F;
		CHECK_CUDA(cudaMalloc(&d_F, r*nUsers*sizeof(float)));
		float *d_H;
		CHECK_CUDA(cudaMalloc(&d_H, r*nItems*sizeof(float)));

		CHECK_CURAND(curandGenerateNormal(gen, d_F, r*nUsers, mean, standard_deviation));
		CHECK_CURAND(curandGenerateNormal(gen, d_H, r*nItems, mean, standard_deviation));

		CHECK_CURAND(curandDestroyGenerator(gen));

		cublasHandle_t cublashandle;
		CHECK_CUDA_STATUS(cublasCreate(&cublashandle));
		float *d_sum_of_F, *h_sum_of_F;
		h_sum_of_F = (float*)malloc(r*sizeof(float));
		CHECK_PARAM(h_sum_of_F != NULL, "allocate memory space for h_sum_of_F failed.");
		CHECK_CUDA(cudaMalloc(&d_sum_of_F, r*sizeof(float)));
		float *d_sum_of_H, *h_sum_of_H;
		h_sum_of_H = (float*)malloc(r*sizeof(float));
		CHECK_PARAM(h_sum_of_H != NULL, "allocate memory space for h_sum_of_H failed.");
		CHECK_CUDA(cudaMalloc(&d_sum_of_H, r*sizeof(float)));
		float *d_grad_of_F;
		CHECK_CUDA(cudaMalloc(&d_grad_of_F, r*nUsers*sizeof(float)));
		float *d_grad_of_H;
		CHECK_CUDA(cudaMalloc(&d_grad_of_H, r*nItems*sizeof(float)));
		bool converge = false;
		int iter = 0;

		cudaStream_t stream4F;
		cudaStream_t stream4H;
		CHECK_CUDA(cudaStreamCreate(&stream4F));
		CHECK_CUDA(cudaStreamCreate(&stream4H));

		/* Objective scratch buffer: allocated ONCE per round. The original
		 * code cudaMalloc'd it inside the loop and never freed it, leaking
		 * one buffer per iteration. Its size does not depend on r. */
		dim3 szb(32, 32, 1);
		dim3 szg((nItems + szb.x - 1) / szb.x, (nUsers + szb.y - 1) / szb.y, 1);
		float *d_temp;
		CHECK_CUDA(cudaMalloc(&d_temp, szg.x*szg.y*sizeof(float)));
		CHECK_CUDA(cudaMemset(d_temp, 0, szg.x*szg.y*sizeof(float)));

		while (!converge){
			/* Per-dimension absolute row sums of F and H (cublasSasum sums
			 * |x|). NOTE(review): with a host result pointer these calls
			 * block, so the two streams overlap little here. */
			CHECK_CUDA_STATUS(cublasSetStream(cublashandle, stream4F));
			for (int i = 0; i < r; i++){
				CHECK_CUDA_STATUS(cublasSasum(cublashandle, nUsers, d_F + i, r, &h_sum_of_F[i]));
			}
			CHECK_CUDA_STATUS(cublasSetStream(cublashandle, stream4H));
			for (int i = 0; i < r; i++){
				CHECK_CUDA_STATUS(cublasSasum(cublashandle, nItems, d_H + i, r, &h_sum_of_H[i]));
			}
			CHECK_CUDA(cudaMemcpyAsync(d_sum_of_F, h_sum_of_F, r*sizeof(float), 
				cudaMemcpyHostToDevice, stream4F));
			CHECK_CUDA(cudaMemcpyAsync(d_sum_of_H, h_sum_of_H, r*sizeof(float),
				cudaMemcpyHostToDevice, stream4H));

			/* One block per user/item, one thread per latent dimension. */
			get_grad_of_F << <nUsers, r, 0, stream4F >> >(d_grad_of_F, r, nUsers, d_UIview, d_F, d_H, nItems, lambda, d_sum_of_F);
			CHECK_CUDA(cudaGetLastError());
			get_grad_of_H << <nItems, r, 0, stream4H >> >(d_grad_of_H, r, nItems, d_UIview, d_F, d_H, nUsers, lambda, d_sum_of_H);
			CHECK_CUDA(cudaGetLastError());

			/* Gradient step: X <- X - step_size * grad(X). */
			alpha = -1.0f*step_size;
			CHECK_CUDA_STATUS(cublasSetStream(cublashandle, stream4F));
			CHECK_CUDA_STATUS(cublasSaxpy(cublashandle, r*nUsers, &alpha, d_grad_of_F, 1, d_F, 1));
			CHECK_CUDA_STATUS(cublasSetStream(cublashandle, stream4H));
			CHECK_CUDA_STATUS(cublasSaxpy(cublashandle, r*nItems, &alpha, d_grad_of_H, 1, d_H, 1));

			CHECK_CUDA(cudaStreamSynchronize(stream4F));
			CHECK_CUDA(cudaStreamSynchronize(stream4H));

			if (iter > 50){
				converge = true;
			}
			else{
				/* Converged when both gradient norms are tiny. */
				float temp1;
				CHECK_CUDA_STATUS(cublasSnrm2(cublashandle, r*nUsers, d_grad_of_F, 1, &temp1));
				float temp2;
				CHECK_CUDA_STATUS(cublasSnrm2(cublashandle, r*nItems, d_grad_of_H, 1, &temp2));
				printf("iter:%d %f %f ", iter, temp1, temp2);
				if (temp1 <= 1e-3&&temp2 <= 1e-3){
					converge = true;
				}

				/* Objective = sum of squared residuals + lambda-free balance
				 * terms ||sum_of_F||^2 + ||sum_of_H||^2. */
				get_obj_part1 << <szg, szb >> >(d_UIview, d_F, d_H, r, nUsers, nItems, d_temp);
				CHECK_CUDA(cudaGetLastError());
				float h_temp;
				CHECK_CUDA_STATUS(cublasSasum(cublashandle, szg.x*szg.y, d_temp, 1, &h_temp));
				float obj;
				CHECK_CUDA_STATUS(cublasSnrm2(cublashandle, r, d_sum_of_F, 1, &temp1));
				CHECK_CUDA_STATUS(cublasSnrm2(cublashandle, r, d_sum_of_H, 1, &temp2));
				obj = h_temp + temp1*temp1 + temp2*temp2;
				printf("obj value:%f\n",obj);
			}
			iter++;
		}
		CHECK_CUDA(cudaFree(d_temp)); d_temp = NULL;
		CHECK_CUDA(cudaStreamDestroy(stream4F));
		CHECK_CUDA(cudaStreamDestroy(stream4H));
		CHECK_CUDA(cudaFree(d_grad_of_F)); d_grad_of_F = NULL;
		CHECK_CUDA(cudaFree(d_grad_of_H)); d_grad_of_H = NULL;

		CHECK_CUDA_STATUS(cublasDestroy(cublashandle));

		/* Quantize F against its row means and dump the user codes.
		 * NOTE(review): d_sum_of_F holds sums from the last iteration's
		 * PRE-update F — confirm this staleness is intended. */
		dim3 szBlock(128, 8, 1);
		dim3 szGrid((nUsers + szBlock.x - 1) / szBlock.x, (r + szBlock.y - 1) / szBlock.y, 1);
		quantize << <szGrid, szBlock >> >(d_F, r, nUsers, d_sum_of_F);
		CHECK_CUDA(cudaGetLastError());
		/* The kernel reads d_sum_of_F; finish it before freeing the buffer. */
		CHECK_CUDA(cudaDeviceSynchronize());
		CHECK_CUDA(cudaFree(d_sum_of_F)); d_sum_of_F = NULL;
		free(h_sum_of_F); h_sum_of_F = NULL;

		float *B = (float *)malloc(r*nUsers*sizeof(float));
		CHECK_PARAM(B!=NULL,"allocate space for B failed.");
		CHECK_CUDA(cudaMemcpy(B, d_F, r*nUsers*sizeof(float), cudaMemcpyDeviceToHost));
		CHECK_CUDA(cudaFree(d_F)); d_F = NULL;

		ss.str("");
		ss << nUsers << "_" << nItems << "_" << r << "_" << "B_code.txt";
		write_into_file(B, r, nUsers, ss.str().c_str());
		free(B); B = NULL;

		/* Same for H: quantize and dump the item codes. */
		szGrid.x = (nItems + szBlock.x - 1) / szBlock.x;
		szGrid.y = (r + szBlock.y - 1) / szBlock.y;
		quantize << <szGrid, szBlock >> >(d_H, r, nItems, d_sum_of_H);
		CHECK_CUDA(cudaGetLastError());
		CHECK_CUDA(cudaDeviceSynchronize());
		CHECK_CUDA(cudaFree(d_sum_of_H)); d_sum_of_H = NULL;
		free(h_sum_of_H); h_sum_of_H = NULL;

		float *D = (float *)malloc(r*nItems*sizeof(float));
		CHECK_PARAM(D != NULL, "allocate space for D failed.");
		CHECK_CUDA(cudaMemcpy(D, d_H, r*nItems*sizeof(float), cudaMemcpyDeviceToHost));
		CHECK_CUDA(cudaFree(d_H)); d_H = NULL;

		ss.str("");
		ss << nUsers << "_" << nItems << "_" << r << "_" << "D_code.txt";
		write_into_file(D, r, nItems, ss.str().c_str());
		free(D); D=NULL;
		
		clock_t end_of_round = clock();
		printf("BCCF cost time %.2f\n", (double)(end_of_round - start_of_round) / CLOCKS_PER_SEC);

	}
	clock_t end_of_main = clock();
	printf("total cost time %.2f\n", (double)(end_of_main - start_of_main) / CLOCKS_PER_SEC);
	
	CHECK_CUDA(cudaFree(d_UIview));

	CHECK_CUDA(cudaDeviceReset());
	system("pause");
	return EXIT_SUCCESS; 
}