#include "head.cuh"

// Map each entry of the nu-by-ni column-major matrix d_M from [-r, r] into
// [0, 1]:  M = M / (2r) + 0.5.
// Fix: the original took `row` from the x dimension and `col` from y, while the
// launch in main() sizes gridDim.x by the number of columns (ni) and gridDim.y
// by the number of rows (nu) — the same convention as the other 2-D kernels in
// this file. With the old mapping, rows beyond the grid's x-extent were never
// processed. Index rows by y and columns by x so the kernel matches its launch.
__global__ void get_M_kernel(float *d_M, int nu, int ni, int r){
	int row = threadIdx.y + blockDim.y * blockIdx.y;
	int col = threadIdx.x + blockDim.x * blockIdx.x;
	if (row < nu && col < ni){
		d_M[row + nu*col] /= 2.0f*r;
		d_M[row + nu*col] += 0.5f;
	}
}
// Binarize the r-by-n column-major matrix d_B in place: every entry becomes
// +1 when non-negative and -1 otherwise. Launched with x spanning the n
// columns and y spanning the r rows (see make_sign_B).
__global__ void make_sign_B_kenel(float *d_B, const int r, const int n){
	int row = threadIdx.y + blockDim.y * blockIdx.y;
	int col = threadIdx.x + blockDim.x * blockIdx.x;
	if (row >= r || col >= n)
		return;
	int idx = row + col*r;
	d_B[idx] = (d_B[idx] >= 0) ? 1.0f : -1.0f;
}

// Host wrapper: binarize the r-by-n device matrix d_B to +/-1 in place.
// Grid layout: x covers the n columns, y covers the r rows (ceil-div).
void make_sign_B(float *d_B, const int r, const int n){
	dim3 szBlock(128, 8, 1);
	dim3 szGrid((n + szBlock.x - 1) / szBlock.x, (r + szBlock.y - 1) / szBlock.y, 1);
	make_sign_B_kenel << <szGrid, szBlock >> >(d_B, r, n);
	// Fix: surface launch-configuration errors here instead of letting them
	// show up at an unrelated later CUDA call.
	CHECK_CUDA(cudaGetLastError());
}
// Binarize the r-by-n column-major matrix d_B in place to {0, 1}: entries
// that are non-negative become 1, negative entries become 0.
__global__ void make_zerone_B_kenel(float *d_B, const int r, const int n){
	int row = threadIdx.y + blockDim.y * blockIdx.y;
	int col = threadIdx.x + blockDim.x * blockIdx.x;
	if (row >= r || col >= n)
		return;
	int idx = row + col*r;
	d_B[idx] = (d_B[idx] >= 0) ? 1.0f : 0.0f;
}
// Host wrapper: map the r-by-n device matrix d_B to {0, 1} in place.
// Grid layout mirrors make_sign_B: x covers columns, y covers rows.
void make_zerone_B(float *d_B, const int r, const int n){
	dim3 szBlock(128, 8, 1);
	dim3 szGrid((n + szBlock.x - 1) / szBlock.x, (r + szBlock.y - 1) / szBlock.y, 1);
	make_zerone_B_kenel << <szGrid, szBlock >> >(d_B, r, n);
	// Fix: check for launch-configuration errors immediately.
	CHECK_CUDA(cudaGetLastError());
}

// Row-mean reduction: one block per row of the d-by-n column-major matrix
// d_a; block `row` writes mean(row) into d_res[row].
// Requires blockDim.x <= 1024 (size of the shared scratch array).
__global__ void get_mean_of_U_kernel(float *d_res, const float *d_a, const int d, const int n){
	int row = blockIdx.x;
	int threads_num = blockDim.x;
	__shared__ float shared_mem[1024];
	int tid = threadIdx.x;
	// Each thread strides across the row's columns accumulating a private sum
	// (t_sum stays 0.0f for threads with tid >= n).
	float t_sum = 0.0f;
	for (int i = 0; i*threads_num + tid < n; ++i){
		t_sum += d_a[row + (i*threads_num + tid)*d];
	}
	shared_mem[tid] = t_sum;
	__syncthreads();
	if (tid == 0){
		// Fix: only sum the blockDim.x slots that were actually written.
		// The original always read all 1024 slots, i.e. uninitialized shared
		// memory whenever the kernel was launched with fewer than 1024 threads.
		t_sum = 0.0f;
		for (int i = 0; i < threads_num; i++){
			t_sum += shared_mem[i];
		}
		d_res[row] = t_sum / n;
	}
}

// Host wrapper: compute the per-row mean of the d-by-n device matrix d_U
// into d_miu (length d). One block per row, 1024 threads per block.
void get_mean_of_U(float *d_miu, /* dX1 */
	const float *d_U, /* dXn */
	const int d, const int n){
	get_mean_of_U_kernel << <d, 1024 >> >(d_miu, d_U, d, n);
	// Fix: report launch errors at the call site.
	CHECK_CUDA(cudaGetLastError());
}

// Symmetric eigendecomposition of the A_width-by-A_width device matrix d_A
// (leading dimension A_hight) via cuSOLVER Ssyevd. On return d_A holds the
// eigenvectors and d_W the eigenvalues, in ascending order (cuSOLVER syevd
// convention — callers below reverse columns with swap<<<>>> to get the
// largest first). Throws std::runtime_error if the solver reports failure.
void solve_eig(float *d_A, /*in as A;
						   out as eigenvectors*/
						   float *d_W, int A_hight, int A_width){
	cusolverDnHandle_t handle = NULL;
	int*devInfo = NULL;
	float *d_work = NULL;
	int lwork = 0;
	int info_gpu = 0;
	CHECK_CUSOLVER(cusolverDnCreate(&handle));
	CHECK_CUDA(cudaMalloc(&devInfo, sizeof(int)));
	cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR;
	cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER;
	CHECK_CUSOLVER(cusolverDnSsyevd_bufferSize(handle, jobz, uplo,
		A_width, d_A, A_hight, d_W, &lwork));
	// Fix: check the synchronizations instead of ignoring their return codes.
	CHECK_CUDA(cudaDeviceSynchronize());
	CHECK_CUDA(cudaGetLastError());
	printf("\nlwork:%d\n",lwork);
	CHECK_CUDA(cudaMalloc(&d_work, sizeof(float)*lwork));

	CHECK_CUSOLVER(cusolverDnSsyevd(handle, jobz, uplo,
		A_width, d_A, A_hight, d_W, d_work, lwork, devInfo));
	CHECK_CUDA(cudaDeviceSynchronize());
	CHECK_CUDA(cudaMemcpy(&info_gpu, devInfo, sizeof(int), cudaMemcpyDeviceToHost));//must be zero
	// Fix: release GPU resources BEFORE throwing — the original leaked
	// devInfo, d_work and the cusolver handle on the error path.
	CHECK_CUDA(cudaFree(devInfo));
	CHECK_CUDA(cudaFree(d_work));
	if (handle) cusolverDnDestroy(handle);
	if (info_gpu != 0)
		throw std::runtime_error("solve eig error.");
}

// Subtract the per-row mean d_miu[row] from every entry of the corresponding
// row of the n-by-m column-major matrix d_U, writing into d_zero_mean_U.
// One block per row; threads grid-stride over the m columns.
__global__ void zero_mean_U(float *d_zero_mean_U, const float *d_U, const float *d_miu, const int n, const int m){
	int row = blockIdx.x;
	int stride = blockDim.x;
	float mean = d_miu[row];
	for (int col = threadIdx.x; col < m; col += stride){
		int idx = row + n*col;
		d_zero_mean_U[idx] = d_U[idx] - mean;
	}
}

// Fill every column of the r-by-n column-major matrix d_temp4 with the scaled
// length-r vector d_temp3:  temp4[i, col] = alpha * sv * 0.5 * m * temp3[i].
// One thread per column.
__global__ void get_d_temp4(float *d_temp4, float *d_temp3, const float alpha, const float sv, const int r, const int n, const int m){
	int tid = threadIdx.x + blockDim.x*blockIdx.x;
	if (tid < n){
		for (int i = 0; i < r; i++){
			// Fix: the original read d_temp3[r] — one element past the end of
			// the length-r buffer — and used that single garbage value for the
			// whole column. The intended element is d_temp3[i].
			d_temp4[i + r*tid] = alpha*sv*0.5f*m*d_temp3[i];
		}
	}
}

// Elementwise combination of three r-by-n column-major matrices:
//   Dh = temp1 + temp2 - temp4.
// One thread per column.
__global__ void get_d_Dh(float *d_Dh, const float *d_temp1, const float *d_temp2, const float *d_temp4, const int r, const int n){
	int col = threadIdx.x + blockDim.x*blockIdx.x;
	if (col >= n)
		return;
	for (int i = 0; i < r; i++){
		int idx = i + r*col;
		d_Dh[idx] = d_temp1[idx] + d_temp2[idx] - d_temp4[idx];
	}
}

// Reverse the order of the r columns of the n-by-r column-major matrix d_V
// in place (column i exchanges with column r-1-i; the middle column of an
// odd count stays put). One thread per row.
__global__ void swap(float *d_V, const int n, const int r){
	int row = threadIdx.x + blockDim.x*blockIdx.x;
	if (row >= n)
		return;
	for (int i = 0; i < r / 2; i++){
		int j = r - 1 - i;
		float held = d_V[row + n*j];
		d_V[row + n*j] = d_V[row + i*n];
		d_V[row + i*n] = held;
	}
}

// Copy the last r columns of the m-by-m column-major matrix d_cov_UT into
// the m-by-r matrix d_Ph in reversed order (column m-1 first). One thread
// per row.
__global__ void get_Ph(float *d_Ph, const float *d_cov_UT, const int m, const int r){
	int row = threadIdx.x + blockDim.x*blockIdx.x;
	if (row >= m)
		return;
	for (int i = 0; i < r; i++){
		int src_col = m - i - 1;
		d_Ph[row + i*m] = d_cov_UT[row + m*src_col];
	}
}

// Scale column i of the n-by-r column-major matrix d_Pg by 1/sqrt(d_W[i]).
// Columns whose d_W entry is not strictly positive are left unchanged.
// One thread per row.
__global__ void get_Pg_kernel(float *d_Pg, const int n, const int r, float *d_W){
	int row = threadIdx.x + blockDim.x*blockIdx.x;
	if (row >= n)
		return;
	for (int i = 0; i < r; i++){
		float w = d_W[i];
		if (w > 0)
			d_Pg[row + i*n] /= sqrtf(w);
	}
}

// Compute Pg = zero_mean_U (n x m) * eigvecs (m x r) on the device, then
// rescale each column by 1/sqrt(eigenvalue) via get_Pg_kernel.
// h_cov_U supplies the first r eigenvector columns, h_W the eigenvalues,
// h_zero_mean_U the centered matrix — all host pointers copied here.
void get_Pg(float *d_Pg, const int n, const int r,
	const float *h_cov_U, const float *h_W, const int m, const float *h_zero_mean_U, cublasHandle_t cublashandle){
	float *d_evec = NULL;
	float *d_zmU = NULL;
	CHECK_CUDA(cudaMalloc(&d_evec, m*r*sizeof(float)));
	CHECK_CUDA(cudaMemcpy(d_evec, h_cov_U, m*r*sizeof(float), cudaMemcpyHostToDevice));
	CHECK_CUDA(cudaMalloc(&d_zmU, n*m*sizeof(float)));
	CHECK_CUDA(cudaMemcpy(d_zmU, h_zero_mean_U, n*m*sizeof(float), cudaMemcpyHostToDevice));
	float one(1);
	float zero(0);
	CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_N, n, r, m, &one, d_zmU, n, d_evec, m, &zero, d_Pg, n));
	CHECK_CUDA(cudaFree(d_zmU)); d_zmU = NULL;
	CHECK_CUDA(cudaFree(d_evec)); d_evec = NULL;

	// Push the eigenvalues to the device and apply the 1/sqrt(w) scaling.
	float *d_eigvals = NULL;
	CHECK_CUDA(cudaMalloc(&d_eigvals, r*sizeof(float)));
	CHECK_CUDA(cudaMemcpy(d_eigvals, h_W, r*sizeof(float), cudaMemcpyHostToDevice));
	get_Pg_kernel << <(n + 1024 - 1) / 1024, 1024 >> >(d_Pg, n, r, d_eigvals);
	CHECK_CUDA(cudaFree(d_eigvals)); d_eigvals = NULL;

}
// One alternating-minimization update of the r-by-n code matrix d_H.
// Builds  Dh = (1/n)*Rh*Uh + (lambda*sv/(2*r*n*m))*G*UT - temp4,
// factors Dh via the eigendecomposition of Dh*Dh^T, reconstructs an
// orthogonally-projected version of Dh into d_H, and finally binarizes it
// to +/-1 with make_sign_B.
// NOTE(review): `UT` is a HOST pointer (it is memcpy'd to the device below);
// every other matrix argument is a device pointer — confirm at call sites.
void update_d_H(float *d_H, const float *d_Rh, const float *d_Uh, const float lambda, const float sv, const int r, const int n, const int m, const float *d_G, const float *UT, cublasHandle_t cublashandle){
	float *d_temp1;
	CHECK_CUDA(cudaMalloc(&d_temp1, r*n*sizeof(float)));
	float alpha = 1.0f / n;
	float beta = 0.0f;
	// temp1 = (1/n) * Rh (r x r) * Uh (r x n)
	CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_N, r, n, r, &alpha, d_Rh, r, d_Uh, r, &beta, d_temp1, r));
	float *d_temp2;
	CHECK_CUDA(cudaMalloc(&d_temp2, r*n*sizeof(float)));
	// alpha is reused for the regularization term; get_d_temp4 below depends
	// on this exact value, so do not reorder these statements.
	alpha = lambda*sv / (2.0f*r*n*m);
	float *d_UT;
	CHECK_CUDA(cudaMalloc(&d_UT, m*n*sizeof(float)));
	CHECK_CUDA(cudaMemcpy(d_UT, UT, m*n*sizeof(float), cudaMemcpyHostToDevice));
	// temp2 = alpha * G (r x m) * UT (m x n)
	CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_N, r, n, m, &alpha, d_G, r, d_UT, m, &beta, d_temp2, r));
	CHECK_CUDA(cudaFree(d_UT)); d_UT = NULL;
	float *d_temp3;
	CHECK_CUDA(cudaMalloc(&d_temp3, r*sizeof(float)));
	// temp3 = row means of G (length r)
	get_mean_of_U(d_temp3, d_G, r, m);
	float *d_temp4;
	CHECK_CUDA(cudaMalloc(&d_temp4, r*n*sizeof(float)));
	// temp4 broadcasts the scaled temp3 vector across all n columns.
	get_d_temp4 << <(n + 1024 - 1) / 1024, 1024 >> >(d_temp4, d_temp3, alpha, sv, r, n, m);
	CHECK_CUDA(cudaFree(d_temp3));

	float *d_Dh;
	CHECK_CUDA(cudaMalloc(&d_Dh, r*n*sizeof(float)));
	// Dh = temp1 + temp2 - temp4
	get_d_Dh << <(n + 1024 - 1) / 1024, 1024 >> >(d_Dh, d_temp1, d_temp2, d_temp4, r, n);
	CHECK_CUDA(cudaFree(d_temp1));
	CHECK_CUDA(cudaFree(d_temp2));
	CHECK_CUDA(cudaFree(d_temp4));

	float *d_Dh_DhT;
	CHECK_CUDA(cudaMalloc(&d_Dh_DhT, r*r*sizeof(float)));
	alpha = 1.0f;
	beta = 0.0f;
	// Dh_DhT = Dh * Dh^T  (r x r, symmetric)
	CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_T, r, r, n, &alpha, d_Dh, r, d_Dh, r, &beta, d_Dh_DhT, r));
	float *d_W;
	CHECK_CUDA(cudaMalloc(&d_W, r*sizeof(float)));
	// Eigendecompose in place: d_Dh_DhT becomes the eigenvectors, d_W the
	// eigenvalues (ascending); the two swap launches reverse both so the
	// largest eigenpair comes first. swap<<<1,1024>>>(d_W, 1, r) treats d_W
	// as a 1-by-r matrix, i.e. reverses the eigenvalue vector.
	solve_eig(d_Dh_DhT, d_W, r, r);
	swap << <(r + 1024 - 1) / 1024, 1024 >> >(d_Dh_DhT, r, r);
	swap << <1, 1024 >> >(d_W, 1, r);

	float *d_V;
	CHECK_CUDA(cudaMalloc(&d_V, n*r*sizeof(float)));
	// V = Dh^T (n x r) * eigvecs (r x r)
	CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_T, CUBLAS_OP_N, n, r, r, &alpha, d_Dh, r, d_Dh_DhT, r, &beta, d_V, n));
	CHECK_CUDA(cudaFree(d_Dh));
	// Normalize each column of V by 1/sqrt(eigenvalue) (skips non-positive ones).
	get_Pg_kernel << <(n + 1024 - 1) / 1024, 1024 >> >(d_V, n, r, d_W);
	CHECK_CUDA(cudaFree(d_W));
	// H = eigvecs * V^T  (r x n)
	CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_T, r, n, r, &alpha, d_Dh_DhT, r, d_V, n, &beta, d_H, r));
	CHECK_CUDA(cudaFree(d_V));
	CHECK_CUDA(cudaFree(d_Dh_DhT));
	// Project onto {-1, +1}.
	make_sign_B(d_H, r, n);
}

// Update the r-by-r rotation matrix d_Rh given codes d_H and features d_Uh.
// Forms A = H * Uh^T, eigendecomposes both A*A^T and A^T*A, and sets
// Rh = eigvecs(AAT) * eigvecs(ATA)^T — presumably a polar/Procrustes-style
// orthogonal solution assembled from the two one-sided eigenbases
// (TODO confirm against the paper this implements).
void update_d_Rh(float *d_Rh, const float *d_H, const float *d_Uh, const int r, const int n, cublasHandle_t cublashandle){
	float *d_A;
	CHECK_CUDA(cudaMalloc(&d_A, r*r*sizeof(float)));
	float alpha(1);
	float beta(0);
	// A = H (r x n) * Uh^T (n x r)
	CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_T, r, r, n, &alpha, d_H, r, d_Uh, r, &beta, d_A, r));

	float *d_ATA, *d_AAT;
	CHECK_CUDA(cudaMalloc(&d_AAT, r*r*sizeof(float)));
	CHECK_CUDA(cudaMalloc(&d_ATA, r*r*sizeof(float)));
	CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_T, CUBLAS_OP_N, r, r, r, &alpha, d_A, r, d_A, r, &beta, d_ATA, r));
	CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_T, r, r, r, &alpha, d_A, r, d_A, r, &beta, d_AAT, r));
	CHECK_CUDA(cudaFree(d_A));

	float *d_W;
	CHECK_CUDA(cudaMalloc(&d_W, r*sizeof(float)));
	// Both eig solves overwrite their input with eigenvectors; d_W is reused
	// as scratch for the eigenvalues, which are discarded — only the
	// eigenvector bases matter here.
	solve_eig(d_AAT, d_W, r, r);
	solve_eig(d_ATA, d_W, r, r);
	CHECK_CUDA(cudaFree(d_W)); d_W = NULL;
	// Reverse eigenvector columns so both bases are ordered largest-first.
	swap << <(r + 1024 - 1) / 1024, 1024 >> >(d_AAT, r, r);
	swap << <(r + 1024 - 1) / 1024, 1024 >> >(d_ATA, r, r);
	// Rh = eigvecs(AAT) * eigvecs(ATA)^T
	CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_T, r, r, r, &alpha, d_AAT, r, d_ATA, r, &beta, d_Rh, r));
	CHECK_CUDA(cudaFree(d_ATA));
	CHECK_CUDA(cudaFree(d_AAT));
}


// Entry point: loads the user-item view matrix, performs PCA-style
// preprocessing on both orientations, then for each code length in
// {8,16,32,64,128} runs 20 alternating-minimization iterations and writes
// the binary codes B and D to text files.
// Fixes in this revision:
//  * the dead `m >= 2 * m` branch condition is corrected to `m >= 2 * n`;
//  * the per-iteration printf passed 3 args to 2 format specifiers and fed
//    an int through %.2f (undefined behavior) — format string corrected;
//  * the no-op statement `d_W;` after a cudaFree is corrected to `d_W = NULL;`.
int main(void){
	cudaError_t cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr,
			"cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
		system("pause"); exit(1);
	}
	printf("Now: %s\n", UIviewFileName);
	clock_t start_of_main = clock();

	Params params;
	params.split_rate = 0.8f;
	params.lambda = 1e0;
	float lambda = params.lambda;
	float alpha;
	float beta;
	cublasHandle_t cublashandle;
	float *U, *d_U;
	float *UT, *d_UT;
	float *h_zero_mean_UT, *d_zero_mean_UT;
	float *h_zero_mean_U, *d_zero_mean_U;
	float *h_cov_UT, *d_cov_UT;
	float *h_cov_U, *d_cov_U;


	int n = nUsers;
	int m = nItems;
	// Read the n-by-m rating/view matrix (and its transpose) from disk and
	// split off a test partition.
	U = (float*)malloc(n*m*sizeof(float));
	CHECK_PARAM(U != NULL, "allocate space for U failed.");
	UT = (float*)malloc(m*n*sizeof(float));
	CHECK_PARAM(UT != NULL, "allocate space for UT failed.");
	float *UIview_test = (float*)malloc(n*m*sizeof(float));
	CHECK_PARAM(UIview_test != NULL, "allocate space for UIview_test failed.");
	int nnz;
	CHECK_PARAM(read_data_2_matrix(U, UIview_test, params.split_rate, UT, nnz),
		"read data set failed.\n");
	std::stringstream ss;
	ss.str("");
	ss << n << "_" << m << "_" << "UIview_test.txt";
	write_test_data(UIview_test, n, m, ss.str().c_str());
	free(UIview_test);
	clock_t end_of_read_dataset = clock();
	printf("read data set done. cost time:%.2f\n", (double)(end_of_read_dataset - start_of_main) / CLOCKS_PER_SEC);


	// ---- Item-side preprocessing: zero-center UT and form its covariance. ----
	CHECK_CUDA(cudaMalloc(&d_UT, m*n*sizeof(float)));
	CHECK_CUDA(cudaMemcpy(d_UT, UT, m*n*sizeof(float), cudaMemcpyHostToDevice));

	float *d_miu_h;
	CHECK_CUDA(cudaMalloc(&d_miu_h, m*sizeof(float)));
	get_mean_of_U(d_miu_h, d_UT, m, n);
	CHECK_CUDA(cudaMalloc(&d_zero_mean_UT, m*n*sizeof(float)));
	zero_mean_U << <m, 1024 >> >(d_zero_mean_UT, d_UT, d_miu_h, m, n);
	CHECK_CUDA(cudaFree(d_UT)); d_UT = NULL;
	CHECK_CUDA(cudaFree(d_miu_h)); d_miu_h = NULL;

	h_zero_mean_UT = (float*)malloc(m*n*sizeof(float));
	CHECK_PARAM(h_zero_mean_UT != NULL, "allocate space for h_zero_mean_UT failed.");
	CHECK_CUDA(cudaMemcpy(h_zero_mean_UT, d_zero_mean_UT, m*n*sizeof(float), cudaMemcpyDeviceToHost));


	CHECK_CUDA(cudaMalloc(&d_cov_UT, m*m*sizeof(float)));
	CHECK_CUBLAS(cublasCreate(&cublashandle));
	alpha = 1.0f;
	beta = 0.0f;
	CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_T, m, m, n, &alpha, d_zero_mean_UT, m, d_zero_mean_UT, m, &beta, d_cov_UT, m));
	CHECK_CUBLAS(cublasDestroy(cublashandle));
	CHECK_CUDA(cudaDeviceSynchronize());
	CHECK_CUDA(cudaGetLastError());
	CHECK_CUDA(cudaFree(d_zero_mean_UT)); d_zero_mean_UT = NULL;

	// Eigendecompose the item covariance; eigenvectors stay in d_cov_UT.
	float *d_W;
	CHECK_CUDA(cudaMalloc(&d_W, m*sizeof(float)));
	solve_eig(d_cov_UT, d_W, m, m);
	CHECK_CUDA(cudaFree(d_W)); d_W = NULL;
	h_cov_UT = (float*)malloc(m*m*sizeof(float));
	CHECK_PARAM(h_cov_UT != NULL, "allocate space for h_cov_UT failed.");
	CHECK_CUDA(cudaMemcpy(h_cov_UT, d_cov_UT, m*m*sizeof(float), cudaMemcpyDeviceToHost));
	CHECK_CUDA(cudaFree(d_cov_UT)); d_cov_UT = NULL;

	// ---- User-side preprocessing: zero-center U and form the m-by-m covariance. ----
	CHECK_CUDA(cudaMalloc(&d_U, n*m*sizeof(float)));
	CHECK_CUDA(cudaMemcpy(d_U, U, n*m*sizeof(float),cudaMemcpyHostToDevice));
	float *d_miu_g;
	CHECK_CUDA(cudaMalloc(&d_miu_g, n*sizeof(float)));
	get_mean_of_U(d_miu_g, d_U, n, m);

	CHECK_CUDA(cudaMalloc(&d_zero_mean_U, n*m*sizeof(float)));
	zero_mean_U << <n, 1024 >> >(d_zero_mean_U, d_U, d_miu_g, n, m);
	CHECK_CUDA(cudaFree(d_U)); d_U = NULL;
	CHECK_CUDA(cudaFree(d_miu_g)); d_miu_g = NULL;

	h_zero_mean_U = (float*)malloc(n*m*sizeof(float));
	CHECK_PARAM(h_zero_mean_U != NULL, "allocate space for h_zero_mean_U failed.");
	CHECK_CUDA(cudaMemcpy(h_zero_mean_U, d_zero_mean_U, n*m*sizeof(float), cudaMemcpyDeviceToHost));

	CHECK_CUDA(cudaMalloc(&d_cov_U, m*m*sizeof(float)));
	CHECK_CUBLAS(cublasCreate(&cublashandle));
	CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_T, CUBLAS_OP_N, m, m, n, &alpha, d_zero_mean_U, n, d_zero_mean_U, n, &beta, d_cov_U, m));
	CHECK_CUBLAS(cublasDestroy(cublashandle));
	CHECK_CUDA(cudaFree(d_zero_mean_U)); d_zero_mean_U = NULL;
	CHECK_CUDA(cudaMalloc(&d_W, m*sizeof(float)));
	solve_eig(d_cov_U, d_W, m, m);
	// Reverse eigenvector columns and the eigenvalue vector so the largest
	// eigenpairs come first.
	swap << <(m + 1024 - 1) / 1024, 1024 >> >(d_cov_U, m, m);
	swap << <1, 1024 >> >(d_W, 1, m);
	h_cov_U = (float*)malloc(m*m*sizeof(float));
	CHECK_PARAM(h_cov_U != NULL, "allocate space for h_cov_U failed.");
	CHECK_CUDA(cudaMemcpy(h_cov_U, d_cov_U, m*m*sizeof(float), cudaMemcpyDeviceToHost));
	CHECK_CUDA(cudaFree(d_cov_U)); d_cov_U = NULL;
	float *h_W = (float*)malloc(m*sizeof(float));
	CHECK_PARAM(h_W != NULL, "allocate space for h_W failed.");
	CHECK_CUDA(cudaMemcpy(h_W, d_W, m*sizeof(float),cudaMemcpyDeviceToHost));
	// Fix: the original ended this line with the no-op statement "d_W;".
	CHECK_CUDA(cudaFree(d_W)); d_W = NULL;

	//CHECK_CUDA(cudaMalloc(&d_cov_U, n*n*sizeof(float)));
	//CHECK_CUBLAS(cublasCreate(&cublashandle));
	//CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_T, n, n, m, &alpha, d_zero_mean_U, n, d_zero_mean_U, n, &beta, d_cov_U, n));
	//CHECK_CUBLAS(cublasDestroy(cublashandle));
	//
	//CHECK_CUDA(cudaFree(d_zero_mean_U)); d_zero_mean_U = NULL;
	//CHECK_CUDA(cudaMalloc(&d_W, n*sizeof(float)));
	//solve_eig(d_cov_U, d_W, n, n);// d_cov_U is too big for GPU to eigdecompose

	//CHECK_CUDA(cudaFree(d_W)); d_W = NULL;

	//h_cov_U = (float*)malloc(n*n*sizeof(float));
	//CHECK_PARAM(h_cov_U != NULL, "allocate space for h_cov_U failed.");
	//CHECK_CUDA(cudaMemcpy(h_cov_U, d_cov_U, n*n*sizeof(float), cudaMemcpyDeviceToHost));
	//CHECK_CUDA(cudaFree(d_cov_U)); d_cov_U = NULL;
	clock_t before_round = clock();
	printf("before round cost time %.2f\n", (double)(before_round - start_of_main) / CLOCKS_PER_SEC);


	// One round per code length.
	int x[5] = { 8, 16, 32, 64, 128 };
	for (int round = 0; round < 5; round++){
		clock_t start_of_round = clock();
		printf("=====================\nround:%d %d bits\n",round,x[round]);
		params.r = x[round];
		int r = params.r;
		// d_cov_UT was freed after the eig step; restore it from the host copy.
		if (d_cov_UT == NULL){
			CHECK_CUDA(cudaMalloc(&d_cov_UT, m*m*sizeof(float)));
			CHECK_CUDA(cudaMemcpy(d_cov_UT, h_cov_UT, m*m*sizeof(float), cudaMemcpyHostToDevice));
		}
		float *d_Ph;
		CHECK_CUDA(cudaMalloc(&d_Ph, m*r*sizeof(float)));

		// Ph = top-r eigenvector columns of cov(UT), largest first.
		get_Ph << <(m + 1024 - 1) / 1024, 1024 >> >(d_Ph, d_cov_UT, m, r);
		CHECK_CUDA(cudaFree(d_cov_UT)); d_cov_UT = NULL;

		// Uh = Ph^T * zero_mean_UT  (r x n projected item features).
		float *d_Uh;
		CHECK_CUDA(cudaMalloc(&d_Uh, r*n*sizeof(float)));
		if (d_zero_mean_UT == NULL){
			CHECK_CUDA(cudaMalloc(&d_zero_mean_UT, m*n*sizeof(float)));
			CHECK_CUDA(cudaMemcpy(d_zero_mean_UT, h_zero_mean_UT, m*n*sizeof(float), cudaMemcpyHostToDevice));
		}
		CHECK_CUBLAS(cublasCreate(&cublashandle));
		CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_T, CUBLAS_OP_N, r, n, m, &alpha, d_Ph, m, d_zero_mean_UT, m, &beta, d_Uh, r));
		CHECK_CUDA(cudaFree(d_Ph)); d_Ph = NULL;
		CHECK_CUDA(cudaFree(d_zero_mean_UT)); d_zero_mean_UT = NULL;

		float *d_Pg;
		CHECK_CUDA(cudaMalloc(&d_Pg, n*r*sizeof(float)));
		get_Pg(d_Pg, n, r, h_cov_U, h_W, m, h_zero_mean_U, cublashandle);

		// Ug = Pg^T * zero_mean_U  (r x m projected user features).
		float *d_Ug;
		CHECK_CUDA(cudaMalloc(&d_Ug, r*m*sizeof(float)));
		if (d_zero_mean_U == NULL){
			CHECK_CUDA(cudaMalloc(&d_zero_mean_U, n*m*sizeof(float)));
			CHECK_CUDA(cudaMemcpy(d_zero_mean_U, h_zero_mean_U, n*m*sizeof(float), cudaMemcpyHostToDevice));
		}
		CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_T, CUBLAS_OP_N, r, m, n, &alpha, d_Pg, n, d_zero_mean_U, n, &beta, d_Ug, r));
		CHECK_CUBLAS(cublasDestroy(cublashandle));
		CHECK_CUDA(cudaFree(d_zero_mean_U)); d_zero_mean_U = NULL;
		CHECK_CUDA(cudaFree(d_Pg)); d_Pg = NULL;


		// Random orthogonal-ish initial rotations Rh, Rg.
		unsigned long long seed = clock() % 1000;
		float mean_of_uniform = 0.0f;
		float standard_deviation = 1.0f;

		curandGenerator_t gen;
		CHECK_CURAND(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
		CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(gen, seed));
		float *d_Rh;
		CHECK_CUDA(cudaMalloc(&d_Rh, r*r*sizeof(float)));
		CHECK_CURAND(curandGenerateNormal(gen, d_Rh, r*r, mean_of_uniform, standard_deviation));
		float *d_Rg;
		CHECK_CUDA(cudaMalloc(&d_Rg, r*r*sizeof(float)));
		CHECK_CURAND(curandGenerateNormal(gen, d_Rg, r*r, mean_of_uniform, standard_deviation));
		CHECK_CURAND(curandDestroyGenerator(gen));

		// Initial codes: H = sign(Rh * Uh), G = sign(Rg * Ug).
		float *d_H;
		CHECK_CUDA(cudaMalloc(&d_H, r*n*sizeof(float)));
		CHECK_CUBLAS(cublasCreate(&cublashandle));
		CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_N, r, n, r, &alpha, d_Rh, r, d_Uh, r, &beta, d_H, r));
		float *d_G;
		CHECK_CUDA(cudaMalloc(&d_G, r*m*sizeof(float)));
		CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_N, r, m, r, &alpha, d_Rg, r, d_Ug, r, &beta, d_G, r));

		CHECK_CUBLAS(cublasDestroy(cublashandle));

		make_sign_B(d_H, r, n);
		make_sign_B(d_G, r, m);

		float *d_M;
		CHECK_CUDA(cudaMalloc(&d_M, n*m*sizeof(float)));

		// NOTE(review): this local handle shadows the outer `cublashandle`
		// (the outer one was destroyed just above, so the shadowing is benign).
		cublasHandle_t cublashandle;
		CHECK_CUBLAS(cublasCreate(&cublashandle));
		clock_t start_of_iter, end_of_iter;
		dim3 szGrid, szBlock;
		szBlock.z = szGrid.z = 1;
		for (int iter = 0; iter < 20; iter++){
			printf("iter %d...", iter);
			start_of_iter = clock();
			cudaEvent_t start, stop;
			float elapsedTime = 0.0;

			cudaEventCreate(&start);
			cudaEventCreate(&stop);
			cudaEventRecord(start, 0);
			// update sigma
			CHECK_CUBLAS(cublasSgemm(cublashandle, CUBLAS_OP_T, CUBLAS_OP_N, n, m, r, &alpha, d_H, r, d_G, r, &beta, d_M, n));
			if (n >= 2 * m){
				szBlock.x = 16;
				szBlock.y = 64;
			}
			// Fix: the original tested `m >= 2 * m`, which is always false for
			// m > 0, leaving this branch dead; the intended test is m >= 2*n.
			else if (m >= 2 * n){
				szBlock.x = 64;
				szBlock.y = 16;
			}
			else{
				szBlock.x = 32;
				szBlock.y = 32;
			}
			szGrid.x = (m - 1 + szBlock.x) / szBlock.x;
			szGrid.y = (n - 1 + szBlock.y) / szBlock.y;
			get_M_kernel << <szGrid, szBlock >> >(d_M, n, m, r);
			float sv1;
			if (d_U == NULL){
				CHECK_CUDA(cudaMalloc(&d_U, n*m*sizeof(float)));
				CHECK_CUDA(cudaMemcpy(d_U, U, n*m*sizeof(float), cudaMemcpyHostToDevice));
			}
			CHECK_CUBLAS(cublasSdot(cublashandle, n*m, d_M, 1, d_U, 1, &sv1));
			CHECK_CUDA(cudaFree(d_U)); d_U = NULL;
			float sv2;
			CHECK_CUBLAS(cublasSnrm2(cublashandle, n*m, d_M, 1, &sv2));
			float sv = sv1 / powf(sv2, 2);
			printf("sigma done...");

			//update d_H
			update_d_H(d_H, d_Rh, d_Uh, lambda, sv, r, n, m, d_G, UT, cublashandle);

			printf("d_H done...");

			//update d_G
			update_d_H(d_G, d_Rg, d_Ug, lambda, sv, r, m, n, d_H, U, cublashandle);

			printf("d_G done...");

			//update d_Rh

			update_d_Rh(d_Rh, d_H, d_Uh, r, n, cublashandle);

			printf("d_Rh done...");

			//update d_Rg

			update_d_Rh(d_Rg, d_G, d_Ug, r, m, cublashandle);

			printf("d_Rg done...");

			cudaDeviceSynchronize();
			CHECK_CUDA(cudaGetLastError());
			end_of_iter = clock();

			cudaEventRecord(stop, 0);
			cudaEventSynchronize(stop);

			cudaEventElapsedTime(&elapsedTime, start, stop);

			cudaEventDestroy(start);
			cudaEventDestroy(stop);
			// Fix: the original format string had two %.2f specifiers for three
			// arguments, feeding the int `iter` through %.2f (undefined
			// behavior). Note: elapsedTime from cudaEventElapsedTime is in
			// milliseconds, the CPU figure in seconds.
			printf("iter %d done! cost %.2f(CPU) %.2f(GPU)\n", iter, (double)(end_of_iter - start_of_iter) / CLOCKS_PER_SEC, elapsedTime);
		}
		CHECK_CUDA(cudaFree(d_M)); d_M = NULL;
		CHECK_CUDA(cudaFree(d_Rh)); d_Rh = NULL;
		CHECK_CUDA(cudaFree(d_Rg)); d_Rg = NULL;
		CHECK_CUDA(cudaFree(d_Uh)); d_Uh = NULL;
		CHECK_CUDA(cudaFree(d_Ug)); d_Ug = NULL;
		// Convert the +/-1 codes to {0,1} for output.
		make_zerone_B(d_H, r, n);
		make_zerone_B(d_G, r, m);


		CHECK_CUBLAS(cublasDestroy(cublashandle));

		float *B = (float *)malloc(r*n*sizeof(float));
		CHECK_CUDA(cudaMemcpy(B, d_H, r*n*sizeof(float), cudaMemcpyDeviceToHost));
		CHECK_CUDA(cudaFree(d_H)); d_H = NULL;

		ss.str("");
		ss << n << "_" << m << "_" << r << "_" << "B_code.txt";
		write_into_file(B, r, n, ss.str().c_str());
		free(B);

		float *D = (float *)malloc(r*m*sizeof(float));
		CHECK_CUDA(cudaMemcpy(D, d_G, r*m*sizeof(float), cudaMemcpyDeviceToHost));
		CHECK_CUDA(cudaFree(d_G)); d_G = NULL;
		ss.str("");
		ss << n << "_" << m << "_" << r << "_" << "D_code.txt";
		write_into_file(D, r, m, ss.str().c_str());
		free(D);


		printf("write B and D into files done.\n");

		clock_t end_of_round = clock();
		printf("round cost time %.2f\n", (double)(end_of_round - start_of_round) / CLOCKS_PER_SEC);

	}
	clock_t end_of_main = clock();
	printf("total cost time:%.2f\n", (double)(end_of_main - start_of_main) / CLOCKS_PER_SEC);
	CHECK_CUDA(cudaDeviceReset());
	system("pause");
	return EXIT_SUCCESS; 
}