#include "head.cuh"

#include <vector>
#include <iomanip>
#include <fstream>

// Load a ratings file of "row col value" triples into two dense column-major
// matrices: UIview (m users x n items, leading dimension m) and its transpose
// IUview (n items x m users, leading dimension n).
// Exits the process when the file cannot be opened.
void read_UIview_and_IUview_data(float *UIview, const int m, const int n, float *IUview,
	const char *filename){
	FILE *f;
	f = fopen(filename, "r");
	if (f == NULL){
		printf("read %s failed!\n", filename);
		system("pause");
		exit(1);
	}
	// Start from all-zero matrices; entries absent from the file stay 0.
	for (int i = 0; i < m*n; i++) UIview[i] = 0.0f;
	for (int i = 0; i < m*n; i++) IUview[i] = 0.0f;

	int row = 0;
	int col = 0;
	float val = 0.0f;


	fseek(f, 0, SEEK_SET);
	// "%d" matches the signed int targets (the old "%u" was a signed/unsigned
	// mismatch); requiring all 3 conversions also stops cleanly on a malformed
	// line instead of spinning forever before EOF.
	while (3 == fscanf(f, "%d%d%f\n", &row, &col, &val)){
		UIview[row + col*m] = val;	// column-major, leading dimension m
		IUview[col + n*row] = val;	// transposed layout, leading dimension n
	}

	fclose(f);
}

// Dump the strictly-positive entries of a column-major m x n matrix as
// "row col value" text lines (one observation per line).
void write_UIview_data(const float *UIview_test,
	const int m,
	const int n,
	const char * filename){
	FILE *f;
	f = fopen(filename, "w");
	if (f == NULL){
		// The handle was previously used unchecked (NULL deref in fprintf).
		printf("write %s failed!\n", filename);
		return;
	}
	for (int i = 0; i < m; i++){
		for (int j = 0; j < n; j++){
			if (UIview_test[i + j*m]>0)
				fprintf(f, "%d %d %.1f\n", i, j, UIview_test[i + j*m]);
		}
	}
	fclose(f);
}

// Read whitespace-separated floats from `filename` into Z, d values per
// logical row; Z is column-major with leading dimension n (element
// (row, col) lives at Z[row + n*col]). Exits the process on open failure.
void read_Z_from_file(float *Z, const int d, const int n, const char *filename){
	std::ifstream fin;
	fin.open(filename);

	if (!fin.is_open()){
		printf("read %s failed!\n",filename);
		system("pause");
		exit(1);
	}
	else{
		int row = 0;
		int col = 0;
		float v = 0.0f;
		// Loop on extraction success instead of eof(): the old eof() loop ran
		// one extra iteration after the last value and wrote through an
		// out-of-range index once `row` passed the final line.
		while (fin >> v){
			if (col == d){ col = 0; row++; }
			Z[row + n*col] = v;
			col++;
		}
	}
	fin.close();
}

// Write Z (n logical rows x d cols, column-major with leading dimension n)
// as n tab-separated text lines, one logical row per line.
void write_Z_into_file(float *Z, int d, int n, const char *filename){
	// Plain fopen + NULL check: portable and consistent with the other file
	// helpers in this file (fopen_s is MSVC-only and its result was unchecked).
	FILE *f = fopen(filename, "w");
	if (f == NULL){
		printf("write %s failed!\n", filename);
		return;
	}
	std::stringstream ss;
	for (int i = 0; i < n; i++){
		ss.str("");
		int j = 0;
		for (; j < d - 1; j++){
			ss << Z[i + j*n] << "\t";
		}
		ss << Z[i + j*n] << "\n";	// last column gets the newline, not a tab
		fprintf(f, "%s", ss.str().c_str());
	}
	fclose(f);
}
// Serialize the tuning parameters into an underscore-joined string of the
// form "<alpha>_<beta>_<num_of_anchors>_<num_of_nearest_anchors>_<rho1>_<rho2>".
void params_to_string(Params &params, std::string &s){
	std::stringstream out;
	out << params.alpha << "_"
		<< params.beta << "_"
		<< params.num_of_anchors << "_"
		<< params.num_of_nearest_anchors << "_"
		<< params.rho1 << "_"
		<< params.rho2;
	s = out.str();
}

// Load the rating file named by the global UIviewFileName and randomly split
// each user's ratings into a training part and a held-out test part.
// Outputs (all dense, column-major, zero-initialized here):
//   UIview      : nUsers x nItems training matrix (leading dim nUsers)
//   IUview      : nItems x nUsers transpose of the training matrix
//   UIview_test : nUsers x nItems test matrix
// split_rate is the fraction of each user's ratings kept for training.
// Returns false when the file cannot be opened, true otherwise.
// NOTE(review): the parser assumes lines are "row col value" triples grouped
// by consecutive row (user) id, ids starting at 0 -- confirm the data format.
bool read_data_2_matrix(float *UIview, float *UIview_test, 
	const float split_rate, float *IUview){	
	FILE *fh1;
	fh1 = fopen(UIviewFileName, "r");
	if (fh1 == NULL){
		return false;
	}
	// Zero everything so unrated (user, item) pairs stay 0.
	for (int i = 0; i < nUsers*nItems; ++i){
		UIview[i] = 0.0; 
		IUview[i] = 0.0; 
	}
	for (int i = 0; i < nUsers*nItems; ++i){ 
		UIview_test[i] = 0.0; 
	}
	int row = 0;
	int col = 0;
	float val = 0.0;


	fseek(fh1, 0, SEEK_SET);
	// tv accumulates (item, rating) pairs for the user currently being read;
	// start_index is that user's id.
	std::vector<std::pair<int, float>> tv;
	int start_index = 0;
	while (EOF != fscanf(fh1, "%u%u%f\n", &row, &col, &val)){
		if (start_index == row){
			tv.push_back(std::make_pair(col, val));
		}
		else{
			// A new user id was reached: shuffle the finished user's ratings
			// and split them; the first split_rate fraction goes to training.
			std::random_shuffle(tv.begin(), tv.end());
			int train_part = (int)(tv.size()*split_rate);
			int i = 0;
			for (; i < train_part; i++){
				UIview[IDX2C(start_index, tv[i].first, nUsers)] = tv[i].second;
				IUview[IDX2C(tv[i].first, start_index, nItems)] = tv[i].second;
			}
			for (; i < tv.size(); i++){
				UIview_test[IDX2C(start_index, tv[i].first, nUsers)] = tv[i].second;
			}
			start_index = row;
			tv.clear();
			tv.push_back(std::make_pair(col, val));
		}

	}
	// Flush the final user's accumulated ratings (the loop above only splits
	// a user's block when the next user's first line is read).
	std::random_shuffle(tv.begin(), tv.end());
	int train_part = (int)(tv.size()*split_rate);
	int i = 0;
	for (; i < train_part; i++){
		UIview[IDX2C(row, tv[i].first, nUsers)] = tv[i].second;
		IUview[IDX2C(tv[i].first, row, nItems)] = tv[i].second;
	}
	for (; i < tv.size(); i++){
		UIview_test[IDX2C(row, tv[i].first, nUsers)] = tv[i].second;
	}
	tv.erase(tv.begin(), tv.end());
	fclose(fh1);

	return true;
}

// Run k-means on the device-resident data to obtain anchor points
// (cluster centers).
//   devData       : device pointer to the n points of dimension d
//   devCluster    : device output receiving the num_of_anchors centers
//   num_of_kmeans : iteration count passed through to kMeans
// NOTE(review): kMeans is declared elsewhere; the original comment claims
// both buffers are laid out n x d -- confirm against its declaration.
// Always returns true; failures surface through CHECK_CUDA.
bool get_anchors_by_Kmeans(
	float *devData, float *devCluster,
	int d, int n,
	int num_of_kmeans, int num_of_anchors)
{
	// devData and devCluster should be nXd
	// num_of_anchors: num of cluster centers
	kMeans(devData, d, n, num_of_anchors, num_of_kmeans, devCluster);

	// Synchronize so any asynchronous kernel error is reported here.
	CHECK_CUDA(cudaDeviceSynchronize());
	CHECK_CUDA(cudaGetLastError());
	return true;
}

// Kernel: Distance[col*ndata + row] = squared Euclidean distance between
// data point `row` and anchor `col`. data (ndata x nDim) and anchors
// (nanchors x nDim) are column-major. Each thread covers up to
// rowPerThread x colPerThread (row, col) pairs, striding by the whole grid
// in each dimension.
__global__ static void get_square_distance_kernel(
	const float *data, const float *anchors, float *Distance,
	const int ndata, const int nanchors, const int nDim,
	const int rowPerThread, const int colPerThread){
	const int rowStride = gridDim.x * blockDim.x;
	const int colStride = gridDim.y * blockDim.y;
	const int row0 = blockIdx.x * blockDim.x + threadIdx.x;
	const int col0 = blockIdx.y * blockDim.y + threadIdx.y;

	for (int i = 0; i < rowPerThread; ++i)
	{
		const int row = row0 + i * rowStride;
		if (row >= ndata)
			break;	// rows only grow with i, so no later i can be in range

		for (int j = 0; j < colPerThread; ++j)
		{
			const int col = col0 + j * colStride;
			if (col >= nanchors)
				break;
			float acc = 0.0f;
			for (int k = 0; k < nDim; k++){
				const float diff = data[k*ndata + row] - anchors[k*nanchors + col];
				acc += diff * diff;
			}
			Distance[col*ndata + row] = acc;
		}
	}
}

// Host wrapper: fills d_Distance (ndata x nanchors, column-major) so that
// Distance[i][j] = squared Euclidean distance between data point i and
// anchor j. All pointers are device memory; layout matches the kernel above.
void get_square_distance(const float *d_data, const float *d_anchors, 
	float *d_Distance, /* out */
	const int ndata,
	const int nanchors,
	const int nDim){
	//Distance[i][j] = square_dist_of(data[i], anchors[j])
	dim3 szGrid, szBlock;
	int rowPerThread, colPerThread;//num of rows/cols processed per thread
	get_kernel_config(ndata, nanchors, szGrid, szBlock, rowPerThread, colPerThread);
	get_square_distance_kernel << <szGrid, szBlock >> >(d_data, d_anchors, d_Distance,
		ndata, nanchors, nDim, rowPerThread, colPerThread);

	// Check the sync result too (it was previously ignored), matching the
	// CHECK_CUDA convention used elsewhere in this file.
	CHECK_CUDA(cudaDeviceSynchronize());
	CHECK_CUDA(cudaGetLastError());
}

// For each of the nObjs objects, find the num_of_nearest_anchors closest
// anchors by squared Euclidean distance and write their anchor indices into
// pos (nObjs x num_of_nearest_anchors, column-major, host memory).
// d_view and d_anchors are device pointers; distances are computed on the
// device then sorted per object on the host.
void get_pos_and_val(const float *d_view, const float * d_anchors,
	int *pos,
	//float *val,
	const int nanchors,
	const int nObjs,
	const int nDim,
	const int num_of_nearest_anchors){

	// No zero-fill needed: the cudaMemcpy below overwrites every element.
	float *Distance = (float*)malloc(nObjs*nanchors*sizeof(float));
	CHECK_PARAM(Distance != NULL, "allocate space for Distance failed.");

	float *d_Distance;
	CHECK_CUDA(cudaMalloc(&d_Distance, nObjs*nanchors*sizeof(float)));
	get_square_distance(d_view, d_anchors, d_Distance, nObjs, nanchors, nDim);
	CHECK_CUDA(cudaMemcpy(Distance, d_Distance, nObjs*nanchors*sizeof(float),
		cudaMemcpyDeviceToHost));
	CHECK_CUDA(cudaFree(d_Distance));

	Ele *tmp;
	tmp = (Ele*)malloc(nanchors*sizeof(Ele));
	CHECK_PARAM(tmp != NULL, "allocate space for tmp failed.");	// was unchecked
	for (int i = 0; i < nObjs; i++){
		// Pair each anchor's distance with its index, sort, keep the closest.
		for (int j = 0; j < nanchors; j++){
			tmp[j].val = Distance[i + j*nObjs];
			tmp[j].pos = j;
		}
		// NOTE(review): relies on Ele's operator< (declared elsewhere)
		// ordering by val ascending -- confirm.
		std::sort(tmp, tmp + nanchors);
		for (int j = 0; j < num_of_nearest_anchors; j++){
			pos[i + j*nObjs] = tmp[j].pos;
		}
	}
	free(tmp);
	free(Distance);
}
// Build the item-side anchor-graph weight matrix iZ (nitems x nanchors,
// column-major) by Local Anchor Embedding: for each item (column of IUview),
// find its num_of_nearest_anchors nearest anchors, solve for the combination
// weights with LAE_process, and scatter the weights into the matching
// columns of iZ (all other entries stay zero).
// IUview / anchors_of_iuview are host copies (nitems x nusers and
// nanchors x nusers respectively) of the device buffers d_IUview /
// d_anchors_of_iuview; both representations are required because the
// distance search runs on the device while the LAE solve runs on the host.
void get_Z_by_LAE(const float *IUview, const float *d_IUview,
	const float* anchors_of_iuview, const float *d_anchors_of_iuview,
	const int nusers,const int nitems,const int nanchors,
	float *iZ,
	const int num_of_nearest_anchors,
	const int num_of_LAE_iters,
	cublasHandle_t cublashandle){

	// pos[i + j*nitems] = index of item i's j-th nearest anchor.
	int *pos;
	pos = (int *)malloc(nitems*num_of_nearest_anchors*sizeof(int));
	for (int i = 0; i < nitems*num_of_nearest_anchors; i++) pos[i] = 0;
	get_pos_and_val(d_IUview, d_anchors_of_iuview, pos, nanchors, nitems, nusers, num_of_nearest_anchors);

	float *val;
	val = (float*)malloc(nitems*num_of_nearest_anchors*sizeof(float));
	CHECK_PARAM(val != NULL, "allocate space for val failed.");
	const float *data;
	data = IUview;
	const float *anchors;
	anchors = anchors_of_iuview;
	int nDim = nusers;
	int nS = num_of_nearest_anchors;
	int nAnchors = nanchors;

	// now we have pos, val, daa, anchors which are all allocated well.
	// data: nitems X nDim float
	// pos: nitems X nS int 
	// val: nitems X nS float
	// anchors: nAnchors X nDim float
	clock_t start = clock();
	float *x = (float*)malloc(nDim*sizeof(float));          // current item vector
	float *U = (float*)malloc(nS*nDim*sizeof(float));       // its nS anchors, nS x nDim
	float *tmp = (float *)malloc(nS*sizeof(float));         // per-anchor squared norms
	float *val_one_line = (float*)malloc(nS*sizeof(float)); // LAE weights for one item
	for (int i = 0; i < nitems; i++){
		// L2-normalize the item vector.
		// NOTE(review): an all-zero column gives norm_x == 0 and x[j]
		// becomes NaN here -- confirm the inputs contain no empty items.
		float norm_x_2 = 0.0f;
		for (int j = 0; j < nDim; j++){
			x[j] = data[i + nitems*j];
			norm_x_2 += x[j] * x[j];
		}
		float norm_x = sqrtf(norm_x_2);
		for (int j = 0; j < nDim; j++){
			x[j] = x[j] / norm_x;
		}
		for (int j = 0; j < nS; j++) tmp[j] = 0.0f;

		// Gather this item's nearest anchors into U and accumulate each
		// anchor's squared norm in tmp.
		int anchor_idx;
		for (int k = 0; k < nS; k++){
			anchor_idx = pos[i + nitems*k];
			for (int j = 0; j < nDim; j++){
				U[k + nS*j] = anchors[anchor_idx + nAnchors*j];
				tmp[k] += anchors[anchor_idx + nAnchors*j] * anchors[anchor_idx + nAnchors*j];
			}
		}
		// L2-normalize each anchor row of U.
		for (int j = 0; j < nS; j++){
			tmp[j] = 1.0 / sqrtf(tmp[j]);
		}
		for (int k = 0; k < nS; k++){
			for (int j = 0; j < nDim; j++){
				U[k + nS*j] = U[k + nS*j] * tmp[k];
			}
		}

		// now we have U, x and num_of_LAE_iters
		// remember that U is nS X nDim not dXs
		LAE_process(x, nDim, U, nS, val_one_line, num_of_LAE_iters,
			cublashandle);

		for (int j = 0; j < nS; j++){
			val[i + nitems*j] = val_one_line[j];
		}
		//printf("Items: %d\n", i);
	}
	clock_t finish = clock();
	printf("lae of Z cost time:%.2f\n", 
		(double)(finish - start) / CLOCKS_PER_SEC);
	// get iZ by pos and val
	// uZ: nitems X nAnchors
	// pos: nitems X nS
	// val: nitems X nS
	// Scatter: row i of iZ gets val[i][j] at column pos[i][j], zero elsewhere.
	for (int i = 0; i < nitems; i++){
		for (int j = 0; j < nanchors; j++)
			iZ[i + nitems*j] = 0.0;
		for (int j = 0; j < nS; j++){
			iZ[i + nitems*(pos[i + j*nitems])] = val[i + j*nitems];
		}
	}

	free(pos);
	free(val);
	free(tmp);
	free(x);
	free(U);
	free(val_one_line);

}

// Block-wise tree reduction: sums each blockDim.x-sized chunk of `input`
// (nObjs valid elements) and writes block b's partial sum back to input[b]
// in place. Must be launched with blockDim.x * sizeof(float) dynamic shared
// memory; the halving loop assumes blockDim.x is a power of two.
__global__ void block_sum(float *input, const int nObjs)
{
	extern __shared__ float sdata[]; //size: blockDim.x
	int tid = threadIdx.x;
	int i = blockIdx.x * blockDim.x + tid;
	// Load one element per thread; threads past the end contribute 0.
	sdata[tid] = (i < nObjs) ? input[i] : 0;
	__syncthreads();


	// Pairwise halving reduction in shared memory.
	for (int offset = blockDim.x / 2;
		offset > 0;
		offset >>= 1)
	{
		if (tid < offset)
		{
			// Extra bound guard for the ragged tail block (those slots were
			// loaded as 0 anyway, so this is belt-and-braces).
			if (tid + offset + blockDim.x * blockIdx.x < nObjs)
				sdata[threadIdx.x] += sdata[threadIdx.x + offset];
		}

		// Barrier sits outside the divergent branch so every thread reaches it.
		__syncthreads();
	}

	// Thread 0 publishes the block's partial sum.
	if (threadIdx.x == 0)
	{
		input[blockIdx.x] = sdata[0];
	}
}

// Element-wise kernel: tmp[row][col] = Z[row][col] (op) lambda[col], where
// (op) is selected by OPS (M_TIMES_OPS / M_MINUS_OPS / M_ADD_OPS /
// M_DIVIDE_OPS). tmp and Z are nObjs x nDim column-major; lambda holds one
// value per column.
__global__ void get_tmp_kernel(float *tmp, const float *Z, /* both are nObjs X nDim*/
	const float *lambda, /* nDim */
	const int nObjs,
	const int nDim,
	const int OPS){
	const int row = threadIdx.y + blockDim.y * blockIdx.y;
	const int col = threadIdx.x + blockDim.x * blockIdx.x;
	if (row >= nObjs || col >= nDim)
		return;

	const int idx = row + col*nObjs;
	const float z = Z[idx];
	const float l = lambda[col];
	if (OPS == M_TIMES_OPS)
		tmp[idx] = z * l;
	else if (OPS == M_MINUS_OPS)
		tmp[idx] = z - l;
	else if (OPS == M_ADD_OPS)
		tmp[idx] = z + l;
	else if (OPS == M_DIVIDE_OPS)
		tmp[idx] = z / l;
}

// Compute the normalized similarity S = Z * Lambda * Z^T on the device,
// where Lambda is diagonal with Lambda[i][i] = 1 / asum(Z[:,i]) (a zero
// column keeps a 0 diagonal entry). Z is nObjs x nDim column-major host
// memory; d_S is a preallocated nObjs x nObjs device buffer.
void get_S_from_Z(float *d_S,/* out nObjs X nObjs*/
	const float *Z, const int nObjs, const int nDim,
	cublasHandle_t cublashandle){
	float *d_Z;
	CHECK_CUDA(cudaMalloc(&d_Z, nObjs*nDim*sizeof(float)));
	CHECK_CUDA(cudaMemcpy(d_Z, Z, nObjs*nDim*sizeof(float), cudaMemcpyHostToDevice));
	
	// Build the diagonal scaling matrix Lambda on the host from per-column
	// absolute sums of Z.
	float *lambda = (float *)malloc(nDim*nDim*sizeof(float));
	CHECK_PARAM(lambda != NULL, "allocate space for lambda failed.");	// was unchecked
	for (int i = 0; i < nDim*nDim; i++) lambda[i] = 0.0f;
	for (int i = 0; i < nDim; i++){
		float colsum;
		CHECK_CUDA_STATUS(cublasSasum(cublashandle, nObjs, d_Z + i*nObjs, 1, &colsum));
		// float literal avoids a silent double-precision round trip
		if (colsum > 0) lambda[i+nDim*i] = 1.0f / colsum;
		else lambda[i+nDim*i] = colsum;	// colsum == 0 here (asum is non-negative)
	}

	float *d_lambda;
	CHECK_CUDA(cudaMalloc(&d_lambda, nDim*nDim*sizeof(float)));
	CHECK_CUDA(cudaMemcpy(d_lambda, lambda, nDim*nDim*sizeof(float), cudaMemcpyHostToDevice));
	free(lambda);

	// d_tmp = Z * Lambda
	float *d_tmp;
	CHECK_CUDA(cudaMalloc(&d_tmp, nObjs*nDim*sizeof(float)));
	float alpha = 1.0f;
	float beta = 0.0f;
	CHECK_CUDA_STATUS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_N,
		nObjs, nDim, nDim,
		&alpha,
		d_Z, nObjs,
		d_lambda, nDim,
		&beta,
		d_tmp, nObjs));
	CHECK_CUDA(cudaFree(d_lambda));


	// d_S = d_tmp * Z^T
	CHECK_CUDA_STATUS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_T,
		nObjs, nObjs, nDim,
		&alpha,
		d_tmp, nObjs,
		d_Z, nObjs,
		&beta,
		d_S, nObjs));

	CHECK_CUDA(cudaFree(d_tmp));
	CHECK_CUDA(cudaFree(d_Z));
	// Check the sync result too (it was previously ignored).
	CHECK_CUDA(cudaDeviceSynchronize());
	CHECK_CUDA(cudaGetLastError());
}

// Kernel: replace every element of the d1 x d2 column-major matrix with its
// sign (+1 for values >= 0, -1 otherwise).
__global__ void sign_kernel(float *d_matrix, const int d1, const int d2){
	const int row = threadIdx.y + blockDim.y * blockIdx.y;
	const int col = threadIdx.x + blockDim.x * blockIdx.x;
	if (row >= d1 || col >= d2)
		return;
	const int idx = row + col*d1;
	d_matrix[idx] = (d_matrix[idx] >= 0) ? 1.0f : -1.0f;
}

// Binarize a d1 x d2 device matrix in place to {-1, +1} via sign_kernel.
// One block spans all d1 rows, so d1 must fit within a single 1024-thread block.
void make_sign(float *d_Matrix, const int d1, const int d2){

	// Guard the launch geometry: with d1 > 1024, 1024/d1 == 0 and the launch
	// would fail with an invalid configuration (the old code did not check).
	CHECK_PARAM(d1 >= 1 && d1 <= 1024, "make_sign: d1 must be in [1, 1024].");
	dim3 szBlock(1024/d1, d1, 1); //total num of threads must less than 1024
	dim3 szGrid((d2 + szBlock.x - 1) / szBlock.x, 1, 1);
	sign_kernel << <szGrid, szBlock >> >(d_Matrix, d1, d2);
	// Check the sync result too (it was previously ignored).
	CHECK_CUDA(cudaDeviceSynchronize());
	CHECK_CUDA(cudaGetLastError());
}

// Kernel (launched as <<<1, b>>>): d_mean[i] = asum(column i of d_WT) / n,
// where d_WT is n x b column-major. Since cublasSasum sums absolute values,
// this equals the column mean only when the column is non-negative.
// NOTE(review): this uses the device-side cuBLAS API (cublasCreate /
// cublasSasum called from kernel code), which requires the device cuBLAS
// library and was removed from newer CUDA toolkits; creating one handle per
// thread is also very costly. Consider replacing with a plain reduction.
__global__ void get_mean(const float *d_WT, /* n X b */
	const int b, const int n, float *d_mean){
	int i = threadIdx.x;
	cublasHandle_t cublashandle;
	cublasCreate(&cublashandle);
	cublasSasum(cublashandle, n, d_WT + i*n, 1, &d_mean[i]);
	d_mean[i] /= n;
	cublasDestroy(cublashandle);
}


// Eigendecomposition of the symmetric matrix in d_A (order A_width, leading
// dimension A_hight) with cusolverDnSsyevd: on return d_A holds the
// eigenvectors (as columns) and d_W the eigenvalues.
// Throws std::runtime_error if the solver reports a nonzero info.
void solve_eig(float *d_A, /*in as A;
						  out as eigenvectors*/
	float *d_W, int A_hight, int A_width){
	cusolverDnHandle_t handle = NULL;
	int*devInfo = NULL;
	float *d_work = NULL;
	int lwork = 0;
	int info_gpu = 0;
	CHECK_CUSOLVER(cusolverDnCreate(&handle));
	CHECK_CUDA(cudaMalloc(&devInfo,sizeof(int)));
	cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR;
	cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER;
	CHECK_CUSOLVER(cusolverDnSsyevd_bufferSize(handle, jobz, uplo, 
		A_width, d_A, A_hight, d_W, &lwork));
	// lwork counts float elements for the single-precision variant; the old
	// sizeof(double) over-allocated the workspace by 2x.
	CHECK_CUDA(cudaMalloc(&d_work, sizeof(float)*lwork));

	CHECK_CUSOLVER(cusolverDnSsyevd(handle, jobz, uplo,
		A_width, d_A, A_hight, d_W, d_work, lwork, devInfo));
	CHECK_CUDA(cudaDeviceSynchronize());	// sync result was previously ignored
	CHECK_CUDA(cudaMemcpy(&info_gpu, devInfo, sizeof(int), cudaMemcpyDeviceToHost));//must be zero
	// Release everything before throwing so a solver failure does not leak
	// device memory or the cusolver handle.
	CHECK_CUDA(cudaFree(devInfo));
	CHECK_CUDA(cudaFree(d_work));
	if (handle) cusolverDnDestroy(handle);
	if (info_gpu!=0)
		throw std::runtime_error("solve eig error.");
}

// Kernel: subtract the precomputed scalar v_mean from each of the m entries
// of d_v, i.e. center the vector around zero.
__global__ void set_v_mean(float *d_v, const float v_mean, const int m){
	const int idx = threadIdx.x + blockDim.x*blockIdx.x;
	if (idx >= m)
		return;
	d_v[idx] -= v_mean;
}

// Extend a column basis: copies the n existing columns of d_Q (m x n,
// column-major device memory) into d_new_Q (m x k, k > n) and fills the
// remaining k - n columns with random vectors that are mean-centered and
// then made orthogonal to all previous columns by Gram-Schmidt projection.
// NOTE(review): the projection uses raw dot products without dividing by
// ||U_j||^2, which assumes the existing columns are unit-norm, and the new
// columns are never normalized themselves -- confirm callers only rely on
// orthogonality, not orthonormality.
void my_MGS(const float *d_Q, /* in: m X n */
	const int m, // is relatively big
	const int n,
	float *d_new_Q, /* out: m X k */
	const int k, /* k>n */
	cublasHandle_t cublashandle){
	CHECK_CUDA(cudaMemcpy(d_new_Q, d_Q, m*n*sizeof(float), cudaMemcpyDeviceToDevice));

	// Fixed seed keeps the random extension reproducible across runs.
	curandGenerator_t gen;
	CHECK_CURAND(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
	CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(gen, 123456));


	float *d_v;
	CHECK_CUDA(cudaMalloc(&d_v, m*sizeof(float)));//because curandGenerateNormal need a multiple of 2
	float *d_U_j;
	CHECK_CUDA(cudaMalloc(&d_U_j,m*sizeof(float)));
	for (int i = n; i < k; i++){// not i <= k

		// Draw a random column and center it (uniform samples are positive,
		// so asum here equals the plain sum).
		CHECK_CURAND(curandGenerateUniform(gen, d_v, m));
		float v_mean;
		CHECK_CUDA_STATUS(cublasSasum(cublashandle, m, d_v, 1, &v_mean));
		v_mean /= m;
		set_v_mean << <(m + 1024 - 1) / 1024, 1024 >> >(d_v, v_mean, m);
		//TODO: v = v - mean(v) 

		// Gram-Schmidt: remove v's component along every column already in
		// d_new_Q (v -= <U_j, v> * U_j).
		for (int j = 0; j < i; j++){
			float tmp;
			CHECK_CUDA(cudaMemcpy(d_U_j, d_new_Q + j*m, m*sizeof(float), cudaMemcpyDeviceToDevice));
			CHECK_CUDA_STATUS(cublasSdot(cublashandle, m, d_U_j, 1, d_v, 1, &tmp));
			float alpha = -1 * tmp;
			CHECK_CUDA_STATUS(cublasSaxpy(cublashandle, m, &alpha, d_U_j, 1, d_v, 1));
		}
		// Append the new orthogonalized column. (Original pinyin comment
		// warned this "exceeds h_Q's range" -- verify d_new_Q holds m*k floats.)
		CHECK_CUDA(cudaMemcpy(d_new_Q + i*m, d_v, m*sizeof(float), cudaMemcpyDeviceToDevice));
	}
	CHECK_CURAND(curandDestroyGenerator(gen));
	//CHECK_CUDA(cudaMemcpy(d_new_Q, d_new_Q, m*k*sizeof(float), cudaMemcpyDeviceToDevice));

	CHECK_CUDA(cudaFree(d_U_j));
	CHECK_CUDA(cudaFree(d_v));
}

// Kernel: scale column `col` of the b x b column-major matrix d_tmp by
// 1/sqrt(d_eigvalues[col]) (whitening by eigenvalue).
__global__ void set_d_tmp(float *d_tmp, const float *d_eigvalues, const int b){
	const int row = threadIdx.y + blockDim.y * blockIdx.y;
	const int col = threadIdx.x + blockDim.x * blockIdx.x;
	if (row >= b || col >= b)
		return;
	d_tmp[row + col*b] *= 1.0f / sqrtf(d_eigvalues[col]);
}

// Recompute the real-valued rotation target from the current code matrix.
// d_W (b x n, device) is the input code; d_out (b x n, device) receives
// sqrt(n) * V * Lambda^{-1/2} * (centered W^T projected back), where
// (V, Lambda) is the eigendecomposition of (JW)^T(JW) and JW is W^T with
// per-column means removed. When some eigenvalues are numerically zero, the
// missing directions are replaced by random orthogonal columns via my_MGS.
// Requires b <= 1024 (block geometry and the <<<1, b>>> get_mean launch).
void update_svd(const float *d_W, /*d_W is b X n*/
	const int b, const int n,
	float *d_out, /*b X n*/
	cublasHandle_t cublashandle){
	dim3 szBlock(1024/b, b, 1);
	dim3 szGrid((n + szBlock.x - 1) / szBlock.x, 1, 1);
	float *d_mean;
	CHECK_CUDA(cudaMalloc(&d_mean, b*sizeof(float)));
	float *d_WT;/*transpose of d_W*/
	CHECK_CUDA(cudaMalloc(&d_WT, b*n*sizeof(float)));
	float alpha = 1.0f;
	float beta = 0.0f;
	// d_WT = W^T (out-of-place transpose via geam with beta = 0).
	CHECK_CUDA_STATUS(cublasSgeam(cublashandle, CUBLAS_OP_T, CUBLAS_OP_T,
		n, b,
		&alpha,
		d_W, b,
		&beta,
		d_W, b,
		d_WT, n));
	// Per-column means of W^T (see get_mean's device-side cuBLAS caveat).
	get_mean << <1, b >> >(d_WT, b, n, d_mean);
	CHECK_CUDA(cudaDeviceSynchronize());	// sync result was previously ignored
	CHECK_CUDA(cudaGetLastError());

	// d_JW = W^T - mean (centering), n x b.
	dim3 szofBlock(1, 512, 1);
	dim3 szofGrid(b, (n + szofBlock.y - 1) / szofBlock.y, 1);
	float *d_JW;
	CHECK_CUDA(cudaMalloc(&d_JW,n*b*sizeof(float)));
	get_tmp_kernel << <szofGrid, szofBlock >> >(d_JW, d_WT, d_mean, n, b, M_MINUS_OPS);
	CHECK_CUDA(cudaFree(d_mean));
	CHECK_CUDA(cudaFree(d_WT));


	// d_tmp = (JW)^T * JW (b x b); solve_eig then overwrites it with the
	// eigenvectors and fills d_eigenvalues.
	float *d_tmp;
	CHECK_CUDA(cudaMalloc(&d_tmp, b*b*sizeof(float)));
	CHECK_CUDA_STATUS(cublasSgemm(cublashandle,CUBLAS_OP_T,CUBLAS_OP_N,b,b,n,&alpha,d_JW,n,d_JW,n,&beta,d_tmp,b));
	float *d_eigenvalues;
	CHECK_CUDA(cudaMalloc(&d_eigenvalues,b*sizeof(float)));
	solve_eig(d_tmp, d_eigenvalues, b, b);

	// Flag (numerically) zero eigenvalues; they would blow up 1/sqrt(ev).
	float *eigenvalues = (float*)malloc(b*sizeof(float));
	CHECK_CUDA(cudaMemcpy(eigenvalues, d_eigenvalues, b*sizeof(float), cudaMemcpyDeviceToHost));
	int *zeroIdx = (int *)malloc(b*sizeof(int));
	int sum_of_zeroidx = 0;
	for (int i = 0; i < b; i++){
		if (eigenvalues[i] <= 1e-10){
			sum_of_zeroidx += 1;
			zeroIdx[i] = 1;
		}
		else{
			zeroIdx[i] = 0;
		}
	}
	if (sum_of_zeroidx == 0){
		// Full-rank case: whiten the eigenvectors on the device and combine.
		float *d_P;
		CHECK_CUDA(cudaMalloc(&d_P,b*b*sizeof(float)));
		CHECK_CUDA(cudaMemcpy(d_P,d_tmp,b*b*sizeof(float),cudaMemcpyDeviceToDevice));
		szofBlock.x = 8;
		szofBlock.y = 8;
		szofGrid.x = (b + szofBlock.x - 1) / szofBlock.x;
		szofGrid.y = (b + szofBlock.y - 1) / szofBlock.y;
		// d_tmp := eigenvectors with each column scaled by 1/sqrt(eigenvalue)
		set_d_tmp << <szofGrid, szofBlock >> >(d_tmp, d_eigenvalues, b);
		CHECK_CUDA(cudaFree(d_eigenvalues));
		float *d_tmp2;
		CHECK_CUDA(cudaMalloc(&d_tmp2, b*n*sizeof(float)));
		CHECK_CUDA_STATUS(cublasSgemm(cublashandle, CUBLAS_OP_T, CUBLAS_OP_T, b, n, b, &alpha, d_P, b, d_JW, n, &beta, d_tmp2, b));
		alpha = sqrtf(n);
		//d_out = sqrt(n)*d_tmp*d_tmp2
		
		CHECK_CUDA_STATUS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_N, b, n, b, &alpha, d_tmp, b, d_tmp2, b, &beta, d_out, b));
		CHECK_CUDA(cudaFree(d_tmp2));
		CHECK_CUDA(cudaFree(d_P));
	}
	else{
		// Rank-deficient case: the eigenvalues are already on the host and
		// the device copy is unused below -- freeing it here fixes a leak
		// (the original only freed d_eigenvalues in the full-rank branch).
		CHECK_CUDA(cudaFree(d_eigenvalues));
		// Keep only eigenvectors with nonzero eigenvalues, whitened on the host.
		float *h_tmp = (float*)malloc(b*b*sizeof(float));
		CHECK_CUDA(cudaMemcpy(h_tmp,d_tmp,b*b*sizeof(float),cudaMemcpyDeviceToHost));
		float *nonZeroEigenvectors = (float*)malloc(b*(b-sum_of_zeroidx)*sizeof(float));
		int j = 0;
		for (int i = 0; i < b; i++){
			if (zeroIdx[i] != 1){
				for (int k = 0; k < b; k++){
					nonZeroEigenvectors[k + b*j] = h_tmp[k + b*i] / sqrtf(eigenvalues[i]);
				}
				j++;
			}
		}
		free(h_tmp);
		float *d_P;
		CHECK_CUDA(cudaMalloc(&d_P,b*(b-sum_of_zeroidx)*sizeof(float)));
		CHECK_CUDA(cudaMemcpy(d_P,nonZeroEigenvectors,b*(b-sum_of_zeroidx)*sizeof(float),cudaMemcpyHostToDevice));
		free(nonZeroEigenvectors);
		float *d_Q;
		CHECK_CUDA(cudaMalloc(&d_Q,n*(b-sum_of_zeroidx)*sizeof(float)));
		CHECK_CUDA_STATUS(cublasSgemm(cublashandle,CUBLAS_OP_N,CUBLAS_OP_N,n,b-sum_of_zeroidx,b,&alpha,d_JW,n,d_P,b,&beta,d_Q,n));
		CHECK_CUDA(cudaFree(d_P));
		// Pad d_Q up to b mutually orthogonal columns.
		float *d_new_Q;
		CHECK_CUDA(cudaMalloc(&d_new_Q,n*b*sizeof(float)));
		my_MGS(d_Q, n, b - sum_of_zeroidx, d_new_Q, b, cublashandle);
		CHECK_CUDA(cudaFree(d_Q));

		alpha = sqrtf(n);
		CHECK_CUDA_STATUS(cublasSgemm(cublashandle, CUBLAS_OP_N, CUBLAS_OP_T, b, n, b, &alpha, d_tmp, b, d_new_Q, n, &beta, d_out, b));
		CHECK_CUDA(cudaFree(d_new_Q));
	}
	free(eigenvalues);
	free(zeroIdx);
	CHECK_CUDA(cudaFree(d_JW));
	CHECK_CUDA(cudaFree(d_tmp));

}

// Return true when the two d x n device matrices are equal up to an
// absolute-sum tolerance of 1e-5: forms C = A - B with geam and tests the
// asum of C against the tolerance window.
bool does_matrix1_equals_matrix2(const float *d_A, 
	const float *d_B, 
	const int d, const int n,
	cublasHandle_t cublashandle){
	const float one = 1.0f;
	const float minus_one = -1.0f;

	float *d_diff;
	CHECK_CUDA(cudaMalloc(&d_diff, d*n*sizeof(float)));
	// d_diff = (1)*A + (-1)*B
	CHECK_CUDA_STATUS(cublasSgeam(cublashandle, CUBLAS_OP_N, CUBLAS_OP_N,
		d, n, &one, d_A, d, &minus_one, d_B, d, d_diff, d));
	float abs_sum;
	CHECK_CUDA_STATUS(cublasSasum(cublashandle, d*n, d_diff, 1, &abs_sum));
	CHECK_CUDA(cudaFree(d_diff));

	return abs_sum <= 1e-5 && abs_sum >= -1e-5;
}
// Kernel: mark which entries of column i of d_ST (n rows, column-major) are
// "nonzero" (> 1e-5); d_idx_of_nonzeros[tid] becomes 1.0f or 0.0f.
__global__ void get_nonzeros_kernel(const float *d_ST,
	const int n, const int i, 
	float *d_idx_of_nonzeros/*out nX1*/){
	const int tid = threadIdx.x + blockIdx.x*blockDim.x;
	if (tid >= n)
		return;
	d_idx_of_nonzeros[tid] = (d_ST[tid + n*i] > 1e-5) ? 1.0f : 0.0f;
}

// NOTE(review): apparently dead/experimental code -- no call site is visible
// in this file. The scalars d_ss and d_temp are passed BY VALUE, so every
// update to them below only mutates local copies; the lasting effects are
// the writes to d_B and d_no_change_count. The logic mirrors one step of
// the per-bit sign update done serially inside update_B_kernel_new.
// Confirm whether it is still used before relying on it.
__global__ void dosomething_on_device(float d_ss, const float *d_MM, int k, int r, float *d_B, int current_idx,
	const float *d_Ms, const float *d_x,
	float d_temp, const float *d_S_u, int nObjs,
	int *d_no_change_count){
	// Remove the diagonal (self) term and the target terms from d_ss.
	d_ss -= d_MM[k + k*r] * d_B[k + current_idx*r];
	d_ss -= d_Ms[k] + d_x[k];
	d_temp -= d_B[current_idx*r + k] * d_S_u[current_idx + current_idx*nObjs];
	d_ss += d_temp;
	// Set the bit to the sign opposite the gradient; count unchanged bits.
	if (d_ss > 0){
		if (d_B[k + current_idx*r] == -1){
			d_no_change_count[0]++;
		}
		else{
			d_B[k + current_idx*r] = -1;
		}
	}
	else if (d_ss < 0){
		if (d_B[k + current_idx*r] == 1){
			d_no_change_count[0]++;
		}
		else{
			d_B[k + current_idx*r] = 1;
		}
	}
	else{
		d_no_change_count[0]++;
	}
}

// NOTE(review): device-side twin of dosomething_on_device, also with no
// visible call site in this file. d_ss and d_temp are passed by value
// (local copies); d_no_change_count is an int reference and d_B is written
// in place. Same per-bit sign-update logic as update_B_kernel_new's serial
// loop. Confirm whether it is still used before relying on it.
__device__ void dosomething(float d_ss, const float *d_MM, int k, int r, float *d_B, int current_idx,
	const float *d_Ms, const float *d_x,
	float d_temp, const float *d_S_u, int nObjs,
	int &d_no_change_count){
	// Remove the diagonal (self) term and the target terms from d_ss.
	d_ss -= d_MM[k + k*r] * d_B[k + current_idx*r];
	d_ss -= d_Ms[k] + d_x[k];
	d_temp -= d_B[current_idx*r + k] * d_S_u[current_idx + current_idx*nObjs];
	d_ss += d_temp;
	// Set the bit to the sign opposite the gradient; count unchanged bits.
	if (d_ss > 0){
		if (d_B[k + current_idx*r] == -1){
			d_no_change_count++;
		}
		else{
			d_B[k + current_idx*r] = -1;
		}
	}
	else if (d_ss < 0){
		if (d_B[k + current_idx*r] == 1){
			d_no_change_count++;
		}
		else{
			d_B[k + current_idx*r] = 1;
		}
	}
	else{
		d_no_change_count++;
	}
}


// Kernel: clamp negative entries of the r x m column-major code matrix to 0,
// turning a {-1, +1} code into a {0, 1} code.
__global__ void make_zeroone_kernel(float *d_matrix,
	const int r,
	const int m){
	const int row = threadIdx.y + blockDim.y * blockIdx.y;
	const int col = threadIdx.x + blockDim.x * blockIdx.x;
	if (row >= r || col >= m)
		return;
	const int idx = row + col*r;
	if (d_matrix[idx] < 0)
		d_matrix[idx] = 0.0f;
}

// Convert a {-1, +1} code matrix d_B (r x m, device memory) into {0, 1}
// in place. One block spans all r rows, so r must fit in a 1024-thread block.
void make_zeroone_code(float *d_B, const int r, const int m){
	// Guard the launch geometry: with r > 1024, 1024/r == 0 and the launch
	// would fail with an invalid configuration (the old code did not check).
	CHECK_PARAM(r >= 1 && r <= 1024, "make_zeroone_code: r must be in [1, 1024].");
	dim3 szBlock(1024/r, r, 1);// because the limitation of device
							// szBlock.x*szBlock.y*szBlock.z <= 1024
	dim3 szGrid(((m - 1 + szBlock.x) / szBlock.x), 1, 1);
	make_zeroone_kernel << <szGrid, szBlock >> >(d_B, r, m);
	// Check the sync result too (it was previously ignored).
	CHECK_CUDA(cudaDeviceSynchronize());
	CHECK_CUDA(cudaGetLastError());
}

// Kernel over an r x r thread range, specialized to object i:
//   d_MM[row + col*r] = sum over observed j of D[row][j] * D[col][j]
// (the Gram matrix of d_D restricted to items where d_ST[j][i] > 0), and in
// the col == 0 lane additionally
//   d_Ms[row] = sum over observed j of
//               D[row][j] * (2*(s - minS)/(maxS - minS) - 1) * r,
// i.e. the rating s rescaled into [-1, 1] and multiplied by the code length.
// d_ST is n x m column-major; d_D is r x n column-major.
__global__ void get_d_MM_Ms(const float *d_ST, const int n, const int m,
	const int i,
	const float *d_D,
	float *d_MM, const int r,
	float *d_Ms, const float minS, const float maxS){
	int row = threadIdx.y + blockDim.y * blockIdx.y;
	int col = threadIdx.x + blockDim.x * blockIdx.x;
	if (row < r&&col < r){
		float tmp = 0.0f;
		float tmp_ms = 0.0f;
		for (int j = 0; j < n; j++){
			if (d_ST[j + n*i] > 0){	// only observed (rated) entries contribute
				if (col == 0){
					tmp_ms += d_D[row + r*j] * (2 * ((d_ST[j + n*i] - minS) / (maxS - minS)) - 1)*r;
				}
				tmp += d_D[row + r*j] * d_D[col + r*j];
			}
		}
		d_MM[row + col*r] = tmp;
		if (col == 0)
			d_Ms[row] = tmp_ms;
	}
}


// One thread block per object (column bid of d_B): discrete coordinate
// descent on that object's r-bit code.
// Phase 1 (all threads): accumulate the r x r Gram matrix
//   MM = sum over observed j of D[:,j] * D[:,j]^T
// into this block's slab of d_MM_buffer (each thread owns distinct cells).
// Phase 2 (one thread per bit): Ms[tr] = sum over observed j of
//   D[tr][j] * (rating rescaled into [-1,1]) * r, kept in shared memory.
// Phase 3 (thread 0, serial): flip each bit toward the sign that lowers the
// objective, for at most 5 sweeps or until no bit changes.
// Requirements visible from the indexing: blockDim.x == 1024, r <= 128
// (shared for_Ms size) and r must divide 1024 (the `interval` logic).
__global__ void update_B_kernel_new(
	float *d_B, /* r X m */
	const float *d_B0, /* r X m (snapshot of d_B; the old "n X m" note was wrong -- indexed with r below) */
	const float *d_ST, /* n X m */ 
	const int n, const int m,
	const float *d_S_u, /* m X m */
	const float *d_D, /* r X n */
	const int r,
	const float *d_X, /* r X m */
	const int minS, const int maxS,
	float *d_MM_buffer)
{
	int bid = blockIdx.x;/* [0,m) */
	int tid = threadIdx.x;/* [0,1024) */
	__shared__ float for_Ms[128];// if r is more than 128 bits then modify here
	float *d_Ms = for_Ms;/* r x 1 */
	int a = tid / 32;/* [0,32) */
	int b = tid % 32;/* [0,32) */

	float *d_MM = d_MM_buffer + r*r*bid;
	const float *d_x = d_X + bid*r;

	int szx = (r + 32 - 1) / 32;
	int szy = (r + 32 - 1) / 32;
	int ta, tb;
	// was `int`: the scaled-rating products below are fractional floats and
	// every addition was being truncated to an integer
	float tmp_ms;

	// d_MM_buffer comes straight from cudaMalloc, so clear this block's
	// r x r slab before accumulating into it (it was previously read
	// uninitialized). Each thread zeroes exactly the cells it later adds to,
	// so no extra barrier is needed between the clear and the accumulation.
	for (int x = 0; x < szx; x++){
		for (int y = 0; y < szy; y++){
			ta = a + x * 32;
			tb = b + y * 32;
			if (ta < r && tb < r)
				d_MM[tb + ta*r] = 0.0f;
		}
	}

	// Phase 1: Gram matrix over the observed entries of column bid.
	for (int j = 0; j < n; j++){
		if (d_ST[j + bid*n] > 0){
			for (int x = 0; x < szx; x++){
				for (int y = 0; y < szy; y++){
					ta = a + x * 32;
					tb = b + y * 32;
					// each thread owns a distinct (ta, tb) cell -> no atomics
					if (ta < r && tb < r)
						d_MM[tb + ta*r] += d_D[tb + r*j] * d_D[ta + r*j];
				}
			}
		}
	}
	__syncthreads();

	// Phase 2: one designated thread per code bit fills d_Ms.
	int interval = 1024/r;//r must be power of 2
	if (0 == tid % interval){
		int tr = tid / interval;// [0,r)
		tmp_ms = 0.0f;
		for (int j = 0; j < n; j++){
			// map observed rating into [-1, 1] and scale by the code length r
			if (d_ST[j + bid*n] > 0)
				tmp_ms += d_D[tr + r*j] * (2 * ((d_ST[j + bid*n] - minS) / (maxS - minS)) - 1)*r;
		}
		d_Ms[tr] = tmp_ms;
	}

	__syncthreads();
	// Phase 3: serial coordinate descent on thread 0.
	if (tid == 0){
		bool converge = false;
		int it = 0;
		int _no_change_sum = 0;
		while (!converge){
			_no_change_sum = 0;
			for (int tr = 0; tr < r; tr++){
				float d_ss = 0.0f;
				for (int i = 0; i < r; i++){
					d_ss += d_MM[tr + i*r] * d_B[i + bid*r];
				}
				d_ss -= d_MM[tr + tr*r] * d_B[tr + bid*r];	// drop the diagonal (self) term
				d_ss -= d_Ms[tr] + d_x[tr];

				// Cross-object smoothness term from the snapshot d_B0.
				float d_temp = 0.0f;
				for (int i = 0; i < m; i++){// cost long time
					d_temp += d_B0[tr + i*r] * d_S_u[i + bid*m];
				}
				d_temp -= d_B0[bid*r + tr] * d_S_u[bid + bid*m];
				d_ss += d_temp;

				// Flip the bit against the gradient sign; count unchanged bits.
				if (d_ss > 0 && d_B[tr + bid*r] != -1){
					d_B[tr + bid*r] = -1;
				}else if (d_ss < 0 && d_B[tr + bid*r] != 1){
					d_B[tr + bid*r] = 1;
				}else{
					_no_change_sum++;
				}
			}
			if (it >= 4 || _no_change_sum == r){
				converge = true;
			}
			it++;
		}
	}

}

// Main MVDCF training loop: learns binary codes B (r x nUsers) and
// D (r x nItems) by alternating
//   1) discrete coordinate-descent updates of B given D (update_B_kernel_new),
//   2) the symmetric update of D given B,
//   3) recomputation of the balance targets d_X, d_Y via update_svd,
// until both B and D stop changing (does_matrix1_equals_matrix2) or more
// than 50 iterations elapse. The final codes are mapped to {0, 1} and copied
// back to the host output buffers B and D.
// h_S (nUsers x nItems), h_ST (its transpose), h_S_i (nItems x nItems) and
// h_S_u (nUsers x nUsers) are host similarity matrices.
// NOTE(review): the similarity matrices and d_MM_buffer are cudaMalloc'd,
// copied and freed inside every iteration even though the host data never
// changes -- hoisting them out of the loop would remove most of the per-
// iteration transfer cost.
void MVDCF(
	float *B,/* out1: r X nusers*/
	float *D,/* out2: r X nitems*/
	const Params &params,
	float *h_S,
	float *h_ST,
	float *h_S_i,
	float *h_S_u){

	clock_t start, finish;
	clock_t init;
	clock_t user_finish, item_finish, svd_finish;
	start = clock();
	init = start;

	float *d_S;  /* nUsers X nItems */
	float *d_ST; /* nItems X nUsers */
	float *d_S_i;/* nItems X nItems */
	float *d_S_u;/* nUsers X nUsers */

	cublasHandle_t cublashandle;

	int r = params.r;
	int m = nUsers;
	int n = nItems;
	float minS = params.minS;
	float maxS = params.maxS;

	float alpha;
	float beta;

	unsigned long long seed = 123456;
	float mean_of_uniform = 0.0f;
	float standard_deviation = 1.0f;

	float *d_B, *d_D;
	float *d_B0, *d_D0;
	float *d_X, *d_Y;
	CHECK_CUDA(cudaMalloc(&d_B, r*m*sizeof(float)));
	CHECK_CUDA(cudaMalloc(&d_D, r*n*sizeof(float)));

	// Random {-1,+1} initialization: gaussian samples snapped to signs.
	curandGenerator_t gen;
	CHECK_CURAND(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
	CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(gen, seed));
	CHECK_CURAND(curandGenerateNormal(gen, d_B, r*m, mean_of_uniform, standard_deviation));
	CHECK_CURAND(curandGenerateNormal(gen, d_D, r*n, mean_of_uniform, standard_deviation));
	CHECK_CURAND(curandDestroyGenerator(gen));

	alpha = params.alpha;
	beta = params.beta;
	make_sign(d_B, r, m);
	CHECK_CUDA(cudaMalloc(&d_X, r*m*sizeof(float)));
	
	// Initial balance targets: d_X = alpha * update_svd(B),
	// d_Y = beta * update_svd(D).
	CHECK_CUDA_STATUS(cublasCreate(&cublashandle));
	update_svd(d_B, r, m, d_X, cublashandle);
	CHECK_CUDA_STATUS(cublasSscal(cublashandle, r*m, &alpha, d_X, 1));

	make_sign(d_D, r, n);
	CHECK_CUDA(cudaMalloc(&d_Y,r*n*sizeof(float)));
	update_svd(d_D, r, n, d_Y, cublashandle);
	CHECK_CUDA_STATUS(cublasSscal(cublashandle, r*n, &beta, d_Y, 1));
	CHECK_CUDA_STATUS(cublasDestroy(cublashandle));

	// d_B0/d_D0 hold the previous iteration's codes for the smoothness term
	// and the convergence check.
	CHECK_CUDA(cudaMalloc(&d_B0,r*m*sizeof(float)));
	CHECK_CUDA(cudaMalloc(&d_D0,r*n*sizeof(float)));

	dim3 szGrid, szBlock;
	szGrid.z = szBlock.z = 1;

	float *d_MM_buffer = NULL;

	finish = clock();
	printf("before while loop: %.2f\n",(double)(finish-init)/CLOCKS_PER_SEC);

	bool converge = false;
	int it = 0;
	while (!converge){
		printf("it:%d\n",it);
		start = clock();
		// Snapshot the current codes before updating them.
		CHECK_CUDA(cudaMemcpy(d_B0, d_B, r*m*sizeof(float), cudaMemcpyDeviceToDevice));
		CHECK_CUDA(cudaMemcpy(d_D0, d_D, r*n*sizeof(float), cudaMemcpyDeviceToDevice));

		// --- user side: update B using ST, S_u and target d_X ---
		CHECK_CUDA(cudaMalloc(&d_ST, n*m*sizeof(float)));
		CHECK_CUDA(cudaMemcpy(d_ST, h_ST, n*m*sizeof(float), cudaMemcpyHostToDevice));
		CHECK_CUDA(cudaMalloc(&d_S_u, m*m*sizeof(float)));
		CHECK_CUDA(cudaMemcpy(d_S_u, h_S_u, m*m*sizeof(float), cudaMemcpyHostToDevice));
		CHECK_CUDA(cudaMalloc(&d_MM_buffer, r*r*m*sizeof(float)));

		update_B_kernel_new << <m, 1024 >> >(d_B, d_B0, d_ST, n, m, d_S_u, d_D, r, d_X, minS, maxS, d_MM_buffer);

		cudaDeviceSynchronize();
		CHECK_CUDA(cudaGetLastError());

		CHECK_CUDA(cudaFree(d_MM_buffer));d_MM_buffer=NULL;
		CHECK_CUDA(cudaFree(d_S_u));d_S_u=NULL;
		CHECK_CUDA(cudaFree(d_ST));d_ST=NULL;
		user_finish = clock();
		printf(" user part: %.2f\n",(double)(user_finish-start)/CLOCKS_PER_SEC);
		
		// --- item side: same kernel with roles swapped (S, S_i, d_Y) ---
		CHECK_CUDA(cudaMalloc(&d_S, m*n*sizeof(float)));
		CHECK_CUDA(cudaMemcpy(d_S, h_S, m*n*sizeof(float), cudaMemcpyHostToDevice));
		CHECK_CUDA(cudaMalloc(&d_S_i, n*n*sizeof(float)));
		CHECK_CUDA(cudaMemcpy(d_S_i, h_S_i, n*n*sizeof(float), cudaMemcpyHostToDevice));
		CHECK_CUDA(cudaMalloc(&d_MM_buffer, r*r*n*sizeof(float)));
		update_B_kernel_new << <n, 1024 >> >(d_D, d_D0, d_S, m, n, d_S_i, d_B, r, d_Y, minS, maxS, d_MM_buffer);

		cudaDeviceSynchronize();
		CHECK_CUDA(cudaGetLastError());

		CHECK_CUDA(cudaFree(d_MM_buffer));d_MM_buffer=NULL;
		CHECK_CUDA(cudaFree(d_S)); d_S = NULL;
		CHECK_CUDA(cudaFree(d_S_i)); d_S_i = NULL;

		item_finish = clock();
		printf(" item part:%.2f\n", (double)(item_finish - user_finish) / CLOCKS_PER_SEC);

		// --- refresh the balance targets from the new codes ---
		CHECK_CUDA_STATUS(cublasCreate(&cublashandle));
		update_svd(d_B, r, m, d_X, cublashandle);
		CHECK_CUDA_STATUS(cublasSscal(cublashandle, r*m, &alpha, d_X, 1));
		update_svd(d_D, r, n, d_Y, cublashandle);
		CHECK_CUDA_STATUS(cublasSscal(cublashandle, r*n, &beta, d_Y, 1));
		CHECK_CUDA_STATUS(cublasDestroy(cublashandle));

		svd_finish = clock();
		printf("    update svd part:%.2f one iter:%.2f\n",
			(double)(svd_finish - item_finish) / CLOCKS_PER_SEC,
			(double)(svd_finish - start) / CLOCKS_PER_SEC);
		// Stop after 50+ iterations, or earlier once neither code changed.
		if (it > 50){
			converge = true;
			printf("total iters:%d cost time:%.2f\n", it,
				(double)(svd_finish - init) / CLOCKS_PER_SEC);
		}
		else{
			CHECK_CUDA_STATUS(cublasCreate(&cublashandle));
			bool condition1 = does_matrix1_equals_matrix2(d_B0, d_B, r, m, cublashandle);
			if (condition1){
				bool condition2 = does_matrix1_equals_matrix2(d_D0, d_D, r, n, cublashandle);
				if (condition2){
					converge = true;
					printf("total iters:%d cost time:%.2f\n", it,
						(double)(svd_finish - init) / CLOCKS_PER_SEC);

				}
			}
			CHECK_CUDA_STATUS(cublasDestroy(cublashandle));
		}

		it++;
		//system("pause");
	}// end of while

	CHECK_CUDA(cudaFree(d_X)); d_X = NULL;
	CHECK_CUDA(cudaFree(d_Y)); d_Y = NULL;
	CHECK_CUDA(cudaFree(d_B0)); d_B0 = NULL;
	CHECK_CUDA(cudaFree(d_D0)); d_D0 = NULL;
	if(d_MM_buffer)	CHECK_CUDA(cudaFree(d_MM_buffer)); d_MM_buffer = NULL;

	// Map {-1, +1} codes to {0, 1} and return them to the host.
	make_zeroone_code(d_B, r, m);
	make_zeroone_code(d_D, r, n);

	CHECK_CUDA(cudaMemcpy(B, d_B, r*m*sizeof(float), cudaMemcpyDeviceToHost));
	CHECK_CUDA(cudaMemcpy(D, d_D, r*n*sizeof(float), cudaMemcpyDeviceToHost));

	CHECK_CUDA(cudaFree(d_B)); d_B = NULL;
	CHECK_CUDA(cudaFree(d_D)); d_D = NULL;

}
// Dump the strictly-positive entries of a column-major m x n test matrix as
// "row col value" text lines (same format as write_UIview_data).
void write_test_data(const float *UIview_test,
	const int m,
	const int n,
	const char * filename){
	FILE *f;
	f = fopen(filename, "w");
	if (f == NULL){
		// The handle was previously used unchecked (NULL deref in fprintf).
		printf("write %s failed!\n", filename);
		return;
	}
	for (int i = 0; i < m; i++){
		for (int j = 0; j < n; j++){
			if (UIview_test[i+j*m]>0)
				fprintf(f,"%d %d %.1f\n",i,j, UIview_test[i+j*m]);
		}
	}
	fclose(f);
}
// Write a code matrix B (r x n, column-major) as n text lines, each line
// the r code bits of one column concatenated as integer digits.
void write_into_file(float *B, int r, int n, const char *filename){
	FILE *f;
	f = fopen(filename, "w");
	if (f == NULL){
		// The handle was previously used unchecked (NULL deref in fprintf).
		printf("write %s failed!\n", filename);
		return;
	}
	std::stringstream ss;
	for (int i = 0; i < n; i++){
		ss.str("");
		for (int j = 0; j < r; j++){
			ss << (int)B[j + r*i];	// truncate the float bit to its integer digit
		}
		ss << "\n";
		fprintf(f, "%s", ss.str().c_str());
	}
	fclose(f);
}
