#include "head.cuh"

#include <algorithm>
#include <vector>
#include <iomanip>
#include <fstream>

// Reads sparse "row col value" triples from `filename` into two dense,
// column-major matrices:
//   UIview (m x n): UIview[row + col*m] = value
//   IUview (n x m): IUview[col + row*n] = value (the transpose)
// Both buffers are zero-filled first so absent entries read as 0.
// On open failure the process pauses and exits (legacy behavior).
void read_UIview_and_IUview_data(float *UIview, const int m, const int n, float *IUview,
	const char *filename){
	FILE *f;
	f = fopen(filename, "r");
	if (f == NULL){
		printf("read %s failed!\n", filename);
		system("pause");
		exit(1);
	}
	for (int i = 0; i < m*n; i++) UIview[i] = 0.0f;
	for (int i = 0; i < m*n; i++) IUview[i] = 0.0f;

	int row = 0;
	int col = 0;
	float val = 0.0f;

	fseek(f, 0, SEEK_SET);
	// Fix: test the number of converted fields (3) instead of EOF, and use
	// %d for the int arguments (%u with int* is undefined behavior). A
	// malformed line now stops the loop instead of reusing stale values.
	while (3 == fscanf(f, "%d%d%f\n", &row, &col, &val)){
		UIview[row + col*m] = val;
		IUview[col + n*row] = val;
	}

	fclose(f);
}

// Writes the strictly positive entries of the m x n column-major matrix
// UIview_test to `filename`, one "row col value" line per entry, iterating
// row by row; values are printed with one decimal digit.
void write_UIview_data(const float *UIview_test,
	const int m,
	const int n,
	const char * filename){
	FILE *f;
	f = fopen(filename, "w");
	if (f == NULL){
		// Fix: fopen was unchecked; a bad path previously crashed in fprintf.
		printf("write %s failed!\n", filename);
		return;
	}
	for (int i = 0; i < m; i++){
		for (int j = 0; j < n; j++){
			if (UIview_test[i + j*m]>0)
				fprintf(f, "%d %d %.1f\n", i, j, UIview_test[i + j*m]);
		}
	}
	fclose(f);
}

// Reads whitespace-separated floats from `filename` into the n x d
// column-major matrix Z (Z[row + n*col]), filling each row left to right.
// On open failure the process pauses and exits (legacy behavior).
void read_Z_from_file(float *Z, const int d, const int n, const char *filename){
	std::ifstream fin;
	fin.open(filename);

	if (!fin.is_open()){
		printf("read %s failed!\n", filename);
		system("pause");
		exit(1);
	}
	else{
		int row = 0;
		int col = 0;
		float v = 0.0f;
		// Fix: the loop used to test fin.eof() *before* extracting, so the
		// final failed extraction wrote one spurious element (and could
		// index out of bounds). Testing the extraction itself stops exactly
		// at end of input.
		while (fin >> v){
			if (col == d){ col = 0; row++; }
			Z[row + n*col] = v;
			col++;
		}
	}
	fin.close();
}

// Writes the n x d column-major matrix Z to `filename` as n lines of d
// tab-separated values. Requires d >= 1.
void write_Z_into_file(float *Z, int d, int n, const char *filename){
	// Fix: use portable fopen (fopen_s is MSVC-only) and check the result
	// instead of passing a possibly-NULL handle to fprintf.
	FILE *f = fopen(filename, "w");
	if (f == NULL){
		printf("write %s failed!\n", filename);
		return;
	}
	std::stringstream ss;
	for (int i = 0; i < n; i++){
		ss.str("");
		int j = 0;
		for (; j < d - 1; j++){
			ss << Z[i + j*n] << "\t";
		}
		ss << Z[i + j*n] << "\n";
		fprintf(f, "%s", ss.str().c_str());
	}
	fclose(f);
}

// Serializes the two anchor hyperparameters into s as
// "<num_of_anchors>_<num_of_nearest_anchors>" (e.g. for output filenames).
void params_to_string(Params &params, std::string &s){
	std::ostringstream out;
	out << params.num_of_anchors << "_" << params.num_of_nearest_anchors;
	s = out.str();
}


// Loads the rating and social matrices from the globally-configured files
// UIviewFileName / UUviewFileName.
//   UIview      (nUsers x nItems, column-major): training ratings.
//   UIview_test (nUsers x nItems): held-out ratings.
//   IUview      (nItems x nUsers): transpose of the training ratings.
//   UUview      (nUsers x nUsers): symmetric 0/1 link matrix, unit diagonal.
// split_rate: fraction of each user's ratings assigned to the training part.
// Returns false if either file cannot be opened.
// NOTE(review): assumes the ratings file lists all lines of one user
// contiguously (grouped by user id) — confirm against the data format.
bool read_data_2_matrix(float *UIview, float *UIview_test, float *UUview, 
	const float split_rate, float *IUview){
	FILE *fh1;
	fh1 = fopen(UIviewFileName, "r");
	if (fh1 == NULL){
		return false;
	}
	// Zero-fill all output matrices so absent entries read as 0.
	for (int i = 0; i < nUsers*nItems; ++i){
		UIview[i] = 0.0; 
		IUview[i] = 0.0; 
	}
	for (int i = 0; i < nUsers*nItems; ++i){ 
		UIview_test[i] = 0.0; 
	}
	for (int i = 0; i < nUsers*nUsers; ++i){ 
		UUview[i] = 0.0; 
	}
	int row = 0;
	int col = 0;
	float val = 0.0;


	fseek(fh1, 0, SEEK_SET);
	// tv accumulates one user's (item, rating) pairs; when the user id
	// changes, the previous user's pairs are shuffled and split.
	std::vector<std::pair<int, float>> tv;
	int start_index = 0;
	// NOTE(review): "%u" with int* arguments is a format mismatch (should
	// be "%d"); benign for non-negative ids on common ABIs, but worth fixing.
	while (EOF != fscanf(fh1, "%u%u%f\n", &row, &col, &val)){
		if (start_index == row){
			tv.push_back(std::make_pair(col, val));
		}
		else{
			// New user id: split the accumulated pairs of user start_index.
			// NOTE(review): std::random_shuffle was removed in C++17 —
			// migrate to std::shuffle when the toolchain allows.
			std::random_shuffle(tv.begin(), tv.end());
			int train_part = (int)(tv.size()*split_rate);
			int i = 0;
			// First train_part pairs -> training matrices (both layouts).
			for (; i < train_part; i++){
				UIview[IDX2C(start_index, tv[i].first, nUsers)] = tv[i].second;
				IUview[IDX2C(tv[i].first, start_index, nItems)] = tv[i].second;
			}
			// Remainder -> test matrix.
			for (; i < tv.size(); i++){
				UIview_test[IDX2C(start_index, tv[i].first, nUsers)] = tv[i].second;
			}
			start_index = row;
			tv.clear();
			tv.push_back(std::make_pair(col, val));
		}

	}
	// Flush the final user's pairs (the loop only flushes on an id change).
	std::random_shuffle(tv.begin(), tv.end());
	int train_part = (int)(tv.size()*split_rate);
	int i = 0;
	for (; i < train_part; i++){
		UIview[IDX2C(row, tv[i].first, nUsers)] = tv[i].second;
		IUview[IDX2C(tv[i].first, row, nItems)] = tv[i].second;
	}
	for (; i < tv.size(); i++){
		UIview_test[IDX2C(row, tv[i].first, nUsers)] = tv[i].second;
	}
	tv.erase(tv.begin(), tv.end());
	fclose(fh1);

	FILE *fh2;
	fh2 = fopen(UUviewFileName, "r");
	if (fh2 == NULL){
		return false;
	}
	fseek(fh2, 0, SEEK_SET);
	// UUview must be symmetric and its diagonal elements are all 1s.
	while (2 == fscanf(fh2, "%u%u\n", &row, &col)){
		UUview[IDX2C(row, col, nUsers)] = 1.0;
		UUview[IDX2C(col, row, nUsers)] = 1.0;
	}
	for (int i = 0; i < nUsers; i++){
		UUview[IDX2C(i, i, nUsers)] = 1.0;
	}
	fclose(fh2);

	return true;
}

// Runs the project's k-means routine on device data to obtain the cluster
// centers used as anchors.
// devData: device input points; devCluster: device output centers — per the
// original note both are laid out n x d.
// num_of_kmeans: iteration/restart count passed through to kMeans —
// presumably the number of k-means iterations; verify against kMeans.
// Always returns true; CUDA errors abort inside the CHECK_CUDA macros.
bool get_anchors_by_Kmeans(
	float *devData, float *devCluster,
	int d, int n,
	int num_of_kmeans, int num_of_anchors)
{
	// devData and devCluster should be nXd
	// num_of_anchors: num of cluster centers
	kMeans(devData, d, n, num_of_anchors, num_of_kmeans, devCluster);

	// Block until the kernels launched inside kMeans finish so any
	// asynchronous failure surfaces here rather than at a later call.
	CHECK_CUDA(cudaDeviceSynchronize());
	CHECK_CUDA(cudaGetLastError());
	return true;
}

// Computes all pairwise squared Euclidean distances between `data`
// (ndata points) and `anchors` (nanchors points), both column-major with
// nDim features: Distance[c*ndata + r] = ||data[r] - anchors[c]||^2.
// Each thread handles up to rowPerThread x colPerThread (row, col) pairs,
// striding by the total grid extent in each dimension.
__global__ static void get_square_distance_kernel(
	const float *data, const float *anchors, float *Distance,
	const int ndata, const int nanchors, const int nDim,
	const int rowPerThread, const int colPerThread){
	const int rowStride = gridDim.x * blockDim.x;
	const int colStride = gridDim.y * blockDim.y;
	const int rowBase = blockIdx.x * blockDim.x + threadIdx.x;
	const int colBase = blockIdx.y * blockDim.y + threadIdx.y;
	for (int ri = 0; ri < rowPerThread; ++ri)
	{
		const int row = rowBase + ri * rowStride;
		if (row >= ndata)
			break;
		for (int ci = 0; ci < colPerThread; ++ci)
		{
			const int col = colBase + ci * colStride;
			if (col >= nanchors)
				break;
			float acc = 0.0f;
			for (int k = 0; k < nDim; ++k){
				const float diff = data[k*ndata + row] - anchors[k*nanchors + col];
				acc += diff * diff;
			}
			Distance[col*ndata + row] = acc;
		}
	}
}

// Launches get_square_distance_kernel to fill d_Distance (column-major,
// leading dimension ndata) with all data-to-anchor squared distances:
// d_Distance[i + j*ndata] = ||data[i] - anchors[j]||^2.
// Launch geometry comes from the project helper get_kernel_config.
void get_square_distance(const float *d_data, const float *d_anchors, 
	float *d_Distance, /* out */
	const int ndata,
	const int nanchors,
	const int nDim){
	//Distance[i][j] = square_dist_of(data[i], anchors[j])
	dim3 szGrid, szBlock;
	int rowPerThread, colPerThread;//num of rows/cols processed per thread
	get_kernel_config(ndata, nanchors, szGrid, szBlock, rowPerThread, colPerThread);
	get_square_distance_kernel << <szGrid, szBlock >> >(d_data, d_anchors, d_Distance,
		ndata, nanchors, nDim, rowPerThread, colPerThread);

	// Fix: the synchronize result was unchecked; wrap it in CHECK_CUDA for
	// consistency with the rest of the file so async kernel errors abort.
	CHECK_CUDA(cudaDeviceSynchronize());
	CHECK_CUDA(cudaGetLastError());
}
// Pairs a squared distance with its anchor index so std::sort can order
// candidate anchors by proximity.
struct Ele{
	float val; // squared distance to the anchor
	int pos;   // index of the anchor
	// Order strictly by distance; `pos` never participates in comparisons.
	bool operator < (const Ele& rt) const {
		return val < rt.val;
	}
};

// For each of the nObjs objects, finds the indices of the
// num_of_nearest_anchors closest anchors (squared Euclidean distance,
// computed on the GPU) and writes them, nearest first, into
// pos (nObjs x num_of_nearest_anchors, column-major).
// d_view / d_anchors: device matrices with nDim features each, laid out as
// expected by get_square_distance.
void get_pos_and_val(const float *d_view, const float * d_anchors,
	int *pos,
	//float *val,
	const int nanchors,
	const int nObjs,
	const int nDim,
	const int num_of_nearest_anchors){

	float *Distance = (float*)malloc(nObjs*nanchors*sizeof(float));
	CHECK_PARAM(Distance != NULL, "allocate space for Distance failed.\n");
	// (No zero-fill needed: every element is overwritten by the copy below.)

	float *d_Distance;
	CHECK_CUDA(cudaMalloc(&d_Distance, nObjs*nanchors*sizeof(float)));
	get_square_distance(d_view, d_anchors, d_Distance, nObjs, nanchors, nDim);
	CHECK_CUDA(cudaMemcpy(Distance, d_Distance, nObjs*nanchors*sizeof(float),
		cudaMemcpyDeviceToHost));
	CHECK_CUDA(cudaFree(d_Distance));

	Ele *tmp = (Ele*)malloc(nanchors*sizeof(Ele));
	CHECK_PARAM(tmp != NULL, "allocate space for tmp failed.\n"); // fix: was unchecked
	for (int i = 0; i < nObjs; i++){
		for (int j = 0; j < nanchors; j++){
			tmp[j].val = Distance[i + j*nObjs];
			tmp[j].pos = j;
		}
		// Only the num_of_nearest_anchors smallest entries are consumed, so
		// a partial sort suffices (same sorted prefix as a full std::sort).
		std::partial_sort(tmp, tmp + num_of_nearest_anchors, tmp + nanchors);
		for (int j = 0; j < num_of_nearest_anchors; j++){
			pos[i + j*nObjs] = tmp[j].pos;
		}
	}
	free(tmp);
	free(Distance);
}
// Builds the item-side anchor-embedding matrix iZ (nitems x nanchors,
// column-major): for each item, finds its num_of_nearest_anchors nearest
// anchors in the item-user view, solves a Local Anchor Embedding (LAE)
// problem for the reconstruction weights, and scatters those weights into
// the item's row of iZ (all other entries zero).
// IUview / anchors_of_iuview are host copies; the d_* arguments are device
// copies used only for the GPU distance computation.
void get_iZ_by_LAE(const float *IUview, const float *d_IUview,
	const float* anchors_of_iuview, const float *d_anchors_of_iuview,
	const int nusers,const int nitems,const int nanchors,
	float *iZ,
	const int num_of_nearest_anchors,
	const int num_of_LAE_iters,
	cublasHandle_t cublashandle){

	// pos[i + k*nitems] = index of item i's k-th nearest anchor.
	int *pos = (int *)malloc(nitems*num_of_nearest_anchors*sizeof(int));
	CHECK_PARAM(pos != NULL, "allocate space for pos failed.\n"); // fix: was unchecked
	for (int i = 0; i < nitems*num_of_nearest_anchors; i++) pos[i] = 0;
	get_pos_and_val(d_IUview, d_anchors_of_iuview, pos, nanchors, nitems, nusers, num_of_nearest_anchors);

	// LAE weights, nitems x nS column-major (allocation kept at 2*nS columns
	// to preserve the original buffer size).
	float *val = (float*)malloc(nitems*2*num_of_nearest_anchors*sizeof(float));
	CHECK_PARAM(val != NULL, "allocate space for val failed.");
	const float *data = IUview;               // nitems x nDim
	const float *anchors = anchors_of_iuview; // nAnchors x nDim
	int nDim = nusers;
	int nS = num_of_nearest_anchors;
	int nAnchors = nanchors;

	clock_t start = clock();
	float *x = (float*)malloc(nDim*sizeof(float));
	float *U = (float*)malloc(nS*nDim*sizeof(float));
	float *tmp = (float *)malloc(nS*sizeof(float));
	float *val_one_line = (float*)malloc(nS*sizeof(float));
	CHECK_PARAM(x != NULL && U != NULL && tmp != NULL && val_one_line != NULL,
		"allocate LAE work buffers failed.\n"); // fix: were unchecked
	for (int i = 0; i < nitems; i++){
		// x = L2-normalized row i of the item-user view.
		float norm_x_2 = 0.0f;
		for (int j = 0; j < nDim; j++){
			x[j] = data[i + nitems*j];
			norm_x_2 += x[j] * x[j];
		}
		float norm_x = sqrtf(norm_x_2);
		if (norm_x != 0) // fix: guard all-zero rows against 0/0 NaNs (matches get_uZ_by_LAE)
			for (int j = 0; j < nDim; j++){
				x[j] = x[j] / norm_x;
			}
		for (int j = 0; j < nS; j++) tmp[j] = 0.0f;

		// U (nS x nDim) = the item's nearest anchors; tmp[k] accumulates the
		// squared L2 norm of anchor k for the row normalization below.
		int anchor_idx;
		for (int k = 0; k < nS; k++){
			anchor_idx = pos[i + nitems*k];
			for (int j = 0; j < nDim; j++){
				U[k + nS*j] = anchors[anchor_idx + nAnchors*j];
				tmp[k] += anchors[anchor_idx + nAnchors*j] * anchors[anchor_idx + nAnchors*j];
			}
		}
		for (int j = 0; j < nS; j++){
			if (tmp[j] > 0) // fix: guard zero-norm anchors (matches get_uZ_by_LAE)
				tmp[j] = 1.0f / sqrtf(tmp[j]);
		}
		for (int k = 0; k < nS; k++){
			for (int j = 0; j < nDim; j++){
				U[k + nS*j] = U[k + nS*j] * tmp[k];
			}
		}

		// Solve for the LAE reconstruction weights of x over the rows of U.
		// Remember U is nS X nDim (anchors in rows, not columns).
		LAE_process(x, nDim, U, nS, val_one_line, num_of_LAE_iters,
			cublashandle);

		for (int j = 0; j < nS; j++){
			val[i + nitems*j] = val_one_line[j];
		}
	}
	clock_t finish = clock();
	printf("lae of iZ cost time:%.2f\n", 
		(double)(finish - start) / CLOCKS_PER_SEC);

	// Scatter the per-item weights into the dense iZ (nitems x nanchors).
	for (int i = 0; i < nitems; i++){
		for (int j = 0; j < nanchors; j++)
			iZ[i + nitems*j] = 0.0;
		for (int j = 0; j < nS; j++){
			iZ[i + nitems*(pos[i + j*nitems])] = val[i + j*nitems];
		}
	}

	free(pos);
	free(val);
	free(tmp);
	free(x);
	free(U);
	free(val_one_line);
}
// Builds the user-side anchor-embedding matrix uZ (nusers x 2*nanchors,
// column-major). Each user's feature vector x concatenates its UIview row
// (nitems entries) with its UUview row (nusers entries). Its nearest anchors
// are taken separately from both views (num_of_nearest_anchors each);
// LAE_process solves for reconstruction weights over those stacked anchors,
// which are scattered into uZ: columns [0, nanchors) for UI-view anchors,
// [nanchors, 2*nanchors) for UU-view anchors.
// The d_* arguments are device copies used only for distance computation.
void get_uZ_by_LAE(const float* UIview, const float* UUview,
	const float *d_UIview, const float *d_UUview,
	const float* anchors_of_uiview, const float* anchors_of_uuview,
	const float *d_anchors_of_uiview, const float *d_anchors_of_uuview,
	const int nusers, const int nitems, const int nanchors,
	float* uZ, /* out */
	const int num_of_nearest_anchors,
	const int num_of_LAE_iters,
	cublasHandle_t cublashandle){

	// pos_of_uiview[i + k*nusers] = user i's k-th nearest UI-view anchor.
	int *pos_of_uiview;
	pos_of_uiview = (int*)malloc(nusers*num_of_nearest_anchors*sizeof(int));
	CHECK_PARAM(pos_of_uiview != NULL, "allocate pos_of_uiview space failed.\n");
	for (int i = 0; i < nusers*num_of_nearest_anchors; i++) pos_of_uiview[i] = 0;

	get_pos_and_val(d_UIview, d_anchors_of_uiview,
		pos_of_uiview,
		nanchors, nusers, nitems, num_of_nearest_anchors);

	// pos_of_uuview: same, for the user-user (social) view.
	int *pos_of_uuview;
	pos_of_uuview = (int*)malloc(nusers*num_of_nearest_anchors*sizeof(int));
	CHECK_PARAM(pos_of_uuview != NULL, "allocate pos_of_uuview space failed.\n");
	for (int i = 0; i < nusers*num_of_nearest_anchors; i++) pos_of_uuview[i] = 0;

	get_pos_and_val(d_UUview, d_anchors_of_uuview,
		pos_of_uuview,
		nanchors, nusers, nusers, num_of_nearest_anchors);

	// LAE weights per user over the 2*num_of_nearest_anchors stacked anchors.
	float *val = (float*)malloc(nusers*(2 * num_of_nearest_anchors)*sizeof(float));
	CHECK_PARAM(val != NULL, "in uZ: allocate space for val failed.\n");

	int nDim = nitems + nusers;          // concatenated feature length
	int nS = 2 * num_of_nearest_anchors; // anchors drawn from both views

	clock_t start = clock();
	float *x = (float*)malloc(nDim*sizeof(float));
	float *U = (float*)malloc(nS*nDim*sizeof(float));
	float *tmp = (float *)malloc(nS*sizeof(float));
	float *val_one_line = (float*)malloc(nS*sizeof(float));
	for (int i = 0; i < nusers; i++){
		// x = [UIview row i, UUview row i], L2-normalized; all-zero rows are
		// left untouched (norm_x == 0 guard below).
		float norm_x_2 = 0.0f;
		for (int j = 0; j < nDim; j++){
			if (j < nitems) 
				x[j] = UIview[i + nusers*j];
			else 
				x[j] = UUview[i + nusers*(j - nitems)];
			norm_x_2 += x[j] * x[j];
		}
		float norm_x = sqrtf(norm_x_2);
		if (norm_x!=0)
			for (int j = 0; j < nDim; j++){
				x[j] = x[j] / norm_x;
			}

		for (int j = 0; j < nS; j++) tmp[j] = 0.0f;
		// Assemble U (nS x nDim) block-diagonally: rows [0, nS/2) hold
		// UI-view anchors over the first nitems features, rows [nS/2, nS)
		// hold UU-view anchors over the remaining nusers features; the other
		// block of each row is zero. tmp[k] accumulates row k's squared norm.
		int anchor_idx;
		for (int k = 0; k < nS; k++){
			for (int j = 0; j < nDim; j++){
				if (k < nS / 2 && j < nitems){
					anchor_idx = pos_of_uiview[i + nusers*k];
					U[k + nS*j] = anchors_of_uiview[anchor_idx + nanchors*j];
					tmp[k] += U[k + nS*j] * U[k + nS*j];
				}
				else if (k >= nS / 2 && j >= nitems){
					anchor_idx = pos_of_uuview[i + nusers*(k - nS / 2)];
					U[k + nS*j] = anchors_of_uuview[anchor_idx + nanchors*(j - nitems)];
					tmp[k] += U[k + nS*j] * U[k + nS*j];
				}
				else
					U[k + nS*j] = 0.0;
			}
		}
		// Normalize each anchor row to unit L2 norm (skip all-zero rows).
		for (int j = 0; j < nS; j++){
			if (tmp[j]>0)
				tmp[j] = 1.0 / sqrtf(tmp[j]);
		}
		for (int k = 0; k < nS; k++){
			for (int j = 0; j < nDim; j++){
				U[k + nS*j] = U[k + nS*j] * tmp[k];
			}
		}

		// now we have U, x and num_of_LAE_iters
		// remember that U is nS X nDim
		LAE_process(x, nDim, U, nS, val_one_line, num_of_LAE_iters,
			cublashandle);

		for (int j = 0; j < nS; j++){
			val[i + nusers*j] = val_one_line[j];
		}
		//printf("Users: %d\n",i);
	}

	clock_t finish = clock();
	printf("lae of uZ cost time:%.2f\n",
		(double)(finish - start) / CLOCKS_PER_SEC);

	// Scatter weights: the first nS/2 go to the UI-anchor columns
	// [0, nanchors); the second nS/2 to the UU-anchor columns
	// [nanchors, 2*nanchors).
	for (int i = 0; i < nusers; i++){
		for (int j = 0; j < 2 * nanchors; j++)
			uZ[i + nusers*j] = 0.0;
		for (int j = 0; j < nS; j++){
			if (j<nS/2)
				uZ[i + nusers*(pos_of_uiview[i + j*nusers])] = val[i + j*nusers];
			else
				uZ[i + nusers*(nanchors+pos_of_uuview[i + (j-nS/2)*nusers])] = val[i + j*nusers];
		}
	}


	free(pos_of_uiview);
	free(pos_of_uuview);
	free(val);
	free(tmp);
	free(x);
	free(U);
	free(val_one_line);
	
}

// In-place per-block sum reduction over `input`.
// Each block loads blockDim.x elements into shared memory (out-of-range
// slots padded with 0), tree-reduces them, and thread 0 writes the block's
// partial sum to input[blockIdx.x]. Repeated launches reduce the partials
// down to a single value.
// Requires: dynamic shared memory of blockDim.x * sizeof(float); blockDim.x
// should be a power of two, otherwise the halving loop skips elements.
__global__ void block_sum(float *input, const int nObjs)
{
	extern __shared__ float sdata[]; //size: blockDim.x
	int tid = threadIdx.x;
	int i = blockIdx.x * blockDim.x + tid;
	sdata[tid] = (i < nObjs) ? input[i] : 0;
	__syncthreads();


	// Tree reduction in shared memory. The barrier sits outside the `if`
	// so every thread of the block reaches it on every iteration.
	for (int offset = blockDim.x / 2;
		offset > 0;
		offset >>= 1)
	{
		if (tid < offset)
		{
			// This extra bound check is redundant (tail slots were
			// zero-filled above) but harmless.
			if (tid + offset + blockDim.x * blockIdx.x < nObjs)
				sdata[threadIdx.x] += sdata[threadIdx.x + offset];
		}

		__syncthreads();
	}

	// Thread 0 publishes the block's partial sum (overwrites the input).
	if (threadIdx.x == 0)
	{
		input[blockIdx.x] = sdata[0];
	}
}

// Elementwise column-broadcast operation selected by OPS:
// tmp[r][c] = Z[r][c] (op) lambda[c], where (op) is *, -, + or / for
// M_TIMES_OPS, M_MINUS_OPS, M_ADD_OPS, M_DIVIDE_OPS respectively.
// Both matrices are nObjs x nDim, column-major; lambda has nDim entries.
__global__ void get_tmp_kernel(float *tmp, const float *Z, /* both are nObjs X nDim*/
	const float *lambda, /* nDim */
	const int nObjs,
	const int nDim,
	const int OPS){
	const int r = threadIdx.y + blockDim.y * blockIdx.y;
	const int c = threadIdx.x + blockDim.x * blockIdx.x;
	if (r >= nObjs || c >= nDim)
		return;
	const int idx = r + c*nObjs;
	// Independent ifs kept (not else-if) to mirror the original dispatch.
	if (OPS == M_TIMES_OPS)
		tmp[idx] = Z[idx] * lambda[c];
	if (OPS == M_MINUS_OPS)
		tmp[idx] = Z[idx] - lambda[c];
	if (OPS == M_ADD_OPS)
		tmp[idx] = Z[idx] + lambda[c];
	if (OPS == M_DIVIDE_OPS)
		tmp[idx] = Z[idx] / lambda[c];
}


// Copies r selected eigenpairs of d_M / d_Eigvals into d_V (d x r,
// column-major) and d_D (length r). Launched as <<<1, r>>>: one thread per
// selected eigenpair; requires r <= 1024.
// NOTE(review): the source column is (d - tid - 2), i.e. columns d-2 down
// to d-r-1 — the very last eigenpair is skipped, presumably to drop a
// trivial eigenvector given the solver's ascending eigenvalue order;
// confirm against the eigensolver's output convention.
__global__ void set_V_D(float *d_V, float *d_D, const float *d_M, const float *d_Eigvals, const int r, const int d){
	int tid = threadIdx.x;/* [0,r) */
	for (int i = 0; i < d; i++){
		d_V[i + tid*d] = d_M[i + (d - tid - 2)*d];
	}
	d_D[tid] = d_Eigvals[d - tid - 2];
}

// Computes the d x d normalized Gram matrix used by the eigenproblem:
//   lambda[i] = 1/sqrt(||Z[:,i]||_1)  (0 for an all-zero column),
//   T = Z * diag(lambda),
//   M = 0.5*(T' * T) + 0.5*(T' * T)'   (explicit symmetrization against
//                                       floating-point asymmetry).
// d_Z is n x d column-major; d_M (d x d) and d_lambda (length d) are outputs.
void get_M_from_Z(float *d_M,/*out d X d */
	float *d_lambda,
	const float *d_Z, const int n, const int d,
	cublasHandle_t cublashandle){

	// Column L1 norms are fetched one at a time to the host to build lambda.
	float *lambda = (float *)malloc(d*sizeof(float));
	for (int i = 0; i < d; i++){
		float tmp;
		CHECK_CUDA_STATUS(cublasSasum(cublashandle, n, d_Z + i*n, 1, &tmp));
		if (tmp>0) 
			lambda[i] = sqrtf(1.0 / tmp);
		else
			lambda[i] = tmp; // empty column: leave its scale at 0
	}
	CHECK_CUDA(cudaMemcpy(d_lambda, lambda, d*sizeof(float), cudaMemcpyHostToDevice));
	free(lambda);

	// d_temp = Z * diag(lambda) (right-side diagonal scaling), n x d.
	float *d_temp;
	CHECK_CUDA(cudaMalloc(&d_temp, n*d*sizeof(float)));
	CHECK_CUDA_STATUS(cublasSdgmm(cublashandle, CUBLAS_SIDE_RIGHT, n, d, d_Z, n, d_lambda, 1, d_temp, n));
	// d_temp2 = d_temp' * d_temp, a d x d Gram matrix.
	float *d_temp2;
	CHECK_CUDA(cudaMalloc(&d_temp2, d*d*sizeof(float)));
	float alpha(1.0f);
	float beta(0.0f);
	CHECK_CUDA_STATUS(cublasSgemm(cublashandle, CUBLAS_OP_T, CUBLAS_OP_N, d, d, n, &alpha, d_temp, n, d_temp, n, &beta, d_temp2, d));
	CHECK_CUDA(cudaFree(d_temp));
	// M = 0.5*temp2 + 0.5*temp2' — force exact symmetry.
	alpha = 0.5f;
	beta = 0.5f;
	CHECK_CUDA_STATUS(cublasSgeam(cublashandle, CUBLAS_OP_N, CUBLAS_OP_T, d, d, &alpha, d_temp2, d, &beta, d_temp2, d, d_M, d));
	CHECK_CUDA(cudaFree(d_temp2));

}

// Scales each selected eigenvector by the inverse square root of its
// eigenvalue: d_temp[:, c] = d_V[:, c] / sqrt(d_D[c]).
// Matrices are d x r, column-major; threads map x->row, y->column.
__global__ void get_temp_V_kernel(float *d_temp, const float *d_V, const float *d_D, const int d, const int r){
	const int i = threadIdx.x + blockDim.x*blockIdx.x; // row in [0, d)
	const int j = threadIdx.y + blockDim.y*blockIdx.y; // col in [0, r)
	if (i >= d || j >= r)
		return;
	d_temp[i + j*d] = d_V[i + j*d] / sqrtf(d_D[j]);
}
// Binarizes the r x n column-major code matrix in place:
// strictly positive entries become 1.0f, everything else 0.0f.
__global__ void make_sign_2(float *d_B, const int r, const int n){
	const int i = threadIdx.x + blockDim.x*blockIdx.x; // row in [0, r)
	const int j = threadIdx.y + blockDim.y*blockIdx.y; // col in [0, n)
	if (i >= r || j >= n)
		return;
	const int idx = i + j*r;
	d_B[idx] = (d_B[idx] > 0) ? 1.0f : 0.0f;
}
// Produces r-bit binary codes HB (r x n column-major, entries 0/1) from the
// eigendecomposition results:
//   1. set_V_D selects r eigenpairs of d_M/d_Eigvals into d_V (d x r), d_D.
//   2. get_temp_V_kernel scales each eigenvector by 1/sqrt(eigenvalue).
//   3. Sdgmm weights the rows by d_lambda to form W (d x r).
//   4. B = W' * Z' (r x n) via Sgemm, thresholded at 0 by make_sign_2, then
//      copied back to the host buffer HB.
// NOTE(review): set_V_D runs as <<<1, r>>>, so r must be <= 1024; the
// kernel launches here are not followed by cudaGetLastError checks.
void get_BinaryCode(float *HB, float *d_M, float *d_Eigvals, float *d_lambda, float *d_Z, const int r, const int d, const int n){

	float *d_V, *d_D;
	CHECK_CUDA(cudaMalloc(&d_V, d*r*sizeof(float)));
	CHECK_CUDA(cudaMalloc(&d_D, r*sizeof(float)));

	set_V_D << <1, r >> >(d_V, d_D, d_M, d_Eigvals, r, d);
	float *d_temp;
	CHECK_CUDA(cudaMalloc(&d_temp, d*r*sizeof(float)));
	
	// 2D launch over the d x r matrix (x -> rows, y -> columns).
	dim3 szBlock(128, 8, 1);
	dim3 szGrid((d + szBlock.x - 1) / szBlock.x, (r + szBlock.y - 1) / szBlock.y, 1);
	get_temp_V_kernel << <szGrid, szBlock >> >(d_temp, d_V, d_D, d, r);
	CHECK_CUDA(cudaFree(d_V));
	CHECK_CUDA(cudaFree(d_D));


	// W = diag(d_lambda) * d_temp, d x r.
	float *d_W;
	CHECK_CUDA(cudaMalloc(&d_W, d*r*sizeof(float)));
	cublasHandle_t cublashandle;
	CHECK_CUDA_STATUS(cublasCreate(&cublashandle));
	CHECK_CUDA_STATUS(cublasSdgmm(cublashandle, CUBLAS_SIDE_LEFT, d, r, d_temp, d, d_lambda, 1, d_W, d));

	// B = W' * Z' (r x n): both operands transposed in the gemm call.
	float *d_B;
	CHECK_CUDA(cudaMalloc(&d_B, r*n*sizeof(float)));
	float alpha(1.0f);
	float beta(0.0f);
	CHECK_CUDA_STATUS(cublasSgemm(cublashandle, CUBLAS_OP_T, CUBLAS_OP_T, r, n, d, &alpha, d_W, d, d_Z, n, &beta, d_B, r));
	CHECK_CUDA_STATUS(cublasDestroy(cublashandle));
	CHECK_CUDA(cudaFree(d_W));
	CHECK_CUDA(cudaFree(d_temp));

	// Re-shape the launch for the r x n matrix, then threshold to 0/1 codes.
	szBlock.x = 8;
	szBlock.y = 128;
	szGrid.x = (r + szBlock.x - 1) / szBlock.x;
	szGrid.y = (n + szBlock.y - 1) / szBlock.y;
	make_sign_2 << <szGrid, szBlock >> >(d_B, r, n);
	CHECK_CUDA(cudaMemcpy(HB, d_B, r*n*sizeof(float), cudaMemcpyDeviceToHost));
	CHECK_CUDA(cudaFree(d_B));
}

// Maps every entry of the d1 x d2 column-major matrix to +1/-1 by sign;
// zero maps to +1 (the comparison is >= 0).
__global__ void sign_kernel(float *d_matrix, const int d1, const int d2){
	const int i = threadIdx.y + blockDim.y * blockIdx.y; // row in [0, d1)
	const int j = threadIdx.x + blockDim.x * blockIdx.x; // col in [0, d2)
	if (i >= d1 || j >= d2)
		return;
	const int idx = i + j*d1;
	d_matrix[idx] = (d_matrix[idx] >= 0) ? 1.0f : -1.0f;
}

// Applies sign_kernel in place to the d1 x d2 device matrix: entries >= 0
// become 1.0f, others -1.0f.
// Launch shape: a single block row spans all d1 rows, so d1 must be in
// [1, 1024] (total threads per block limited to 1024).
void make_sign(float *d_Matrix, const int d1, const int d2){

	dim3 szBlock(1024/d1, d1, 1); //total num of threads must less than 1024
	dim3 szGrid((d2 + szBlock.x - 1) / szBlock.x, 1, 1);
	sign_kernel << <szGrid, szBlock >> >(d_Matrix, d1, d2);
	// Fix: the synchronize result was unchecked; wrap in CHECK_CUDA for
	// consistency with the rest of the file.
	CHECK_CUDA(cudaDeviceSynchronize());
	CHECK_CUDA(cudaGetLastError());
}

// Computes per-column statistics of d_WT (n x b, column-major):
// d_mean[i] = (sum of absolute values of column i) / n. Launched with one
// thread per column (<<<1, b>>>). Note Sasum sums |values|, so this equals
// the arithmetic mean only when the column entries are non-negative.
// NOTE(review): this creates a cuBLAS handle *inside device code*, which
// requires the legacy device-side cuBLAS library (cublas_device) that was
// removed in CUDA 10 — this kernel will not build on modern toolkits and
// should be replaced by a plain reduction or host-side cublasSasum calls.
__global__ void get_mean(const float *d_WT, /* n X b */
	const int b, const int n, float *d_mean){
	int i = threadIdx.x;
	cublasHandle_t cublashandle;
	cublasCreate(&cublashandle);
	cublasSasum(cublashandle, n, d_WT + i*n, 1, &d_mean[i]);
	d_mean[i] /= n;
	cublasDestroy(cublashandle);
}

// Symmetric eigendecomposition (cuSOLVER Ssyevd) of the A_width x A_width
// matrix stored in d_A with leading dimension A_hight.
// In:  d_A holds A (lower triangle referenced, CUBLAS_FILL_MODE_LOWER).
// Out: d_A holds the eigenvectors (column-major); d_W the eigenvalues in
//      ascending order.
// Throws std::runtime_error if the solver reports a nonzero info code.
void solve_eig(float *d_A, /*in as A;
						  out as eigenvectors*/
	float *d_W, int A_hight, int A_width){
	cusolverDnHandle_t handle = NULL;
	int*devInfo = NULL;
	float *d_work = NULL;
	int lwork = 0;
	int info_gpu = 0;
	CHECK_CUSOLVER(cusolverDnCreate(&handle));
	CHECK_CUDA(cudaMalloc(&devInfo,sizeof(int)));
	cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR;
	cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER;
	CHECK_CUSOLVER(cusolverDnSsyevd_bufferSize(handle, jobz, uplo, 
		A_width, d_A, A_hight, d_W, &lwork));
	// Fix: lwork counts float elements for the single-precision solver;
	// sizeof(double) over-allocated the workspace by 2x.
	CHECK_CUDA(cudaMalloc(&d_work, sizeof(float)*lwork));

	CHECK_CUSOLVER(cusolverDnSsyevd(handle, jobz, uplo,
		A_width, d_A, A_hight, d_W, d_work, lwork, devInfo));
	// Fix: the synchronize result was unchecked.
	CHECK_CUDA(cudaDeviceSynchronize());
	CHECK_CUDA(cudaMemcpy(&info_gpu, devInfo, sizeof(int), cudaMemcpyDeviceToHost));//must be zero
	// Fix: release resources before throwing so an error no longer leaks
	// the workspace, devInfo buffer and cuSOLVER handle.
	CHECK_CUDA(cudaFree(devInfo));
	CHECK_CUDA(cudaFree(d_work));
	if (handle) cusolverDnDestroy(handle);
	if (info_gpu!=0)
		throw std::runtime_error("solve eig error.");
}


// Scales each column of the b x b column-major matrix d_tmp by the
// reciprocal of the corresponding eigenvalue:
// d_tmp[row + col*b] /= d_eigvalues[col].
__global__ void set_d_tmp(float *d_tmp, const float *d_eigvalues, const int b){
	int row = threadIdx.y + blockDim.y * blockIdx.y;
	int col = threadIdx.x + blockDim.x * blockIdx.x;
	// Fix: bounds were "row <= b && col <= b", letting row == b or col == b
	// read/write one element past each dimension of the b x b matrix.
	if (row < b && col < b){
		d_tmp[row + col*b] *= 1.0f / d_eigvalues[col];
	}
}

// Flags the entries of column i of d_ST (column-major, leading dimension n)
// that exceed a small threshold:
// d_idx_of_nonzeros[t] = 1.0f if d_ST[t + n*i] > 1e-5, else 0.0f.
__global__ void get_nonzeros_kernel(const float *d_ST,
	const int n, const int i, 
	float *d_idx_of_nonzeros/*out nX1*/){
	const int t = threadIdx.x + blockIdx.x*blockDim.x;
	if (t < n){
		d_idx_of_nonzeros[t] = (d_ST[t + n*i] > 1e-5) ? 1.0f : 0.0f;
	}
}


// Clamps negative entries of the r x m column-major matrix to zero in
// place; non-negative entries (and NaNs, for which the comparison is
// false) are left untouched.
__global__ void make_zeroone_kernel(float *d_matrix,
	const int r,
	const int m){
	const int i = threadIdx.y + blockDim.y * blockIdx.y; // row in [0, r)
	const int j = threadIdx.x + blockDim.x * blockIdx.x; // col in [0, m)
	if (i >= r || j >= m)
		return;
	const int idx = i + j*r;
	if (d_matrix[idx] < 0)
		d_matrix[idx] = 0.0f;
}

// Clamps negative entries of the r x m device matrix d_B to zero in place
// via make_zeroone_kernel. One block row spans all r rows, so r must be in
// [1, 1024].
void make_zeroone_code(float *d_B, const int r, const int m){
	dim3 szBlock(1024/r, r, 1);// because the limitation of device
							// szBlock.x*szBlock.y*szBlock.y <= 1024
	dim3 szGrid(((m - 1 + szBlock.x) / szBlock.x), 1, 1); // ceil(m / szBlock.x)
	make_zeroone_kernel << <szGrid, szBlock >> >(d_B, r, m);
	// Fix: the synchronize result was unchecked; wrap in CHECK_CUDA for
	// consistency with the rest of the file.
	CHECK_CUDA(cudaDeviceSynchronize());
	CHECK_CUDA(cudaGetLastError());
}

// Writes the strictly positive entries of the m x n column-major matrix to
// `filename` as "row col value" lines (one decimal digit).
// NOTE(review): duplicate of write_UIview_data — consider delegating to it.
void write_test_data(const float *UIview_test,
	const int m,
	const int n,
	const char * filename){
	FILE *f;
	f = fopen(filename, "w");
	if (f == NULL){
		// Fix: fopen was unchecked; a bad path previously crashed in fprintf.
		printf("write %s failed!\n", filename);
		return;
	}
	for (int i = 0; i < m; i++){
		for (int j = 0; j < n; j++){
			if (UIview_test[i+j*m]>0)
				fprintf(f,"%d %d %.1f\n",i,j, UIview_test[i+j*m]);
		}
	}
	fclose(f);
}
// Writes the r x n column-major binary code matrix B to `filename`, one
// line per object: the r code values cast to int and concatenated with no
// separator (e.g. "0110...").
void write_into_file(float *B, const int r, const int n, const char *filename){
	FILE *f = fopen(filename, "w");
	if (f == NULL){
		// Fix: fopen was unchecked; a bad path previously crashed in fprintf.
		printf("write %s failed!\n", filename);
		return;
	}
	std::stringstream ss;
	for (int i = 0; i < n; i++){
		ss.str("");
		for (int j = 0; j < r; j++){
			ss << (int)B[j + r*i];
		}
		ss << "\n";
		fprintf(f, "%s", ss.str().c_str());
	}
	fclose(f);
}
