#include "head.cuh"
#include "kernels_head.cuh"

/*
 * Lloyd-style k-means over a sparse CSR data matrix, entirely on the device.
 *
 * d_anchors           out: cluster centers, d X num_of_clusters (device memory)
 * num_of_clusters     number of clusters k
 * d_csrNDview         n X d sparse data matrix in CSR form (device pointers)
 * num_of_keans_iters  maximum number of k-means iterations
 *
 * All kernels are launched on the default stream, so they serialize in order;
 * only the cheap, non-blocking cudaGetLastError() is checked per launch, and a
 * single cudaDeviceSynchronize() after the loop surfaces any asynchronous
 * execution error.
 */
void kMeans(float *d_anchors, /* out: d X num_of_clusters */
	const int num_of_clusters,
	const CsrMatOnDevice &d_csrNDview, /* n X d sparse matrix */
	const int num_of_keans_iters/* maximum number of kmeans loop */){
	int n = d_csrNDview.row_di; // number of objects (rows)
	int d = d_csrNDview.col_di; // dimensionality (columns)
	// d_membership[i] is the cluster id to which object i is currently assigned.
	int *d_membership;
	CHECK_CUDA(cudaMalloc(&d_membership, n*sizeof(int)));

	// One thread per cluster: seed the initial centers from the data.
	int numThreadsPerBlock = 128;
	int numBlocksPerGrid = (num_of_clusters + numThreadsPerBlock - 1) / numThreadsPerBlock;
	init_cluster_centers << <numBlocksPerGrid, numThreadsPerBlock >> >(d_anchors, num_of_clusters, d_csrNDview.row_ptr, d_csrNDview.cols_ind, d_csrNDview.vals, n, d);
	CHECK_CUDA(cudaGetLastError());
	CHECK_CUDA(cudaDeviceSynchronize());

	dim3 szGrid, szBlock;
	szBlock.z = szGrid.z = 1;

	// One squared Euclidean distance per (cluster, object) pair.
	// Cast before multiplying: int*int can overflow for large k*n.
	float *d_euclid_squared_dist_2;
	CHECK_CUDA(cudaMalloc(&d_euclid_squared_dist_2, (size_t)num_of_clusters*n*sizeof(float)));

	numThreadsPerBlock = 512;
	numBlocksPerGrid = (n + numThreadsPerBlock - 1) / numThreadsPerBlock;
	// Bug fix: the original `do { ... } while (loop++ < num_of_keans_iters)`
	// ran num_of_keans_iters + 1 iterations; this loop honors the documented
	// maximum (and runs zero iterations for a non-positive maximum).
	for (int loop = 0; loop < num_of_keans_iters; ++loop){
		// 2-D launch: x over objects, y over clusters (512 threads/block).
		szBlock.y = 8; szBlock.x = 64;
		szGrid.x = (n + szBlock.x - 1) / szBlock.x;
		szGrid.y = (num_of_clusters + szBlock.y - 1) / szBlock.y;
		get_euclid_square_dist_kernel << <szGrid, szBlock >> >(d_euclid_squared_dist_2, num_of_clusters, n, d_anchors, d_csrNDview.vals, d_csrNDview.row_ptr, d_csrNDview.cols_ind, d);
		CHECK_CUDA(cudaGetLastError()); // non-blocking launch-config check

		// Assignment step: one thread per object picks its closest center.
		find_nearest_cluster << <numBlocksPerGrid, numThreadsPerBlock >> >(d_euclid_squared_dist_2, d_membership, n, num_of_clusters);
		CHECK_CUDA(cudaGetLastError());

		// Update step, 2-D launch: x over clusters, y over dimensions.
		szBlock.x = 8; szBlock.y = 64;
		szGrid.y = (d + szBlock.y - 1) / szBlock.y;
		szGrid.x = (num_of_clusters + szBlock.x - 1) / szBlock.x;
		update_cluster << < szGrid, szBlock >> > (d_anchors, d_csrNDview.row_ptr, d_csrNDview.cols_ind, d_csrNDview.vals, n, num_of_clusters, d, d_membership);
		CHECK_CUDA(cudaGetLastError());
	}

	// Single blocking sync: surfaces any asynchronous error from the loop.
	CHECK_CUDA(cudaDeviceSynchronize());
	CHECK_CUDA(cudaGetLastError());
	CHECK_CUDA(cudaFree(d_euclid_squared_dist_2));
	CHECK_CUDA(cudaFree(d_membership));
}