//#include <stdio.h>

// Per-thread kernel: accumulates, in log space, three products of
// mixture-weight ratios for component `row`, one product per weight vector
// (c_w1/c_w2/c_w3). Results land in c_tp1/c_tp2/c_tp3 at the thread's
// global index. To avoid double underflow, whenever a running product falls
// to 1e-300 or below, log(1e-300) is banked into the output and the product
// is rescaled by 1e300; the log of the surviving product is added at the end.
//
// Launch expectation: 1-D grid, NUM_THREADS threads per block (NUM_THREADS
// is passed in rather than read from blockDim.x); threads with global index
// >= n exit immediately.
__global__ void cudaComputeKernel(double* c_tp1, double* c_tp2, double* c_tp3, double* c_w1, double* c_w2, double* c_w3, int row, int* c_bc, int* c_bg,
 double* c_wMatrix, int n, int width, int d, int NUM_THREADS)
{
	const int gid = blockIdx.x * NUM_THREADS + threadIdx.x;
	if (gid >= n)
		return;

	// Zero the per-thread log accumulators before any rescaling corrections.
	c_tp1[gid] = 0;
	c_tp2[gid] = 0;
	c_tp3[gid] = 0;

	double prod1 = 1.0, prod2 = 1.0, prod3 = 1.0;

	// NOTE(review): c_bc, c_bg and c_wMatrix are indexed by threadIdx.x
	// alone, not by the global index — every block therefore reads the same
	// rows of these arrays. Confirm against the host launch code that this
	// is intentional.
	if (c_bc[threadIdx.x * width + row])
	{
		for (int j = 0; j < d; j++)  // walk the d attributes
		{
			const double wm = c_wMatrix[threadIdx.x * d + j];
			double r1, r2, r3;
			if (c_bg[threadIdx.x * d + j] == row) {
				// component `row` is dominant for attribute j
				r1 = c_w1[j] / (c_w1[j] + wm);
				r2 = c_w2[j] / (c_w2[j] + wm);
				r3 = c_w3[j] / (c_w3[j] + wm);
			} else {
				// one of the other active components is dominant
				r1 = wm / (c_w1[j] + wm);
				r2 = wm / (c_w2[j] + wm);
				r3 = wm / (c_w3[j] + wm);
			}

			// Underflow guard: bank log(1e-300) and rescale by 1e300
			// whenever a running product drops to 1e-300 or below.
			const double next1 = prod1 * r1;
			if (next1 > 1e-300) {
				prod1 = next1;
			} else {
				c_tp1[gid] += log(1e-300);
				prod1 = next1 * 1e300;
			}

			const double next2 = prod2 * r2;
			if (next2 > 1e-300) {
				prod2 = next2;
			} else {
				c_tp2[gid] += log(1e-300);
				prod2 = next2 * 1e300;
			}

			const double next3 = prod3 * r3;
			if (next3 > 1e-300) {
				prod3 = next3;
			} else {
				c_tp3[gid] += log(1e-300);
				prod3 = next3 * 1e300;
			}
		}
	}

	// Fold in the log of whatever remains of each product.
	c_tp1[gid] += log(prod1);
	c_tp2[gid] += log(prod2);
	c_tp3[gid] += log(prod3);
}
