
extern "C" __global__ void Vector_Add(const float *A, const float *B, float *C, int N)
{
	// Element-wise vector sum: C[i] = A[i] + B[i] for i in [0, N).
	// One thread per element; expects a 1D launch covering at least N threads.
	const int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if(idx >= N)
		return; // tail threads beyond the end of the vectors do nothing
	C[idx] = A[idx] + B[idx];
}

extern "C" __global__ void PrecalcDotProds(float *kernel,
	const float *points, int point_num, int dimension_num)
{
	// Fills the lower triangle (y <= x) of the Gram matrix with dot products
	// of point pairs, stored at kernel[x + point_num * y]. The matrix is
	// symmetric; the upper mirror is written later by
	// PrecalcKernel_RBF_others(), so it is not duplicated here.
	// One thread per (x, y) pair; a 2D launch is expected.
	const int x = blockDim.x * blockIdx.x + threadIdx.x;
	const int y = blockDim.y * blockIdx.y + threadIdx.y;

	if(x >= point_num || y > x)
		return;

	const float *row_a = points + x * dimension_num;
	const float *row_b = points + y * dimension_num;

	// plain (fast) dot product; a Kahan-compensated variant was considered
	// here historically but the naive sum is used for speed
	float dot = 0;
	for(int i = 0; i < dimension_num; ++ i)
		dot += row_a[i] * row_b[i];

	kernel[x + point_num * y] = dot;
}

extern "C" __global__ void PrecalcKernel_RBF_others(float *dotprods, int point_num, float twoSigmaSquared)
{
	// In-place conversion of precomputed dot products into RBF kernel values:
	// K(a, b) = exp(-|a - b|^2 / twoSigmaSquared), using the identity
	// |a - b|^2 = <a,a> + <b,b> - 2<a,b>.
	// Strictly below-diagonal threads only (y < x, NOT <=): the diagonal
	// entries are still needed as raw dot products by other threads and are
	// written separately by PrecalcKernel_RBF_diagonal().
	const int x = blockDim.x * blockIdx.x + threadIdx.x;
	const int y = blockDim.y * blockIdx.y + threadIdx.y;

	if(x >= point_num || y >= x)
		return;

	const float dot_xy = dotprods[x + point_num * y];
	const float dot_xx = dotprods[x + point_num * x];
	const float dot_yy = dotprods[y + point_num * y];

	// __expf: fast intrinsic, reduced precision is acceptable here
	const float value = __expf(-(dot_xy * -2 + dot_xx + dot_yy) / twoSigmaSquared);
	dotprods[x + point_num * y] = value; // the RBF kernel is symmetric,
	dotprods[y + point_num * x] = value; // so fill both triangles
}

extern "C" __global__ void PrecalcKernel_RBF_diagonal(float *dotprods, int point_num)
{
	// RBF kernel of a point with itself is always exp(-0 / twoSigmaSquared) == 1.
	const int x = blockDim.x * blockIdx.x + threadIdx.x;
	if(x >= point_num)
		return;
	dotprods[x * (point_num + 1)] = 1; // diagonal element x + point_num * x
}

extern "C" __global__ void Update_WeightVector(float *weights, const float *points,
	int dimension_num, int i1, int i2, float t1, float t2)
{
	// Linear-kernel SMO weight update: w += t1 * x_i1 + t2 * x_i2,
	// one thread per weight component.
	const int d = blockDim.x * blockIdx.x + threadIdx.x;
	if(d >= dimension_num)
		return;
	const float p1 = points[i1 * dimension_num + d];
	const float p2 = points[i2 * dimension_num + d];
	weights[d] += p1 * t1 + p2 * t2;
}

extern "C" __global__ void Update_ErrorCache(float *errors, const float *kernel_func,
	int point_num, int i1, int i2, float t1, float t2, float delta_offset)
{
	// SMO error-cache update after optimizing the pair (i1, i2):
	// E_x += t1 * K(i1, x) + t2 * K(i2, x) - delta_offset for all x,
	// except the two optimized points, whose error is reset to exactly zero.
	const int x = blockDim.x * blockIdx.x + threadIdx.x;
	if(x >= point_num)
		return;

	const float delta = kernel_func[i1 * point_num + x] * t1 +
						kernel_func[i2 * point_num + x] * t2 - delta_offset;
	const bool optimized = (x == i1 || x == i2);
	// select-style write so all in-range threads store unconditionally
	errors[x] = optimized? 0 : errors[x] + delta;
}

extern "C" __global__ void Evaluate_NonlinearKernel(float *output, const float *kernel_func,
	const float *alphas, const int *targets, int point_num, int k)
{
	// Per-point terms of the SVM decision function at offset k:
	// output[i] = kernel_func[k + i] * alpha_i * y_i.
	// The terms are summed afterwards by a separate reduction pass.
	const int i = blockDim.x * blockIdx.x + threadIdx.x;
	if(i >= point_num)
		return;
	output[i] = kernel_func[k + i] * alphas[i] * targets[i];
}

template <unsigned int blockSize>
__device__ inline void Reduce_NonlinearKernel(float *output, const float *kernel_func,
	const float *alphas, const int *targets, int point_num)
{
	// Block-wide sum of kernel_func[i] * alphas[i] * targets[i]; each block
	// writes its partial sum to output[blockIdx.x].
	// Preconditions: blockSize == blockDim.x and blockSize is a power of two;
	// each block covers 2 * blockSize input elements (the first reduction
	// level is folded into the global-memory load).
	__shared__ float sdata[blockSize];

	unsigned int tid = threadIdx.x;
	unsigned int i = blockIdx.x * (blockSize * 2) + tid;

	// perform first level of reduction while reading from global memory
	float f_sum = (i < point_num)? kernel_func[i] * alphas[i] * targets[i] : 0;
	if(i + blockSize < point_num)
		f_sum += kernel_func[i + blockSize] * alphas[i + blockSize] * targets[i + blockSize];
	sdata[tid] = f_sum;
	__syncthreads();

	// Tree reduction in shared memory. The original hand-unrolled the last
	// warp WITHOUT an `if(tid < 32)` guard, so every thread read
	// sdata[tid + 32] (up to sdata[blockSize + 31] — out of bounds) and the
	// unguarded writes raced; it also relied on implicit warp synchrony,
	// which is invalid without volatile/__syncwarp() on Volta and newer.
	// A barrier at every level is correct on all architectures, and the
	// loop fully unrolls anyway since blockSize is a compile-time constant.
	for(unsigned int s = blockSize / 2; s > 0; s >>= 1) {
		if(tid < s)
			sdata[tid] += sdata[tid + s];
		__syncthreads();
	}

	if(tid == 0)
		output[blockIdx.x] = sdata[0];
	// write result for this block to global mem
}

// extern "C" entry points for Reduce_NonlinearKernel<blockSize>: one wrapper
// per supported block size, so the host can pick the instantiation matching
// the blockDim.x it launches with. The 256 variant was missing from this
// family (present in the Evaluate_* and SimpleReduce_* families); it has
// been added for parity.

extern "C" __global__ void Reduce_NonlinearKernel_1(float *output, const float *kernel_func,
	const float *alphas, const int *targets, int point_num)
{
	Reduce_NonlinearKernel<1>(output, kernel_func, alphas, targets, point_num);
}

extern "C" __global__ void Reduce_NonlinearKernel_2(float *output, const float *kernel_func,
	const float *alphas, const int *targets, int point_num)
{
	Reduce_NonlinearKernel<2>(output, kernel_func, alphas, targets, point_num);
}

extern "C" __global__ void Reduce_NonlinearKernel_4(float *output, const float *kernel_func,
	const float *alphas, const int *targets, int point_num)
{
	Reduce_NonlinearKernel<4>(output, kernel_func, alphas, targets, point_num);
}

extern "C" __global__ void Reduce_NonlinearKernel_8(float *output, const float *kernel_func,
	const float *alphas, const int *targets, int point_num)
{
	Reduce_NonlinearKernel<8>(output, kernel_func, alphas, targets, point_num);
}

extern "C" __global__ void Reduce_NonlinearKernel_16(float *output, const float *kernel_func,
	const float *alphas, const int *targets, int point_num)
{
	Reduce_NonlinearKernel<16>(output, kernel_func, alphas, targets, point_num);
}

extern "C" __global__ void Reduce_NonlinearKernel_32(float *output, const float *kernel_func,
	const float *alphas, const int *targets, int point_num)
{
	Reduce_NonlinearKernel<32>(output, kernel_func, alphas, targets, point_num);
}

extern "C" __global__ void Reduce_NonlinearKernel_64(float *output, const float *kernel_func,
	const float *alphas, const int *targets, int point_num)
{
	Reduce_NonlinearKernel<64>(output, kernel_func, alphas, targets, point_num);
}

extern "C" __global__ void Reduce_NonlinearKernel_128(float *output, const float *kernel_func,
	const float *alphas, const int *targets, int point_num)
{
	Reduce_NonlinearKernel<128>(output, kernel_func, alphas, targets, point_num);
}

extern "C" __global__ void Reduce_NonlinearKernel_256(float *output, const float *kernel_func,
	const float *alphas, const int *targets, int point_num)
{
	Reduce_NonlinearKernel<256>(output, kernel_func, alphas, targets, point_num);
}

extern "C" __global__ void Reduce_NonlinearKernel_512(float *output, const float *kernel_func,
	const float *alphas, const int *targets, int point_num)
{
	Reduce_NonlinearKernel<512>(output, kernel_func, alphas, targets, point_num);
}

template <unsigned int blockSize>
__device__ inline void KernelReduce_StaticUnrolling(float *result, const float *kernel, const float *alphas, const int *targets, int vectorLength)
{
	// Grid-stride reduction of kernel[x] * alphas[x] * targets[x] over
	// vectorLength elements; each block writes its partial sum to
	// result[blockIdx.x].
	// Preconditions: blockSize == blockDim.x and blockSize is a power of two.
	__shared__ float accumResult[blockSize];

	unsigned int x = threadIdx.x + blockIdx.x * blockSize * 2;
	unsigned int gridSize = blockSize * 2 * gridDim.x;

	// each thread accumulates a grid-strided slice, two elements per step
	float f_sum = 0;
	while(x < vectorLength) {
		f_sum += kernel[x] * alphas[x] * targets[x];
		if(x + blockSize < vectorLength)
			f_sum += kernel[x + blockSize] * alphas[x + blockSize] * targets[x + blockSize];
		x += gridSize;
	}
	accumResult[threadIdx.x] = f_sum;
	__syncthreads();

	// Tree reduction in shared memory. The original hand-unrolled the last
	// warp without volatile or __syncwarp() (a data race under independent
	// thread scheduling, Volta+) and, for blockSize <= 32, read past the end
	// of accumResult[] (e.g. blockSize == 32: accumResult[tid + 16] reaches
	// index 47 in a 32-element array — and this file instantiates sizes down
	// to 1). A barrier at every level is correct on all architectures, and
	// the loop fully unrolls since blockSize is a compile-time constant.
	for(unsigned int s = blockSize / 2; s > 0; s >>= 1) {
		if(threadIdx.x < s)
			accumResult[threadIdx.x] += accumResult[threadIdx.x + s];
		__syncthreads();
	}

	if(threadIdx.x == 0)
		result[blockIdx.x] = accumResult[0];
	// store result
}


// extern "C" entry points for KernelReduce_StaticUnrolling<blockSize>: one
// wrapper per supported block size (512 down to 1), so the host can pick the
// instantiation matching the blockDim.x it launches with. Each evaluates the
// decision-function partial sums for the kernel-matrix row starting at
// offset k, writing per-block partials at output + resultOffset.
extern "C" __global__ void Evaluate_NonlinearKernel_512(float *output, int resultOffset, const float *kernel_func,
	const float *alphas, const int *targets, int point_num, int k)
{
	KernelReduce_StaticUnrolling<512>(output + resultOffset, kernel_func + k, alphas, targets, point_num);
}

extern "C" __global__ void Evaluate_NonlinearKernel_256(float *output, int resultOffset, const float *kernel_func,
	const float *alphas, const int *targets, int point_num, int k)
{
	KernelReduce_StaticUnrolling<256>(output + resultOffset, kernel_func + k, alphas, targets, point_num);
}

extern "C" __global__ void Evaluate_NonlinearKernel_128(float *output, int resultOffset, const float *kernel_func,
	const float *alphas, const int *targets, int point_num, int k)
{
	KernelReduce_StaticUnrolling<128>(output + resultOffset, kernel_func + k, alphas, targets, point_num);
}

extern "C" __global__ void Evaluate_NonlinearKernel_64(float *output, int resultOffset, const float *kernel_func,
	const float *alphas, const int *targets, int point_num, int k)
{
	KernelReduce_StaticUnrolling<64>(output + resultOffset, kernel_func + k, alphas, targets, point_num);
}

extern "C" __global__ void Evaluate_NonlinearKernel_32(float *output, int resultOffset, const float *kernel_func,
	const float *alphas, const int *targets, int point_num, int k)
{
	KernelReduce_StaticUnrolling<32>(output + resultOffset, kernel_func + k, alphas, targets, point_num);
}

extern "C" __global__ void Evaluate_NonlinearKernel_16(float *output, int resultOffset, const float *kernel_func,
	const float *alphas, const int *targets, int point_num, int k)
{
	KernelReduce_StaticUnrolling<16>(output + resultOffset, kernel_func + k, alphas, targets, point_num);
}

extern "C" __global__ void Evaluate_NonlinearKernel_8(float *output, int resultOffset, const float *kernel_func,
	const float *alphas, const int *targets, int point_num, int k)
{
	KernelReduce_StaticUnrolling<8>(output + resultOffset, kernel_func + k, alphas, targets, point_num);
}

extern "C" __global__ void Evaluate_NonlinearKernel_4(float *output, int resultOffset, const float *kernel_func,
	const float *alphas, const int *targets, int point_num, int k)
{
	KernelReduce_StaticUnrolling<4>(output + resultOffset, kernel_func + k, alphas, targets, point_num);
}

extern "C" __global__ void Evaluate_NonlinearKernel_2(float *output, int resultOffset, const float *kernel_func,
	const float *alphas, const int *targets, int point_num, int k)
{
	KernelReduce_StaticUnrolling<2>(output + resultOffset, kernel_func + k, alphas, targets, point_num);
}

extern "C" __global__ void Evaluate_NonlinearKernel_1(float *output, int resultOffset, const float *kernel_func,
	const float *alphas, const int *targets, int point_num, int k)
{
	KernelReduce_StaticUnrolling<1>(output + resultOffset, kernel_func + k, alphas, targets, point_num);
}

template <unsigned int blockSize>
__device__ inline void SimpleReduce_StaticUnrolling(float *result, float *vector, int vectorLength)
{
	// Grid-stride sum of vector[0 .. vectorLength); each block writes its
	// partial sum to result[blockIdx.x].
	// Preconditions: blockSize == blockDim.x and blockSize is a power of two.
	__shared__ float accumResult[blockSize];

	unsigned int x = threadIdx.x + blockIdx.x * blockSize * 2;
	unsigned int gridSize = blockSize * 2 * gridDim.x;

	// each thread accumulates a grid-strided slice, two elements per step
	float f_sum = 0;
	while(x < vectorLength) {
		f_sum += vector[x];
		if(x + blockSize < vectorLength)
			f_sum += vector[x + blockSize];
		x += gridSize;
	}
	accumResult[threadIdx.x] = f_sum;
	__syncthreads();

	// Tree reduction in shared memory. The original hand-unrolled the last
	// warp without volatile or __syncwarp() (a data race under independent
	// thread scheduling, Volta+) and, for blockSize <= 32, read past the end
	// of accumResult[] (this file instantiates sizes down to 1). A barrier at
	// every level is correct everywhere, and the loop fully unrolls since
	// blockSize is a compile-time constant.
	for(unsigned int s = blockSize / 2; s > 0; s >>= 1) {
		if(threadIdx.x < s)
			accumResult[threadIdx.x] += accumResult[threadIdx.x + s];
		__syncthreads();
	}

	if(threadIdx.x == 0)
		result[blockIdx.x] = accumResult[0];
	// store result
}

// extern "C" entry points for SimpleReduce_StaticUnrolling<blockSize>: one
// wrapper per supported block size (512 down to 1), so the host can pick the
// instantiation matching the blockDim.x it launches with. Each sums `length`
// floats starting at vector + offset, writing per-block partial sums at
// result + resultOffset.
extern "C" __global__ void SimpleReduce_512(float *result, int resultOffset, float *vector, int offset, int length)
{
	SimpleReduce_StaticUnrolling<512>(result + resultOffset, vector + offset, length);
}
extern "C" __global__ void SimpleReduce_256(float *result, int resultOffset, float *vector, int offset, int length)
{
	SimpleReduce_StaticUnrolling<256>(result + resultOffset, vector + offset, length);
}
extern "C" __global__ void SimpleReduce_128(float *result, int resultOffset, float *vector, int offset, int length)
{
	SimpleReduce_StaticUnrolling<128>(result + resultOffset, vector + offset, length);
}
extern "C" __global__ void SimpleReduce_64(float *result, int resultOffset, float *vector, int offset, int length)
{
	SimpleReduce_StaticUnrolling<64>(result + resultOffset, vector + offset, length);
}
extern "C" __global__ void SimpleReduce_32(float *result, int resultOffset, float *vector, int offset, int length)
{
	SimpleReduce_StaticUnrolling<32>(result + resultOffset, vector + offset, length);
}
extern "C" __global__ void SimpleReduce_16(float *result, int resultOffset, float *vector, int offset, int length)
{
	SimpleReduce_StaticUnrolling<16>(result + resultOffset, vector + offset, length);
}
extern "C" __global__ void SimpleReduce_8(float *result, int resultOffset, float *vector, int offset, int length)
{
	SimpleReduce_StaticUnrolling<8>(result + resultOffset, vector + offset, length);
}
extern "C" __global__ void SimpleReduce_4(float *result, int resultOffset, float *vector, int offset, int length)
{
	SimpleReduce_StaticUnrolling<4>(result + resultOffset, vector + offset, length);
}
extern "C" __global__ void SimpleReduce_2(float *result, int resultOffset, float *vector, int offset, int length)
{
	SimpleReduce_StaticUnrolling<2>(result + resultOffset, vector + offset, length);
}
extern "C" __global__ void SimpleReduce_1(float *result, int resultOffset, float *vector, int offset, int length)
{
	SimpleReduce_StaticUnrolling<1>(result + resultOffset, vector + offset, length);
}