#ifndef SVM_KERNELS
#define SVM_KERNELS
/// Project includes
#include "../common/framework.h"
#include "reduce.h"
/// CUDA includes
#include <vector_types.h>
#include <cuda_texture_types.h>
#include <texture_types.h>
#include <texture_fetch_functions.h>

texture<int2, 2, cudaReadModeElementType> ellDataTex;
texture<int, 1, cudaReadModeElementType> ellRowLenTex;

// Common base type for all SVM kernel functors in this file (Linear,
// Polynomial, Gaussian, Sigmoid). It declares no virtual kernel methods —
// the derived classes are used via templates/static dispatch in device
// code — and only provides a virtual destructor so a derived kernel can
// be deleted through a Kernel* on the host.
// NOTE(review): none of the derived methods are virtual, so the vtable is
// never consulted on the device; confirm the base class is needed at all.
class Kernel {
public:
	virtual ~Kernel() {}
};

// Linear kernel: K(x, y) = <x, y>.
// Sparse rows are stored in ELL format and read through the ellRowLenTex /
// ellDataTex textures; each int2 entry packs (column index, value bits),
// with the float value recovered via __int_as_float.
class Linear : public Kernel {
public:
	// <x, x> for the sparse ELL row idxX. The parameterA/B/C arguments are
	// unused here; they exist so all kernel classes share one signature.
	__device__ float selfK(int idxX, float parameterA, float parameterB, float parameterC) {
		float dot = 0.0f;
		int rowLen = tex1Dfetch(ellRowLenTex, idxX);
		for (int j = 0; j < rowLen; ++j) {
			int2 entry = tex2D(ellDataTex, idxX, j);
			float v = __int_as_float(entry.y);
			dot += v * v;
		}
		return dot;
	}

	// <a, a> over a dense strided vector. Assumes at least one element
	// (pointerA < pointerAEnd on entry), matching the do-while form.
	__device__ float selfKernel(float* pointerA, int pitchA, float* pointerAEnd, float parameterA, float parameterB, float parameterC) {
		float dot = 0.0f;
		do {
			float v = *pointerA;
			dot += v * v;
			pointerA += pitchA;
		} while (pointerA < pointerAEnd);
		return dot;
	}

	// <x, Y> between sparse row idxX and the dense vector Y.
	// selfDot/yIndex are unused by the linear kernel (shared signature).
	__device__ float K(int idxX, float* Y, float parameterA, float parameterB, float parameterC, float* selfDot, int yIndex) {
		float dot = 0.0f;
		int rowLen = tex1Dfetch(ellRowLenTex, idxX);
		for (int j = 0; j < rowLen; ++j) {
			int2 entry = tex2D(ellDataTex, idxX, j);
			dot += __int_as_float(entry.y) * Y[entry.x];
		}
		return dot;
	}

	// <a, b> over two dense strided vectors (at least one element assumed).
	__device__ float kernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB, float parameterA, float parameterB, float parameterC) {
		float dot = 0.0f;
		do {
			dot += (*pointerA) * (*pointerB);
			pointerA += pitchA;
			pointerB += pitchB;
		} while (pointerA < pointerAEnd);
		return dot;
	}

	// Block-cooperative <a, b>: each thread accumulates a blockDim.x-strided
	// slice, partial sums land in sharedTemps, and sumReduce leaves the
	// total in sharedTemps[0]. Must be called by all threads in the block.
	__device__ void parallelKernel(unsigned int tid, float* pointerA, float* pointerAEnd, float* pointerB, float* sharedTemps, float parameterA, float parameterB, float parameterC) {
		float partial = 0.0f;
		float* pa = pointerA + tid;
		float* pb = pointerB + tid;
		while (pa < pointerAEnd) {
			partial += (*pa) * (*pb);
			pa += blockDim.x;
			pb += blockDim.x;
		}
		sharedTemps[tid] = partial;
		__syncthreads();

		sumReduce<BLOCKSIZE>(sharedTemps, tid);
	}

	// Two kernel evaluations sharing one sweep over row idxX:
	// phiAB = <x, Y1> and phiAC = <x, Y2>. Y1/Y2 are expected to live in
	// shared memory, so each texture entry of x is fetched exactly once.
	// selfDot/iHigh/iLow are unused by the linear kernel (shared signature).
	__device__ void dualK(int idxX, float *Y1, float *Y2, float parameterA, float parameterB, float parameterC, float &phiAB, float &phiAC, float* selfDot, int iHigh, int iLow) {
		float dotAB = 0.0f;
		float dotAC = 0.0f;
		int rowLen = tex1Dfetch(ellRowLenTex, idxX);
		for (int j = 0; j < rowLen; ++j) {
			int2 entry = tex2D(ellDataTex, idxX, j);
			float v = __int_as_float(entry.y);
			dotAB += v * Y1[entry.x];
			dotAC += v * Y2[entry.x];
		}
		phiAB = dotAB;
		phiAC = dotAC;
	}

	// Dense counterpart of dualK: phi1 = <a, b>, phi2 = <a, c>, reading
	// each element of a exactly once.
	__device__ void dualKernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB, float* pointerC, int pitchC, float parameterA, float parameterB, float parameterC, float& phi1, float& phi2) {
		float dotAB = 0.0f;
		float dotAC = 0.0f;
		do {
			float av = *pointerA;
			dotAB += av * (*pointerB);
			dotAC += av * (*pointerC);
			pointerA += pitchA;
			pointerB += pitchB;
			pointerC += pitchC;
		} while (pointerA < pointerAEnd);
		phi1 = dotAB;
		phi2 = dotAC;
	}
};

// Polynomial kernel: K(x, y) = (a*<x, y> + r)^d.
// Sparse rows are read from the ellRowLenTex / ellDataTex textures (each
// int2 packs column index and float value bits).
class Polynomial : public Kernel {
private:
	// Shared power step for the dense/pointer-based paths, extracted from
	// four identical copies. Returns accumulant raised by repeated
	// multiplication: for d >= 2 this is accumulant^floor(d); for d < 2 it
	// returns accumulant unchanged. Exact for integer degrees d >= 1.
	// NOTE(review): the texture-based paths (selfK/K/dualK) use __powf
	// instead, so results differ for non-integer d — confirm d is always
	// an integer degree.
	__device__ float powByMultiply(float accumulant, float d) {
		float result = accumulant;
		for (float degree = 2.0f; degree <= d; degree = degree + 1.0f) {
			result *= accumulant;
		}
		return result;
	}

public:
	// (a*<x, x> + r)^d for sparse ELL row idxX (texture path, __powf).
	__device__ float selfK(int idxX, float a, float r, float d) {
		float sum = 0.0f;
		for(int ptrX=0, lenX = tex1Dfetch(ellRowLenTex, idxX);ptrX<lenX;ptrX++) {
			int2 X = tex2D(ellDataTex, idxX, ptrX);
			float y = __int_as_float(X.y);
			sum += y*y;
		}
		sum = sum*a + r;
		return __powf(sum, d);
	}

	// (a*<a, a> + r)^d over a dense strided vector (at least one element
	// assumed; do-while).
	__device__ float selfKernel(float* pointerA, int pitchA, float* pointerAEnd, float a, float r, float d) {
		float accumulant = 0.0f;
		do {
			float value = *pointerA;
			accumulant += value * value;
			pointerA += pitchA;
		} while (pointerA < pointerAEnd);
		return powByMultiply(accumulant * a + r, d);
	}

	// (a*<x, Y> + r)^d between sparse row idxX and dense vector Y.
	// selfDot/yIndex are unused here (shared signature across kernels).
	__device__ float K(int idxX, float* Y, float a, float r, float d, float* selfDot, int yIndex) {
		float sum = 0.0f;
		int lenX = tex1Dfetch(ellRowLenTex, idxX);
		for(int ptrX=0;ptrX<lenX;ptrX++) {
			int2 X = tex2D(ellDataTex, idxX, ptrX);
			sum += __int_as_float(X.y)* *(Y+X.x);
		}
		sum = sum*a + r;
		return __powf(sum, d);
	}

	// (a*<a, b> + r)^d over two dense strided vectors.
	__device__ float kernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB, float a, float r, float d) {
		float accumulant = 0.0f;
		do {
			accumulant += (*pointerA) * (*pointerB);
			pointerA += pitchA;
			pointerB += pitchB;
		} while (pointerA < pointerAEnd);
		return powByMultiply(accumulant * a + r, d);
	}

	// Block-cooperative (a*<a, b> + r)^d: strided per-thread partial sums,
	// reduced via sumReduce; thread 0 applies the affine map and power, so
	// the final value lives in sharedTemps[0]. All threads must call this.
	__device__ void parallelKernel(unsigned int tid, float* pointerA, float* pointerAEnd, float* pointerB, float* sharedTemps, float a, float r, float d) {
		pointerA += tid;
		pointerB += tid;
		sharedTemps[tid] = 0.0f;
		while (pointerA < pointerAEnd) {
			sharedTemps[tid] += (*pointerA) * (*pointerB);
			pointerA += blockDim.x;
			pointerB += blockDim.x;
		}
		__syncthreads();

		sumReduce<BLOCKSIZE>(sharedTemps,tid);
		if (tid == 0) {
			sharedTemps[0] = powByMultiply(sharedTemps[0] * a + r, d);
		}
	}

	//This function assumes we're doing two kernel evaluations at once:
	//Phi1(a, b) and Phi2(a, c)
	//b and c are already in shared memory, so we don't care about minimizing
	//their memory accesses, but a is in global memory
	//So we have to worry about not accessing a twice
	__device__ void dualK(int idxX, float *Y1, float *Y2, float a, float r, float d, float &phiAB, float &phiAC, float* selfDot, int iHigh, int iLow) {
		float sum1 = 0.0f;
		float sum2 = 0.0f;
		int lenX = tex1Dfetch(ellRowLenTex, idxX);
		for(int ptrX=0 ; ptrX<lenX ; ptrX++) {
			int2 X = tex2D(ellDataTex, idxX, ptrX);
			float fx = __int_as_float(X.y);
			sum1 += fx* *(Y1+X.x);
			sum2 += fx* *(Y2+X.x);
		}
		sum1 = sum1 * a + r;
		sum2 = sum2 * a + r;
		phiAB = __powf(sum1, d);
		phiAC = __powf(sum2, d);
	}

	// Dense counterpart of dualK: phi1 = (a*<a, b> + r)^d and
	// phi2 = (a*<a, c> + r)^d, reading each element of a exactly once.
	__device__ void dualKernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB, float* pointerC, int pitchC, float a, float r, float d, float& phi1, float& phi2) {
		float accumulant1 = 0.0f;
		float accumulant2 = 0.0f;
		do {
			float xa = *pointerA;
			accumulant1 += xa * (*pointerB);
			accumulant2 += xa * (*pointerC);
			pointerA += pitchA;
			pointerB += pitchB;
			pointerC += pitchC;
		} while (pointerA < pointerAEnd);

		phi1 = powByMultiply(accumulant1 * a + r, d);
		phi2 = powByMultiply(accumulant2 * a + r, d);
	}
};

// Gaussian (RBF) kernel: K(x, y) = exp(-gamma * ||x - y||^2).
// All methods take ngamma = -gamma so the exponent is a plain multiply.
// The squared distance is expanded as ||x||^2 + ||y||^2 - 2*<x, y>; the
// squared norms are cached in the selfDot[] array where applicable.
// (Several abandoned experimental variants that were kept as large
// commented-out blocks have been removed for readability.)
class Gaussian : public Kernel {
public:
	// Dense-path self kernel: exp(ngamma * ||x - x||^2) == exp(0) == 1.
	__device__ float selfKernel(float* pointerA, int pitchA, float* pointerAEnd, float ngamma, float parameterB, float parameterC) {
		return 1.0f;
	}

	// Squared L2 norm <x, x> of sparse ELL row idxX, read through the
	// ellRowLenTex / ellDataTex textures (int2.y holds float value bits).
	__device__ float selfDot(int idxX) {
		int lenX = tex1Dfetch(ellRowLenTex, idxX);
		float sum = 0.0f;
		for(int ptrX=0;ptrX<lenX;ptrX++) {
			int2 X = tex2D(ellDataTex, idxX, ptrX);
			float y = __int_as_float(X.y);
			sum += y*y;
		}
		return sum;
	}

	// exp(ngamma * (selfDot[idxX] + selfDot[yIndex] - 2*<x, Y>)) between
	// sparse row idxX and dense vector Y. The selfDot parameter (an array
	// of precomputed squared norms) intentionally shadows the selfDot()
	// method above. Fixed parameter-name typo: paramterB -> parameterB.
	__device__ float K(int idxX, float* Y, float ngamma, float parameterB, float parameterC, float* selfDot, int yIndex) {
		float sum = 0.0f;
		int lenX = tex1Dfetch(ellRowLenTex, idxX);
		for(int ptrX=0;ptrX<lenX;ptrX++) {
			int2 X = tex2D(ellDataTex, idxX, ptrX);
			sum += __int_as_float(X.y)* *(Y+X.x);
		}
		float dot = selfDot[yIndex];
		dot += selfDot[idxX];
		sum = -2.0f * sum;
		sum += dot;
		return __expf(sum*ngamma);
	}

	// Dense path: accumulates ||a - b||^2 directly with round-to-nearest
	// intrinsics, then applies exp(ngamma * dist2). ngamma is -gamma.
	__device__ float kernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB/*1*/, float ngamma/*negative gamma*/, float parameterB, float parameterC) {
		float accumulant = 0.0f;
		do {
			float diff = __fadd_rn(*pointerA ,-*pointerB);
			accumulant = __fadd_rn(accumulant,__fmul_rn(diff,diff));
			pointerA += pitchA;
			pointerB += pitchB;
		} while (pointerA < pointerAEnd);
		return __expf(__fmul_rn(ngamma , accumulant));
	}

	// Block-cooperative ||a - b||^2 with strided per-thread partials,
	// reduced via sumReduce; thread 0 applies the exponential, leaving
	// the kernel value in sharedTemps[0]. All threads must call this.
	__device__ void parallelKernel(unsigned int tid, float* pointerA, float* pointerAEnd, float* pointerB, float* sharedTemps, float ngamma, float parameterB, float parameterC) {
		pointerA += tid;
		pointerB += tid;
		sharedTemps[tid] = 0.0f;
		while (pointerA < pointerAEnd) {
			float diff = (*pointerA) - (*pointerB);
			sharedTemps[tid] += diff * diff;
			pointerA += blockDim.x;
			pointerB += blockDim.x;
		}
		__syncthreads();

		sumReduce<BLOCKSIZE>(sharedTemps,tid);
		if (tid == 0) {
			sharedTemps[0] = __expf(sharedTemps[0] * ngamma);
		}
	}

	//This function assumes we're doing two kernel evaluations at once:
	//Phi1(a, b) and Phi2(a, c)
	//b and c are already in shared memory, so we don't care about minimizing
	//their memory accesses, but a is in global memory
	//So we have to worry about not accessing a twice
	//
	// phiAB = K(x, Y1) using selfDot[iHigh]; phiAC = K(x, Y2) using
	// selfDot[iLow]; both reuse selfDot[idxX] and a single texture sweep.
	__device__ void dualK(int idxX, float *Y1, float *Y2, float ngamma, float parameterB, float parameterC, float &phiAB, float &phiAC, float* selfDot, int iHigh, int iLow) {
		float sum1 = 0.0f;
		float sum2 = 0.0f;

		int lenX = tex1Dfetch(ellRowLenTex, idxX);
		for(int ptrX=0 ; ptrX<lenX ; ptrX++) {
			int2 X = tex2D(ellDataTex, idxX, ptrX);
			float fx = __int_as_float(X.y);
			sum1 += fx* *(Y1+X.x);
			sum2 += fx* *(Y2+X.x);
		}
		float dotA = selfDot[idxX];
		{
			sum1 = -2.0f * sum1;
			float dotB = selfDot[iHigh];
			sum1 += dotA + dotB;
			phiAB = __expf(ngamma * sum1);
		}
		{
			sum2 = -2.0f * sum2;
			float dotC = selfDot[iLow];
			sum2 += dotA + dotC;
			phiAC = __expf(ngamma * sum2);
		}
	}

	// Dense counterpart of dualK: phi1 = exp(ngamma*||a - b||^2) and
	// phi2 = exp(ngamma*||a - c||^2), reading each element of a once.
	__device__ void dualKernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB, float* pointerC, int pitchC, float ngamma, float parameterB, float parameterC, float& phi1, float& phi2) {
		float accumulant1 = 0.0f;
		float accumulant2 = 0.0f;
		do {
			float xa = *pointerA;
			float diff = xa - (*pointerB);
			accumulant1 += diff * diff;
			diff = xa - (*pointerC);
			accumulant2 += diff * diff;
			pointerA += pitchA;
			pointerB += pitchB;
			pointerC += pitchC;
		} while (pointerA < pointerAEnd);
		phi1 = __expf(ngamma * accumulant1);
		phi2 = __expf(ngamma * accumulant2);
	}
};

// Sigmoid kernel: K(x, y) = tanh(a*<x, y> + r).
// Sparse rows come from the ellRowLenTex / ellDataTex textures; each int2
// entry packs (column index, value bits), decoded with __int_as_float.
class Sigmoid : public Kernel {
public:
	// tanh(a*<x, x> + r) for the sparse ELL row idxX.
	// parameterC is unused; it exists so all kernel classes share one
	// signature.
	__device__ float selfK(int idxX, float a, float r, float parameterC) {
		float dot = 0.0f;
		int rowLen = tex1Dfetch(ellRowLenTex, idxX);
		for (int j = 0; j < rowLen; ++j) {
			int2 entry = tex2D(ellDataTex, idxX, j);
			float v = __int_as_float(entry.y);
			dot += v * v;
		}
		return tanhf(dot * a + r);
	}

	// tanh(a*<a, a> + r) over a dense strided vector (at least one
	// element assumed; do-while).
	__device__ float selfKernel(float* pointerA, int pitchA, float* pointerAEnd, float a, float r, float parameterC) {
		float dot = 0.0f;
		do {
			float v = *pointerA;
			dot += v * v;
			pointerA += pitchA;
		} while (pointerA < pointerAEnd);
		return tanhf(dot * a + r);
	}

	// tanh(a*<x, Y> + r) between sparse row idxX and dense vector Y.
	// selfDot/yIndex are unused here (shared signature).
	__device__ float K(int idxX, float* Y, float a, float r, float parameterC, float* selfDot, int yIndex) {
		float dot = 0.0f;
		int rowLen = tex1Dfetch(ellRowLenTex, idxX);
		for (int j = 0; j < rowLen; ++j) {
			int2 entry = tex2D(ellDataTex, idxX, j);
			dot += __int_as_float(entry.y) * Y[entry.x];
		}
		return tanhf(dot * a + r);
	}

	// tanh(a*<a, b> + r) over two dense strided vectors.
	__device__ float kernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB, float a, float r, float parameterC) {
		float dot = 0.0f;
		do {
			dot += (*pointerA) * (*pointerB);
			pointerA += pitchA;
			pointerB += pitchB;
		} while (pointerA < pointerAEnd);
		return tanhf(dot * a + r);
	}

	// Block-cooperative version: strided per-thread partial dot products,
	// reduced via sumReduce; thread 0 applies tanh(a*dot + r), so the
	// result lands in sharedTemps[0]. All threads in the block must call.
	__device__ void parallelKernel(unsigned int tid, float* pointerA, float* pointerAEnd, float* pointerB, float* sharedTemps, float a, float r, float parameterC) {
		float partial = 0.0f;
		float* pa = pointerA + tid;
		float* pb = pointerB + tid;
		while (pa < pointerAEnd) {
			partial += (*pa) * (*pb);
			pa += blockDim.x;
			pb += blockDim.x;
		}
		sharedTemps[tid] = partial;
		__syncthreads();

		sumReduce<BLOCKSIZE>(sharedTemps, tid);
		if (tid == 0) {
			sharedTemps[0] = tanhf(sharedTemps[0] * a + r);
		}
	}

	// Two kernel evaluations sharing one sweep over row idxX:
	// phiAB = tanh(a*<x, Y1> + r) and phiAC = tanh(a*<x, Y2> + r).
	// Y1/Y2 are expected in shared memory, so each texture entry of x is
	// fetched exactly once. selfDot/iHigh/iLow are unused here.
	__device__ void dualK(int idxX, float *Y1, float *Y2, float a, float r, float parameterC, float &phiAB, float &phiAC, float* selfDot, int iHigh, int iLow ) {
		float dotAB = 0.0f;
		float dotAC = 0.0f;
		int rowLen = tex1Dfetch(ellRowLenTex, idxX);
		for (int j = 0; j < rowLen; ++j) {
			int2 entry = tex2D(ellDataTex, idxX, j);
			float v = __int_as_float(entry.y);
			dotAB += v * Y1[entry.x];
			dotAC += v * Y2[entry.x];
		}
		phiAB = tanhf(dotAB * a + r);
		phiAC = tanhf(dotAC * a + r);
	}

	// Dense counterpart of dualK: phi1 = tanh(a*<a, b> + r) and
	// phi2 = tanh(a*<a, c> + r), reading each element of a exactly once.
	__device__ void dualKernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB, float* pointerC, int pitchC, float a, float r, float parameterC, float& phi1, float& phi2) {
		float dotAB = 0.0f;
		float dotAC = 0.0f;
		do {
			float av = *pointerA;
			dotAB += av * (*pointerB);
			dotAC += av * (*pointerC);
			pointerA += pitchA;
			pointerB += pitchB;
			pointerC += pitchC;
		} while (pointerA < pointerAEnd);
		phi1 = tanhf(dotAB * a + r);
		phi2 = tanhf(dotAC * a + r);
	}
};


#endif
