#ifndef SVM_KERNELS
#define SVM_KERNELS

#include <math.h>
#include "../common/framework.h"
#include "reduce.h"

struct Linear {
	// Linear kernel: K(x, y) = <x, y>. The trailing parameter triplet exists
	// only so every kernel struct shares the same signatures; the values are
	// ignored here.

	// K(x, x): squared L2 norm of the strided vector [pointerA, pointerAEnd).
	// The do-while reads before testing, so the vector must be non-empty.
	static __device__ float selfKernel(float* pointerA, int pitchA, float* pointerAEnd, float parameterA, float parameterB, float parameterC) {
		float normSq = 0.0f;
		do {
			float component = *pointerA;
			normSq += component * component;
			pointerA += pitchA;
		} while (pointerA < pointerAEnd);
		return normSq;
	}

	// Sparse/texture evaluation path -- unimplemented stub for this kernel type.
	static __device__ float K(int idxX, float* Y, int pitchY, int nDimension, float ngamma, float paramterB, float parameterC) {
		return 1.0f;
	}

	// K(x, y): dot product of two equal-length strided vectors.
	static __device__ float kernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB, float parameterA, float parameterB, float parameterC) {
		float dot = 0.0f;
		do {
			dot += (*pointerA) * (*pointerB);
			pointerA += pitchA;
			pointerB += pitchB;
		} while (pointerA < pointerAEnd);
		return dot;
	}

	// Block-parallel dot product: each thread strides through both vectors,
	// deposits its partial sum in sharedTemps[threadIdx.x], and sumReduce
	// collapses the partials so the final value lands in sharedTemps[0].
	static __device__ void parallelKernel(float* pointerA, float* pointerAEnd, float* pointerB, float* sharedTemps, float parameterA, float parameterB, float parameterC) {
		float* cursorA = pointerA + threadIdx.x;
		float* cursorB = pointerB + threadIdx.x;
		float partial = 0.0f;
		while (cursorA < pointerAEnd) {
			partial += (*cursorA) * (*cursorB);
			cursorA += blockDim.x;
			cursorB += blockDim.x;
		}
		sharedTemps[threadIdx.x] = partial;
		// Every partial must be in shared memory before the reduction runs.
		__syncthreads();

		sumReduce(sharedTemps);
	}

	//This function assumes we're doing two kernel evaluations at once:
	//Phi1(a, b) and Phi2(a, c)
	//b and c are already in shared memory, so we don't care about minimizing
	//their memory accesses, but a is in global memory
	//So we have to worry about not accessing a twice
	static __device__ void dualK(int idxX, float *Y1, float *Y2, int nDimension, float ngamma, float parameterB, float parameterC, float &phiAB, float &phiAC) {
	}

	// Evaluates K(a, b) and K(a, c) in one pass, fetching each element of the
	// global-memory vector a exactly once.
	static __device__ void dualKernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB, float* pointerC, int pitchC, float parameterA, float parameterB, float parameterC, float& phi1, float& phi2) {
		float dotAB = 0.0f;
		float dotAC = 0.0f;
		do {
			float elementA = *pointerA;
			dotAB += elementA * (*pointerB);
			dotAC += elementA * (*pointerC);
			pointerA += pitchA;
			pointerB += pitchB;
			pointerC += pitchC;
		} while (pointerA < pointerAEnd);
		phi1 = dotAB;
		phi2 = dotAC;
	}
};

struct Polynomial {
	// Polynomial kernel: K(x, y) = (a * <x, y> + r)^d.
	// All entry points route through raiseToPower so the exponentiation
	// semantics stay in one place (the original duplicated this loop four
	// times).

	// Raises base to the power d by repeated multiplication. d is treated as
	// an integral degree: the loop counts 2..d, so a fractional part of d is
	// truncated and any d < 2 yields base^1 (matching the original loops).
	static __device__ float raiseToPower(float base, float d) {
		float result = base;
		for (float degree = 2.0f; degree <= d; degree = degree + 1.0f) {
			result *= base;
		}
		return result;
	}

	// K(x, x): squared L2 norm of the strided vector [pointerA, pointerAEnd),
	// then scaled, shifted, and raised to degree d.
	// The do-while reads before testing, so the vector must be non-empty.
	static __device__ float selfKernel(float* pointerA, int pitchA, float* pointerAEnd, float a, float r, float d) {
		float accumulant = 0.0f;
		do {
			float value = *pointerA;
			accumulant += value * value;
			pointerA += pitchA;
		} while (pointerA < pointerAEnd);
		return raiseToPower(accumulant * a + r, d);
	}

	// Sparse/texture evaluation path -- unimplemented stub for this kernel type.
	static __device__ float K(int idxX, float* Y, int pitchY, int nDimension, float ngamma, float paramterB, float parameterC) {
		return 1.0f;
	}

	// K(x, y) over two equal-length strided vectors.
	static __device__ float kernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB, float a, float r, float d) {
		float accumulant = 0.0f;
		do {
			accumulant += (*pointerA) * (*pointerB);
			pointerA += pitchA;
			pointerB += pitchB;
		} while (pointerA < pointerAEnd);
		return raiseToPower(accumulant * a + r, d);
	}

	// Block-parallel K(x, y): each thread accumulates a strided partial dot
	// product into sharedTemps[threadIdx.x]; sumReduce leaves the total in
	// sharedTemps[0], which thread 0 then scales and exponentiates in place.
	static __device__ void parallelKernel(float* pointerA, float* pointerAEnd, float* pointerB, float* sharedTemps, float a, float r, float d) {
		pointerA += threadIdx.x;
		pointerB += threadIdx.x;
		sharedTemps[threadIdx.x] = 0.0f;
		while (pointerA < pointerAEnd) {
			sharedTemps[threadIdx.x] += (*pointerA) * (*pointerB);
			pointerA += blockDim.x;
			pointerB += blockDim.x;
		}
		// All partial sums must be written before the reduction reads them.
		__syncthreads();

		sumReduce(sharedTemps);
		if (threadIdx.x == 0) {
			sharedTemps[0] = raiseToPower(sharedTemps[0] * a + r, d);
		}
	}

	//This function assumes we're doing two kernel evaluations at once:
	//Phi1(a, b) and Phi2(a, c)
	//b and c are already in shared memory, so we don't care about minimizing
	//their memory accesses, but a is in global memory
	//So we have to worry about not accessing a twice
	static __device__ void dualK(int idxX, float *Y1, float *Y2, int nDimension, float ngamma, float parameterB, float parameterC, float &phiAB, float &phiAC) {
	}

	// Evaluates K(a, b) and K(a, c) in one pass, fetching each element of the
	// global-memory vector a exactly once.
	static __device__ void dualKernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB, float* pointerC, int pitchC, float a, float r, float d, float& phi1, float& phi2) {
		float accumulant1 = 0.0f;
		float accumulant2 = 0.0f;
		do {
			float xa = *pointerA;
			accumulant1 += xa * (*pointerB);
			accumulant2 += xa * (*pointerC);
			pointerA += pitchA;
			pointerB += pitchB;
			pointerC += pitchC;
		} while (pointerA < pointerAEnd);

		phi1 = raiseToPower(accumulant1 * a + r, d);
		phi2 = raiseToPower(accumulant2 * a + r, d);
	}
};

struct Gaussian {
	// Gaussian (RBF) kernel: K(x, y) = exp(ngamma * ||x - y||^2).
	// ngamma is the NEGATIVE gamma (annotated "negative gamma" on kernel()
	// below), so the exponent already carries the correct sign.

	// K(x, x) = exp(ngamma * 0) = 1 for any vector, so no data is read.
	static __device__ float selfKernel(float* pointerA, int pitchA, float* pointerAEnd, float ngamma, float parameterB, float parameterC) {
		return 1.0f;
	}

	// Squared distance between sparse row idxX and the dense strided vector Y,
	// then exp. Row idxX is read through textures: ellRowLenTex holds the
	// stored-entry count, and each int2 fetched from ellDataTex carries the
	// column index in .x with the raw float bits of the entry in .y (decoded
	// below via __int_as_float) -- NOTE(review): packing inferred from usage
	// here; confirm against the host-side ELL upload code.
	static __device__ float K(int idxX, float* Y, int pitchY, int nDimension, float ngamma, float paramterB, float parameterC) {
		float sum = 0.0f;
		int ptrX = 0, lenX = tex1Dfetch(ellRowLenTex, idxX);
		// Empty row: sentinel column -1 never matches colY >= 0, so every
		// term below reduces to Y[colY]^2.
		int2 X = (lenX==0)?(make_int2(-1,0)):(tex2D(ellDataTex, idxX, ptrX));
		for(int colY = 0;colY<nDimension;colY++) {
			float v = *(Y+colY*pitchY);
			// If this dense column holds a stored entry of X the term is
			// (Y[colY] - X[colY])^2; otherwise X[colY] is implicitly zero.
			if(colY == X.x){
				v -= __int_as_float(X.y);
				// Advance to the next stored entry; columns are presumably
				// stored in ascending order -- TODO confirm.
				if(++ptrX < lenX)
					X = tex2D(ellDataTex, idxX, ptrX);
			}
			sum += __fmul_rn(v,v);
		}

		//float sum = 0.0f;
		//int ptrX = 0, lenX = tex1Dfetch(ellRowLenTex, idxX);
		//int colY = 0;
		//while(ptrX < lenX) {
		//	int colX = tex2D(ellColumnTex, idxX, ptrX);
		//	while(colY < colX) {
		//		float v = *(Y+pitchY*colY);
		//		sum += v*v;
		//		colY++;
		//	}
		//	float d = tex2D(ellEntryTex, idxX, ptrX) - *(Y+pitchY*colY);
		//	sum += d*d;
		//	colY++;
		//	ptrX++;
		//}
		//while(colY < nDimension) {
		//	float v = *(Y+pitchY*colY);
		//	sum += v*v;
		//	colY++;
		//}
		return __expf(sum*ngamma);
	}

	//static __device__ float K(int idxX, int idxY, float ngamma, float parameterB, float parameterC) {
	//	float sum = 0.0f;
	//	int ptrX = 0, ptrY = 0;
	//	int lenX = tex1Dfetch(ellRowLenTex,idxX), lenY = tex1Dfetch(ellRowLenTex,idxY);
	//	while(ptrX < lenX && ptrY < lenY) {
	//		int colX = tex2D(ellColumnTex,ptrX,idxX), colY = tex2D(ellColumnTex,ptrY,idxY);
	//		if(colX == colY) {
	//			float d = tex2D(ellEntryTex,ptrX,idxX) - tex2D(ellEntryTex,ptrY,idxY);
	//			sum += d*d;
	//			ptrX++;
	//			ptrY++;
	//		} else {
	//			if(colX > colY) {
	//				float v = tex2D(ellEntryTex,ptrY,idxY);
	//				sum += v*v;
	//				ptrY++;
	//			} else {
	//				float v = tex2D(ellEntryTex,ptrX,idxX);
	//				sum += v*v;
	//				ptrX++;
	//			}
	//		}
	//	}
	//	while(ptrX < lenX) {
	//		float v = tex2D(ellEntryTex,ptrX,idxX);
	//		sum += v*v;
	//		ptrX++;
	//	}
	//	while(ptrY < lenY) {
	//		float v = tex2D(ellEntryTex,ptrY,idxY);
	//		sum += v*v;
	//		ptrY++;
	//	}
	//	return __expf(ngamma*sum);
	//}

	// K(x, y) over two dense strided vectors, using round-to-nearest float
	// intrinsics for the distance accumulation.
	// The do-while reads before testing, so the vectors must be non-empty.
	static __device__ float kernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB/*1*/, float ngamma/*negative gamma*/, float parameterB, float parameterC) {
		float accumulant = 0.0f;
		do {
			float diff = __fadd_rn(*pointerA ,-*pointerB);
			accumulant = __fadd_rn(accumulant,__fmul_rn(diff,diff));
			pointerA += pitchA;
			pointerB += pitchB;
		} while (pointerA < pointerAEnd);
		return __expf(__fmul_rn(ngamma , accumulant));
	}

	// Block-parallel K(x, y): each thread accumulates a strided partial of
	// ||x - y||^2 into sharedTemps[threadIdx.x]; sumReduce leaves the total
	// in sharedTemps[0], which thread 0 then exponentiates in place.
	static __device__ void parallelKernel(float* pointerA, float* pointerAEnd, float* pointerB, float* sharedTemps, float ngamma, float parameterB, float parameterC) {
		pointerA += threadIdx.x;
		pointerB += threadIdx.x;
		sharedTemps[threadIdx.x] = 0.0f;
		while (pointerA < pointerAEnd) {
			float diff = (*pointerA) - (*pointerB);
			sharedTemps[threadIdx.x] += diff * diff;
			pointerA += blockDim.x;
			pointerB += blockDim.x;
		}
		// All partial sums must be written before the reduction reads them.
		__syncthreads();

		sumReduce(sharedTemps);
		if (threadIdx.x == 0) {
			sharedTemps[0] = __expf(sharedTemps[0] * ngamma);
		}
	}

	//This function assumes we're doing two kernel evaluations at once:
	//Phi1(a, b) and Phi2(a, c)
	//b and c are already in shared memory, so we don't care about minimizing
	//their memory accesses, but a is in global memory
	//So we have to worry about not accessing a twice

	// Sparse dual evaluation: K(x, Y1) and K(x, Y2) with sparse row idxX read
	// once through the ELL textures (same int2 packing as K() above). Y1 and
	// Y2 are indexed contiguously (no pitch) -- per the comment above they
	// live in shared memory.
	static __device__ void dualK(int idxX, float *Y1, float *Y2, int nDimension, float ngamma, float parameterB, float parameterC, float &phiAB, float &phiAC) {
		float sum1 = 0.0f;
		float sum2 = 0.0f;
		int ptrX = 0, lenX = tex1Dfetch(ellRowLenTex, idxX);
		// Empty row: sentinel column -1 never matches colY >= 0.
		int2 X = (lenX==0)?(make_int2(-1,0)):(tex2D(ellDataTex, idxX, ptrX));
		for(int colY = 0;colY<nDimension;colY++) {
			float v = *(Y1+colY);
			float u = *(Y2+colY);
			if(colY == X.x){
				// Stored entry contributes to both distances; decode its
				// float bits once and subtract from each dense value.
				float xVal = __int_as_float(X.y);
				v -= xVal;
				u -= xVal;
				if(++ptrX < lenX)
					X = tex2D(ellDataTex, idxX, ptrX);
			}
			sum1 += v*v;
			sum2 += u*u;
		}
		//while(ptrX < lenX) {
		//	int colX = tex2D(ellColumnTex, idxX, ptrX);
		//	while(colY < colX) {
		//		float v = *(Y1+colY);
		//		float u = *(Y2+colY);
		//		sum1 += __fmul_rn(v,v);
		//		sum2 += __fmul_rn(u,u); 
		//		colY++;
		//	}
		//	float x = tex2D(ellEntryTex, idxX, ptrX);
		//	float d1 = x - *(Y1+colY);
		//	float d2 = x - *(Y2+colY);
		//	sum1 += __fmul_rn(d1,d1);
		//	sum2 += __fmul_rn(d2,d2);
		//	colY++;
		//	ptrX++;
		//}
		//while(colY < nDimension) {
		//	float v = *(Y1+colY);
		//	float u = *(Y2+colY);
		//	sum1 += __fmul_rn(v,v);
		//	sum2 += __fmul_rn(u,u);
		//	colY++;
		//}
		phiAB = __expf(sum1*ngamma);
		phiAC = __expf(sum2*ngamma);
	}

	// Dense dual evaluation: K(a, b) and K(a, c) in one pass, fetching each
	// element of the global-memory vector a exactly once.
	static __device__ void dualKernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB, float* pointerC, int pitchC, float ngamma, float parameterB, float parameterC, float& phi1, float& phi2) {
		float accumulant1 = 0.0f;
		float accumulant2 = 0.0f;
		do {
			float xa = *pointerA;
			float diff = xa - (*pointerB);
			accumulant1 += diff * diff;
			diff = xa - (*pointerC);
			accumulant2 += diff * diff;
			pointerA += pitchA;
			pointerB += pitchB;
			pointerC += pitchC;
		} while (pointerA < pointerAEnd);
		phi1 = __expf(ngamma * accumulant1);
		phi2 = __expf(ngamma * accumulant2);
	}
};

struct Sigmoid {
	// Sigmoid kernel: K(x, y) = tanh(a * <x, y> + r). parameterC is unused.
	// Fixes in this revision:
	//  - tanhf instead of tanh: the double-precision overload promoted the
	//    float argument to double on the device for no benefit.
	//  - parallelKernel now applies the a/r scale before tanh, matching
	//    selfKernel/kernel/dualKernel (it was previously dropped there).

	// K(x, x): tanh of the scaled squared norm of [pointerA, pointerAEnd).
	// The do-while reads before testing, so the vector must be non-empty.
	static __device__ float selfKernel(float* pointerA, int pitchA, float* pointerAEnd, float a, float r, float parameterC) {
		float accumulant = 0.0f;
		do {
			float value = *pointerA;
			accumulant += value * value;
			pointerA += pitchA;
		} while (pointerA < pointerAEnd);
		accumulant = accumulant * a + r;
		return tanhf(accumulant);
	}

	// Sparse/texture evaluation path -- unimplemented stub for this kernel type.
	static __device__ float K(int idxX, float* Y, int pitchY, int nDimension, float ngamma, float paramterB, float parameterC) {
		return 1.0f;
	}

	// K(x, y) over two equal-length strided vectors.
	static __device__ float kernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB, float a, float r, float parameterC) {
		float accumulant = 0.0f;
		do {
			accumulant += (*pointerA) * (*pointerB);
			pointerA += pitchA;
			pointerB += pitchB;
		} while (pointerA < pointerAEnd);
		accumulant = accumulant * a + r;
		return tanhf(accumulant);
	}

	// Block-parallel K(x, y): each thread accumulates a strided partial dot
	// product into sharedTemps[threadIdx.x]; sumReduce leaves the total in
	// sharedTemps[0], which thread 0 then scales and squashes in place.
	static __device__ void parallelKernel(float* pointerA, float* pointerAEnd, float* pointerB, float* sharedTemps, float a, float r, float parameterC) {
		pointerA += threadIdx.x;
		pointerB += threadIdx.x;
		sharedTemps[threadIdx.x] = 0.0f;
		while (pointerA < pointerAEnd) {
			sharedTemps[threadIdx.x] += (*pointerA) * (*pointerB);
			pointerA += blockDim.x;
			pointerB += blockDim.x;
		}
		// All partial sums must be written before the reduction reads them.
		__syncthreads();

		sumReduce(sharedTemps);
		if (threadIdx.x == 0) {
			// Bug fix: the a/r scale was previously omitted here, making this
			// path disagree with kernel() and dualKernel() above/below.
			float accumulant = sharedTemps[0] * a + r;
			sharedTemps[0] = tanhf(accumulant);
		}

	}

	//This function assumes we're doing two kernel evaluations at once:
	//Phi1(a, b) and Phi2(a, c)
	//b and c are already in shared memory, so we don't care about minimizing
	//their memory accesses, but a is in global memory
	//So we have to worry about not accessing a twice
	static __device__ void dualK(int idxX, float *Y1, float *Y2, int nDimension, float ngamma, float parameterB, float parameterC, float &phiAB, float &phiAC) {
	}

	// Evaluates K(a, b) and K(a, c) in one pass, fetching each element of the
	// global-memory vector a exactly once.
	static __device__ void dualKernel(float* pointerA, int pitchA, float* pointerAEnd, float* pointerB, int pitchB, float* pointerC, int pitchC, float a, float r, float parameterC, float& phi1, float& phi2) {
		float accumulant1 = 0.0f;
		float accumulant2 = 0.0f;
		do {
			float xa = *pointerA;
			accumulant1 += xa * (*pointerB);
			accumulant2 += xa * (*pointerC);
			pointerA += pitchA;
			pointerB += pitchB;
			pointerC += pitchC;
		} while (pointerA < pointerAEnd);
		accumulant1 = accumulant1 * a + r;
		phi1 = tanhf(accumulant1);
		accumulant2 = accumulant2 * a + r;
		phi2 = tanhf(accumulant2);

	}
};


#endif
