#include <cublas.h>
#include <cutil.h>
#include <cuda.h>
#include <cfloat>
#include "../common/framework.h"
#include "../common/deviceSelect.h"
#include "svmClassify.h"
#include "svmClassifyKernels.h"

/**
* Computes the self dot product (squared Euclidean norm) of every vector in an array.
* Expected launch: 1D grid with BLOCKSIZE threads per block, enough blocks to cover sourceCount.
* @param devSource the vectors, in column major format
* @param devSourcePitchInFloats the pitch of each row of the vectors (guaranteed >= sourceCount;
*        may be greater due to padding that keeps each row aligned)
* @param devDest output vector receiving one self dot product per input vector
* @param sourceCount the number of vectors
* @param sourceLength the dimensionality of each vector
*/
__global__ void makeSelfDots(float* devSource, int devSourcePitchInFloats, float* devDest, int sourceCount, int sourceLength) {
	int vectorId = __mul24(BLOCKSIZE, blockIdx.x) + threadIdx.x;
	if (vectorId >= sourceCount) {
		return;
	}
	float accumulator = 0.0f;
	// Walk down one column; adjacent threads touch adjacent addresses (coalesced).
	float* element = devSource + vectorId;
	for (int dim = 0; dim < sourceLength; dim++, element += devSourcePitchInFloats) {
		float value = *element;
		accumulator += value * value;
	}
	devDest[vectorId] = accumulator;
}

/**
* Constructs the matrix devDots, where devDots_(i,j) = ||data_i||^2 + ||SV_j||^2.
* Launch: 2D grid; blockIdx.x tiles the support-vector dimension, blockIdx.y tiles the
* data-point dimension; dynamic shared memory must hold blockDim.x floats.
* @param devDots the output array (one row per data point, one column per SV)
* @param devDotsPitchInFloats the pitch of each row of devDots (guaranteed >= nSV)
* @param devSVDots a vector containing ||SV_j||^2 for all j in [0, nSV)
* @param devDataDots a vector containing ||data_i||^2 for all i in [0, nPointsInSlice)
* @param nSV the number of Support Vectors in the classifier
* @param nPointsInSlice the number of data points in this slice
*/
__global__ void makeDots(float* devDots, int devDotsPitchInFloats, float* devSVDots, float* devDataDots, int nSV, int nPointsInSlice) {
	extern __shared__ float localDataDots[];
	const unsigned int tid = threadIdx.x;
	const unsigned int svIndex = __mul24(blockDim.x, blockIdx.x) + tid;
	const unsigned int firstPoint = __mul24(blockDim.x, blockIdx.y);

	// Each thread stages one data-point norm into shared memory for the whole block.
	if (firstPoint + tid < nPointsInSlice) {
		localDataDots[tid] = devDataDots[firstPoint + tid];
	}

	// Only read this thread's SV norm when it exists; the value is only consumed
	// under the same svIndex < nSV guard below.
	float mySVDot = 0.0f;
	if (svIndex < nSV) {
		mySVDot = devSVDots[svIndex];
	}
	// Barrier sits outside any divergent branch, so every thread reaches it.
	__syncthreads();

	if (svIndex < nSV) {
		float* out = devDots + devDotsPitchInFloats * firstPoint + svIndex;
		// Clamp the row count so the last tile does not run past the slice.
		int rows = min(blockDim.x, nPointsInSlice - firstPoint);
		for (int i = 0; i < rows; i++, out += devDotsPitchInFloats) {
			*out = mySVDot + localDataDots[i];
		}
	}
}

/**
* This function completes the kernel evaluations and begins the reductions to form the classification result.
* Launch: blockIdx.x indexes the data point, blockIdx.y indexes the task/classifier;
* blockDim.x must equal the blockSize template parameter (a power of two), and the
* dynamic shared memory allocation must hold blockDim.x floats.
* NOTE(review): sumWarpReduce is defined later in this translation unit; presumably it is
* forward-declared in svmClassifyKernels.h — confirm, otherwise this call would not resolve.
* @param devNorms this contains partially completed kernel evaluations.  For most kernels, devNorms_(i, j) = data_i (dot) sv_j.  For the RBF kernel, devNorms_(i, j) = -gamma*(||data_i||^2 + ||sv_j||^2 - 2* data_i (dot) sv_j)
* @param devNormsPitchInFloats contains the pitch of the partially completed kernel evaluations.  It will always be >= nSV.
* @param devAlphas this is the alpha vector for the SVM classifier (one row per task)
* @param devAlphaPitchInFloats the pitch of each row of devAlphas
* @param nSV the number of support vectors
* @param result output: one decision value per (task, point) pair
* @param resultPitch the pitch of each row of result, in floats
* @param rho per-task bias terms.  NOTE(review): the bias is ADDED here
*        (localValue[0] + rho[bidy]); the usual SVM decision function subtracts rho,
*        so confirm the host stores rho with the matching sign convention.
*/
template<unsigned int blockSize>
__global__ void computeKernelsReduce(float* devNorms, int devNormsPitchInFloats, float* devAlphas, int devAlphaPitchInFloats, int nSV, float* result, int resultPitch, float* rho) {
	extern __shared__ float localValue[];  // one partial sum per thread
	unsigned int tid = threadIdx.x;
	unsigned int svIndex = tid;
	unsigned int normIndex = devNormsPitchInFloats*blockIdx.x;  // start of this point's row
	unsigned int bidy = blockIdx.y;  // task index

	devAlphas += bidy * devAlphaPitchInFloats;  // select this task's alpha row
	localValue[tid] = 0.0f;

	// Each thread accumulates a strided slice of sum_j alpha_j * K(point, sv_j).
	while(svIndex < nSV) {
		float alpha = devAlphas[svIndex];
		localValue[tid] += alpha * devNorms[normIndex + svIndex];
		svIndex += blockDim.x;
	}
	__syncthreads();

	// Shared-memory tree reduction; the blockSize comparisons are compile-time
	// constants, so dead branches are eliminated per specialization.
	if(blockSize >= 512) {if(tid < 256) {localValue[tid] += localValue[tid+256];} __syncthreads();}
	if(blockSize >= 256) {if(tid < 128) {localValue[tid] += localValue[tid+128];} __syncthreads();}
	if(blockSize >= 128) {if(tid <  64) {localValue[tid] += localValue[tid+ 64];} __syncthreads();}
	if(tid < 32) sumWarpReduce<blockSize>(localValue, tid);
	if(tid == 0) {
		result[bidy * resultPitch + blockIdx.x] = localValue[0] + rho[bidy];
	}
}

/**
* Final reduction steps within a single warp (caller guards with tid < 32).
* The volatile qualifier forces each partial sum to be re-read from shared memory
* between steps instead of being cached in registers.
* NOTE(review): this relies on implicit warp-synchronous execution (no barriers
* between steps), which is only safe on pre-Volta hardware; under Volta+
* independent thread scheduling this pattern needs __syncwarp() between steps —
* confirm the compute capabilities this code targets.
*/
template<unsigned int blockSize>
__device__ void sumWarpReduce(volatile float* localValue, unsigned int tid) {
	if(blockSize >= 64) localValue[tid] += localValue[tid+32];
	if(blockSize >= 32) localValue[tid] += localValue[tid+16];
	if(blockSize >= 16) localValue[tid] += localValue[tid+ 8];
	if(blockSize >=  8) localValue[tid] += localValue[tid+ 4];
	if(blockSize >=  4) localValue[tid] += localValue[tid+ 2];
	if(blockSize >=  2) localValue[tid] += localValue[tid+ 1];
}

// Binary-classification decision: a strictly positive score maps to class 0,
// anything else to class 1.  The integer label is stored bit-wise into the
// float result array via __int_as_float.  devResultPitchInFloats and ntasks
// are unused in this specialization but kept for a uniform template signature.
template<> __global__ void argmax<true>(float* devResult, int devResultPitchInFloats, int nPointsInSlice, int ntasks) {
	int pIndex = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
	if (pIndex >= nPointsInSlice) {
		return;
	}
	int classLabel = (devResult[pIndex] > 0.0f) ? 0 : 1;
	devResult[pIndex] = __int_as_float(classLabel);
}

// Multi-class decision: for each point, pick the task index whose decision
// value is largest across the ntasks rows of devResult, and store that label
// bit-wise into devResult[pIndex] via __int_as_float.
template<> __global__ void argmax<false>(float* devResult, int devResultPitchInFloats, int nPointsInSlice, int ntasks) {
	int pIndex = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
	if(pIndex < nPointsInSlice) {
		// 0xff000000 reinterpreted as float is a very large negative value,
		// so the first finite result always wins the comparison.
		float sum = __int_as_float(0xff000000);
		// FIX: classLabel was previously uninitialized; if ntasks == 0 or every
		// result is NaN (no comparison succeeds), an indeterminate value was
		// written back.  Default to task 0.
		int classLabel = 0;
		for(int i=0;i<ntasks;i++) {
			float result = devResult[i*devResultPitchInFloats+pIndex];
			if(result > sum) {
				sum = result;
				classLabel = i;
			}
		}
		devResult[pIndex] = __int_as_float(classLabel);
	}
}

//template<KernelType K> __global__ void computeK(float* devNorms, int devNormsPitchInFloats, int nSV, float coef0, float degree) {}

// RBF kernel finisher: exponentiates each partially-computed norm in place
// (devNorms already holds the scaled negative squared distance; see the
// computeKernelsReduce header comment).  blockIdx.y selects the data point;
// threads stride across the support vectors.  coef0 and degree are unused here.
template<> __global__ void computeK<GAUSSIAN> (float* devNorms, int devNormsPitchInFloats, int nSV, float coef0, float degree) {
	unsigned int rowStart = devNormsPitchInFloats * blockIdx.y;
	for (unsigned int svIndex = threadIdx.x; svIndex < nSV; svIndex += blockDim.x) {
		devNorms[rowStart + svIndex] = __expf(devNorms[rowStart + svIndex]);
	}
}

// Polynomial kernel finisher: replaces each entry with (norm + coef0)^degree
// in place.  blockIdx.y selects the data point; threads stride across the
// support vectors.
template<> __global__ void computeK<POLYNOMIAL> (float* devNorms, int devNormsPitchInFloats, int nSV, float coef0, float degree) {
	unsigned int svIndex = threadIdx.x;
	unsigned int pointIndex = devNormsPitchInFloats*blockIdx.y;
	// BUG FIX: this was "if", which made the svIndex += blockDim.x stride dead
	// code, so support vectors beyond the first blockDim.x were never
	// transformed.  Use the same strided "while" loop as the GAUSSIAN case.
	while(svIndex < nSV) {
		float norm = devNorms[pointIndex + svIndex];
		norm = __powf(norm + coef0, degree);
		devNorms[pointIndex + svIndex] = norm;
		svIndex += blockDim.x;
	}
}

// Sigmoid kernel finisher: replaces each entry with tanh(norm + coef0) in
// place.  blockIdx.y selects the data point; threads stride across the
// support vectors.  degree is unused here.
template<> __global__ void computeK<SIGMOID> (float* devNorms, int devNormsPitchInFloats, int nSV, float coef0, float degree) {
	unsigned int svIndex = threadIdx.x;
	unsigned int pointIndex = devNormsPitchInFloats*blockIdx.y;
	// BUG FIX: this was "if", which made the svIndex += blockDim.x stride dead
	// code, so support vectors beyond the first blockDim.x were never
	// transformed.  Use the same strided "while" loop as the GAUSSIAN case.
	while(svIndex < nSV) {
		float norm = devNorms[pointIndex + svIndex];
		norm = tanhf(norm + coef0);
		devNorms[pointIndex + svIndex] = norm;
		svIndex += blockDim.x;
	}
}

// Forces the compiler to instantiate every computeKernelsReduce<blockSize>
// specialization the host code may launch at runtime.  Never meant to be
// executed with real data: all pointers are NULL and all sizes are zero.
void dummy() {
	float* fptr = NULL;
	int zero = 0;
	computeKernelsReduce<512><<<1, 1, 0>>>(fptr, zero, fptr, zero, zero, fptr, zero, fptr);
	computeKernelsReduce<256><<<1, 1, 0>>>(fptr, zero, fptr, zero, zero, fptr, zero, fptr);
	computeKernelsReduce<128><<<1, 1, 0>>>(fptr, zero, fptr, zero, zero, fptr, zero, fptr);
	computeKernelsReduce< 64><<<1, 1, 0>>>(fptr, zero, fptr, zero, zero, fptr, zero, fptr);
	computeKernelsReduce< 32><<<1, 1, 0>>>(fptr, zero, fptr, zero, zero, fptr, zero, fptr);
	computeKernelsReduce< 16><<<1, 1, 0>>>(fptr, zero, fptr, zero, zero, fptr, zero, fptr);
	computeKernelsReduce<  8><<<1, 1, 0>>>(fptr, zero, fptr, zero, zero, fptr, zero, fptr);
	computeKernelsReduce<  4><<<1, 1, 0>>>(fptr, zero, fptr, zero, zero, fptr, zero, fptr);
	computeKernelsReduce<  2><<<1, 1, 0>>>(fptr, zero, fptr, zero, zero, fptr, zero, fptr);
	computeKernelsReduce<  1><<<1, 1, 0>>>(fptr, zero, fptr, zero, zero, fptr, zero, fptr);
}