#include "KNNGeneralizer.h"

#include "MREAgent.h"
using namespace std; 

//debugging info: for detecting memory leaks
#include "Util.h"
#ifdef _DEBUG
   #define new MYDEBUG_NEW
#endif



#define USE_METRIC_LEARNING

KNNGeneralizer::~KNNGeneralizer(void)
{
	// Release the kd-trees, data points and targets first, then the
	// scratch buffers allocated in the constructor.
	resetTrees(); 

	delete[] nnIdx; 
	delete[] dists; 
	delete[] dataSize; 

	// deleting a null pointer is a no-op, so no guard is required
	delete otf; 
}


// Construct a KNN generalizer for `an` actions and `od` observation dimensions.
// One kd-tree/learner is kept per (action, output-dimension) pair; all slots
// start out null and are filled by batchLearn().
KNNGeneralizer::KNNGeneralizer(MREAgent *p, int an, int od, taskspec_t &spec)
:learners(an,od), dataPoints(an,od), targets(an,od), reducedDimensions(an,od), TFGenerator(p,an,od,spec)
, MatlabDrawer("KNNGeneralizer")
{
	K = 7;		//number of nearest neighbors used for prediction (NOTE: stale comment said 3)
	epsilon = 0.05;		//ANN approximate-search error bound
	sigma2 = 0.5*0.5;	//kernel bandwidth (sigma squared) for distance weighting


	otf = new OriginalTFGenerator(p,an,od,spec); 



	// scratch buffers: per-action sample counts and K-sized neighbor/distance
	// arrays reused by every annkSearch call
	dataSize = new int[action_number]; 
	dists = new ANNdist[K]; 
	nnIdx = new ANNidx[K]; 
	memset(dataSize, 0, action_number*sizeof(int)); 

	for(int i=0; i< action_number; i++)
	{
		for(int j=0; j< od; j++)	//set the parameters of each learner(each output)
		{
			learners(i,j) = 0; 
			dataPoints(i,j) = 0; 
			targets(i,j) = 0;
			reducedDimensions(i,j) = obs_dim; 	//full dimensionality until metric learning reduces it
		}
	}
}


// Rebuild all per-(action, output-dimension) kd-trees from the transition
// history. Existing trees and data are discarded first.
void KNNGeneralizer::batchLearn(std::list<Transition>& history)
{
	resetTrees(); 
	updatePoints(history); 

	transformData(); 

	//now we can learn. 
	for(int i=0; i< action_number; i++)
		for(int j=0; j< obs_dim ; j++)	//set the parameters of each learner(each output)
		{
			// Skip actions with no samples: building a kd-tree over a null /
			// empty point array is invalid, and leaving learners(i,j) == 0
			// makes predict() correctly report "no model" for this pair.
			if (dataSize[i] > 0)
				learners(i,j) = new ANNkd_tree(dataPoints(i,j), dataSize[i], reducedDimensions(i,j)); 
		}
}


// Rescale every stored observation (and regression target) into [0,1] using
// the task-spec ranges, then (when USE_METRIC_LEARNING is defined) run a
// PCA-based dimensionality reduction per (action, output-dimension) pair in
// Matlab, replacing the stored points with their reduced versions.
void KNNGeneralizer::transformData()
{
	for(int i=0; i< action_number; i++)
		for(int j=0; j< obs_dim ; j++)	//for each learner
			for(int k=0; k< dataSize[i]; k++)	//for each obs
			{
				for(int x=0; x< obs_dim; x++)	//for each dim in obs
				{
					//rescale input 
					dataPoints(i,j)[k][x] = (dataPoints(i,j)[k][x] - m_agent->taskSpec.double_observations[x].min) / (m_agent->taskSpec.double_observations[x].max - m_agent->taskSpec.double_observations[x].min); 
				}
				//we also rescale outputs to make sure numerical problems don't occur (the step size 'alpha' in metric learning is fixed, 
				// if the output is too small or too large, we need to change the step size too). Note that we're regressing for j-th dimension here.
				targets(i,j)[k] /= (m_agent->taskSpec.double_observations[j].max - m_agent->taskSpec.double_observations[j].min); 
			}

#ifdef USE_METRIC_LEARNING
	for(int i=0; i< action_number; i++)
		for(int j=0; j< obs_dim ; j++)	
		{
			// Nothing to reduce for actions without data (dereferencing
			// dataPoints(i,j)[0] below would be invalid).
			if (dataSize[i] <= 0)
				continue; 

			//we have one KNN regression problem here to deal with: 
			double** x = dataPoints(i,j); 
			double*  y = targets(i,j); 

			//create variable x (note that since matlab stores column-wise and we have row-wise here, and it's more
			//efficient to transpose the matrix in matlab we send data as if it is obs_dim*dataSize while it's the
			//other way around. it is crucial that we transpose data in matlab before we do anything. 
			// (annAllocPts allocates the points contiguously, so x[0] addresses the whole block.)
			mxArray* xm = mxCreateDoubleMatrix(obs_dim, dataSize[i] , mxREAL); 
			memcpy(mxGetPr(xm), x[0], obs_dim*dataSize[i]*sizeof(double)); 
			engPutVariable(ep, "x", xm); 
			mxDestroyArray(xm); 	// engPutVariable copies the data; we own (and must free) xm

			mxArray* ym = mxCreateDoubleMatrix(dataSize[i] , 1,  mxREAL); 
			memcpy(mxGetPr(ym), y, dataSize[i]*sizeof(double)); 
			engPutVariable(ep, "y", ym); 
			mxDestroyArray(ym); 	// was leaked before


			//transpose x
			engEvalString(ep,"x = x';");
	
			//run the dimension reduction and transpose again 
			// (fixed-size stack buffer replaces new char[] that was freed with
			//  the wrong form of delete -- undefined behavior)
			char cmd[160]; 
			sprintf(cmd, "[xptmp, A{%d}{%d} , pc{%d}{%d}] = pcaRegression(x,y,0.80);xptmp=xptmp';xp{%d}{%d}=xptmp;", i+1,j+1,i+1,j+1,i+1,j+1); //matlab index starts at 1 
			engEvalString(ep, cmd);


			//just for debugging: backup x and y in matlab
			sprintf(cmd, "xbackup{%d}{%d} = x; ybackup{%d}{%d} = y; ", i+1,j+1, i+1,j+1); 
			engEvalString(ep, cmd); 

			engEvalString(ep, "dim = size(xptmp,1);"); 
			mxArray* dimm = engGetVariable(ep, "dim"); 
			mxArray* xpm = engGetVariable(ep, "xptmp");

			//number of dimensions kept by the PCA step
			reducedDimensions(i,j) = (int) *mxGetPr(dimm); 


			//reallocate data using new dimension
			if (dataPoints(i,j))
				annDeallocPts(dataPoints(i,j)); 
			dataPoints(i,j) = annAllocPts(dataSize[i], reducedDimensions(i,j)); 


			//copy reduced data back into the (contiguous) ANN point array
			memcpy(dataPoints(i,j)[0], mxGetPr(xpm), dataSize[i]*reducedDimensions(i,j)*sizeof(double)); 

			// engGetVariable returns arrays the caller owns -- free them (were leaked before)
			mxDestroyArray(dimm); 
			mxDestroyArray(xpm); 
		}
#endif

}


// Intentionally a no-op: this generalizer learns only in batch mode
// (see batchLearn()), not incrementally per transition.
void KNNGeneralizer::learn(const Transition* t)
{
	//we don't do online learning
}


//this is the local version that predicts only one output dimension
bool KNNGeneralizer::predict(const Observation st, Action a, int dim, int k,  Observation_type& result, double& distance)
{
	if (! learners(a,dim))
		return false; 


	Observation newst = MREAgent::copyObservation(st); 
	for(int x=0; x< obs_dim; x++)	//for each dim in obs
	{
		newst[x] = (newst[x] - m_agent->taskSpec.double_observations[x].min) / (m_agent->taskSpec.double_observations[x].max - m_agent->taskSpec.double_observations[x].min); 
	}

#ifdef USE_METRIC_LEARNING
	
	mxArray* xm = mxCreateDoubleMatrix(obs_dim, 1 , mxREAL); 
	double* tmpPtr = (double*) mxGetPr(xm); 
	memcpy(tmpPtr, newst , obs_dim*sizeof(double));

	int res = engPutVariable(ep, "tmpObs", xm); 
	char* tmpStr = new char[60]; 
	sprintf(tmpStr, "tmpObs = ( A{%d}{%d}*tmpObs)'*pc{%d}{%d};", a+1, dim+1, a+1, dim+1); 
	engEvalString(ep, tmpStr);

	mxArray* xpm = engGetVariable(ep, "tmpObs");

	memcpy(newst, mxGetPr(xpm), reducedDimensions(a,dim)*sizeof(double)); 
	delete[] tmpStr; 
#endif

	try{
		learners(a,dim)->annkSearch(newst, k, nnIdx, dists, epsilon); 
	} catch(...)
	{
		delete[] newst; 
		return false; 
	}

	delete[] newst; 

	double* kernels= new double[k]; 
	computeKernels(dists, kernels); 
	
	result = 0; 
	double denom = 0; 
	for(int i=0; i< k ; i++)
	{
		result += targets(a,dim)[nnIdx[i]]*kernels[i]; 
		denom += kernels[i]; 
	}

	distance = denom; 
	result /= denom;
	delete[] kernels; 



	//the output was scaled to [0,1] before. rescaling back now
	result *= (m_agent->taskSpec.double_observations[dim].max - m_agent->taskSpec.double_observations[dim].min); 

	return true; 
}

// Convert the K squared distances returned by annkSearch into positive
// kernel weights. Both arrays must hold at least K doubles.
// NOTE(review): the weight is exp(-d)/sigma2, i.e. sigma2 scales the weights
// uniformly outside the exponent; if a Gaussian kernel exp(-d/sigma2) was
// intended, the division belongs inside -- preserved as-is to avoid changing
// established behavior.
void KNNGeneralizer::computeKernels(double* dists, double* kernels)
{
	for(int i=0; i< K ; i++)
	{
		// use the double-precision exp: expf() silently truncated the
		// distances to float before exponentiating
		kernels[i] = exp(-dists[i])/ sigma2; 
	}
}




// Predict the successor observation for (st, a) dimension-by-dimension using
// the per-dimension KNN regressors. Returns a newly allocated observation the
// caller owns, or 0 if any dimension has no usable model.
Observation KNNGeneralizer::predict(Observation st, Action a)
{
	Observation result = new Observation_type[obs_dim]; 

	for(int d = 0; d < obs_dim; d++)
	{
		double weightMass; 
		bool ok = predict(st, a, d, K, result[d], weightMass); 
		if (!ok)
		{
			delete[] result; 
			return 0; 
		}

		//when learning state deltas, add the current state back in
		if (LEARN_DIFFERENCES)
			result[d] += st[d]; 
	}

	return result;
}


// Placeholder implementation: always reports full confidence regardless of
// how well (st, a) is covered by the training data.
double KNNGeneralizer::getConfidence(Observation st, Action a)
{
	return 1.0; //warning: stub -- ignores actual data coverage
}


// Free every kd-tree, point array and target array, leaving all slots null so
// the structure can be safely rebuilt (or destroyed) afterwards.
void KNNGeneralizer::resetTrees()
{
	for(int i=0; i< action_number; i++)
		for(int j=0; j< obs_dim ; j++)	//set the parameters of each learner(each output)
		{
			if (learners(i,j))
			{
				delete learners(i,j); 
				learners(i,j) = 0; 
			}

			if (dataPoints(i,j))
			{
				annDeallocPts(dataPoints(i,j)); 
				// null the slot: the original left a dangling pointer here,
				// which risked a double free on the next reset/destruction
				// if no new points were allocated for this (action, dim)
				dataPoints(i,j) = 0; 
			}

			if (targets(i,j))
			{
				delete[]  targets(i,j); 
				targets(i,j) = 0; 
			}
		}
}


void KNNGeneralizer::updatePoints(std::list<Transition>& l)
{
	memset(dataSize, 0, action_number*sizeof(int)); 

	bool* use = new bool[l.size()]; 
	eliminateBYShuffle(use, l.size(), 200); 


	//compute how many data for each action, so we can allocate space for them
	int i=0; 
	for(list<Transition>::iterator it=l.begin(); it != l.end(); it++, i++)
		if (use[i])
			dataSize[(*it).action]++; 

	//allocate space for data
	for(int i=0; i< action_number; i++)
		for(int j=0; j< obs_dim ; j++)	//set the parameters of each learner(each output)
		{
			if (dataSize[i]>0)
			{
				dataPoints(i,j) = annAllocPts(dataSize[i], obs_dim); 
				targets(i,j) = new Observation_type[dataSize[i]]; 
			}
		}


	int* pos = new int[action_number]; 
	memset(pos, 0, action_number*sizeof(int)); 

	int ctr = 0; 
	i = 0; 
	for(list<Transition>::iterator it=l.begin(); it != l.end(); it++, i++)
	{
		if(! use[i])
			continue; 

		 ctr++; 

		int action = (*it).action; 
		Observation obs = (*it).start; 
		//put each point in the corresponding places
		for(int j=0; j< obs_dim; j++)
		{
			double* tmp = dataPoints(action,j)[pos[action]]; 
			memcpy(tmp, obs, obs_dim*sizeof(Observation_type)); 

			double target = (*it).end[j]; 
			if (LEARN_DIFFERENCES)
				target -= (*it).start[j]; 

			targets(action, j)[pos[action]] = target; 
		}
		pos[action]++; 
	}

	delete[] pos; 
	delete[] use; 
}


void KNNGeneralizer::draw()
{
	const int resolution = 30; 
	MatlabMatrix<double>& vals = createMeshedDataStructure(resolution, m_agent->dimension,  m_agent); 

	int cntr = 0; 
	Observation tmp = new Observation_type[m_agent->dimension]; 
	for (int i=0; i< resolution; i++)
	{
		for(int j=0; j< resolution; j++, cntr++)
		{
			tmp[0] = vals(cntr,0); 
			tmp[1] = vals(cntr,1); 

			for(int x=0; x< obs_dim; x++)
			{	
				double dists; 
				double tmpVal; 
				predict(tmp, 0, x, 7, tmpVal, dists); 
				vals(cntr,2+x) = dists; 
			}
		}
	}

	MatlabDrawer::drawMeshedValues(vals, "2D"); 

	delete[] tmp; 
	delete (&vals); 
}