#include "KNNGeneralizer.h"
#include "MahalanobisMetricLearner.h"
#include "MREAgent.h"
#include <math.h>
using namespace std; 

//debugging info: for detecting memory leaks
#include "Util.h"
#ifdef _DEBUG
   #define new MYDEBUG_NEW
#endif


//#define USE_METRIC_LEARNING


//these two variables are used to skip metric learning in most of the batch-learning calls:
//alaki counts batchLearn() invocations; metric learning only runs when alaki % SKIP == 1
int alaki = 0; 
int SKIP = 100; 

KNNGeneralizer::~KNNGeneralizer(void)
{
#ifdef USE_METRIC_LEARNING
	delete metricLearner; 
#endif

	resetTrees(); 

	//free the query buffers and per-action counters allocated in the constructor.
	//these deletes were previously commented out, leaking on every destruction
	delete[] nnIdx; 
	delete[] dists; 
	delete[] dataSize; 

	//delete on a null pointer is a no-op, so no guard is needed
	delete otf; 
}


//p: owning agent; an: number of actions; od: number of output (observation) dimensions
KNNGeneralizer::KNNGeneralizer(MREAgent *p, int an, int od, taskspec_t &spec)
:learners(an,od), dataPoints(an,od), targets(an,od), reducedDimensions(an,od), TFGenerator(p,an,od,spec)
, MatlabDrawer("KNNGeneralizer")
{
	DRAW_OPTION = 2; //how to draw ourselves. look at definitions of drawOption for more info 

	K = 5;			//number of nearest neighbors used for prediction
	epsilon = 0.05; 	//error bound for the approximate nearest-neighbor search
	sigma2 = 0.4*0.4; 	//bandwidth of the gaussian kernel over distances

	//figure layout (subplot rows/columns) depends on the drawing mode
	switch(DRAW_OPTION)
	{
	case 1:
		//first row draws knownness, second row draws points before metric learning, third row draws transformed points. 
		//each column is for one dimension. action is fixed 
		nRow = 3; 
		nCol = od; 
		break;
	case 2:
		nRow = action_number+2; 
		nCol = od; 
		break;
	default:
		//never leave nRow/nCol uninitialized for an unknown draw option
		nRow = 1; 
		nCol = od; 
		break;
	}

	otf = new OriginalTFGenerator(p,an,od,spec); 

#ifdef USE_METRIC_LEARNING
	metricLearner = new MahalanobisMetricLearner(this); 
#endif

	//shared buffers for nearest-neighbor queries; freed in the destructor
	dataSize = new int[action_number]; 
	dists = new ANNdist[K]; 
	nnIdx = new ANNidx[K]; 
	memset(dataSize, 0, action_number*sizeof(int)); 

	for(int i=0; i< action_number; i++)
	{
		for(int j=0; j< od; j++)	//set the parameters of each learner(each output)
		{
			learners(i,j) = 0; 
			dataPoints(i,j) = 0; 
			targets(i,j) = 0;
			reducedDimensions(i,j) = obs_dim; 
		}
	}
}

//frees every kd-tree together with its data points and target buffers
void KNNGeneralizer::resetTrees()
{
	for(int i=0; i< action_number; i++)
		for(int j=0; j< obs_dim ; j++)	//for each (action, output-dimension) learner
		{
			if (learners(i,j))
			{
				delete learners(i,j); 
				learners(i,j) = 0; 
			}

			if (dataPoints(i,j))
			{
				annDeallocPts(dataPoints(i,j)); 
				dataPoints(i,j) = 0; 	//null it out: avoids a double free if called again
			}

			if (targets(i,j))
			{
				delete[]  targets(i,j); 
				targets(i,j) = 0; 
			}
		}
}

//copies every selected transition of the requested 'action' into 'data', and its
//outcome for output dimension 'dim' into 'targets'. 'use' is the selection mask,
//indexed by the transition's position in 'l'.
void KNNGeneralizer::updatePointsIndividual(std::list<Transition>& l, bool* use,  ANNpointArray& data, Observation_type* & targets,  int action, int dim)
{
	int maskIdx = 0;	//position in the selection mask
	int out = 0;		//next free slot in data/targets
	for (list<Transition>::iterator it = l.begin(); it != l.end(); ++it, ++maskIdx)
	{
		//skip transitions that were not selected or belong to another action
		if (!use[maskIdx] || (*it).action != action)
			continue;

		memcpy(data[out], (*it).start, obs_dim*sizeof(Observation_type));

		double outcome = (*it).end[dim];
		if (LEARN_DIFFERENCES)
			outcome -= (*it).start[dim];	//learn the delta instead of the absolute value
		targets[out] = outcome;

		++out;
	}
}


//rebuilds dataPoints/targets for every (action, output-dimension) pair from a random
//subset of the transition list. NOTE(review): the old buffers are overwritten without
//being freed here -- assumes resetTrees() was called first; confirm at the call sites
void KNNGeneralizer::updatePoints(std::list<Transition>& l)
{
	memset(dataSize, 0, action_number*sizeof(int)); 

	bool* use = new bool[l.size()]; 
	//randomly select at most 200 transitions for learning
	eliminateBYShuffle(use, l.size(), 200); 


	//compute how many data for each action, so we can allocate space for them
	int i=0; 
	for(list<Transition>::iterator it=l.begin(); it != l.end(); it++, i++)
		if (use[i])
			dataSize[(*it).action]++; 

	//allocate space for data
	for(int i=0; i< action_number; i++)
		for(int j=0; j< obs_dim ; j++)	//set the parameters of each learner(each output)
		{
			if (dataSize[i]>0)
			{
				dataPoints(i,j) = annAllocPts(dataSize[i], obs_dim); 
				targets(i,j) = new Observation_type[dataSize[i]]; 
			}
		}

	//pos[a] tracks the next free slot for action a while distributing the points
	int* pos = new int[action_number]; 
	memset(pos, 0, action_number*sizeof(int)); 

	int ctr = 0; 
	i = 0; 
	for(list<Transition>::iterator it=l.begin(); it != l.end(); it++, i++)
	{
		if(! use[i])
			continue; 

		 ctr++; 

		int action = (*it).action; 
		Observation obs = (*it).start; 
		//put each point in the corresponding places
		for(int j=0; j< obs_dim; j++)
		{
			//the same observation is stored once per output dimension, so each
			//per-dimension kd-tree owns an independent copy of the input point
			double* tmp = dataPoints(action,j)[pos[action]]; 
			memcpy(tmp, obs, obs_dim*sizeof(Observation_type)); 

			double target = (*it).end[j]; 
			if (LEARN_DIFFERENCES)
				target -= (*it).start[j]; 

			targets(action, j)[pos[action]] = target; 
		}
		pos[action]++; 
	}

	delete[] pos; 
	delete[] use; 
}


//rebuilds every (action, output-dimension) kd-tree from the full transition history.
//the new structures are built outside the lock and swapped in under it, so readers
//(predict) only block for the transfer, not for the construction.
void KNNGeneralizer::batchLearn(std::list<Transition>& history)
{
	alaki++;	//call counter; metric learning only runs when alaki % SKIP == 1

	//(1)compute new data size values into a temporary storage container. 
	int* tmpDataSize = new int[action_number]; 
	memset(tmpDataSize, 0, action_number*sizeof(int)); 

	bool* use = new bool[history.size()]; 

	//warning: the third argument is how many we select for learning. now we select everything 
	eliminateBYShuffle(use, history.size(), history.size()); 

	int i=0; 
	for(list<Transition>::iterator it=history.begin(); it != history.end(); it++, i++)
		if (use[i])
			tmpDataSize[(*it).action]++; 
	//----------- (1)

	for(int i=0; i< action_number; i++)
		for(int j=0; j< obs_dim ; j++)	//for each (action, output-dimension) learner
		{
			//no data for this action: building a tree from an uninitialized (or
			//already-transferred) buffer was undefined behavior and caused the same
			//point array to be owned by several learners. keep the old learner instead.
			if (tmpDataSize[i] <= 0)
				continue; 

			ANNpointArray tmpPoints = annAllocPts(tmpDataSize[i], obs_dim); 
			Observation_type* tmpTargets = new Observation_type[tmpDataSize[i]]; 

			updatePointsIndividual (history, use, tmpPoints, tmpTargets, i,j);  
			int tmpReducedDimension = transformData(tmpPoints,  tmpTargets,tmpDataSize[i], i, j); 

			ANNkd_tree* tmpLearner = new ANNkd_tree(tmpPoints, tmpDataSize[i], tmpReducedDimension); 

			//mutex in this part because we're transferring new data to our learners.
			//a scoped block (instead of a heap-allocated lock) releases the mutex
			//even if anything below throws
			{
				boost::mutex::scoped_lock lock(m_mutex);
#ifdef USE_METRIC_LEARNING
				if (alaki % SKIP ==1)
					metricLearner->submitAPCChanges(i,j); 
#endif
				if (learners(i,j))
					delete learners(i,j); 
				learners(i,j) = tmpLearner; 
				dataSize[i] = tmpDataSize[i]; 

				reducedDimensions(i,j) = tmpReducedDimension; 

				if (dataPoints(i,j))
					annDeallocPts(dataPoints(i,j)); 
				dataPoints(i,j) = tmpPoints;

				if (targets(i,j))
					delete[]  targets(i,j); 
				targets(i,j) = tmpTargets; 
			}
			//mutex out 
		}

	delete[] use; 
	delete[] tmpDataSize; 

	draw(); 
}

//rescales the given points (and their regression targets for dimension 'dim') to
//[0,1] using the task's observation ranges, then optionally applies the learned
//metric. returns the dimensionality of the (possibly reduced) data.
int KNNGeneralizer::transformData(ANNpointArray& data,  Observation_type*& targets, int size, int action, int dim)
{
	const double targetRange = m_agent->taskSpec.double_observations[dim].max
	                         - m_agent->taskSpec.double_observations[dim].min;

	for (int p = 0; p < size; p++)	//for each observation
	{
		for (int x = 0; x < obs_dim; x++)	//for each dimension of the observation
		{
			const double lo = m_agent->taskSpec.double_observations[x].min;
			const double hi = m_agent->taskSpec.double_observations[x].max;
			data[p][x] = (data[p][x] - lo) / (hi - lo);	//rescale input
		}
		//we also rescale outputs to make sure numerical problems don't occur (the step
		//size 'alpha' in metric learning is fixed; if the output is too small or too
		//large, we would need to change the step size too). note that we're regressing
		//for the 'dim'-th dimension here.
		targets[p] /= targetRange;
	}

#ifdef USE_METRIC_LEARNING
	if (alaki % SKIP == 1)
		return metricLearner->learnTransformation(data, targets, size, action, dim);

	metricLearner->transformInMatlab(data, size, action, dim);
	return reducedDimensions(action, dim);
#else
	return obs_dim;
#endif
}



//applies the rescaling/metric transformation to the stored data of every
//(action, output-dimension) learner and records the resulting dimensionality.
void KNNGeneralizer::transformData()
{
	for (int a = 0; a < action_number; a++)
	{
		for (int d = 0; d < obs_dim; d++)	//for each learner
		{
			reducedDimensions(a,d) = transformData(dataPoints(a,d), targets(a,d), dataSize[a], a, d);
#ifdef USE_METRIC_LEARNING
			metricLearner->submitAPCChanges(a,d);
#endif
		}
	}
}


//intentionally empty: this generalizer only learns in batch (see batchLearn),
//so per-transition online updates are ignored
void KNNGeneralizer::learn(const Transition* t)
{
	//we don't do online learning
}


//this is the local version that predicts only one output dimension
double KNNGeneralizer::predict(const Observation st, Action a, int dim, int k,  Observation_type& result, double& confidence)
{
	if (! learners(a,dim))
		return 0; 


	Observation newst = MREAgent::copyObservation(st); 
	for(int x=0; x< obs_dim; x++)	//for each dim in obs
	{
		newst[x] = (newst[x] - m_agent->taskSpec.double_observations[x].min) / (m_agent->taskSpec.double_observations[x].max - m_agent->taskSpec.double_observations[x].min); 
	}

#ifdef USE_METRIC_LEARNING
	metricLearner->transform(newst, a, dim); 
#endif

	try{
		learners(a,dim)->annkSearch(newst, k, nnIdx, dists, epsilon); 
	} catch(...)
	{
		delete[] newst; 
		return false; 
	}

	delete[] newst; 

	double* kernels= new double[k]; 
	computeKernels(dists, kernels); 
	
	result = 0; 
	double denom = 0; 
	confidence = 0; 
	for(int i=0; i< k ; i++)
	{
		result += targets(a,dim)[nnIdx[i]]*kernels[i]; 
		denom += kernels[i]; 
		confidence += dists[i]; 
	}

	confidence = denom/K; 
	result /= denom;
	delete[] kernels; 


	//the output was scaled to [0,1] before. rescaling back now
	result *= (m_agent->taskSpec.double_observations[dim].max - m_agent->taskSpec.double_observations[dim].min); 

//	confidence = 1.0; //warning:

	return confidence; 
}

//turns the K nearest-neighbor distances into gaussian kernel weights.
//both arrays must hold at least K entries.
void KNNGeneralizer::computeKernels(double* dists, double* kernels)
{
	for(int i=0; i< K ; i++)
	{
		//use double-precision exp: expf silently truncated the argument to float
		kernels[i] = exp(-dists[i]/ sigma2); 
	}
}

//predicts the full next observation for (st, a). 'confidence' is the minimum
//per-dimension confidence. returns a newly allocated observation that the caller
//must delete[], or 0 when any dimension cannot be predicted.
Observation KNNGeneralizer::predict(Observation st, Action a, double& confidence)
{
	boost::mutex::scoped_lock lock (m_mutex);

	Observation result = new Observation_type[obs_dim]; 

	confidence = 1; 
	for(int i=0; i< obs_dim; i++)
	{
		double tmpVal; 
		double tmpConf = predict(st, a, i, K, result[i],tmpVal ); 
		if (tmpConf==0)
		{
			//one dimension has no learner / failed -> no prediction at all
			confidence = tmpConf; 
			delete[] result; 
			return 0; 
		}else if ( tmpConf < confidence)
			confidence = tmpConf; 

		if (LEARN_DIFFERENCES)
			result[i] += st[i]; 	//we learned deltas, so add the start state back
	}

	//warning: debugging aid comparing against the original transition function.
	//the returned observation used to leak on every call; free it here
	//(assumes otf->predict allocates with new[] like we do -- TODO confirm)
	Observation tmp = otf->predict(st, a); 
	delete[] tmp; 

	return result;
}


//convenience overload that discards the confidence value
Observation KNNGeneralizer::predict(Observation st, Action a)
{
	double ignoredConfidence;
	return predict(st, a, ignoredConfidence);
}


//not implemented yet: always reports zero confidence for any (st, a)
double KNNGeneralizer::getConfidence(Observation st, Action a)
{
	return 0; //warning: 
}


/*
This option fixes an action. Then for that action it plots the knownness for each output dimension in the first row. 
The second row has all the points with the fixed action (it draws the first two dimension). this row has duplicate graphs
The third row has the projected points for each output dimension. We can look at how each output dimension has been reduced and 
what effects it has on the knownness. 
*/
/*
This option fixes an action. Then for that action it plots the knownness for each output dimension in the first row. 
The second row has all the points with the fixed action (it draws the first two dimension). this row has duplicate graphs
The third row has the projected points for each output dimension. We can look at how each output dimension has been reduced and 
what effects it has on the knownness. 
*/
void KNNGeneralizer::drawOption1()
{
	int action = 1; //we're fixing the action for now to avoid cluttered plots

	//draw knownness --------------------------------------
	const int resolution = 30; 
	MatlabMatrix<double>& vals = createMeshedDataStructure(resolution, m_agent->dimension,  m_agent); 

	int cntr = 0; 
	Observation tmp = new Observation_type[m_agent->dimension]; 

	for(int i=2; i< obs_dim; i++)		//set other dimension values to the middle (because we can only show 2 dimensions)
		tmp[i] = (m_agent->taskSpec.double_observations[i].max + m_agent->taskSpec.double_observations[i].min)/2.0; 

	for (int i=0; i< resolution; i++)
	{
		for(int j=0; j< resolution; j++, cntr++)
		{
			tmp[0] = vals(cntr,0); 
			tmp[1] = vals(cntr,1); 

			for(int x=0; x< obs_dim; x++)
			{	
				double dists; 
				double tmpVal; 
		
				//lock while querying: batchLearn may be swapping in new learners.
				//a scoped block (not a heap-allocated lock) releases the mutex
				//even if predict throws
				{
					boost::mutex::scoped_lock lock(m_mutex);
					predict(tmp, action, x, K, tmpVal, dists);		
				}
				vals(cntr,2+x) = dists; 
			}
		}
	}

	MatlabDrawer::drawMeshedValues(vals, "2D",1); 
	delete[] tmp; 
	delete (&vals); 	//createMeshedDataStructure returns a reference to a heap object


	// draw points -----------------------------------
	drawScatteredPoints(action, obs_dim+1); 

	//-------let's set the labels so we know what we've drawn: 

	char* stmp = new char[200]; 
	//the first row is for knownness
	sprintf(stmp, "h=subplot(%d,%d,1); set(get(h,'YLabel'),'String','knownness');title('dim=1');", nRow, nCol); 
	engEvalString(getEngine(), stmp); 


	for(int i=2; i<=obs_dim; i++)
	{
		sprintf(stmp, "subplot(%d,%d,%d); title('dim=%d'); ", nRow, nCol, i, i); 
		engEvalString(getEngine(), stmp); 
	}

	//the second row is for raw data
	sprintf(stmp, "h=subplot(%d,%d,%d); set(get(h,'YLabel'),'String','Raw Data');", nRow, nCol, obs_dim+1); 
	engEvalString(getEngine(), stmp); 

#if defined(USE_METRIC_LEARNING)
	//the third row is for transformed data
	sprintf(stmp, "h=subplot(%d,%d,%d); set(get(h,'YLabel'),'String','Transformed');title('new dim=%d');", nRow, nCol, 2*obs_dim+1, reducedDimensions(action, 0)); 
	engEvalString(getEngine(), stmp); 


	for(int i=2; i<=obs_dim; i++)
	{
		sprintf(stmp, "subplot(%d,%d,%d); title('new dim=%d'); ", nRow, nCol,2*obs_dim+ i, reducedDimensions(action, i-1)); 
		engEvalString(getEngine(), stmp); 
	}

#endif
	delete[] stmp; 
}



/*
This option is to draw knownness for all action/output-dimensions.
*/

/*
This option is to draw knownness for all action/output-dimensions.
Each row is one action; the extra rows show raw (and, with metric learning,
transformed) data points for a single fixed action.
*/
void KNNGeneralizer::drawOption2()
{
	const int resolution = 30; 
	MatlabMatrix<double>& vals = createMeshedDataStructure(resolution, m_agent->dimension,  m_agent); 
	Observation tmp = new Observation_type[m_agent->dimension]; 
	for(int a=0; a < action_number; a++)  //for each action
	{ 
		int cntr = 0; 
		for(int i=2; i< obs_dim; i++)		//set other dimension values to the middle (because we can only show 2 dimensions)
			tmp[i] = (m_agent->taskSpec.double_observations[i].max + m_agent->taskSpec.double_observations[i].min)/2.0; 

		for (int i=0; i< resolution; i++)
		{
			for(int j=0; j< resolution; j++, cntr++)
			{
				tmp[0] = vals(cntr,0); 
				tmp[1] = vals(cntr,1); 

				for(int x=0; x< obs_dim; x++)
				{	
					double dists; 
					double tmpVal; 
			
					//lock while querying: batchLearn may be swapping in new learners.
					//a scoped block (not a heap-allocated lock) releases the mutex
					//even if predict throws
					{
						boost::mutex::scoped_lock lock(m_mutex);
						predict(tmp, a, x, K, tmpVal, dists);		
					}
					vals(cntr,2+x) = dists; 
				}
			}
		}
		MatlabDrawer::drawMeshedValues(vals, "2D",a*obs_dim +1); 
	}
	delete[] tmp; 
	delete (&vals); 	//createMeshedDataStructure returns a reference to a heap object


	//draw scattered point for 1 action only 
	int action = 0; 
	drawScatteredPoints(action, action_number*obs_dim+1); 


	//-------let's set the labels so we know what we've drawn: 

	char* stmp = new char[200]; 
	//mark the dimension over the first row 
	for(int i=1; i<=obs_dim; i++)
	{
		sprintf(stmp, "subplot(%d,%d,%d); title('dim=%d'); ", nRow, nCol, i, i); 
		engEvalString(getEngine(), stmp); 
	}

	for(int i=0; i< action_number; i++)
	{
		sprintf(stmp, "h=subplot(%d,%d,%d); set(get(h,'YLabel'),'String','action=%d')", nRow, nCol, i*obs_dim+1, i ); 
		engEvalString(getEngine(), stmp); 
	}

	//the next row is for raw data
	sprintf(stmp, "h=subplot(%d,%d,%d); title('Raw Data for fixed action %d');", nRow, nCol, action_number*obs_dim+1,action); 
	engEvalString(getEngine(), stmp); 

#if defined(USE_METRIC_LEARNING)
	//the next row is for transformed data
	sprintf(stmp, "h=subplot(%d,%d,%d); set(get(h,'YLabel'),'String','Transformed');title('new dim=%d');", nRow, nCol, (action_number+1)*obs_dim+1, reducedDimensions(action, 0)); 
	engEvalString(getEngine(), stmp); 


	for(int i=2; i<=obs_dim; i++)
	{
		sprintf(stmp, "subplot(%d,%d,%d); title('new dim=%d'); ", nRow, nCol,(action_number+1)*obs_dim+ i, reducedDimensions(action, i-1)); 
		engEvalString(getEngine(), stmp); 
	}

#endif
	delete[] stmp; 
}




//dispatches to the drawing routine selected by DRAW_OPTION;
//any other value draws nothing
void KNNGeneralizer::draw()
{
	if (DRAW_OPTION == 1)
		drawOption1();
	else if (DRAW_OPTION == 2)
		drawOption2();
}

//since the variables we want to draw are already in matlab, we use our own version of draw scattered points
void KNNGeneralizer::drawScatteredPoints(int action, int startPos)
{
#if defined(MATLAB_PLOT) 
	char* tmp = new char[200]; 
	for (int i=0; i< obs_dim; i++)
	{
#if defined(USE_METRIC_LEARNING) //all the variables are in matlab workspace
		//draw original points
		sprintf(tmp,"subplot(%d,%d,%d);", nRow, nCol, startPos+i); 
		engEvalString(getEngine(), tmp); 
		
		sprintf(tmp, "scatter(xbackup{1}{%d}(:,1), xbackup{1}{%d}(:,2),'.');",i+1, i+1);  
		engEvalString(getEngine(), tmp); 

		//draw transformed points
		sprintf(tmp,"subplot(%d,%d,%d);", nRow, nCol, startPos+obs_dim+i); 
		engEvalString(getEngine(), tmp); 
		
		if (reducedDimensions(action, i)<2)
			sprintf(tmp, "scatter(xp{1}{%d}(1,:), zeros(1,size(xp{1}{%d},2)),'+');", i+1,i+1); 
		else
			sprintf(tmp, "scatter(xp{1}{%d}(1,:), xp{1}{%d}(2,:),'.');",i+1, i+1);  
		engEvalString(getEngine(), tmp); 
#else  
/*
		//we're only drawing original points: 
		//create variable x (note that since matlab stores column-wise and we have row-wise here, and it's more
		//efficient to transpose the matrix in matlab we send data as if it is obs_dim*dataSize while it's the
		//other way around. it is crucial that we transpose data in matlab before we do anything. 
		mxArray* xm = mxCreateDoubleMatrix(obs_dim, dataSize[action] , mxREAL); 
		double* tmpPtr = (double*) mxGetPr(xm); 
		memcpy(tmpPtr, dataPoints(action,0)[0], obs_dim*dataSize[action]*sizeof(double)); 
		int res = engPutVariable(getEngine(), "x", xm); 

		//draw transformed points
		sprintf(tmp,"subplot(%d,%d,%d);", nRow, nCol, obs_dim+i+1); 
		engEvalString(getEngine(), tmp); 

		engEvalString(getEngine(), "scatter(x(1,:), x(2,:),'.');");
		engEvalString(getEngine(), "axis([0 1; 0 1]);");  
*/

		//pardon my horrible coding here :-s   it's either this, or i have to put the drawing implementation above here (the commented one)
		//oh, and i know about the memory leak gholi, lol
		MatlabMatrix<double>* gholi = new MatlabMatrix<double>(obs_dim, dataSize[action]); 
		gholi->storage = (double*) dataPoints(action,0)[0]; 
		MatlabDrawer::drawScatteredPoints(*gholi, obs_dim+i+1); 
#endif
	}
	delete[] tmp; 

#endif
}