#include "KNNSimpleGeneralizer.h"
#include "PLSMetricLearner.h"
#include "MREAgent.h"
#include <math.h>
using namespace std; 

//debugging info: for detecting memory leaks
#include "Util.h"
#ifdef _DEBUG
   #define new MYDEBUG_NEW
#endif


#define USE_METRIC_LEARNING


KNNSimpleGeneralizer::~KNNSimpleGeneralizer(void)
{
#ifdef USE_METRIC_LEARNING
	delete metricLearner; 
#endif

	resetTrees(); 

	// BUG FIX: these arrays are allocated with new[] in the constructor and
	// were previously leaked (the deletes were commented out, and
	// reducedDimensions was never released at all).
	delete[] nnIdx; 
	delete[] dists; 
	delete[] dataSize; 
	delete[] reducedDimensions; 

	delete otf;  // delete on a null pointer is a no-op; no guard needed
}


// Constructor: an = number of actions (one KNN learner per action),
// od = observation dimensionality. Learners themselves are built lazily by
// batchLearn(); here we only size the containers and set hyper-parameters.
KNNSimpleGeneralizer::KNNSimpleGeneralizer(MREAgent *p, int an, int od, taskspec_t &spec)
:TFGenerator(p,an,od,spec)
, MatlabDrawer("KNNSimpleGeneralizer")
{
	DRAW_OPTION = 1; //how to draw ourselves. look at definitions of drawOption for more info 


	// one slot per action, filled in by batchLearn()
	learners.resize(an); 
	dataPoints.resize(an); 
	targets.resize(an); 
	

	K = 5;		// number of nearest neighbours used in predict()
	epsilon = 0.05; 	// ANN approximate-search error bound
	sigma2 = 0.4*0.4; 	// Gaussian kernel bandwidth (sigma squared)

	switch(DRAW_OPTION)
	{
	case 1:
		//first row draws knownness, second row draws points before metric learning, third row draws transformed points. 
		//each column is for one action; 
		nRow = 3; 
		nCol = an; 
		break;
	case 2:
		nRow = action_number+2; 
		nCol = od; 
		break;
	}


	otf = new OriginalTFGenerator(p,an,od,spec); 

#ifdef USE_METRIC_LEARNING
	metricLearner = new PLSMetricLearner(this); 
#endif

	// NOTE: K must already be set above — the search scratch buffers below
	// are sized by it. All four arrays are released in the destructor.
	dataSize = new int[action_number]; 
	reducedDimensions = new int[action_number]; 
	dists = new ANNdist[K]; 
	nnIdx = new ANNidx[K]; 
	memset(dataSize, 0, action_number*sizeof(int)); 

	for(int i=0; i< action_number; i++)//set the parameters of each learner(each output)
	{
		learners[i] = 0; 
		dataPoints[i] = 0; 
		targets[i] = 0;
		reducedDimensions[i] = obs_dim; 	// no reduction until metric learning runs
	}
}

// Free the KNN tree and its backing point arrays for every action.
// BUG FIX: pointers are now nulled after release so resetTrees() is
// idempotent — previously a second call (or a later batchLearn/destructor
// touching the stale pointers) would double-free.
void KNNSimpleGeneralizer::resetTrees()
{
	for(int i=0; i< action_number; i++)
	{
		if (learners[i])
		{
			delete learners[i]; 
			learners[i] = 0; 
		}

		if (dataPoints[i])
		{
			annDeallocPts(dataPoints[i]); 
			dataPoints[i] = 0; 
		}

		if (targets[i])
		{
			annDeallocPts(targets[i]); 
			targets[i] = 0; 
		}
	}
}


void KNNSimpleGeneralizer::updatePoints(std::list<Transition>& l, bool* use,  ANNpointArray& data, ANNpointArray& targets,  int action)
{
	int i = 0; 
	int pos = 0; 
	for(list<Transition>::iterator it=l.begin(); it != l.end(); it++, i++)
	{
		if(! use[i])
			continue; 

		int oAction = (*it).action; 

		if (oAction != action)
			continue; 

		Observation obs = (*it).start; 
		double* tmp = data[pos]; 
		memcpy(tmp, obs, obs_dim*sizeof(Observation_type)); 

		Observation end = (*it).end ; 
		tmp = targets[pos]; 
		memcpy(tmp, end, obs_dim*sizeof(Observation_type)); 

		pos++; 
	}
}

/*
we need to make sure this process is thread safe. 
we have action_number learners to update. we should update them one at a time. 
to make sure we don't stop other parts of the agent while we do the updates, we learn them one at a 
time into a temp structure. Then when the update is done, we mutex in, copy the temp into the 
corresponding structure and mutex out. this way no other processes will be locked out by us. 
*/
void KNNSimpleGeneralizer::batchLearn(std::list<Transition>& history)
{
	ANNpointArray tmpPoints; 
	ANNpointArray tmpTargets; 

	//(1)compute new data size values into a temporary storage container. 
	int* tmpDataSize = new int[action_number]; 
	memset(tmpDataSize, 0, action_number*sizeof(int)); 

	bool* use = new bool[history.size()]; 

	//warning: the third argument is how many we select for learning. now we select everything 
	eliminateBYShuffle(use, history.size(), history.size()); 

	int i=0; 
	for(list<Transition>::iterator it=history.begin(); it != history.end(); it++, i++)
		if (use[i])
			tmpDataSize[(*it).action]++; 
	//----------- (1)

	for(int i=0; i< action_number; i++)			//for each learner
	{
		if (tmpDataSize[i]>0)
		{
			tmpPoints = annAllocPts(tmpDataSize[i], obs_dim); 
			tmpTargets = annAllocPts(tmpDataSize[i], obs_dim);  
		}

		updatePoints (history, use, tmpPoints, tmpTargets, i);  //copy data from history into tmpPoints and tmpTargets data structures
		int tmpReducedDimension = transformData(tmpPoints,  tmpTargets,tmpDataSize[i], i); //transform data in tmpPoints and tmpTargets (do dim reduction)

		ANNkd_tree* tmpLearner = new ANNkd_tree(tmpPoints, tmpDataSize[i], tmpReducedDimension); //create the KNN tree 

		//now we can update our old learner for action i with this new one we just built. 
		//mutex in this part because we're transferring new data to our learners
			boost::mutex::scoped_lock *  lock = new boost::mutex::scoped_lock(m_mutex);

#ifdef USE_METRIC_LEARNING
				metricLearner->submitParams(i);   //inform the metric learner that it's ok to update its weight vector (before this they were computed into a tmp structure)
#endif
			if (learners[i])
				delete learners[i]; 

			learners[i] = tmpLearner; 
			dataSize[i] = tmpDataSize[i]; 

			reducedDimensions[i] = tmpReducedDimension; 

			if (dataPoints[i])
				annDeallocPts(dataPoints[i]); 
			dataPoints[i] = tmpPoints;

			if (targets[i])
			{
				annDeallocPts(targets[i]);  
			}
			targets[i] = tmpTargets; 
			delete lock; 
			//mutex out 
		}

	delete[] use; 
	delete[] tmpDataSize; 


	draw(); 
}


// Rescale every observation dimension of 'data' (and 'targets') to [0,1] —
// or, when LEARN_DIFFERENCES is set, rescale the targets as normalized
// differences — then hand the data to the metric learner for dimensionality
// reduction. Returns the (possibly reduced) dimensionality of the data.
int KNNSimpleGeneralizer::transformData(ANNpointArray& data,ANNpointArray& targets,  int size, int action)
{
	for(int x=0; x< obs_dim; x++)	//for each dim in obs
	{
		// hoist the per-dimension bounds out of the inner loop (loop-invariant)
		const double lo = m_agent->taskSpec.double_observations[x].min; 
		const double range = m_agent->taskSpec.double_observations[x].max - lo; 

		for(int k=0; k< size; k++)	//for each obs
		{
			//rescale input 
			data[k][x] = (data[k][x] - lo) / range; 

			//we also rescale outputs to make sure numerical problems don't occur (the step size 'alpha' in metric learning is fixed, 
			// if the output is too small or too large, we need to change the step size too).
			if (LEARN_DIFFERENCES)
				targets[k][x] = (targets[k][x]-data[k][x])/range; 
			else
				targets[k][x] = (targets[k][x] - lo) / range; 
		}
	}

#ifdef USE_METRIC_LEARNING
	// NOTE(review): the else branch below is unreachable because of the
	// hard-coded 'true'; it is kept as a manual toggle for transforming with
	// the previously-learned weights instead of re-learning them.
	if (true )
		return metricLearner->learnTransformation(data, targets, size, action); 	//learn the transformation and transform data and targets, return new reduced dim
	else
	{
		metricLearner->transformInMatlab(data, size, action);						//don't learn transformation. just transform data using old weight vector
		return reducedDimensions[action]; 
	}
#else
	return obs_dim; 
#endif
}


// Intentionally empty: this generalizer only learns in batch mode via
// batchLearn(); per-transition online updates are not supported.
void KNNSimpleGeneralizer::learn(const Transition* t)
{
	//we don't do online learning
}

// Turn the K neighbour distances into Gaussian kernel weights:
// kernels[i] = exp(-dists[i] / sigma2). Both arrays must hold K doubles.
void KNNSimpleGeneralizer::computeKernels(double* dists, double* kernels)
{
	for(int i=0; i< K ; i++)
	{
		// BUG FIX: was expf(), which truncated the double arguments to float
		// and returned a float — silently losing precision.
		kernels[i] = exp(-dists[i]/ sigma2); 
	}
}


// Predict the next observation for state 'st' under action 'a' by
// kernel-weighted KNN regression over the stored transitions.
// 'confidence' is set to the mean kernel weight of the K neighbours (higher
// means the query lies closer to known data), or 0 on failure.
// Returns a heap-allocated array the CALLER must delete[], or 0 when no
// learner exists for 'a' or the ANN search throws.
Observation KNNSimpleGeneralizer::predict(Observation st, Action a, double& confidence)
{
	confidence = 0; 

	if (! learners[a])
		return 0; 

	// rescale the query to [0,1] exactly like the training data was
	Observation newst = MREAgent::copyObservation(st); 
	for(int x=0; x< obs_dim; x++)	//for each dim in obs
	{
		const double lo = m_agent->taskSpec.double_observations[x].min; 
		newst[x] = (newst[x] - lo) / (m_agent->taskSpec.double_observations[x].max - lo); 
	}

#ifdef USE_METRIC_LEARNING
	metricLearner->transform(newst, a); 
#endif

	try{
		learners[a]->annkSearch(newst, K, nnIdx, dists, epsilon); 
	} catch(...)
	{
		delete[] newst; 
		return 0; 
	}

	delete[] newst; 

	double* kernels= new double[K]; 
	computeKernels(dists, kernels); 


	Observation result = new Observation_type[obs_dim]; 
	memset(result, 0, obs_dim*sizeof(Observation_type)); 

	// kernel-weighted average of the neighbours' target observations.
	// (A dead 'confidence += dists[i]' accumulation was removed from this
	// loop: confidence is unconditionally overwritten right after it.)
	double denom = 0; 
	for(int i=0; i< K ; i++)
	{
		for(int j=0; j< obs_dim; j++)
			result[j] += targets[a][nnIdx[i]][j]*kernels[i]; 
		denom += kernels[i]; 
	}

	confidence = denom/K; 

	for(int j=0; j< obs_dim; j++)
	{
		result[j] /= denom;

		const double lo = m_agent->taskSpec.double_observations[j].min; 
		const double range = m_agent->taskSpec.double_observations[j].max - lo; 

		//the output was scaled to [0,1] (or to a scaled difference) during
		//training; rescale it back to observation units now
		if (LEARN_DIFFERENCES)
			result[j] = st[j]+ result[j]*range; 
		else
			result[j]= lo + result[j]*range; 
	}

	delete[] kernels; 

	return result; 

}


// Convenience overload: same prediction, confidence value discarded.
Observation KNNSimpleGeneralizer::predict(Observation st, Action a)
{
	double ignoredConfidence; 
	return predict(st, a, ignoredConfidence); 
}


// TODO(review): stub — always reports zero confidence. Callers that need a
// real value should use the predict(st, a, confidence) overload instead.
double KNNSimpleGeneralizer::getConfidence(Observation st, Action a)
{
	return 0; //warning: 
}


/*
Each action has a column. For each action it plots the knownness in the first row. 
The second row has all the points for that action (it draws the first two dimension). 
The third row has the projected points for each action. 
*/

void KNNSimpleGeneralizer::drawOption1()
{
	for(int a=0; a< action_number ; a++)
	{
		//draw knownness --------------------------------------
		const int resolution = 30; 
		MatlabMatrix<double>& vals = createMeshedDataStructure(resolution,1,  m_agent); 

		int cntr = 0; 
		Observation tmp = new Observation_type[m_agent->dimension]; 

		for(int i=2; i< obs_dim; i++)		//set other dimension values to the middle (because we can only show 2 dimensions)
			tmp[i] = (m_agent->taskSpec.double_observations[i].max + m_agent->taskSpec.double_observations[i].min)/2.0; 

		for (int i=0; i< resolution; i++)
		{
			for(int j=0; j< resolution; j++, cntr++)
			{
				tmp[0] = vals(cntr,0); 
				tmp[1] = vals(cntr,1); 

				double dists; 

				//mutex in this part because we're using the predict function 
				boost::mutex::scoped_lock *  lock = new boost::mutex::scoped_lock(m_mutex);
				predict(tmp, a, dists);		
				delete lock; 
				vals(cntr,2) = dists; 
			}
		}

		MatlabDrawer::drawMeshedValues(vals, "2D",a+1); 
		delete[] tmp; 
		delete (&vals); 


	
		//draw scattered points
		drawScatteredPoints(a); 

	}//for each action (a column)


	//-------let's set the labels so we know what we've drawn: 

	char* stmp = new char[200]; 
	//the first row is for knownness
	sprintf(stmp, "h=subplot(%d,%d,1); set(get(h,'YLabel'),'String','knownness');title('action=0');", nRow, nCol); 
	engEvalString(getEngine(), stmp); 


	for(int i=2; i<=action_number ; i++)
	{
		sprintf(stmp, "subplot(%d,%d,%d); title('action=%d'); ", nRow, nCol, i, i-1); 
		engEvalString(getEngine(), stmp); 
	}

	//the second row is for raw data
	sprintf(stmp, "h=subplot(%d,%d,%d); set(get(h,'YLabel'),'String','Raw Data');", nRow, nCol, action_number +1); 
	engEvalString(getEngine(), stmp); 

#if defined(USE_METRIC_LEARNING)
	//the third row is for transformed data
	sprintf(stmp, "h=subplot(%d,%d,%d); set(get(h,'YLabel'),'String','Transformed');title('new dim=%d');", nRow, nCol, 2*action_number +1, reducedDimensions[0]); 
	engEvalString(getEngine(), stmp); 


	for(int i=2; i<=action_number ; i++)
	{
		sprintf(stmp, "subplot(%d,%d,%d); title('new dim=%d'); ", nRow, nCol,2*action_number + i, reducedDimensions[i-1]); 
		engEvalString(getEngine(), stmp); 
	}

#endif
	delete[] stmp; 

}



/*
This option is to draw knownness for all action/output-dimensions.
*/

void KNNSimpleGeneralizer::drawOption2()
{
// NOTE(review): this drawing mode is entirely disabled — the whole body is
// commented out, so DRAW_OPTION==2 currently draws nothing. The dead code
// appears to call a 6-argument predict() overload and to index
// reducedDimensions with two subscripts, neither of which matches the current
// members of this class — confirm and update before re-enabling.
/*
	const int resolution = 30; 
	MatlabMatrix<double>& vals = createMeshedDataStructure(resolution, m_agent->dimension,  m_agent); 
	Observation tmp = new Observation_type[m_agent->dimension]; 
	for(int a=0; a < action_number; a++)  //for each action
	{ 
		int cntr = 0; 
		for(int i=2; i< obs_dim; i++)		//set other dimension values to the middle (because we can only show 2 dimensions)
			tmp[i] = (m_agent->taskSpec.double_observations[i].max + m_agent->taskSpec.double_observations[i].min)/2.0; 

		for (int i=0; i< resolution; i++)
		{
			for(int j=0; j< resolution; j++, cntr++)
			{
				tmp[0] = vals(cntr,0); 
				tmp[1] = vals(cntr,1); 

				for(int x=0; x< obs_dim; x++)
				{	
					double dists; 
					double tmpVal; 
			
					//mutex in this part because we're transferring new data to our learners
					boost::mutex::scoped_lock *  lock = new boost::mutex::scoped_lock(m_mutex);
					predict(tmp, a, x, K, tmpVal, dists);		
					delete lock; 
					vals(cntr,2+x) = dists; 
				}
			}
		}
		MatlabDrawer::drawMeshedValues(vals, "2D",a*obs_dim +1); 
	}
	delete[] tmp; 
	delete (&vals); 


	//draw scattered point for 1 action only 
	int action = 0; 
	drawScatteredPoints(action, action_number*obs_dim+1); 


	//-------let's set the labels so we know what we've drawn: 

	char* stmp = new char[200]; 
	//mark the dimension over the first row 
	for(int i=1; i<=obs_dim; i++)
	{
		sprintf(stmp, "subplot(%d,%d,%d); title('dim=%d'); ", nRow, nCol, i, i); 
		engEvalString(getEngine(), stmp); 
	}

	for(int i=0; i< action_number; i++)
	{
		sprintf(stmp, "h=subplot(%d,%d,%d); set(get(h,'YLabel'),'String','action=%d')", nRow, nCol, i*obs_dim+1, i ); 
		engEvalString(getEngine(), stmp); 
	}

	//the next row is for raw data
	sprintf(stmp, "h=subplot(%d,%d,%d); title('Raw Data for fixed action %d');", nRow, nCol, action_number*obs_dim+1,action); 
	engEvalString(getEngine(), stmp); 

#if defined(USE_METRIC_LEARNING)
	//the next row is for transformed data
	sprintf(stmp, "h=subplot(%d,%d,%d); set(get(h,'YLabel'),'String','Transformed');title('new dim=%d');", nRow, nCol, (action_number+1)*obs_dim+1, reducedDimensions(action, 0)); 
	engEvalString(getEngine(), stmp); 


	for(int i=2; i<=obs_dim; i++)
	{
		sprintf(stmp, "subplot(%d,%d,%d); title('new dim=%d'); ", nRow, nCol,(action_number+1)*obs_dim+ i, reducedDimensions(action, i-1)); 
		engEvalString(getEngine(), stmp); 
	}

#endif
	delete[] stmp; 
*/

}




// Dispatch to the rendering mode selected by DRAW_OPTION (set in the ctor).
void KNNSimpleGeneralizer::draw()
{
	if (DRAW_OPTION == 1)
		drawOption1(); 
	else if (DRAW_OPTION == 2)
		drawOption2(); 
}

//since the variables we want to draw are already in matlab, we use our own version of draw scattered points
// Plot the data points for one action: raw points (xbackup) in the second
// row, metric-transformed points (xs) in the third. The variables being
// plotted already live inside the Matlab engine, so only plot commands are
// issued here.
void KNNSimpleGeneralizer::drawScatteredPoints(int action)
{
#if defined(MATLAB_PLOT) 
	char tmp[200]; 	// stack buffer: the old new[]/delete[] pair leaked on any early exit
	//draw original points
	sprintf(tmp,"subplot(%d,%d,%d);", nRow, nCol, 1*action_number + action + 1); //the second row 
	engEvalString(getEngine(), tmp); 
	
	sprintf(tmp, "scatter(xbackup{%d}(:,1), xbackup{%d}(:,2),'.');",action+1, action+1);  
	engEvalString(getEngine(), tmp); 


#if defined(USE_METRIC_LEARNING) //draw the transformed points in the third row 
	//draw transformed points
	sprintf(tmp,"subplot(%d,%d,%d);", nRow, nCol, 2*action_number + action + 1); //the third row 
	engEvalString(getEngine(), tmp); 
	
	//when the reduced space is only 1D, plot it against a zero y-axis
	if (reducedDimensions[action]<2)
		sprintf(tmp, "scatter(xs{%d}(1,:), zeros(1,size(xs{%d},2)),'+');", action+1,action+1); 
	else
		sprintf(tmp, "scatter(xs{%d}(1,:), xs{%d}(2,:),'.');",action+1, action+1);  
	engEvalString(getEngine(), tmp); 
	
#endif

#endif

}