
#include "lwpr.h"
#include "LWPRLearner.h" 
#include "ParamReader.h"



#define	LEARN_DIFFERENCES	true		//param: indicates whether we learn difference vectors or target states directly
#define	DEBUG_PRINT

#include "MREAgent.h"
using namespace MT; 

#ifdef	DEBUG_PRINT
FILE* learnGholi; 
#endif

// Destructor: releases the per-action learner arrays and the debug log.
LWPRLearner::~LWPRLearner()
{
	// Free the learners allocated in the constructor (previously leaked).
	if (learners)
	{
		for(int i=0; i< action_number; i++)
			delete [] learners[i]; 
		delete [] learners; 
	}

	if (ballMoveProbHelper)
		delete[] ballMoveProbHelper; 

#ifdef	DEBUG_PRINT
	if (learnGholi)		// fopen in the constructor may have failed
		fclose(learnGholi); 
#endif
}

// Constructor: sets up the dependency priors between observation dimensions,
// allocates one LWPR learner per (action, output dimension), configures each
// learner's input normalization, and optionally loads saved models from disk.
LWPRLearner::LWPRLearner(MREAgent* m, int an, int od, taskspec_t& spec)
{
	m_agent = m; 
#ifdef	DEBUG_PRINT
	learnGholi = fopen("data/learnGholi.out", "w"); 
#endif
	ballMoveProbHelper = 0; 

	//confidence statistics stuff
	min_confidence = 100 ; 
	max_confidence = 0; 
	stable_avg_confidence =  avg_confidence = 0; 
	count_confidence = 0; 
	confidence_reset = true; 

	action_number = an; 
	obs_dim = od; 
	dirty_flag = true; 

	//default priors: every input dimension affects every output dimension.
	//note: memset with 1 relies on bool's value representation (one byte == 1),
	//which holds on the supported platforms.
	memset(priors, 1, sizeof(bool)*MAX_DIMENSION*MAX_DIMENSION); 

	//bugfix: the old code also called memset(priorCounts, obs_dim, ...), which
	//fills each *byte* with obs_dim and produces garbage ints; the loop below
	//is the correct initialization and is all that is needed.
	for(int i=0; i< MAX_DIMENSION; i++)
		priorCounts[i] = obs_dim; 

	if(MREAgent::environment == BUMBLEBALL)
	{
		//hand-crafted sparse priors for the bumbleball domain
		memset(priors, 0, sizeof(bool)*MAX_DIMENSION*MAX_DIMENSION); 

		//things that affect robot x  (x,y,orient)
		priors[0][0] = true; 
		priors[1][0] = true; 
		priors[2][0] = true; 
		priorCounts[0] = 3; 
		
		//things that affect robot y  (x,y,orient)
		priors[0][1] = true; 
		priors[1][1] = true; 
		priors[2][1] = true; 
		priorCounts[1] = 3; 
		
		priors[0][2] = priors[1][2] = true; //x,y affect orientation
		priorCounts[2] = 2; 

		//NOTE(review): priorCounts[3]/[4] keep the default obs_dim while every
		//priors[k][3]/[4] is false (the ball is handled by DDD instead) —
		//confirm nothing relies on those counts matching the priors.

/*		//these are deprecated: we use DDD for the ball
		priors[3][3] = priors[4][3] = true; //ball affects ball
		priorCounts[3] = 2; 
		priors[3][4] = priors[4][4] = true; //ball affects ball
		priorCounts[4] = 2; 
*/
	}

	//one LWPR learner per (action, output dimension)
	learners = new Lwpr* [an]; 
	for(int i=0; i< action_number; i++)
	{
		learners[i] = new Lwpr [od]; 
		
		for(int j=0; j< od; j++)	//set the parameters of each learner (one per output)
		{
			//add norm  //todo: norm should be updated dynamically!
			learners[i][j].norm.clear(); 

			int p=0; 
			for(int k=0; k< od; k++)	//one normalization coefficient per relevant input
				if (priors[k][j])
				{
					double coeff = (spec.double_observations[k].max - spec.double_observations[k].min); 
					switch(k)
					{
					case 2:			//angle: fed as sin/cos, already in [-1,1]
						coeff = 1; 
						break;		//explicit break (was a silent fall-out of the switch)
					}

					learners[i][j].norm.insert(p++, coeff);
				}
		}
	}

	if (ParamReader::LOAD_MODEL)
	{
		//25 bytes matches save() and safely holds "data/lwpr-%d-%d.out" for
		//two-digit indices (the old 20-byte buffer could overflow)
		char fname[25]={0};
		for(int i=0; i< action_number; i++)
			for(int j=0; j< od; j++)
			{
				sprintf(fname, "data/lwpr-%d-%d.out", i,j); 
				if (fileExists(fname))
					learners[i][j].load(fname); 
			}
	}
}


// Persists every (action, output-dimension) LWPR model to its own file,
// mirroring the naming scheme used by the constructor's load path.
void LWPRLearner::save()
{
	if (!ParamReader::SAVE_MODEL)
		return; 

	char fname[25]={0};
	for(int a=0; a< action_number; a++)
	{
		for(int d=0; d< obs_dim; d++)
		{
			sprintf(fname, "data/lwpr-%d-%d.out", a, d); 
			learners[a][d].save(fname); 
		}
	}
}

// Trains one LWPR learner per output dimension on a single transition.
// Inputs are assembled from the dimensions listed in `priors`; when the action
// is forward/backward (0 or 2) the heading angle is fed as cos/sin because the
// x/y deltas are linear in those, not in the raw angle.
void LWPRLearner::learn(const Transition* t) 
{
	doubleA end(1);			//we always predict exactly one output dimension at a time

	//NOTE(review): only the first 3 dimensions (x, y, orientation) are trained
	//here; the ball dimensions are handled by the DDD model below — confirm
	//this is intended for non-BUMBLEBALL environments with obs_dim > 3.
	for(int i=0; i< 3; i++)	//train each of the learners (one per output dimension)
	{
		doubleA start(priorCounts[i]); 
		int j=0; 
		for(int k=0; k< obs_dim; k++)
			if (priors[k][i])
			{
				if (k==2 && (t->action ==0 || t->action==2 ))
				{
					if (i==0)		//x is linearly dependent on cos(angle)
						start(j++) =  cos(deg2rad(t->start[k])); 
					else if (i==1)	//y is linearly dependent on sin(angle)
						start(j++) =  sin(deg2rad(t->start[k])); 
					else
						start(j++) =  t->start[k]; 
				}
				else
					start(j++) = t->start[k]; 
			}

		if (LEARN_DIFFERENCES) 
		{
			end(0) = t->end[i] - t->start[i]; //we're learning the differences
			if(i==2) //angle is circular: wrap the delta into [-180, 180]
			{
				if ( end(0) < -180)
					end(0) += 360; 	//bugfix: was += 180, which does not wrap correctly
				else if (end(0) > 180) 
					end(0) -= 360; 
			}
		}
		else
			end(0) = t->end[i]; 

#ifdef	DEBUG_PRINT
		//for testing: dump action-0 / x-dimension training samples to a file
		if (t->action == 0 && i== 0 )
		{
			fprintf(learnGholi, "%lf %lf %lf(%lf) %lf\n", start(0), start(1), start(2), t->start[2], end(0)); 
			fflush(learnGholi); 
		}
#endif
		learners[t->action][i].learn(start,end); 
	}

	//the ball (dims 3/4) is modeled by a discrete displacement distribution
	if (MREAgent::environment == BUMBLEBALL)
		learnBallMove(t); 
}


// Returns the model's confidence for taking action `a` in state `st`:
// the minimum of the per-dimension learner confidences (100 = upper bound).
// Also folds the result into the running min/max/average statistics.
double LWPRLearner::getConfidence(Observation st, Action a)
{
	double result = 100; 

	doubleA end(1); 
	for(int dim=0; dim< obs_dim; dim++)
	{
		//assemble the input vector from the dimensions this output depends on
		doubleA start(priorCounts[dim]); 
		int pos=0; 
		for(int k=0; k< obs_dim; k++)
		{
			if (priors[k][dim])
				start(pos++) = st[k]; 
		}

		const double c = learners[a][dim].confidence(start); 
		if (c < result)
			result = c; 
	}

	//restart the statistics window when a reset was requested
	if (confidence_reset)
	{
		stable_avg_confidence = avg_confidence; 
		min_confidence = 100; 
		max_confidence = 0; 
		avg_confidence = 0; 
		count_confidence = 0; 
		confidence_reset = false; 
	}

	max_confidence = (result > max_confidence) ? result : max_confidence; 
	min_confidence = (result < min_confidence) ? result : min_confidence; 
	//incremental running average over the current window
	avg_confidence = (avg_confidence*count_confidence) + result; 
	avg_confidence /= ++count_confidence; 

	return result; 
}

// Records one observed ball displacement (dims 3/4 = ball x/y) in the
// empirical count map used by predictBallMove, and marks the cached
// distribution as stale.
void LWPRLearner::learnBallMove(const Transition * t)
{
#ifndef	USE_BALL
	return; 
#endif

	dirty_flag = true;	//cached ballMoveProb must be recomputed

	//discretize the displacement to integers
	int xdiff = (int) (t->end[3] - t->start[3]); //3 is ball x
	int ydiff = (int) (t->end[4] - t->start[4]); //4 is ball y

	DDDPair pdiff (xdiff,ydiff);

	//map::operator[] value-initializes absent counts to 0, so the old
	//find()/insert dance is unnecessary
	++ballMoveCnt[pdiff]; 
}

//end should be instantiated before calling this
void LWPRLearner::predictBallMove(const Observation st, Observation end)
{


#ifndef	USE_BALL
	return;
#endif

	end[3] = st[3]; 
	end[4] = st[4]; 


	//is ballMoveProb out of date? 
	if (dirty_flag)
	{
		//recompute ballMoveProb: 
		long totalTrans =0; 
		for(DDDCount::iterator it=ballMoveCnt.begin(); it!= ballMoveCnt.end(); it++)
			totalTrans += (*it).second; 

//		printf("recomputing ball model\n"); 

		ballMoveProb.clear();
		for(DDDCount::iterator it=ballMoveCnt.begin(); it!= ballMoveCnt.end(); it++)
		{
			ballMoveProb[(*it).first] = (*it).second / (double)totalTrans; 
//			printf("[%d,%d] [%lf]\n", (*it).first.first, (*it).first.second,(*it).second / (double)totalTrans); 
		}

		//now compute the array
		if (ballMoveProbHelper)
			delete[] ballMoveProbHelper; 

		ballMoveProbHelper = new DDDProb [ballMoveProb.size()+1]; 
		ballMoveProbHelper[0] = DDDProb ( DDDPair(0,0),0); 
		double inc = 0;
		
		int i=1;
		for(DDDProbMap::iterator it= ballMoveProb.begin(); it != ballMoveProb.end(); it++, i++)
		{
			inc += (*it).second; 
			ballMoveProbHelper[i] = DDDProb ( (*it).first, inc); 
		}
		dirty_flag = false; 
	}

	double prob = floatRand(1.0); 


	//use binary search on the array to access the corresponding move
	if (ballMoveProbHelper)
	{
		int indub = ballMoveProb.size(); 
		int indlb = 0;
		int ind; 
		bool found = false; 

		while(true)
		{
			ind = (indub + indlb)/2; 
			
			//is ind the correct one? 
			if ( ballMoveProbHelper[ind].second >= prob &&  ballMoveProbHelper[ind-1].second <= prob) 
				break;

			//should we go to the right ?
			if ( ballMoveProbHelper[ind].second < prob)
				indlb = ind; 
			else
				indub = ind; 
		}
	
		//ind has the right one
		end[3] = st[3] + ballMoveProbHelper[ind].first.first;
		end[4] = st[4] + ballMoveProbHelper[ind].first.second;


		//bring to bound
		if (end[3] < m_agent->taskSpec.double_observations[3].min)
			end[3] = m_agent->taskSpec.double_observations[3].min; 
		if (end[3] > m_agent->taskSpec.double_observations[3].max)
			end[3] = m_agent->taskSpec.double_observations[3].max; 

		if (end[4] < m_agent->taskSpec.double_observations[4].min)
			end[4] = m_agent->taskSpec.double_observations[4].min; 
		if (end[4] > m_agent->taskSpec.double_observations[4].max)
			end[4] = m_agent->taskSpec.double_observations[4].max; 


		return;
	}

/*
	double tmp=0; 

	for(DDDProbMap::iterator it= ballMoveProb.begin(); it != ballMoveProb.end(); it++)
	{
		tmp += (*it).second; 
		if (tmp > prob) 
		{
			end[3] = st[3] + (*it).first.first; 
			end[4] = st[4] + (*it).first.second; 
			return;
		}
	}
*/


}


// Predicts the successor observation for taking action `a` in state `st`.
// The caller owns the returned array; returns 0 if any learner fails to
// produce a prediction.  Mirrors learn(): heading enters as cos/sin for the
// x/y outputs, and ball dimensions are predicted by the DDD model.
Observation LWPRLearner::predict(Observation st, Action a)
{
	Observation result = new Observation_type[obs_dim]; 

	doubleA end(1); 

	for(int i=0; i< obs_dim; i++)
	{
		//we use DDD to predict ball move (dims >= 3)
		if (MREAgent::environment == BUMBLEBALL && i >=3)
			break;

		doubleA start(priorCounts[i]); 
		int j=0; 
		for(int k=0; k< obs_dim; k++)
			if (priors[k][i])
			{
				if (k==2 && (a==0 || a==2))
				{
					if (i==0)
						start(j++) =  cos(deg2rad(st[k])); 
					else if (i==1)
						start(j++) =  sin(deg2rad(st[k])); 
					else
						start(j++) =  st[k]; 
				}
				else
					start(j++) = st[k]; 
			}

		learners[a][i].predict(start,end); 
		if (end.d0==0)	//learner could not produce a prediction
		{
			delete[] result;	//bugfix: was `delete result` on a new[] allocation (UB)
			return 0; 
		}
		if (LEARN_DIFFERENCES)
		{
			result[i] = end(0) + st[i];  
			if (i==2)	//angle is circular: wrap into [-180, 180]
			{
				if (result[i] < -180) 
					result[i] += 360; 
				else if (result[i] > 180)
					result[i] -= 360; 
			}
		}
		else
			result[i] = end(0); 
	}

	if (MREAgent::environment == BUMBLEBALL)
		predictBallMove(st, result); 

	return result; 
}

// Replays the whole transition history BATCH_LEARN_STEPS times, each pass in
// a fresh random order (Fisher-Yates shuffle), aborting early on timeout.
void LWPRLearner::batchLearn( list<Transition>& history)
{
	const int BATCH_LEARN_STEPS = 3; //param:
	int size = (int) history.size(); 

#ifdef OS_LINUX
	measureTimeStart();
#endif

	//build a vector of pointers into the list so it can be permuted cheaply
	Transition** vec = new Transition* [size]; 

	int cntr = 0; 
	for(list<Transition>::iterator it = history.begin(); it != history.end(); it++, cntr++)
		vec[cntr] = & (*it); 

	for(int i=0; i< BATCH_LEARN_STEPS; i++)
	{
		//Fisher-Yates shuffle
		for(int j=0; j< size-1; j++)
		{
			int sel = intRand(size-j); 
			if (sel >= size-j)	sel = size-j - 1;	//defensive clamp in case intRand's bound is inclusive

			//swap
			Transition* tt = vec[sel]; 
			vec[sel] = vec[size-j-1]; 
			vec[size-j-1] = tt; 
		}

#ifdef	OS_LINUX
		if(measureTimeEnd() > ParamReader::RUN_TIMEOUT)
		{
			printf("timeout batch learning %lf\n", measureTimeEnd());
			break;
		}
#endif

		for(int j=0; j< size; j++)
			learn(vec[j]); 
	}

	delete[] vec;	//bugfix: was `delete vec` on a new[] allocation (UB)
}







