#include "FittedVISolver.h"
#include "MREAgent.h"
#include "ParamReader.h"
#include <math.h>
#include <vector>

namespace planning
{

#define		NUMBER_OF_ITERATIONS	40		//param:


int FittedVISolver::K=5; 

#define TINY 0.000001			//used to keep off the boundaries

// Builds the fitted-VI solver: lays a uniform grid of sample states over the
// observation space, creates one BellmanBackuper per grid point (see
// generateUniformSamples), and indexes the points in an ANN kd-tree so that
// getStateValueFast can do k-nearest-neighbor value interpolation.
FittedVISolver::FittedVISolver(MREAgent* a, TFGenerator* tf, RFGeneralizer* rf)
:SlaveableMDPSolver(a,tf,rf) 
{
	NUMBER_OF_SLAVES = 3; 	// number of helper threads spawned in solveModel


	// Total number of grid points = (points per dimension) ^ dimension.
	sampleSize = (int) pow( (float)ParamReader::FVI_DISCRETIZED_SIZE, a->dimension);

	samples = annAllocPts( sampleSize , a->dimension);   // one ANN point per grid cell
	solvers.resize(sampleSize); 
	generateUniformSamples(samples, ParamReader::FVI_DISCRETIZED_SIZE ); 

	knnLearner = new ANNkd_tree(samples, sampleSize, a->dimension ); //create the KNN tree 
}

// Releases the sample grid, the per-sample backup nodes, and the kd-tree.
FittedVISolver::~FittedVISolver()
{
	annDeallocPts(samples); 

	// size_t index avoids the signed/unsigned comparison of the old int loop
	for(size_t i=0; i< solvers.size(); i++)
		delete solvers[i]; 

	delete knnLearner; 
}


//make sure you don't call this function on terminated state unless you uncomment the first two lines (removed for performance)
double FittedVISolver::getQValue(const Observation state, Action a)
{
//	if (rfGens->isTerminal(state))
//		return 0; 

	BellmanBackuper* bb = createBackuper(state); 
	bb->update(); 
	return bb->getQValue(a); 

/*
	double qvalue = -1; 
	Observation next = tfGens->predict(state, a); 		
	if (!next)	//unknown, use max value
		qvalue = ParamReader::rmax  / (1 - ParamReader::GAMMA) ; 
	else		//use bellman backup to get it
	{
		double nextValue = getStateValue(next, horizon-1); 
		double reward = rfGens->predict(0,0 , state); 
		qvalue = reward + ParamReader::GAMMA * nextValue; 
		delete next; 
	}
	return qvalue; 
*/
}

// Builds a one-shot backup node for 'state', refreshes its Q-values, and
// returns the greedy action.
Action FittedVISolver::getBestAction(Observation state)
{
	BellmanBackuper* backup = createBackuper(state); 
	backup->update(); 
	const Action greedy = backup->getBestAction(); 
	delete backup; 
	return greedy; 
}
double FittedVISolver::getStateValue(const Observation state)
{
	BellmanBackuper* bb = createBackuper(state); 
	bb->update(); 
	double result =  bb->getValue(); 
	delete bb; 
	return result; 

/*
	double value = -1; 
	if (rfGens->isTerminal(state))
		value = rfGens->predict(0,0,state); 
	else 
	{
		if (horizon > 0)
		{
			value = -9999999; 
			for(int i=0; i< agent->numberOfActions; i++)
			{
				double qvalue = getQValue(state, i, horizon); 
				if (qvalue > value)
					value = qvalue; 
			}
		}else if (! tree->predict(state, value))
			value = ParamReader::rmax  / (1 - ParamReader::GAMMA) ; 
	}

	return value; 
*/
}

// Converts the K neighbor distances into Gaussian kernel weights
// kernels[i] = exp(-dists[i] / sigma^2) with fixed bandwidth sigma^2 = 0.16.
void FittedVISolver::computeKernels(double* dists, double* kernels)
{
	const double sigma2 = 0.16; 
	for(int i=0; i< K ; i++)
	{
		// fix: use double-precision exp; the old expf silently truncated the
		// double argument/result to float
		kernels[i] = exp(-dists[i]/ sigma2); 
	}
}


double FittedVISolver::getStateValueFast(const Observation state)
{
	double val = -999999999; 

	ANNidx* nnIdx= new ANNidx[K];									//this is a temporary place to put the indexes of the k nearest neighbors
	ANNdist* dists= new ANNdist[K];	
	//this is a temporary place to put the distances of the k nn
	try{
		//mutex in this part because ann structure is not multi-thread safe
		boost::mutex::scoped_lock *  lock = new boost::mutex::scoped_lock(m_mutex);
		knnLearner->annkSearch(state, K, nnIdx, dists, 0.05); 
		delete lock; 
	} catch(...)
	{
		delete[] nnIdx; 
		delete[] dists; 
		return 0; 
	} 
	double* kernels= new double[K]; 
	computeKernels(dists, kernels); 
	for(int i=0; i< action_number; i++)
	{
		double qv = 0; 
		double denom = 0; 
		for(int j=0; j< K; j++)
		{
			int ss = solvers.size(); 
			BellmanBackuper* bb = solvers[nnIdx[j]]; 
			qv += bb->getValue()*kernels[j]; 
			denom += kernels[j]; 
		}

		qv /= denom; 

		if (qv> val)
			val = qv; 
	}
	delete[] nnIdx; 
	delete[] dists; 
	delete[] kernels; 
	return val; 
}




// Thread entry point: repeatedly re-solve the model starting from the agent's
// most recent state.  Never returns.
void FittedVISolver::operator ()()
{
	for (;;)
		solveModel(agent->lastState); 
}


// Runs synchronous value-iteration sweeps over the sampled grid until the max
// Bellman error drops below PLN_VI_EPSILON or the iteration cap is hit.
// 'currentState' is unused by this implementation (kept for the interface).
void FittedVISolver::solveModel(Observation currentState)
{
	printf("beginning to solve FVI\n"); 

	isVIRunning = true; 
#ifdef USE_THREADS
	// Spawn slave threads that do extra asynchronous sweeps (see
	// FittedVISolverSlave::doVI) while this thread does ordered sweeps.
	boost::thread_group thrds;
	if (NUMBER_OF_SLAVES>0)
	{
		for(int i=0; i< NUMBER_OF_SLAVES; i++)
			thrds.create_thread(FittedVISolverSlave(this));
	}
#endif

	for(int i=0; i< NUMBER_OF_ITERATIONS; i++)
	{
		double err = 0; 	// largest Bellman error seen in this sweep
		for(int j=0; j< sampleSize; j++)
		{
			double tmp = solvers[j]->update(); 
			if (err<tmp)
				err = tmp; 
		}//j for each point in the corpus
		printf("max error in iteration %d is: %lf\n", i, err); 
		if (err < ParamReader::PLN_VI_EPSILON)	// converged
			break; 
	}//i iteration
	isVIRunning = false; 

#ifdef USE_THREADS
	// Slaves exit their loop once isVIRunning turns false; wait for them so
	// they cannot keep touching 'solvers' after we return (the old code never
	// joined the group).
	thrds.join_all();
#endif

	draw(); 
}



// Advances 'prev' to the next point of an n-points-per-dimension uniform grid
// over the observation space, writing the result into 'next' (odometer-style
// enumeration, last dimension varies fastest).  Returns false when 'prev' was
// the final grid point, i.e. the carry overflowed past dimension 0.
bool  FittedVISolver::getNextItem(Observation prev, Observation next, int n)
{
	Observation result = MREAgent::copyObservation(prev); 

	int movingDirection = agent->dimension -1; 

	while(movingDirection >=0)
	{
		// Step so that n points span [min, max] along this dimension.
		double moveSize = (agent->taskSpec.double_observations[movingDirection].max  - agent->taskSpec.double_observations[movingDirection].min) / (n-1);

		result[movingDirection] += moveSize ;
		
		//this is to make sure we form a nice grid around the borders and yet keep within the boundaries. for example if we want to have two points, we should have min+epsilon and max-epsilon 
		if (result[movingDirection] > agent->taskSpec.double_observations[movingDirection].max  && result[movingDirection] <= agent->taskSpec.double_observations[movingDirection].max + 1.1*TINY)
			result[movingDirection] = agent->taskSpec.double_observations[movingDirection].max - TINY; 

		// Ran past the end of this dimension: wrap it back to min+TINY and
		// carry into the next-lower dimension; otherwise we found the point.
		if (result[movingDirection] >= agent->taskSpec.double_observations[movingDirection].max)
		{
			result[movingDirection] = agent->taskSpec.double_observations[movingDirection].min + TINY; 
			movingDirection --; 
		} else
			break; 
	}


	//warning: todo: i should find a better implementation that doesn't require result & next at the same time. 
	if (movingDirection <0)
	{
		// Every dimension overflowed: enumeration is complete.
		delete[] result;		
		return false; 
	}

//	MREAgent::copyObservation(result, next); 
	memcpy(next, result, agent->dimension*sizeof(Observation_type)); 


	delete[] result; 
	return true; 
}


// Fills 'pts' with a uniform grid over the observation space and creates one
// BellmanBackuper per grid point.  NOTE: the 'sampleSize' parameter is the
// per-dimension resolution and shadows the member 'sampleSize' (the total
// point count, == solvers.size()).
void FittedVISolver::generateUniformSamples(ANNpointArray& pts, int sampleSize)
{
	// Seed the first grid point at min+TINY along every dimension.
	Observation first = pts[0]; 
	for(int i=0; i< agent->dimension; i++)
	{
		first[i] = agent->taskSpec.double_observations[i].min + TINY; 
	}

	const int totalPoints = (int) solvers.size();	// member sampleSize is shadowed here

	int pos = 0; 
	bool flag = true; 
	while(flag)
	{
		solvers[pos] = createBackuper(pts[pos]); 
		// fix: guard pos+1 before touching pts[pos+1] — the old code indexed
		// one slot past the end of pts on the final iteration.
		flag = (pos+1 < totalPoints) && getNextItem(pts[pos], pts[pos+1], sampleSize); 
		pos++;
	}

	printf("created %d number of solvers\n", pos); 
}

// Factory for the Bellman-backup strategy used for a given state.  Alternative
// implementations are kept commented out for easy switching.  Caller owns the
// returned object.
BellmanBackuper* FittedVISolver::createBackuper(const Observation obs)
{
//	return new SimpleBellmanBackuper(this, obs); 
//	return new UCTBackuper(this, obs); 
	return new MonteCarloBackuper(this, obs); 
}

//------------------------------------------other classes -------------------------------


// Stores the owning solver and takes a private copy of the state observation
// (released in the destructor).
BellmanBackuper::BellmanBackuper(MDPSolver* p, const Observation st)
	: parent(p), state(MREAgent::copyObservation(st))
{
}

// Replaces the solver's private state with a copy of o, freeing the previous
// buffer.  NOTE(review): frees 'state' before copying — passing the backuper's
// own 'state' as o would read freed memory; callers must not alias.
void BellmanBackuper::setState(const Observation o)
{
	delete[] state; 
	state = MREAgent::copyObservation(o);
}

// Releases the private copy of the state taken in the constructor/setState.
BellmanBackuper::~BellmanBackuper()
{
	delete[] state; 
}


// Allocates one Q-value slot per action, all starting at zero.
SimpleBellmanBackuper::SimpleBellmanBackuper(MDPSolver* p, const Observation st)
:BellmanBackuper(p,st)
{
	const int numActions = parent->action_number; 
	qvalues = new double[numActions]; 
	for(int a=0; a< numActions; a++)
		qvalues[a] = 0.0; 
}

// Frees the per-action Q-value array allocated in the constructor.
SimpleBellmanBackuper::~SimpleBellmanBackuper()
{
	delete[] qvalues; 
}

// One Bellman backup for this state: refreshes Q(s,a) for every action whose
// transition model is known, and returns the largest change (Bellman error)
// across actions.  Terminal states are left untouched and report error 0.
double SimpleBellmanBackuper::update()
{
	if ( parent->rfGens->isTerminal( state))	//here we suppose the first time we put the values is the reward of this state, so we don't change it
		return 0; 

	double berr = 0; 	// largest |newQ - oldQ| seen so far
	for(int i=0; i< parent->action_number; i++)
	{
		Observation next = parent->tfGens->predict(state, i); 
		if (! next)	// transition for this action is still unknown — skip it
			continue; 

		// Q(s,a) = R(s,a) + gamma * V(s')
		double newq = parent->rfGens->predict(state, i, state) + ParamReader::GAMMA*parent->getStateValueFast(next); 
		const double diff = fabs(newq - qvalues[i]); 	// fix: compute fabs once
		if (diff > berr)
		{
			berr = diff; 
			if (berr > 1000)	// sanity check on divergence
				printf("warning: suspiciously large Bellman error %lf\n", berr); 
		}
		qvalues[i] = newq; 
		delete[] next; 
	}

	return berr; 
}


// Returns the cached Q-value for action a, as of the last update() call.
// No bounds check: a must be in [0, parent->action_number).
double SimpleBellmanBackuper::getQValue(Action a)
{
//	ASSERT(a>=0 && a< parent->action_number); 
	return qvalues[a]; 
}

// V(s) = max over actions of the cached Q-values.
double SimpleBellmanBackuper::getValue()
{
	double val = -HUGE_VAL; 	// negative infinity (resolves the old sentinel TODO)

	for(int i=0; i< parent->action_number; i++)
		if (qvalues[i]> val)
			val = qvalues[i]; 
	return val; 
}

// Greedy argmax over the cached Q-values; ties resolve to the lowest index.
Action SimpleBellmanBackuper::getBestAction()
{
	int argmax = 0; 
	for(int a=1; a< parent->action_number; a++)
		if (qvalues[a]> qvalues[argmax])
			argmax = a; 

	return argmax; 
}



// Slave-thread loop: while the master's value iteration is running, keep
// backing up randomly chosen sample states (asynchronous extra sweeps).
// Exits once the master clears parent->isVIRunning.
void FittedVISolverSlave::doVI()
{
	while(parent->isVIRunning)
	{
		long i = intRand(parent->sampleSize); 
		parent->solvers[i]->update(); 
	}
}

}//namespace