#include "FittedVISolver.h"
#include "MREAgent.h"
#include "KDTreeLearner.h"
#include "RFGeneralizer.h"
#include "ParamReader.h"
#include "KDTree.h"
#include <math.h>
#include <vector>
#define		NUMBER_OF_ITERATIONS	10		//param: maximum number of value-iteration sweeps per solveModel() call

using namespace kdtree; 

#define TINY 0.000001			//used to keep off the boundaries

// Builds the solver: configures the KD-tree used as the value-function
// approximator and seeds it with a uniform grid of sample states.
// 'tf' must actually be a KDTreeLearner (it is downcast below) — the
// transition model and the value tree share the KD-tree machinery.
FittedVISolver::FittedVISolver(MREAgent* a, TFGenerator* tf, RFGeneralizer* rf)
:MDPSolver(a,tf,rf) 
{
	kdGens = (KDTreeLearner*) tf; 	// unchecked downcast — caller must pass a KDTreeLearner
	tree = new KDTree(); 
	taskspec_t& taskSpec = agent->taskSpec; 
	// Split leaves once they hold too many samples (alternative: split on error, below).
	tree->setSplitCriterion(KDTree::SPLIT_USE_MAX_SAMPLES); // NOTE(review): original had a dangling "//warning:" here — presumably about the unused error-based criterion; confirm
//	tree->setSplitCriterion(KDTree::SPLIT_USE_MAX_ALLOWED_ERROR ); 
	tree->generalizerType = KDTree::GENERALIZER_USE_MEAN ; 	// leaf prediction = mean of its samples
	tree->maxNumberOfSamples = 30; 	// samples per leaf before splitting
	tree->dimension = agent->dimension ;  
	tree->ranges = taskSpec.double_observations; 	// per-dimension min/max from the task spec
	tree->knownMaxLength = 0.3 ;  //this is normalized (should be less than 1)
	tree->knownMinPoints = a->dimension+5; 	// minimum samples before a region counts as "known"
	tree->maxAllowedError = 0.1; //this is not used: only used when using error for splitting
	tree->setDimensionParameters(agent->dimension, taskSpec.double_observations); 

	// Seed the sample corpus with a NUMBER_OF_VI_SAMPLES-per-dimension uniform grid.
	generateUniformSamples(samples, ParamReader::NUMBER_OF_VI_SAMPLES ); 
}

// Tears down the solver: releases the value-function tree first (it may
// reference the sample observations), then frees the sample corpus itself.
FittedVISolver::~FittedVISolver()
{
	delete tree; 	// delete on a null pointer is a no-op, so no guard is needed
	clearSamples(); 
}


//make sure you don't call this function on terminated state unless you uncomment the first two lines (removed for performance)
// Q(state, a) via a one-step Bellman backup through the learned transition
// model: GAMMA * V(next state). Note the immediate reward is NOT added here —
// solveModel() adds it after maximizing over actions.
// 'horizon' bounds the recursive lookahead depth in getStateValue.
// Returns the optimistic R-max bound when the transition is still unknown.
double FittedVISolver::getQValue(Observation state, Action a, int horizon)
{
//	if (rfGens->isTerminal(state))
//		return 0; 

	double qvalue = -1; 
	Observation next = tfGens->predict(state, a); 		
	if (!next)	//unknown, use max value
		qvalue = ParamReader::rmax  / (1 - ParamReader::GAMMA) ; 
	else		//use bellman backup to get it
	{
		double nextValue = getStateValue(next, horizon-1); 
		qvalue = ParamReader::GAMMA * nextValue; 
		// Observations are new[]-allocated arrays throughout this file
		// (see generateUniformSamples/clearSamples), so delete[] is required;
		// plain delete here was undefined behavior.
		delete[] next; 
	}
	return qvalue; 
}
// Greedy action selection: evaluates Q(state, a) for every action and returns
// one of the maximizers, breaking exact ties uniformly at random.
// Terminal states have no meaningful action, so a random one is returned.
Action FittedVISolver::getBestAction(Observation state)
{
	if (rfGens->isTerminal(state))
		return agent->randomAction(); 

	vector<Action> results; 	// all actions tied at the current best Q-value
	double bestResult = -9999999; 
	for(int i=0; i< agent->numberOfActions; i++)
	{
		double qvalue = getQValue(state, i); 
		if (qvalue > bestResult)
		{
			bestResult = qvalue; 
			results.clear(); 
			results.push_back(i); 
		}
		else if (qvalue == bestResult)	// exact tie: keep this action too (no redundant re-assignment)
			results.push_back(i); 
	}

	// Defensive: results can only be empty if every Q-value was NaN
	// (both comparisons above are false for NaN); avoid results[-1].
	if (results.empty())
		return agent->randomAction(); 

	int randInd = intRand(results.size()); 
	if (randInd >= (int)results.size())	randInd = (int)results.size() - 1; 	// clamp in case intRand is inclusive
	return results[randInd]; 
}
double FittedVISolver::getStateValue(const Observation state, int horizon)
{
	double value = -1; 
	if (rfGens->isTerminal(state))
		value = rfGens->predict(0,0,state); 
	else 
	{
		if (horizon > 0)
		{
			value = -9999999; 
			for(int i=0; i< agent->numberOfActions; i++)
			{
				double qvalue = getQValue(state, i, horizon); 
				if (qvalue > value)
					value = qvalue; 
			}
		}else if (! tree->predict(state, value))
			value = ParamReader::rmax  / (1 - ParamReader::GAMMA) ; 
	}

	return value; 

}

// Thread entry point: re-solve the model forever. Never returns.
void FittedVISolver::operator ()()
{
	for (;;)
		solveModel(agent->lastState); 
}


// Runs fitted value iteration over a uniform grid of sample states:
// regenerates the sample corpus, loads it into the KD-tree, then sweeps
// Bellman backups until convergence (VI_EPSILON) or NUMBER_OF_ITERATIONS.
// 'currentState' is accepted for interface compatibility but ignored.
void FittedVISolver::solveModel(Observation currentState)
{
	//we totally ignore currentState in this model 
	//how many nodes do we have? trying to find out how many samples to get from each node(just a rough estimate)
/*
	int totalNodes = 0; 
	for(int i=0; i< agent->numberOfActions; i++)
		for(int j=0; j< agent->taskSpec.obs_dim ; j++)	//for each tree
			totalNodes += kdGens->learners[i][j].totalNodes;  

	int samplePerNode = ParamReader::NUMBER_OF_VI_SAMPLES / totalNodes  +1; 

	//generate the list of points(we should delete observations inside this list at the end of the method)
	list<KData> l; 
	for(int i=0; i< agent->numberOfActions; i++)
	{
		for(int j=0; j< agent->taskSpec.obs_dim ; j++)	//for each tree
		{
			list<KData> tmpList; 
			if (! kdGens->learners[i][j].root)
			{
				printf("not enough points\n"); 
				return ; 
			}
			kdGens->learners[i][j].root->createRandomPoints(tmpList, samplePerNode); 

			//fill in the rewards values 
			for(list<KData>::iterator it=tmpList.begin(); it != tmpList.end(); it++)
			{
				if (tree->root)	//use the current value function as the startup 
				{
					(*it).second = getStateValue((*it).first); 
					continue; 
				}

				(*it).second = rfGens->predict( (*it).first , i, (*it).first); 
			}

			//merge
			l.insert(l.end(), tmpList.begin(), tmpList.end()); 
		}//for each tree
	}
*/

	double maxBellmanError = 1; 
	clearSamples(); 

	// Target corpus size: NUMBER_OF_VI_SAMPLES per dimension, i.e.
	// NUMBER_OF_VI_SAMPLES^dimension grid points in total.
	long needed = 1; 
	for(int i=0; i< agent->dimension ; i++)
		needed *= ParamReader::NUMBER_OF_VI_SAMPLES; 

	// samples was just cleared, so remains == needed today; the subtraction is
	// kept for the "mix uniform sampling with trajectory samples" idea above.
	int remains = (int)(needed - (long)samples.size());
	int n = (int)pow((double)remains, 1.0/agent->dimension); 	// per-dimension grid resolution
	// %ld/%zu: 'needed' is long and size() is size_t — %d here was undefined behavior on LP64.
	printf("needed: %ld\tavailable:%zu\tn:%d\n", needed, samples.size(), n); 
	if (n>0)
		generateUniformSamples(samples, n); 

	tree->resetPoints(); 
	tree->addPoints(samples); 
	printf("beginning to solve corpus of size %zu\n", samples.size()); 
	for(int i=0; i< NUMBER_OF_ITERATIONS && maxBellmanError > ParamReader::VI_EPSILON  ; i++)
	{
		maxBellmanError = 0; 
		for(list<KData>::iterator it= samples.begin(); it != samples.end(); it++)	//for each point in the set
		{
			//update the state value 
			Observation st = (*it).first ; 
			if ( rfGens->isTerminal( st))	//here we suppose the first time we put the values is the reward of this state, so we don't change it
				continue; 
			double newValue = -99999; 
			// 'act' (was 'i') — renamed so it no longer shadows the sweep counter above.
			for(int act=0; act< agent->numberOfActions; act++)
			{
				double tmpValue = getQValue(st, act, ParamReader::MULTISTEP_BACKUP_HORIZON ); 
				if (tmpValue > newValue ) 
					newValue = tmpValue; 
			}
			// getQValue returns only the discounted next-state value; the
			// immediate reward is added here, after maximizing over actions.
			newValue += rfGens->predict((*it).first, 0, (*it).first ); 
			double diff = fabs( (*it).second - newValue); 
			if (maxBellmanError < diff)
				maxBellmanError = diff; 
			(*it).second = newValue; 
		}
		//refit the tree on the updated values (structure kept, data replaced)
		tree->root->clearData(); 
		tree->root->addPointsToCurrentStructure(samples); 
		printf("solved round %d, max error is %lf\n", i, maxBellmanError); 
	}//each iteration
	
	//delete the observations
/*
	for(list<KData>::iterator it = l.begin(); it != l.end(); it++)
		delete (*it).first ; 
*/

}


void FittedVISolver::clearSamples()
{
	for(list<KData>::iterator it = samples.begin(); it != samples.end(); it++)
	{
		Observation_type* obs = (*it).first; 
		if (obs)
			delete[] obs; 
	}

	samples.clear(); 

}

// Returns a fresh copy of 'st' advanced to the next point of an
// n-points-per-dimension grid, odometer style: the last dimension ticks
// fastest; when a dimension wraps past its max it resets to min+TINY and the
// next-slower dimension advances. Returns 0 when the whole grid is exhausted.
// Caller owns the returned array (it is freed later with delete[]).
Observation FittedVISolver::getNextItem(Observation st, int n)
{
	Observation result = MREAgent::copyObservation(st); 

	int movingDirection = agent->dimension -1; 

	while(movingDirection >=0)
	{
		// Grid step so that n points span [min, max] (endpoints nudged inside by TINY).
		double moveSize = (agent->taskSpec.double_observations[movingDirection].max  - agent->taskSpec.double_observations[movingDirection].min) / (n-1);

		result[movingDirection] += moveSize ;
		
		//this is to make sure we form a nice grid around the borders and yet keep within the boundaries. for example if we want to have two points, we should have min+epsilon and max-epsilon 
		if (result[movingDirection] > agent->taskSpec.double_observations[movingDirection].max  && result[movingDirection] <= agent->taskSpec.double_observations[movingDirection].max + TINY)
			result[movingDirection] = agent->taskSpec.double_observations[movingDirection].max - TINY; 

		if (result[movingDirection] >= agent->taskSpec.double_observations[movingDirection].max)
		{
			result[movingDirection] = agent->taskSpec.double_observations[movingDirection].min + TINY; 
			movingDirection --; 
		} else
			break; 
	}

	if (movingDirection <0)
	{
		// Observations are new[]-allocated arrays (clearSamples frees them with
		// delete[]); plain delete here was undefined behavior.
		delete[] result; 
		result = 0; 
	}

	return result; 
}


// Fills 'l' with a uniform grid of sample states, sampleSize points per
// dimension, each paired with its current state value. The Observation
// arrays pushed into 'l' are owned by the list (freed via clearSamples).
void FittedVISolver::generateUniformSamples(list< std::pair<Observation,double> >& l, int sampleSize)
{
	// Start at the lower corner of the state space, nudged inside by TINY.
	Observation point = new Observation_type[agent->dimension]; 
	for(int d = 0; d < agent->dimension; d++)
		point[d] = agent->taskSpec.double_observations[d].min + TINY; 

	// Walk the grid; getNextItem returns 0 once every cell has been visited.
	while(point)
	{
		double val = getStateValue(point); 
		l.push_back( KData(point, val)); 
		point = getNextItem(point, sampleSize); 
	}
}


//---------------------------------------------FittedVIMetric----------------------------


// FittedVIMetric adds nothing at construction time; all setup is done by
// the FittedVISolver base-class constructor.
FittedVIMetric::FittedVIMetric(MREAgent* a, TFGenerator* tf, RFGeneralizer* rf)
:FittedVISolver(a,tf,rf) 
{
}









