#include "DiscreteMDPSolver.h"
#include "MREAgent.h"
#include "ParamReader.h"
#include <vector>
#include <math.h>
#include <time.h>

namespace planning
{
namespace discreteSolver
{

// Static optimistic value bound; assigned in DiscreteMDPSolver's constructor.
double	DiscretizedState::VMAX;

DiscreteMDPSolver::DiscreteMDPSolver(MREAgent*a, TFGenerator* tf, RFGeneralizer* rf)
:SlaveableMDPSolver(a,tf,rf)
{
	// Number of worker threads used for TF building / value iteration.
	NUMBER_OF_SLAVES = 3;
	states = 0;	// state table is allocated by constructStates() below

	// Optimistic value used by getQValue() for poorly-known actions.
	// NOTE(review): rmax/GAMMA looks suspicious — the usual optimistic bound
	// for a discounted MDP is rmax/(1-GAMMA); confirm the intent here.
	DiscretizedState::VMAX = ParamReader::rmax / ParamReader::GAMMA;

	constructStates();
}

DiscreteMDPSolver::~DiscreteMDPSolver(void)
{
	// Release the discretized state table.
	destroyStates();
}


// Returns the greedy action for the cell containing the given observation.
// A null observation yields action 0 as a safe fallback.
Action DiscreteMDPSolver::getBestAction(Observation state)
{
	if (state == 0)
		return 0;

	return states[observation2Index(state)]->getBestAction();
}

// Rebuild the discrete model from the current learned generalizers and
// re-solve it with value iteration. currentState is currently unused.
void DiscreteMDPSolver::solveModel(Observation currentState)
{
	clearStateContents();			//clear the transition prob table
	constructTransitionFunction();
	doVI();
	draw();
}

// Returns the current value estimate of the cell containing observation o;
// a null observation maps to value 0.
double DiscreteMDPSolver::getStateValue(const Observation o)
{
	if (o == 0)
		return 0;

	return states[observation2Index(o)]->getValue();
}

// No-op function-call operator; intentionally empty — presumably a thread
// entry point required by a base-class/threading interface (TODO confirm).
void DiscreteMDPSolver::operator()()
{
}

// Frees the whole state table.  constructStates() allocates stateSize+1
// entries (the extra one is the sink state at index stateSize), so we must
// delete stateSize+1 states — the original loop leaked the sink state.
// The pointer is nulled afterwards to guard against dangling access.
void DiscreteMDPSolver::destroyStates()
{
	if (! states)
		return;

	for(long i=0; i<= stateSize; i++)	// <= : include the sink state
		delete states[i];

	delete[] states;
	states = 0;
}


void DiscreteMDPSolver::clearStateContents()
{
	if (!states)
		return;

	for(int i=0; i< stateSize; i++)
		for(int j=0; j< action_number; j++)
			states[i]->tfunc[j].clear();
}

// Builds the table of discretized grid cells: GRD_DISCRETIZED_SIZE^dimension
// regular states plus one extra sink (terminal) state at index stateSize.
// Each state records the [min,max) bounds of its cell along every dimension.
void DiscreteMDPSolver::constructStates()
{
	//clear old states
	if (states)
		destroyStates();

	stateSize = powl(ParamReader::GRD_DISCRETIZED_SIZE, agent->dimension);

	states = new DiscretizedState* [stateSize+1];	//one is for sink state

	//create the sink state at the end: absorbing, zero value and q-values
	states[stateSize] = new DiscretizedState(stateSize,this);
	for(int i=0; i< action_number; i++)
		states[stateSize]->qvals[i] = 0;
	states[stateSize]->value = 0;

	taskspec_t& tt = agent->taskSpec;

	// RAII scratch buffers (the original raw new[]s would leak on exception)
	std::vector<double> mins(agent->dimension);
	std::vector<double> maxs(agent->dimension);
	std::vector<double> steps(agent->dimension);	// cell width along dimension i

	// initialize the bounds of the very first cell
	for(int i=0; i< agent->dimension; i++)
	{
		steps[i] = (tt.double_observations[i].max - tt.double_observations[i].min)/ParamReader::GRD_DISCRETIZED_SIZE;
		mins[i] = tt.double_observations[i].min;
		maxs[i] = mins[i] + steps[i];
	}

	for(long i=0; i< stateSize; i++)
	{
		states[i] = new DiscretizedState(i,this);
		memcpy(states[i]->mins, &mins[0], agent->dimension*sizeof(double));
		memcpy(states[i]->maxs, &maxs[0], agent->dimension*sizeof(double));

		// advance to the next cell, odometer-style: bump dimension 0 and
		// carry into higher dimensions when one wraps past its maximum
		int d = 0;
		while(d < agent->dimension)
		{
			mins[d] = maxs[d];
			maxs[d] += steps[d];
			// small epsilon tolerates floating-point accumulation error
			if (maxs[d] <= tt.double_observations[d].max + 0.0000001)
				break;	// no carry needed

			// reached the end of this dimension: reset it and carry
			mins[d] = tt.double_observations[d].min;
			maxs[d] = mins[d] + steps[d];
			d ++;
		}
	}
}

// Maps a continuous observation to the index of the grid cell containing it.
// The positional base GRD_DISCRETIZED_SIZE^i is maintained in exact long
// arithmetic: the original code recomputed it each iteration with powf(),
// whose single-precision result can round to a wrong index once the base
// exceeds float's 24-bit mantissa (larger grids / more dimensions).
long  DiscreteMDPSolver::observation2Index(const Observation o)
{
	long  ind = 0;
	long  base = 1;	// GRD_DISCRETIZED_SIZE^i, updated incrementally

	for(int i=0; i< agent->dimension; i++)
	{
		double step = (agent->taskSpec.double_observations[i].max - agent->taskSpec.double_observations[i].min)/ ParamReader::GRD_DISCRETIZED_SIZE;
		int td = (int)((o[i] - agent->taskSpec.double_observations[i].min) / step);
		//exception for the last point in which the max is inclusive instead of exclusive
		if (td == ParamReader::GRD_DISCRETIZED_SIZE)
			td--;

		ind += td*base;
		base *= ParamReader::GRD_DISCRETIZED_SIZE;
	}
	return ind;
}


// (Re)estimates the transition/reward model for every state, optionally
// spreading the work over NUMBER_OF_SLAVES threads; each builder handles a
// contiguous range [minInd, maxInd) of state indices.
void DiscreteMDPSolver::constructTransitionFunction()
{
	long  minInd = 0;
	long  maxInd = 0;

	parentTFRunning = true;	// slaves keep running while this is true
	srand ( time(NULL) );

#ifdef USE_THREADS
    boost::thread_group thrds;
	// Ceiling division in integer arithmetic: the original cast to double
	// AFTER the integer division, so ceil() was a no-op and chunks could be
	// one element short (masked only by the clamp below).
	const long chunk = (stateSize + NUMBER_OF_SLAVES) / NUMBER_OF_SLAVES;	// == ceil((stateSize+1)/NUMBER_OF_SLAVES)
	for(int i=0; i< NUMBER_OF_SLAVES; i++)				//one for us and the rest for slaves
	{
		maxInd = minInd + chunk;
		if (maxInd >= stateSize-1) maxInd = stateSize;	// last chunk takes the remainder
        thrds.create_thread(FiniteMDPTFBuilder(this,minInd,maxInd, true));
		minInd = maxInd;
	}
	thrds.join_all();
#else
	// Single-threaded: one builder covers every regular state.
	FiniteMDPTFBuilder tf(this, 0, stateSize,false);
	tf.buildTF();
#endif

	parentTFRunning = false;
}


// Runs value iteration over the discretized MDP until convergence, timeout,
// or the sweep horizon is reached.  With USE_THREADS, slave threads perform
// additional asynchronous random backups in parallel with the main sweep.
void DiscreteMDPSolver::doVI()
{
	int horizon = 300;	// hard cap on the number of full sweeps

	parentVIRunning = true;	// slaves run while this flag is set
#ifdef USE_THREADS
    boost::thread_group thrds;
	if (NUMBER_OF_SLAVES>0)
	{
		for(int i=0; i< NUMBER_OF_SLAVES; i++)
	        thrds.create_thread(FiniteMDPVI(this));
	}
#endif


	//this is the maximum change in value in one iteration (if it's close to 0, we can terminate VI)
	double diff = 9999999;
#ifndef OS_WINDOWS
	double insertionTime = measureTimeEnd();	//the time it took to make an MDP
	measureTimeStart();
#endif
	int t=1;
	for(t = 1; t <= horizon; t++)
	{
		if (t % 10 == 0)
		{
			printf(".");	// progress indicator, one dot per 10 sweeps
			fflush(stdout);
		}
#ifndef	OS_WINDOWS
		// Abandon the solve if the agent's per-step time budget is exceeded.
		if(measureTimeEnd() > ParamReader::AGNT_RUN_TIMEOUT)
		{
			printf("timeout %lf\n", measureTimeEnd());
			// NOTE(review): slaves watch parentVIRunning, not isVIRunning —
			// confirm this is the flag intended here (it is still cleared
			// unconditionally after the loop, so slaves do stop).
			isVIRunning = false;
			break;
		}
#endif
		if (diff < ParamReader::PLN_VI_EPSILON ) //terminate iteration if values dont change very much
		{
			parentVIRunning = false;	// also signals slave threads to stop
			break;
		}
		diff = 0;
		for(int i=0; i<stateSize; i++)//for each state
		{
			double oldval = states[i]->getValue();
			states[i]->update();	// one Bellman backup
			double newval = states[i]->getValue();

			// track the largest value change seen in this sweep
			if (diff < fabs(newval - oldval))
				diff = fabs(newval - oldval);
		}
	}//for t
#ifndef OS_WINDOWS
	printf("iterations=%d time:(%lf+%lf)=%lf\n",t,insertionTime, measureTimeEnd(),  measureTimeEnd()+ insertionTime  );
#else
	printf("iteration=%d \n",t);
#endif
	//	printf("[unknown: %ld] confidence[%lf,%lf,%lf] \n", unknown, tfGens->min_confidence,tfGens->getAvgConfidence(), tfGens->max_confidence );
//	printf("solved... unknown: %d \n", unknown);

	parentVIRunning = false;	// ensure slaves terminate on every exit path

#ifdef USE_THREADS
	thrds.join_all();
#endif 

}


/*
void DiscreteMDPSolver::doVI()
{
	long unknown = 0;	//stat: number of unknown states
	int horizon = 100;

	//this is the maximum change in value in one iteration (if it's close to 0, we can terminate VI)
	double diff = 9999999;
#ifndef OS_WINDOWS
	double insertionTime = measureTimeEnd();	//the time it took to make an MDP
	measureTimeStart();
#endif
	int t=1;
	for(t = 1; t <= horizon; t++)
	{
		if (t % 10 == 0)
		{
			printf(".");
			fflush(stdout);
		}
#ifndef	OS_WINDOWS
		if(measureTimeEnd() > ParamReader::AGNT_RUN_TIMEOUT)
		{
			printf("timeout %lf\n", measureTimeEnd());
			isVIRunning = false;
			break;
		}
#endif
		if (diff < ParamReader::PLN_VI_EPSILON ) //terminate iteration if values dont change very much
		{
			break;
		}
		diff = 0;
		for(int i=0; i< stateSize; i++)//for each state
		{

				double maxAction=-999999;
				int maxActionInd = 0;
				vector<int> maxActionInds;
				for(int e=0; e < action_number; e++)
				{
					//we should compute the value of each action by taking its reward plus the value of the states we can reach by this action

					double val ;
					DiscretizedState* s = states[i];

					val = 0;
					for(list<TransProb>::iterator it = s->tfunc[e].begin(); it != s->tfunc[e].end(); it++)
					{
						val += (*it).prob*( (*it).reward + ParamReader::GAMMA*(*it).nextState->getValue());
					}

					s->qvals[e] = val;
					if (val > maxAction)
					{
						maxAction = val;
						maxActionInd = e;
						maxActionInds.clear();
						maxActionInds.push_back(e);
					}
					else if (val == maxAction)
						maxActionInds.push_back(e);
				}//for e

				if (diff < fabs(states[i]->getValue()  - maxAction))
					diff = fabs(states[i]->getValue() - maxAction);
				states[i]->value  = maxAction;
			}
	}//for t
#ifndef OS_WINDOWS
	printf("t=%d [unknown: %ld] time:(%lf+%lf)=%lf\n",t,  unknown,insertionTime, measureTimeEnd(),  measureTimeEnd()+ insertionTime  );
#else
	printf("t=%d [unknown: %ld]\n",t,  unknown );
#endif
	//	printf("[unknown: %ld] confidence[%lf,%lf,%lf] \n", unknown, tfGens->min_confidence,tfGens->getAvgConfidence(), tfGens->max_confidence );
//	printf("solved... unknown: %d \n", unknown);

}
*/

//-----------------------------------TFBuilder ---------------------




// Estimates, by Monte-Carlo sampling, the discrete transition and reward
// model for every state index in [start, end): for each (state, action) it
// draws GRD_EST_SAMPLE_SIZE random points inside the cell, asks the learned
// generalizers for the successor and reward, and converts the resulting
// counts into transition probabilities and mean rewards.
void FiniteMDPTFBuilder::buildTF()
{
	// scratch observation reused for every sample
	Observation tos = new Observation_type[parent->agent->dimension];
	for(long  i=start; i< end; i++)	//for each state we are assigned to
	{
		//check slaving condition: stop early once the parent ends the TF build
		if (slave && !parent->parentTFRunning )
			break;


		double knownness = 0;
		for(int action=0; action < parent->action_number; action++)
		{
			// successor index -> (sum of sampled rewards, sample count)
			map<long , pair<double,int> > cntMap;
			int totalSample = 0;

			for(int j=0; j< ParamReader::GRD_EST_SAMPLE_SIZE; j++)
			{
				double tk;	// knownness of this particular prediction
				parent->states[i]->makeRandomState(tos);
				Observation tend = parent->tfGens->predict(tos, action, tk);

				// keep the highest knownness seen for this state
				if (tk> knownness)
					knownness = tk;

				if (!tend)
					continue;	// learner could not produce a prediction

				parent->agent->bringObservation2bound(tend);
				// NOTE(review): third argument is tos, not tend — if the RF
				// signature is (from, action, to), this looks like a bug;
				// confirm against RFGeneralizer::predict.
				double r = parent->rfGens->predict(tos,action, tos);


				long endInd  = parent->observation2Index(tend);

				//debug:
//				for(int k=0; k< parent->agent->dimension; k++)
//					if (parent->states[endInd]->mins[k] > tend[k]+0.00000001 ||
//						parent->states[endInd]->maxs[k] < tend[k]-0.000001)
//						printf("wtf baba jan\n");
				//endd


				// terminal successors are redirected to the sink state
				if (parent->rfGens->isTerminal(tend))
					endInd = parent->stateSize;

				delete[] tend;

				//add this to the count list
				map<long, pair<double,int> >::iterator it = cntMap.find(endInd);
				if (it == cntMap.end())
				{
					cntMap[endInd] = ( pair<double,int>(r,1));
				}else
				{
					(*it).second.first  += r;		//add the reward
					(*it).second.second  ++;		//how many times
				}
				totalSample++;
			}//each sample

			//now build the transition probs out of the cntMap
			DiscretizedState* st = parent->states[i];
			st->knownness[action] = knownness;
			st->tfunc[action].clear();
			for(map<long,pair<double,int> >::iterator it = cntMap.begin(); it != cntMap.end(); it++)
			{
				TransProb tp ;
				tp.nextState = parent->states[(*it).first];
//				tp.nextIndex = (*it).first;
				tp.prob = (*it).second.second / (double)totalSample;	// relative frequency
				tp.reward = (*it).second.first / (*it).second.second;	// mean sampled reward
				st->tfunc[action].push_back(tp);
			}
		}//each action
	}//each state

	delete[] tos;
}




//------------------------------------DIscretizedState----------------

// Dumps this state's cell bounds and per-action transition lists to f.
// Fixes two defects in the original: m_index is a long but was printed with
// %d (undefined behavior; line below already used %ld for the same field),
// and the bounds print hard-coded dimensions 0 and 1 — now it prints every
// dimension.
void DiscretizedState::print(FILE* f)
{
	fprintf(f, "\n%ld:", m_index);
	for(int d=0; d< parent->agent->dimension; d++)
		fprintf(f, " [%lf,%lf]", mins[d], maxs[d]);
	fprintf(f, "\n");

	for(int i=0; i< parent->action_number; i++)
	{
		fprintf(f,"\taction %d:\n", i);
		for(list<TransProb>::iterator it= tfunc[i].begin(); it != tfunc[i].end(); it++)
		{
			fprintf(f,"\t\t %ld, %lf, %lf \n",(*it).nextState->m_index, (*it).prob, (*it).reward);
		}
	}
}

// Optimism in the face of uncertainty: blends the learned q-value with the
// optimistic bound VMAX according to how well-known this action is (1 =
// fully known -> pure q-value, 0 = unknown -> pure VMAX).
double DiscretizedState::getQValue(int action)
{
	const double k = knownness[action];
	return k*qvals[action] + (1-k)*VMAX;
}

// Builds one grid-cell state: zeroed q-values and knownness per action, an
// empty transition list per action, and uninitialized cell bounds (filled in
// later by DiscreteMDPSolver::constructStates).
DiscretizedState::DiscretizedState(long si, DiscreteMDPSolver* p)
{
	m_index = si;
	parent = p;
	value = 0;

	const int nActions = p->action_number;
	const int dim = p->agent->dimension;

	qvals = new double[nActions];
	knownness = new double[nActions];
	memset(qvals, 0, nActions*sizeof(double));
	memset(knownness, 0, nActions*sizeof(double));

	tfunc = new list<TransProb> [nActions];

	mins = new double[dim];
	maxs = new double[dim];
}

// Releases all per-state arrays. delete[] on a null pointer is a no-op, so
// no guards are needed.
DiscretizedState::~DiscretizedState()
{
	delete[] qvals;
	delete[] knownness;
	delete[] tfunc;
	delete[] mins;
	delete[] maxs;
}

// Fills o with a point sampled uniformly inside this cell's hyper-rectangle
// (one independent draw per dimension).
void DiscretizedState::makeRandomState(Observation o)
{
	const int dim = parent->agent->dimension;
	for(int d=0; d< dim; d++)
		o[d] = floatRand(mins[d], maxs[d]);
}

// Returns the cached state value; it is maintained incrementally by
// update(), so no recomputation happens here.
double DiscretizedState::getValue()
{
	return value;
}

// Greedy action selection with random tie-breaking: collect every action
// whose knownness-adjusted q-value ties for the maximum, then pick one of
// them uniformly at random.
Action DiscretizedState::getBestAction()
{
	std::vector<int> ties;
	double best = getQValue(0);
	ties.push_back(0);

	for(int a=1; a< parent->action_number; a++)
	{
		const double q = getQValue(a);
		if (q > best)
		{
			best = q;
			ties.clear();
			ties.push_back(a);
		}
		else if (q == best)
		{
			ties.push_back(a);
		}
	}

	// defensive clamp in case intRand's upper bound is inclusive
	int pick = intRand(ties.size());
	if (pick >= (int)ties.size())
		pick = (int)ties.size() - 1;

	return ties[pick];
}

//do one backup
void DiscretizedState::update()
{
	double bestVal = -9999999;
	for(int i=0; i< parent->action_number; i++)
	{
		double val = 0;
		for(list<TransProb>::iterator it=tfunc[i].begin(); it != tfunc[i].end(); it++)
		{
			TransProb& tp = (*it);
			val += (*it).prob* ((*it).reward + ParamReader::GAMMA*( (*it).nextState->getValue()));
		}
		qvals[i] = val;

		val = getQValue(i);
		if(bestVal < val)
			bestVal = val;
	}

	value = bestVal;
}


//--------------------------------------FiniteMDPVI -------------------------------------


// Asynchronous VI slave loop: keeps backing up uniformly random states
// until the parent clears parentVIRunning.
void FiniteMDPVI::doVI()
{
	while(parent->parentVIRunning)
	{
		const long s = intRand(parent->stateSize);
		parent->states[s]->update();
	}
}

}//namespace discreteSolver
}//planning
