#include <math.h>
#include "GridSolver.h"
#include "MREAgent.h"
#include "Util.h"
#include <boost/thread/thread.hpp>



#ifdef _DEBUG
   #define new MYDEBUG_NEW
#endif



namespace GridSolver
{


//-----------------------------------------GridMDPForwardSolver methods --------------------------
// Forward-search variant of the grid solver: all construction is delegated to
// GridMDPSolver; this class only overrides action selection and value lookup
// to do a shallow look-ahead on top of the precomputed grid values.
GridMDPForwardSolver::GridMDPForwardSolver(MREAgent *a, TFGenerator *tf, RFGeneralizer *rf)
:GridMDPSolver(a,tf,rf)
{
}

// Picks the action with the highest look-ahead q-value for the given
// observation.  Ties between equally-valued actions are broken uniformly at
// random.  Terminal states have no meaningful action, so a random one is
// returned (same convention as GridMDPSolver::getBestAction).
Action GridMDPForwardSolver::getBestAction(Observation state)
{
	int DEPTH = 0; 	//look-ahead depth; 0 falls back to the grid solver's q-values (see getQValue)

	if (rfGens->isTerminal(state))
		return agent->randomAction(); 

	//collect every action that ties for the best q-value
	vector<Action> results; 
	double bestResult = -9999999; 
	for(int i=0; i< agent->numberOfActions; i++)
	{
		double qvalue = getQValue(state, i, DEPTH); 
		if (qvalue > bestResult)
		{
			bestResult = qvalue; 
			results.clear(); 
			results.push_back(i); 
		}
		else if (qvalue == bestResult)
			results.push_back(i); 	//another action tied for the best value
	}

	int randInd = 0; 
	if (results.size()>1) 
	{
		//BUG FIX: the random index was computed but never assigned to randInd,
		//so ties were always resolved in favor of the lowest-numbered action.
		randInd = intRand(results.size()); 
		if (randInd >= (int)results.size())	//defensive: in case intRand can return its argument
			randInd = (int)results.size() - 1; 
	}

	return results[randInd]; 
}

//make sure you don't call this function on terminated state unless you uncomment the first two lines (removed for performance)
// One-step look-ahead q-value: q(s,a) = r(s) + gamma * V(predict(s,a), horizon-1).
// When the horizon is exhausted, falls back to the q-value precomputed by the
// background grid solver.  Unknown transitions get the optimistic R-MAX bound.
double GridMDPForwardSolver::getQValue(Observation state, Action a, int horizon)
{
//	if (rfGens->isTerminal(state))
//		return 0; 
	if (horizon<=0)
	{
		return getState(state).actions[a].qvalue; 
	}
	//(an unused local `State& gholi = getState(state);` was removed here --
	// getState is a side-effect-free array lookup)

	double qvalue = -1; 
	Observation next = tfGens->predict(state, a); 		
	if (!next)	//unknown, use max value (optimism in the face of uncertainty)
		qvalue = ParamReader::rmax  / (1 - ParamReader::GAMMA) ; 
	else		//use bellman backup to get it
	{
		double nextValue = getStateValue(next, horizon-1); 
		double reward = rfGens->predict(0,0 , state); 
		qvalue = reward + ParamReader::GAMMA * nextValue; 
		//BUG FIX: observations are arrays allocated with new[] (see
		//stateindex2observation_randomize); deleting with plain `delete` is
		//undefined behavior.  Use delete[] to match the rest of the file.
		delete[] next; 
	}
	return qvalue; 
}

double GridMDPForwardSolver::getStateValue(const Observation state, int horizon)
{
	double value = -1; 
	if (rfGens->isTerminal(state))
		value= rfGens->predict(0,0,state); 
	else 
	{
		if (horizon > 1) //warning:
		{
			value = -9999999; 
			for(int i=0; i< agent->numberOfActions; i++)
			{
				double qvalue = getQValue(state, i, horizon); 
				if (qvalue > value)
					value = qvalue; 
			}
		}else    //use the gridsolver results to do the prediction
		{
			return getState(state).value; 
		}
	}


	return value; 
}
//-----------------------------------------------------------------------------------------------


// Builds the discretized-grid MDP solver: allocates and initializes the flat
// state array (via init()) and configures drawing as one subplot per action.
GridMDPSolver::GridMDPSolver(MREAgent*a, TFGenerator* tf, RFGeneralizer* rf)
:SlaveableMDPSolver(a,tf,rf)
{
	NUMBER_OF_SLAVES = 2; 	// number of helper VI threads spawned by doVI (when USE_THREADS)
	setSubplotDimension(1, a->numberOfActions); 
	init(); 
}





// Releases the flat state array allocated in init().
GridMDPSolver::~GridMDPSolver()
{
	// delete[] on a null pointer is a defined no-op, so no guard is needed
	delete[] states; 
}


// Allocates and initializes the flat array of discretized states.
// Layout: indices [0, size) are the real grid cells; states[size] is the
// "fictituous" state (value 0) and states[size+1] is the sink state (worst
// possible discounted return).  The static fictituousState/sinkState indices
// are set so stateIndex2directIndex maps them to exactly those two slots.
void GridMDPSolver::init()
{
	State::actionsNumber = agent->numberOfActions ; 
	StateIndex::dimension = agent->dimension ; 
	taskSpec =&( agent->taskSpec); 
	//initialize discretize values 
	// NOTE(review): when GRD_DISCRETIZED_SIZE <= 0 the discretized_size array
	// is assumed to be filled elsewhere -- confirm before relying on it here.
	if (ParamReader::GRD_DISCRETIZED_SIZE > 0)
        for(int i=0; i < agent->dimension; i++)
			discretized_size[i] = ParamReader::GRD_DISCRETIZED_SIZE;
	int size = 1; 
	for(int i=0; i< agent->dimension; i++)
	{
		size *= discretized_size[i]; 
		State::fictituousState.index[i] = discretized_size[i]-1; //fill in the fictituous state index (this is after all legal states)
		State::sinkState.index[i] = discretized_size[i] - 1; //same for sink state
	}
	State::fictituousState.index[agent->dimension -1] ++; //this will make it go beyond
	State::sinkState.index[agent->dimension -1] +=2; //this state is after fictituous state
	State::totalNumberOfStates = size; 
	states = new State[size+2];		//+2 is for fictituous and sink states
	// set the value of imaginary states
	states[size].value = 0; 	//fictituous state
	states[size+1].value = (-ParamReader::rmax /(1-ParamReader::GAMMA)); //sink state: worst possible discounted return
}
// Looks up the State record for a raw observation by discretizing it first.
State& GridMDPSolver::getState(Observation o )
{
	return getStateEX(observation2stateindex(o)); 
}//
// Maps an already-discretized state index to its record in the flat array.
State& GridMDPSolver::getStateEX(const StateIndex& si)
{
	return states[stateIndex2directIndex(si)]; 
}
// Current value estimate of the state addressed by si.
double GridMDPSolver::getStateValue(StateIndex& si)
{
	return getStateEX(si).value; 
}//
// Performs one Bellman backup on a single state: recomputes every action's
// q-value from its cached reward and transition model, then stores the best
// value and a greedy policy entry (ties broken uniformly at random).
void GridMDPSolver::updatePolicyForState(StateIndex& si)
{
	long flat = stateIndex2directIndex(si);
	State& st = states[flat];

	double bestVal = -9999998; 
	int bestAct = 0; 
	vector<int> tied; 	//all actions whose q-value equals bestVal

	for(int act=0; act < State::actionsNumber; act++)	
	{
		StateAction& sa = st.actions[act]; 
		//q(s,a) = r(s,a) + gamma * sum over successors of P(s'|s,a) * V(s')
		double q = sa.reward;  
		for(probMap::iterator it = sa.transProbs.begin(); it != sa.transProbs.end(); it++)
		{
			StateIndex succ = (*it).first;
			q += ParamReader::GAMMA*(*it).second.first* getStateValue(succ);
		}
		sa.qvalue = q; 

		if (q > bestVal)
		{
			bestVal = q;
			bestAct = act; 
			tied.clear(); 
			tied.push_back(act); 
		}
		else if (q == bestVal)
			tied.push_back(act); 
	}

	st.value = bestVal; 
	if (tied.size() > 1)
	{	
		unsigned int pick = intRand(tied.size()); 
		if (pick == tied.size())	//defensive: in case intRand can return its argument
			pick--; 
		st.policy = tied[pick]; 
	}
	else 
		st.policy = bestAct; 
}


// Greedy action from the precomputed policy; random when the state is terminal.
Action GridMDPSolver::getBestAction(Observation state)
{
	if (rfGens->isTerminal(state))
		return agent->randomAction(); 

	return getState(state).policy; 
}


// Thread entry point: keeps re-solving the model from the agent's latest
// observation until `running` is cleared (presumably by a stop() defined
// elsewhere -- not visible in this file).
void GridMDPSolver::operator ()()
{
	running = true; 
	while (running)	//this will run until stop() is called
	{
		printf("+++++++++++++++++++++START resolving model with %d number of slaves...\n", NUMBER_OF_SLAVES); 
		solveModel(agent->lastState);
	}
}

// Rebuilds the MDP model -- transition probabilities and expected rewards for
// every state/action pair via doGeneralization -- then solves it with value
// iteration (doVI) and redraws.  The large commented-out section below is an
// earlier breadth-first variant that generalized states near the current
// observation first (useful under timeouts); kept for reference.
void GridMDPSolver::solveModel(Observation currentObservation)
{

/*	StateIndex currentState = observation2stateindex(currentObservation); 
	
	long ins =0; 
	//---------fill in unknown state/actions with our generalizer. ---------------
	//we do it in a wise way. do it for near states first (it helps if we have timeout)
	{
		bool*  updateFlags = new bool [State::totalNumberOfStates]; 
		memset(updateFlags, 0, sizeof(bool)*State::totalNumberOfStates ); 
		list<StateIndex> queue; 
		queue.push_back(currentState); 
	#ifndef OS_WINDOWS
		measureTimeStart();
	#endif
		
		long total=0; 
		while(! queue.empty())
		{
	#ifndef	OS_WINDOWS
			if(measureTimeEnd() > ParamReader::AGNT_RUN_TIMEOUT)
			{
				printf("timeout generalization %lf\n", measureTimeEnd());
				break;
			}
	#endif
			//update the first state
			StateIndex si = queue.front(); 
			long di = stateIndex2directIndex(si); 
			updateFlags[di] = true; 
	for(int e=0; e < State::actionsNumber; e++)	
			{
				StateAction& sa = states[di].actions[e];
//				if (sa.totalVisits < ParamReader::RMAX_KNOWN_THRESHOLD )
				doGeneralization(sa,e,di); 
			}
		
			//put its neighbors (it only works upto 5 dimensions) depend5:
			StateIndex tmp = si; 
			for(int a =-1; a<=1; a++)
			{
				tmp.index[0] = si.index[0] + a; 
				for(int b=-1; b<=1; b++)
				{
					if (StateIndex::dimension == 1)
					{
						addToQueue(queue, updateFlags,  tmp); 
						break; 
					}
					tmp.index[1] = si.index[1] + b; 
					for(int c=-1; c<=1; c++)
					{
						if (StateIndex::dimension == 2)
						{
							addToQueue(queue,  updateFlags, tmp); 
							break; 
						}
						tmp.index[2] = si.index[2] + c; 
						for(int d=-1; d<=1; d++)
						{
							if (StateIndex::dimension == 3)
							{
								addToQueue(queue, updateFlags,  tmp); 
								break; 
							}
							tmp.index[3] = si.index[3] + d; 
							for(int f=-1; f<=1; f++)
							{
								if (StateIndex::dimension == 4)
								{
									addToQueue(queue, updateFlags,  tmp); 
									break; 
								}
								tmp.index[4] = si.index[4] + f; 
								addToQueue(queue,  updateFlags, tmp); 
							}
						}
					}
				}
			}//put neighbors 
			queue.pop_front(); //remove from queue
			ins++; 
		}
		delete updateFlags ; 
	}
//---------------------------------------------------------------
	printf("total insertion was %ld\n", ins); 
*/

	//regenerate the model for every state/action pair, then run value iteration
	for(long i=0; i< State::totalNumberOfStates; i++)
	{
		for(int j=0; j< State::actionsNumber; j++)
			doGeneralization(states[i].actions[j],j,i); 
	}
	doVI(); 
	draw(); 
}


// Slave thread body: performs asynchronous value iteration while the parent
// solver's doVI() is running -- repeatedly picks a uniformly random state and
// applies one Bellman backup to it (same math as the sweep in
// GridMDPSolver::doVI).  Writes to value/policy intentionally race with the
// parent; see the original author's note below.
void GridSolverSlave::doVI()
{
	while(parent->isVIRunning)
	{
		// NOTE(review): other call sites guard against intRand returning its
		// argument (see updatePolicyForState); if that can happen here, this
		// backs up the fictituous state at states[totalNumberOfStates] -- confirm.
		long i = intRand(State::totalNumberOfStates); 

		double maxAction=-999999; 
		int maxActionInd = 0; 
		vector<int> maxActionInds; 	//actions tied for the best q-value
		for(int e=0; e < State::actionsNumber; e++)	
		{
			//we should compute the value of each action by taking its reward plus the value of the states we can reach by this action
			double val ; 
			State& s = parent->states[i]; 
			StateAction& sa = s.actions[e]; 
			val = sa.reward;  
			//q(s,a) = r(s,a) + gamma * sum over successors of P(s'|s,a) * V(s')
			for(probMap::iterator it = sa.transProbs.begin(); it != sa.transProbs.end(); it++)
			{
				StateIndex ss = (*it).first;
				val +=  (*it).second.first* (ParamReader::GAMMA*parent->states[parent->stateIndex2directIndex(ss)].value) ;
			}
			sa.qvalue = val; 
			if (val > maxAction)
			{
				maxAction = val;
				maxActionInd = e; 
				maxActionInds.clear(); 
				maxActionInds.push_back(e); 
			}
			else if (val == maxAction)
				maxActionInds.push_back(e); 
		}
			
		//mutex in here because we are changing the value and policy of the state 
		//(maybe we dont care much, because it's VI and one miss update is not important. it's not worth the overhead)
		parent->states[i].value  = maxAction; 
		if (maxActionInds.size()> 1)
		{	
			unsigned int rind = intRand(maxActionInds.size()); 
			if (rind == maxActionInds.size())	//defensive: in case intRand can return its argument
				rind--;

			parent->states[i].policy  = maxActionInds[rind]; 
		}else 
			parent->states[i].policy  = maxActionInd; 

		//mutex out here
	}
}



// Synchronous value iteration over the whole grid.  Optionally spawns
// NUMBER_OF_SLAVES background threads (GridSolverSlave) that back up random
// states in parallel while this thread does ordered sweeps.  Iteration stops
// after `horizon` sweeps, on timeout, or once the largest value change in a
// sweep drops below PLN_VI_EPSILON.
void GridMDPSolver::doVI()
{
	long unknown = 0;	//stat: number of state/action pairs still unknown to the model
	int horizon = 100; 	//maximum number of full sweeps

	isVIRunning = true; 
#ifdef USE_THREADS
    boost::thread_group thrds;
	if (NUMBER_OF_SLAVES>0)
	{
		for(int i=0; i< NUMBER_OF_SLAVES; i++)
	        thrds.create_thread(GridSolverSlave(this));
	}
#endif

	//this is the maximum change in value in one iteration (if it's close to 0, we can terminate VI)
	double diff = 9999999; 
#ifndef OS_WINDOWS
	double insertionTime = measureTimeEnd();	//the time it took to make an MDP
	measureTimeStart();
#endif
	int t=1; 
	for(t = 1; t <= horizon; t++)
	{
		if (t % 10 == 0)	//progress indicator
		{
			printf("."); 
			fflush(stdout); 
		}
#ifndef	OS_WINDOWS
		if(measureTimeEnd() > ParamReader::AGNT_RUN_TIMEOUT)
		{
			printf("timeout %lf\n", measureTimeEnd());
			isVIRunning = false; 
			break;
		}
#endif
		if (diff < ParamReader::PLN_VI_EPSILON ) //terminate iteration if values dont change very much
		{
			isVIRunning = false; 
			break;
		}
		diff = 0; 
		//use the same index type as the rest of the file (totalNumberOfStates is long)
		for(long i=0; i<State::totalNumberOfStates; i++)//for each state
		{
			double maxAction=-999999; 
			int maxActionInd = 0; 
			vector<int> maxActionInds; 	//actions tied for the best q-value
			for(int e=0; e < State::actionsNumber; e++)	
			{
				//q(s,a) = r(s,a) + gamma * sum over successors of P(s'|s,a) * V(s')
				State& s = states[i]; 
				StateAction& sa = s.actions[e]; 
				if (!sa.isKnown && t==1)	//count unknowns once, on the first sweep
					unknown++; 
				double val = sa.reward;  
				for(probMap::iterator it = sa.transProbs.begin(); it != sa.transProbs.end(); it++)
				{
					StateIndex ss = (*it).first;
					val +=  (*it).second.first* (ParamReader::GAMMA*states[stateIndex2directIndex(ss)].value) ;
				}
				sa.qvalue = val; 
				if (val > maxAction)
				{
					maxAction = val;
					maxActionInd = e; 
					maxActionInds.clear(); 
					maxActionInds.push_back(e); 
				}
				else if (val == maxAction)
					maxActionInds.push_back(e); 
			}//for e

			if (diff < fabs(states[i].value  - maxAction)) 
				diff = fabs(states[i].value - maxAction); 
			states[i].value  = maxAction; 
			if (maxActionInds.size()> 1)	//break ties randomly
			{	
				unsigned int rind = intRand(maxActionInds.size()); 
				if (rind == maxActionInds.size())	//defensive: in case intRand can return its argument
					rind--; 
				states[i].policy  = maxActionInds[rind]; 
			}else 
				states[i].policy  = maxActionInd; 
		}
	}//for t

	isVIRunning = false; 
#ifdef USE_THREADS
	//BUG FIX: wait for the slave threads to finish before returning.  Without
	//the join they could still be touching `states` while the caller moves on,
	//and boost::thread_group does not join its threads on destruction.
	thrds.join_all();
#endif

#ifndef OS_WINDOWS
	printf("t=%d [unknown: %ld] time:(%lf+%lf)=%lf\n",t,  unknown,insertionTime, measureTimeEnd(),  measureTimeEnd()+ insertionTime  ); 
#else
	printf("t=%d [unknown: %ld]\n",t,  unknown ); 
#endif
}



//maps a multi-dimensional grid index to our flat (row-major) array index.
//BUG FIX + generalization: the old unrolled version divided by
//discretized_size[1] even when dimension==1 (reading an entry that may be
//unset -- possible division by zero) and was hard-limited to 5 dimensions.
//The Horner-style loop below computes the identical row-major index
//  si[0]*d1*...*d(n-1) + si[1]*d2*...*d(n-1) + ... + si[n-1]
//for any number of dimensions.
int GridMDPSolver::stateIndex2directIndex(const StateIndex& si)
{
	int index = 0; 
	for(int i=0; i < agent->dimension; i++)
		index = index*discretized_size[i] + si.index[i]; 
	return index; 
}

//this function makes some sample for this state using function appx. 
// Monte-Carlo estimate of the transition/reward model for one state-action:
// draws GRD_EST_SAMPLE_SIZE random observations inside the grid cell `ind`,
// pushes each through the learned transition model, and rebuilds sa's
// transition probabilities and expected reward from the resulting counts.
// No-op when generalization is disabled by configuration.
void GridMDPSolver::doGeneralization(StateAction& sa, int action,  int ind)
{
	if (! ParamReader::AGNT_USE_GENERALIZATION)
		return; 
	sa.transProbs.clear(); 
	countMap tmpCount; 
	
	int sample_size = 0; 
	double knownness=0; 	//highest prediction confidence seen over the samples
	for(int i=0; i< ParamReader::GRD_EST_SAMPLE_SIZE; i++)
	{
		Observation start = stateindex2observation_randomize(directindex2statindex(ind));
		double tmp; 
		//tmp = tfGens->getConfidence(start, action); 
		Observation end = tfGens->predict(start, action, tmp); 	//tmp receives the prediction confidence
		if (tmp > knownness)
			knownness = tmp; 
		//todo: maybe we should also consider confidence
		if (!end)	//transition model could not predict: skip this sample
		{
			delete[] start; 
			continue; 
		}

		bringObservation2bound(end); 
		StateIndex endInd; 

		//terminal successors all collapse into the shared sink state
		if (rfGens->isTerminal(end))
			endInd = State::sinkState; 
		else
			endInd = observation2stateindex(end); 


		double r = rfGens->predict(start, action, end); 

		delete[]  start; 
		delete[]  end; 

		sample_size ++; 
		StateAction::addToCountList(tmpCount, endInd,r); 
	}

	sa.buildTransitionProbs(tmpCount,sample_size, knownness); 
	sa.reward = sa.computeReward(); 
}

// Draws a uniformly random concrete observation from inside the grid cell
// addressed by ind.  The caller owns the returned array (release with delete[]).
Observation GridMDPSolver::stateindex2observation_randomize(const StateIndex& ind)
{
	Observation sample = new Observation_type[StateIndex::dimension]; 
	for(int d=0; d<StateIndex::dimension; d++)
	{
		double lo = taskSpec->double_observations[d].min; 
		double cellWidth = (taskSpec->double_observations[d].max - lo) / (double)discretized_size[d]; 
		double cellStart = lo + ind.index[d]*cellWidth; 
		sample[d] = floatRand(cellStart, cellStart + cellWidth); 
	}

	return sample; 
}//

// Inverse of stateIndex2directIndex: unpacks a flat (row-major) array index
// back into a per-dimension grid index.
StateIndex GridMDPSolver::directindex2statindex(int ind)
{
	StateIndex result; 

	//stride of dimension 0 = product of the remaining dimension sizes
	int stride = 1; 
	for(int d=1; d< StateIndex::dimension; d++)
		stride *= discretized_size[d]; 

	for(int d=0; d< StateIndex::dimension; d++)
	{
		result.index[d] = ind / stride; 
		ind %= stride; 
		if (stride > 1)	//guard keeps us from indexing discretized_size past the last dimension
			stride /= discretized_size[d+1]; 
	}

	return result; 
}//


// Clamps every observation component to strictly inside the task's legal
// range (a small margin keeps discretization from landing out of bounds).
void GridMDPSolver::bringObservation2bound(Observation o)
{
	for(int d=0; d< StateIndex::dimension; d++)
	{
		double hi = taskSpec->double_observations[d].max; 
		double lo = taskSpec->double_observations[d].min; 
		if (o[d] > hi)
			o[d] = hi - 0.001; 
		else if (o[d] < lo)
			o[d] = lo + 0.001; 
	}
}

//depend5: 
// Maps a raw observation to the index of the discretized grid cell containing
// it (a null observation maps to the origin cell).  StateIndex still carries
// at most 5 dimensions (depend5:), but the per-axis computation now loops.
// BUG FIX: the old unrolled code compared bb/cc/dd/ee against
// discretized_size[k] even for dimensions the task does not have, which could
// decrement an unused index to -1 whenever that entry happened to be 0.  The
// loop below only touches the first `dimension` axes.
StateIndex GridMDPSolver::observation2stateindex(Observation o)
{
	if (!o)
		return StateIndex(0,0,0,0,0); 

	bringObservation2bound(o); 

	int idx[5] = {0,0,0,0,0}; 
	for(int d=0; d< agent->dimension && d < 5; d++)
	{
		double lo = taskSpec->double_observations[d].min; 
		double cellWidth = (taskSpec->double_observations[d].max - lo)/discretized_size[d]; 
		idx[d] = (int) ((o[d] - lo) / cellWidth); 
		//an observation sitting exactly on the upper bound belongs to the last cell
		if (idx[d] == discretized_size[d])
			idx[d]--; 
	}

	return StateIndex(idx[0],idx[1],idx[2],idx[3],idx[4]); 
}//

//this function is only used in solveModel
// Enqueues si for the (currently disabled) BFS model-building pass, unless it
// lies outside the grid or has already been flagged as visited/queued.
void GridMDPSolver::addToQueue(list<StateIndex>& queue, bool* flags, StateIndex& si)
{
	//reject indices that fall outside the discretized grid
	for(int d=0; d< StateIndex::dimension; d++)
		if (si.index[d]<0 || si.index[d] >= discretized_size[d])
			return; 

	long flat = stateIndex2directIndex(si); 
	if (!flags[flat])
	{
		flags[flat] = true; 
		queue.push_back(si); 
	}
}


void GridMDPSolver::print()
{
	for(int i=0; i< State::totalNumberOfStates ; i++)
	{
		StateIndex si = directindex2statindex(i); 
		State& s = states[i]; 

		si.print(); 
		s.print(); 
	}

}

double GridMDPSolver::getStateValue(const Observation o)
{
	StateIndex si = observation2stateindex(o);
	return	getStateValue(si); 
}



}
