#include "SarsaPlanner.h"

#include <vector>

#include "MREAgent.h"
#include "ParamReader.h"

namespace planning
{

SarsaPlanner::SarsaPlanner(MREAgent* a, TFGenerator* tf, RFGeneralizer* rf)
:MDPSolver(a,tf,rf)
{
	// Tile-coding SARSA(lambda) planner configured from the agent's task spec.
	// a:  agent whose taskSpec supplies action/observation ranges (borrowed, not owned)
	// tf: transition-model generalizer, queried in solveModel
	// rf: reward-model generalizer, queried in solveModel

	RLgamma = 1.0;           // discount factor
	minimum_trace =  0.01;   // eligibility traces below this are culled from the active list
	num_nonzero_traces  = 0; // active-trace list starts empty

	ts= & (a->taskSpec) ;  // borrow the agent's task spec; do not free


/*	int decode_result = decode_taskspec( ts, _ts);
	if(decode_result!=0){
		printf("Could not decode task spec, code: %d for task spec: %s\n",decode_result,_ts);
		exit(1);
	}
*/

	//initialize some variables
	NUM_ACTIONS = ts->int_actions[0].max - ts->int_actions[0].min + 1; // discrete action count from the first int-action range
	NUM_TILINGS = 14;                                                  // number of tilings used by sarsa::tiles
	NUM_OBSERVATIONS = ts->num_double_observations; 

	//create containers (all heap buffers must be released in the destructor)
	QSA = new double [NUM_ACTIONS];                                  // Q-values for the current observation, one per action
	weights = new double[MEMORY_SIZE];                               // linear function-approximation weights
	traces = new double [MEMORY_SIZE];                               // eligibility traces, parallel to weights
	nonzero_traces_inverse = new int[MEMORY_SIZE];                   // feature index -> slot in nonzero_traces
	activeFeatures = Allocate2DArray<int>(NUM_ACTIONS, NUM_TILINGS); // active tile indices per action
	tileWidth = new double[NUM_OBSERVATIONS];                        // per-dimension tile width

	// 8 tiles across each observation dimension's full range.
	for(int i=0; i< NUM_OBSERVATIONS; i++)
		tileWidth[i] = (ts->double_observations[i].max - ts->double_observations[i].min)/8.0; 


	memset(weights, 0, MEMORY_SIZE*sizeof(double)); //clear weights
	memset(traces, 0, MEMORY_SIZE*sizeof(double));	//clear all traces

	epsilon = 0.3;                    // probability of random action
	alpha = 0.2;                      // step size parameter
	lambda = 0.95;                    // trace-decay parameters	
}

SarsaPlanner::~SarsaPlanner(void)
{
	// Release every buffer allocated in the constructor.  The original
	// destructor leaked traces, nonzero_traces_inverse and tileWidth.
	// (ts is borrowed from the agent and must NOT be freed here.)
	delete[] QSA; 
	delete[] weights; 
	delete[] traces;
	delete[] nonzero_traces_inverse;
	delete[] tileWidth;
	Free2DArray(activeFeatures); 
}


void SarsaPlanner::operator ()()
{
	// Thread entry point, currently disabled: the continuous re-solving loop
	// below is commented out, so invoking the functor is a no-op.
	// solveModel() is called explicitly elsewhere instead.
/*	running = true; 
	while (running)	//this will run until stop() is called
	{
		printf("+++++++++++++++++++++START resolving model...\n"); 
		solveModel(agent->lastState);
	}
*/
}


Action SarsaPlanner::agent_start(Observation o , bool greedy)
{
	// Begin an episode at observation o: clear all eligibility traces
	// (decay by 0), compute the active tile features and Q-values for o,
	// then choose the first action — epsilon-greedy, or purely greedy when
	// greedy==true.  The chosen action is stored in oldAction so the next
	// agent_step can form the SARSA update.
    DecayTraces(0.0);                                           
    computeActiveFeatures(o);                                                     
    computeActionValues(); 
	
	selectEpsilonGreedyAction(o,oldAction, greedy);
                                     
	return oldAction;
}

Action SarsaPlanner::agent_step(double r, Observation o,bool greedy)
{
	// One SARSA(lambda) step: given reward r for the previous action and the
	// new observation o, form the TD error
	//     delta = r + RLgamma * Q(o, newAction) - Q(oldObservation, oldAction)
	// update weights through the eligibility traces, and return the next
	// action.  Call order matters: Q(oldObservation, oldAction) must be read
	// before computeActiveFeatures(o) overwrites the active-feature set.
	updateTraces();
	
	computeActionValues(oldAction);    //compute value of Q[oldObservation][oldAction]
	double delta = r - QSA[oldAction];
	
	computeActiveFeatures(o);                                              
	computeActionValues();			  //new action values based on new observation											
	
	selectEpsilonGreedyAction(o,newAction,greedy);

	delta += RLgamma * QSA[newAction];
	
    updateWeights(delta);

	oldAction = newAction; 


	return newAction;
}

void SarsaPlanner::agent_end(double r)
{
	// Terminal update: there is no successor state, so the TD target is just
	// the final reward r.  Epsilon is annealed (x0.99) once per finished
	// episode so exploration decreases over training.
	updateTraces();
		
	computeActionValues(oldAction);
	double delta = r - QSA[oldAction];
			
	updateWeights(delta);
	
	epsilon *= 0.99;
}

// HELPER FUNCTIONS ----------------------------------------------
 
void SarsaPlanner::selectEpsilonGreedyAction(Observation o, Action& action, bool greedy)
{
	// Epsilon-greedy action selection: with probability epsilon pick a
	// uniformly random action (suppressed when greedy==true), otherwise the
	// action with the highest current Q-value.
	// NOTE: drand48() is drawn first so the RNG sequence is consumed even on
	// a purely greedy call, matching the established behavior.
	const bool explore = (drand48() <= epsilon) && !greedy;

	if (explore)
		action = (int)(drand48() * (NUM_ACTIONS));
	else
		action = argmax(QSA);

	// Defensive clamp; drand48() < 1.0 should already keep this in range.
	if (action >= NUM_ACTIONS)
		action = NUM_ACTIONS - 1;
}

void SarsaPlanner::updateTraces()
{
	// Per-step trace maintenance (replacing-traces scheme):
	//   1. decay every nonzero trace by gamma*lambda,
	//   2. clear traces of features active for actions OTHER than the one taken,
	//   3. set the traces of the taken action's active features to 1.0.
	// Fix: the original indentation made step 3 look nested inside step 2's
	// action loop; it is a single pass that runs once, after that loop.
	// Behavior is unchanged — only braces/indentation and the stale
	// "beginning of every episode" comment were corrected.

	DecayTraces(RLgamma*lambda);                              

	for (int a = 0; a < NUM_ACTIONS; a++)
	{
		if (a != oldAction)
			for (int j = 0; j < NUM_TILINGS; j++)
				ClearTrace(activeFeatures[a][j]);
	}

	for (int j = 0; j < NUM_TILINGS; j++)
		SetTrace(activeFeatures[oldAction][j], 1.0); // replace traces
}

void SarsaPlanner::updateWeights(double delta)
{
	// Apply the TD error to every weight with a nonzero eligibility trace.
	// The step size is normalized by NUM_TILINGS so the effective learning
	// rate does not depend on how many tilings are active per state.
	const double step = (alpha/NUM_TILINGS)*delta;

	for (int t = 0; t < num_nonzero_traces; t++)
	{
		const int f = nonzero_traces[t];
		weights[f] += step * traces[f];
	}
}

void SarsaPlanner::computeActionValues() 
{
	// Q(s,a) for every action: the sum of the weights of that action's
	// currently-active tile features (linear function approximation).
	for (int a = 0; a < NUM_ACTIONS; a++) 
	{
		double q = 0.0;
		for (int j = 0; j < NUM_TILINGS; j++)
			q += weights[activeFeatures[a][j]];
		QSA[a] = q;
	}
}
		 
void SarsaPlanner::computeActionValues(Action a) 
{
	// Recompute Q(s,a) for a single action from the current active features
	// and weights (used to re-read the old state/action value mid-update).
	double q = 0.0;
	for (int j = 0; j < NUM_TILINGS; j++)
		q += weights[activeFeatures[a][j]];
	QSA[a] = q;
}

void SarsaPlanner::computeActiveFeatures(Observation o)
{
	// Compute the set of active tile indices for observation o — one row of
	// NUM_TILINGS indices per action.  Each observation dimension is scaled
	// by its tile width before hashing into the tile coder.
	//
	// Fix: replaced the raw new[]/delete[] scratch buffer with std::vector
	// (RAII) — no leak on any exit path, including if sarsa::tiles throws.
	std::vector<double> scaled(NUM_OBSERVATIONS);

	for (int i = 0; i < NUM_OBSERVATIONS; i++)
		scaled[i] = o[i] / tileWidth[i];

	for (int a = 0; a < NUM_ACTIONS; a++)
		sarsa::tiles(&activeFeatures[a][0], NUM_TILINGS, MEMORY_SIZE, &scaled[0], NUM_OBSERVATIONS, a);
}

int SarsaPlanner::argmax(double* QSA)
{
	// Return the index (action) of the largest entry in QSA, breaking ties
	// uniformly at random (reservoir sampling over the tied entries).
	//
	// Fix: num_ties is reset to 1 whenever a strictly better value is found.
	// The original carried the old tie count forward, so ties among a NEW
	// maximum were broken with the wrong probability (biased toward the
	// first index of the new maximum).
	int best_action = 0;
	double best_value = QSA[0];
	int num_ties = 1;                  // entries currently tied at best_value

	for (int a = 1; a < NUM_ACTIONS; a++) 
	{
		const double value = QSA[a];
		if (value > best_value)
		{
			best_value = value;
			best_action = a;
			num_ties = 1;              // new maximum: restart the tie count
		}
		else if (value == best_value)
		{
			num_ties++;
			// Keep the new candidate with probability 1/num_ties.
			if (0 == (int)(drand48() * num_ties))
				best_action = a;
		}
	}
	return best_action;
}

// ------------------------ Suttons Trace Code -----------------------------------------------------
void SarsaPlanner::SetTrace(int f, double new_trace_value)
{
	// Set the trace for feature f to new_trace_value (must be positive).
	if (traces[f] >= minimum_trace)
	{
		// Feature is already in the nonzero-trace list: just overwrite.
		traces[f] = new_trace_value;
	}
	else
	{
		// Make room if the list is full, then register f in both the list
		// and the inverse index.
		while (num_nonzero_traces >= MAX_NONZERO_TRACES)
			IncreaseMinTrace();
		traces[f] = new_trace_value;
		nonzero_traces[num_nonzero_traces] = f;
		nonzero_traces_inverse[f] = num_nonzero_traces;
		num_nonzero_traces++;
	}
}

void SarsaPlanner::ClearTrace(int f)       
{
	// Remove feature f's trace if it is currently nonzero; the inverse
	// index gives its position in the nonzero-trace list.
	if (traces[f] != 0.0)
		ClearExistentTrace(f, nonzero_traces_inverse[f]);
}

void SarsaPlanner::ClearExistentTrace(int f, int loc)
{
	// Remove feature f (stored at slot loc) from the nonzero-trace list via
	// swap-with-last removal, keeping the inverse index consistent.
	traces[f] = 0.0;
	num_nonzero_traces--;
	const int moved = nonzero_traces[num_nonzero_traces];
	nonzero_traces[loc] = moved;
	nonzero_traces_inverse[moved] = loc;
}

void SarsaPlanner::DecayTraces(double decay_rate)
{
	// Multiply every nonzero trace by decay_rate, dropping any that fall
	// below minimum_trace.  The loop must run downwards because
	// ClearExistentTrace swap-removes from the end of the list.
	for (int loc = num_nonzero_traces - 1; loc >= 0; loc--)
	{
		const int f = nonzero_traces[loc];
		traces[f] *= decay_rate;
		if (traces[f] < minimum_trace)
			ClearExistentTrace(f, loc);
	}
}

void SarsaPlanner::IncreaseMinTrace()
{
	// Make room in the nonzero-trace list by raising minimum_trace 10% and
	// culling every trace now below the new threshold.  Downward loop:
	// culling swap-removes entries from the end of the list.
	minimum_trace += 0.1 * minimum_trace;

	for (int loc = num_nonzero_traces - 1; loc >= 0; loc--)
	{
		const int f = nonzero_traces[loc];
		if (traces[f] < minimum_trace)
			ClearExistentTrace(f, loc);
	}
}

Action SarsaPlanner::getBestAction(Observation state)
{
	// Greedy lookup: run agent_start with exploration disabled, which
	// refreshes the active features / Q-values for this state as a side
	// effect and returns the argmax action.
	return agent_start(state, true);
}

void SarsaPlanner::makeRandomState(Observation st)
{
	// Fill st with a state drawn uniformly from each observation dimension's
	// [min, max] range (ranges taken from the task spec).
	//
	// Fix: the original followed the draw with two reflections about the
	// range midpoint (st[i] = mid - st[i], applied twice, one divided by
	// 1.0) which cancel out — dead code left over from experimentation.
	for(int i=0; i< NUM_OBSERVATIONS; i++)
	{
		st[i] = floatRand(ts->double_observations[i].min, ts->double_observations[i].max);
	}
}

void	SarsaPlanner::solveModel(Observation currentState)
{
	// Train the SARSA value function against the learned model (tfGens for
	// transitions, rfGens for rewards) by simulating episodes from random
	// start states.  currentState is currently unused; every episode starts
	// from makeRandomState.
	int episodes = 1000; 
	int steps = 300; 

	long totalSteps = episodes*steps;		//this is the number of times we query the model 

	Observation st1 = new Observation_type[NUM_OBSERVATIONS]; 
	Observation st2 = 0; 
	Action action = 0; 
	double r=0; 

	double tr = 0;      // cumulative return across episodes (for logging only)
	long counter = 0;   // total model queries so far
	for(int i=0; counter<totalSteps && i< episodes*10 ; i++)		//each episode 
	{
		makeRandomState(st1);
		action = agent_start(st1); 

		for(int j=0; j< steps; j++)
		{
			counter++;				//increase the timestep 

			double conf; 
			st2 = tfGens->predict(st1, action, conf);   // model successor (newly allocated; we own it)

			//in our model, there's a (1-confidence) probability of going to the nirvana state
			double dr = floatRand(1.0); 
			if (dr > conf)
			{
				agent_end(ParamReader::rmax / ParamReader::GAMMA); 
				delete[] st2;   // FIX: st2 was leaked on this branch (delete[] of null is safe)
				break; 
			}


			if(!st2)		//unable to predict
				break; 

			// NOTE(review): st2 is passed as both the state and next-state
			// argument here; confirm rfGens->predict's expected signature
			// (st1 may have been intended for the first argument).
			r = rfGens->predict(st2,action, st2); 
			tr += r; 

			if (rfGens->isTerminal(st2))		//end of episode
			{
				agent_end(r); 
				delete[] st2; 
				break;
			}


			//normal operation: advance the simulated state
			delete[] st1; 
			st1 = st2; 

			action = agent_step(r, st1); 
		}
		if ((i % 100)==0)
			printf("\t\tepisode %d: avg return %lf\n", i, tr/(i+1)); 
	}

	delete[] st1; 

	draw(); 

}


double SarsaPlanner::getStateValue(const Observation o)
{
	Action at = getBestAction(o); 
	return QSA[at]; 
}

double SarsaPlanner::getStateValueFast(const Observation o)
{
	// No cheaper path exists for the tile-coding representation, so this
	// simply forwards to the full getStateValue computation.
	return getStateValue(o); 
}

}