
#include "OriginalTFGenerator.h"
#include "MREAgent.h"
#include "Util.h"

void compute_next_state(const observation_t* prev, const action_t* a, observation_t* next); 


/// Builds the RL-Glue structs used by predict(): one action struct and two
/// observation structs sized from the agent's task spec. The doubleArray
/// members of prev/next are immediately released because predict() re-points
/// them at caller-owned buffers (the input state and the returned result);
/// only their intArray/charArray storage is kept.
OriginalTFGenerator::OriginalTFGenerator(MREAgent* p, int an, int od, taskspec_t& spec)
:TFGenerator(p,an,od,spec)
{
	m_spec = &spec; 
	allocateRLStruct(&action, p->taskSpec.num_int_actions, p->taskSpec.num_double_actions, p->taskSpec.charcount_actions); 
	allocateRLStruct(&prev, p->taskSpec.num_int_observations, p->taskSpec.num_double_observations, p->taskSpec.charcount_observations); 
	allocateRLStruct(&next, p->taskSpec.num_int_observations, p->taskSpec.num_double_observations, p->taskSpec.charcount_observations); 

	// NOTE(review): delete[] assumes allocateRLStruct (Util.h) allocates with
	// new[] — verify; the RL-Glue reference codec uses calloc/free.
	delete[] prev.doubleArray; 
	prev.doubleArray = 0;   // null out so later clearRLStruct/delete cannot
	delete[] next.doubleArray; 
	next.doubleArray = 0;   // touch a dangling pointer
}

/// Releases all RL-Glue struct storage owned by this generator.
/// Previously only `action` was cleared, leaking the intArray/charArray
/// buffers the constructor allocated for `prev` and `next`.
OriginalTFGenerator::~OriginalTFGenerator()
{
	// prev/next.doubleArray alias caller-owned memory after predict()
	// (the input state and the returned result array), so null them first:
	// clearRLStruct must only free storage this object actually owns.
	prev.doubleArray = 0; 
	next.doubleArray = 0; 
	clearRLStruct(&prev); 
	clearRLStruct(&next); 
	clearRLStruct(&action); 
}


/// Predicts the next state for (st, a) via the hand-coded environment model
/// compute_next_state().
///
/// @param st  current state; an array of obs_dim Observation_type values,
///            owned by the caller and only read here.
/// @param a   agent-level action, translated with m_agent->mapAction().
/// @return    newly allocated array of obs_dim values — the CALLER owns it
///            and must delete[] it.
///
/// The old inline PUDDLEWORLD / MOUNTAINCAR_DR dynamics that used to follow
/// the #endif were unreachable (both branches return) and have been removed;
/// that logic now lives in compute_next_state().
Observation OriginalTFGenerator::predict(Observation st, Action a)
{
#ifdef USE_THREADS 
	// Thread-safe path: build private structs on the stack so concurrent
	// predict() calls never share the member buffers.
	observation_t prevt; 
	observation_t nextt; 
	action_t actiont; 

	allocateRLStruct(&actiont, m_spec->num_int_actions, m_spec->num_double_actions,m_spec->charcount_actions); 
	allocateRLStruct(&prevt,m_spec->num_int_observations, m_spec->num_double_observations, m_spec->charcount_observations); 
	allocateRLStruct(&nextt, m_spec->num_int_observations,m_spec->num_double_observations, m_spec->charcount_observations); 
	
	// Copy the input state in, run the model, copy the prediction out.
	memcpy(prevt.doubleArray, st, obs_dim*sizeof(Observation_type)); 
	actiont.intArray[0] = m_agent->mapAction(a); 
	compute_next_state(&prevt, &actiont, &nextt); 

	Observation result = new Observation_type[obs_dim]; 
	memcpy(result, nextt.doubleArray, obs_dim*sizeof(Observation_type)); 
	clearRLStruct(&prevt); 
	clearRLStruct(&nextt); 
	clearRLStruct(&actiont); 
	return result; 

#else
	// Single-threaded fast path: alias the member structs' double arrays to
	// the caller's input and the freshly allocated result — no copying.
	// (prev/next.doubleArray deliberately point at memory we do not own;
	// the destructor nulls them before clearing.)
	Observation result = new Observation_type[obs_dim]; 
	prev.doubleArray = st; 
	next.doubleArray = result; 
	action.intArray[0] = m_agent->mapAction(a); 

	compute_next_state(&prev, &action, &next); 
	return result; 

#endif
}




/// Intentionally a no-op: this generator wraps the hand-coded analytic
/// model (compute_next_state), so there is nothing to fit from history.
/// Kept to satisfy the TFGenerator interface.
void OriginalTFGenerator::batchLearn( list<Transition>& history)
{

}

/// Confidence of the model's prediction for (st, a).
/// The analytic transition function is exact by construction, so the
/// confidence is maximal for every state/action pair.
double OriginalTFGenerator::getConfidence(Observation st, Action a)
{
	const double exactModelConfidence = 1.0; 
	return exactModelConfidence; 
}



/// Intentionally a no-op: single transitions are ignored because the
/// underlying model is fixed and analytic (see compute_next_state).
/// Kept to satisfy the TFGenerator interface.
void OriginalTFGenerator::learn(const Transition* t) 
{

}
