#include "QPICollector.h"

#include <stdlib.h>
#include <math.h>

//TODO: this is temporarily put here. It should be moved to a utility file
//TODO: this is temporarily put here. It should be moved to a utility file
//Returns a uniform pseudo-random double in the half-open range [0, 1).
//Dividing by RAND_MAX + 1.0 (not RAND_MAX) keeps 1.0 itself out of the
//range: the old code returned exactly 1.0 whenever rand() hit RAND_MAX,
//which let intRand(ub) below return ub — outside its documented [0,ub).
double frand()
{
	int i = rand(); 

	return ((double)i) / (RAND_MAX + 1.0); 
}

//returns a random number in the range of [0,ub) 
//returns a random number in the range of [0,ub)
int intRand(int ub)
{
	const double unitSample = frand();	//uniform sample from the unit interval
	return (int)(unitSample * ub); 
}

//returns a random number in the range of [lb,ub)
//returns a random number in the range of [lb,ub)
int intRand(int lb, int ub)
{
	const int span = ub - lb;	//width of the requested range
	return lb + intRand(span); 
}

//Convenience overload: evaluates policy pi at 'state' and stores the
//resulting action in the member buffer piAction (which is also returned).
action_t* QPICollector::getActionFromPi(const observation_t* state)
{
	return getActionFromPi(state, &piAction); 
}

//Convenience overload: evaluates the exploratory policy at 'state' and
//stores the resulting action in the member buffer expAction (also returned).
action_t* QPICollector::getActionFromExpPi( const observation_t* state)
{
	return getActionFromExpPi(state, &expAction); 
}

//--------------------------------policy functions ------------------------


//should return the action that policy pi follows from state 
//Note: this function should put the action to piAction and return it as the result 
//should return the action that policy pi follows from state 
//Note: this function should put the action to piAction and return it as the result 
//Here pi is a hand-coded controller: the integer action is whatever
//guideToStartingPoint picks to steer the agent toward the fixed goalState.
action_t* QPICollector::getActionFromPi(const observation_t* state, action_t* pi)
{

	pi->intArray[0] = guideToStartingPoint(state, goalState);  


	return pi; //do not remove this line
}

//should return the action that exploratory policy p_exp follows from state 
//Note: this function should put the action to expAction and return it as the result 
//should return the action that exploratory policy p_exp follows from state 
//Note: this function should put the action to expAction and return it as the result 
//The exploratory policy is uniform-random over the task-spec integer action
//range [min, max] (the +1 makes intRand's exclusive upper bound inclusive).
action_t* QPICollector::getActionFromExpPi( const observation_t* state, action_t* pi)
{
	pi->intArray[0] = intRand(ts.int_actions->min, ts.int_actions->max +1) ; 

	return pi; //do not remove this line 
}


//file-local helper: wrap an angle in degrees into the range [-180, 180)
static double wrapDegrees(double a)
{
	while (a >= 180.0) a -= 360.0; 
	while (a < -180.0) a += 360.0; 
	return a; 
}

//Steers the agent toward 'goal' and returns the primitive action to take:
//0 = go forward, 1 = turn right, 3 = turn left (per the comments below).
//Assumes the observation doubleArray layout is
//  [0]=x, [1]=y, [2]=heading in degrees, [3]=ball x, [4]=ball y
//— inferred from the arithmetic here; TODO confirm against the environment spec.
//
//Fixes vs the original:
//  * the function could fall off the end (undefined behavior) when both the
//    position and the heading were within tolerance — it now returns 0;
//  * PI was the typo 3.141596 and 57.30 a truncated 180/pi;
//  * atan2 replaces the hand-rolled quadrant correction, which left the
//    exactly-backward case (atan == 0, xdiff < 0) uncorrected;
//  * heading errors are wrapped to [-180,180), so e.g. a 350-degree raw
//    difference is correctly treated as -10 degrees by the 45-degree test.
int QPICollector::guideToStartingPoint(const observation_t* current, const observation_t& goal)
{
	const double PI = 3.14159265358979; 
	const double DEG_PER_RAD = 180.0 / PI; 

	double* cpos = current->doubleArray;
	double* gpos = goal.doubleArray; 

	const double POS_THRESHOLD = 50; 	//close enough to the goal position
	const double ANG_THRESHOLD = 30; 	//close enough to the goal heading
	const double BALL_THRESHOLD = 180; 	//"too close to the ball" radius

	//Euclidean distance to the goal position
	double dist = sqrt((cpos[0] - gpos[0])*(cpos[0] - gpos[0]) + (cpos[1] - gpos[1])*(cpos[1] - gpos[1])); 

	//Euclidean distance to the ball
	double bdist = sqrt((cpos[0] - cpos[3])*(cpos[0] - cpos[3]) + (cpos[1] - cpos[4])*(cpos[1] - cpos[4])); 

	//turn to the right as long as we're very close to the ball
	//(wait for the ball to clear us)
	if (bdist < BALL_THRESHOLD) 
	{
		return 1; 
	}

	if (dist > POS_THRESHOLD)	//we should first go to that position
	{
		//directed angle to the goal in degrees; atan2 covers all four
		//quadrants, including the xdiff == 0 case the old code special-cased
		double targetAngle = atan2(gpos[1] - cpos[1], gpos[0] - cpos[0]) * DEG_PER_RAD; 

		//signed heading error, wrapped so magnitude and turn direction agree
		double angDiff = wrapDegrees(targetAngle - cpos[2]); 

		if (fabs(angDiff) > 45)	//we should turn to align ourselves to the goal
		{
			int signa = (angDiff >= 0) ? 1 : -1; 
			return (2 + signa); //turn right and left are (1,3) respectively
		}
		return 0;	//go forward action is 0
	}

	//we're at the right place, perhaps with the wrong heading
	double angDiff = wrapDegrees(gpos[2] - cpos[2]); 
	if (fabs(angDiff) > ANG_THRESHOLD)
	{
		int sign = (angDiff >= 0) ? 1 : -1; 
		return (2 + sign); //turn right and left are (1,3) respectively
	}

	//position and heading both within tolerance: the original had no return
	//here; go forward as a benign default — TODO confirm whether the
	//environment offers a no-op action that would be more appropriate
	return 0; 
}


//------------------------------------------------------------------------

//Draws a uniform-random integer action from the task-spec range [min, max]
//(the +1 makes intRand's exclusive upper bound inclusive), stores it in the
//member buffer piAction, and returns that buffer.
action_t* QPICollector::randomAction()
{
	const int lo = ts.int_actions->min; 
	const int hi = ts.int_actions->max + 1; 
	piAction.intArray[0] = intRand(lo, hi); 

//	printf("generated random int in [%d,%d) --> %d\n", ts.int_actions->min, ts.int_actions->max+1, piAction.intArray[0]); 
	return &piAction; //do not remove this line 
}



//Constructor: seeds the RNG, allocates the first (empty) trajectory, sets
//default hyperparameters, and opens the raw-history log in append mode.
//Note that gamma is overwritten later by init() from the task spec.
QPICollector::QPICollector(void)
{
	srand(time(NULL)); 
	currentTrajectory = new Trajectory(); 

	testingPercentage = 0.5; 	//fraction of trajectories routed to the testing set (see splitData)
	EXP_STEPS = 15; 		//length of one exploration burst, in steps
	gamma = 0.95; 			//default discount; replaced by ts.discount_factor in init()
	expCounter = -1;		//not executing pi_exp

	//prepare raw history logging 
	//NOTE(review): fopen may return NULL (e.g. unwritable directory) and the
	//result is used unchecked in step()/end() — confirm this can't happen here
	rawHistoryFile = fopen("rawhistory.txt", "a+"); 
}

//Destructor: intentionally empty in the visible code — cleanup() closes
//rawHistoryFile and saves the history. NOTE(review): the RL structs allocated
//in init() and the heap Trajectory objects are never freed here; see the
//TODO about leaks in cleanup().
QPICollector::~QPICollector(void)
{
}


//Parses the RL-Glue task spec, adopts its discount factor, allocates the
//action/observation buffers this agent reuses every step, and fixes the
//hard-coded goal state that getActionFromPi steers toward.
//Exits the process if the task spec cannot be parsed.
void QPICollector::init(const char* task_spec)
{
	printf("qpi data collector init:  '%s'\n", task_spec); 

	if (decode_taskspec(&ts, task_spec) != 0)
	{
		printf("Error: cannot parse the task spec\n"); 
		exit(1); 
	}
	

	gamma = ts.discount_factor ; 
	if (gamma >= 1.0 || gamma <=0.0)
		printf("Warning: discount factor should be less than 1, but is %lf\n", gamma); 


	//reusable action buffers: pi's action, the exploratory action, and a
	//scratch buffer used when logging pi's action for a past observation
	allocateRLStruct(&piAction, ts.num_int_actions, ts.num_double_actions, ts.charcount_actions); 
	allocateRLStruct(&expAction, ts.num_int_actions, ts.num_double_actions, ts.charcount_actions); 
	allocateRLStruct(&tmpAction, ts.num_int_actions, ts.num_double_actions, ts.charcount_actions); 


	allocateRLStruct(&lastObservation, ts.num_int_observations, ts.num_double_observations, ts.charcount_observations); 

	//the fixed target pose for guideToStartingPoint:
	//presumably [x, y, heading, ballx, bally] — TODO confirm layout
	allocateRLStruct(&goalState, ts.num_int_observations, ts.num_double_observations, ts.charcount_observations); 
	goalState.doubleArray[0] = 80; 
	goalState.doubleArray[1] = -80; 
	goalState.doubleArray[2] = 0; 
	goalState.doubleArray[3] = 0; 
	goalState.doubleArray[4] = 0; 



	//NOTE(review): redundant — the constructor already seeded with time(NULL)
	srand(time(0));

}




/*
	Normally, if we're in a non-episodic domain, this function must be called only once. 
	The first time, we're in pi collecting mode and the first block of code is useless. 
*/

//Episode start. If an exploration burst (pi_exp) is in progress it is either
//continued or, after EXP_STEPS steps, ended; otherwise (the normal case) the
//observation is cached and a random first action is returned.
const action_t * QPICollector::start(const observation_t *this_observation)
{
	printf("start\n"); 

	//executing pi_exp 
	if (expCounter > -1 ) 
	{
		expCounter++; 
		if (expCounter < EXP_STEPS) 
			return getActionFromExpPi(this_observation); 
		else	//finished exploring 
		{
			expCounter = -1;		
			//NOTE(review): the previous currentTrajectory is abandoned here
			//without being pushed to history or deleted — confirm intent
			currentTrajectory = new Trajectory(); 
		}
	}

	//remember this observation so step() can log the (s, a, r, s') transition
	replaceRLStruct(this_observation, &lastObservation); 
//	return getActionFromPi(this_observation); 
	executedAction = randomAction(); 
	return executedAction; 
}

//One environment step. Always logs the raw transition to rawHistoryFile;
//then either continues/ends an exploration burst, or appends the transition
//to the current on-policy trajectory and decides (with probability 1-gamma)
//whether to switch into exploration mode.
const action_t * QPICollector::step(double reward, const observation_t *this_observation)
{
	printf("step\n"); 

	//logging the last transition: note that since this is a temporary transition, we don't have to use duplicate 
	// to create new instances of our structures
	Transition lt; 
	lt.state = &lastObservation; 
	lt.action = executedAction; 
	lt.reward = reward; 
	lt.nextstate = duplicateRLStructToPointer(this_observation); 
	//pi's action for the new state, written into the scratch buffer tmpAction
	lt.piAction = getActionFromPi(this_observation, &tmpAction); 
	lt.print(rawHistoryFile); 
	freeRLStructPointer(lt.nextstate); 



	//executing pi_exp 
	if (expCounter > -1 ) 
	{
		replaceRLStruct(this_observation, &lastObservation); 
		expCounter++; 
		if (expCounter < EXP_STEPS) 
			return executedAction = getActionFromExpPi(this_observation); 
		else	//finished exploring: the first action should be random 
		{
			//for logging purposes: -1 marks the end of an exploration burst
			fprintf(rawHistoryFile, "-1\n"); 

			expCounter = -1;		
			//NOTE(review): the previous currentTrajectory is abandoned here
			//without being pushed to history or deleted — confirm intent
			currentTrajectory = new Trajectory(); 
//			return getActionFromPi(this_observation); 
			return executedAction=randomAction();	//the first action is random
		}
	}

	//update current trajectory based on data from last step 
	//(deep copies this time, because the trajectory outlives this call)
	Transition t; 
	t.state = duplicateRLStructToPointer(& lastObservation); 
	t.action = duplicateRLStructToPointer(& piAction); 
	t.reward = reward; 
	t.nextstate = duplicateRLStructToPointer(this_observation); 
	t.piAction = duplicateRLStructToPointer(getActionFromPi(&lastObservation, &tmpAction));  
	currentTrajectory->theList.push_back(t); 

	replaceRLStruct(this_observation, &lastObservation); 

/*	printf("gholi\n"); 
	t.print(stdout); 
	currentTrajectory->theList.front().print(stdout); 
	currentTrajectory->theList.back().print(stdout); 
*/

	//should switch to exploration
	//with probability (1 - gamma): end the current trajectory and begin pi_exp
	double d = frand(); 
	if (d> gamma) 
	{
		//for logging: -2 marks the start of an exploration burst
		fprintf(rawHistoryFile, "-2\n"); 

		expCounter = 0; 
//		printf("pushing to history of size %d\n", history.size()); 
		history.push_back(currentTrajectory); 
		currentTrajectory  = new Trajectory(); 
		return getActionFromExpPi(this_observation); 
	}

	return executedAction= getActionFromPi(this_observation); 


}

//Episode end. If we were following pi (not exploring), records the terminal
//transition (nextstate = 0 marks "terminal") and pushes the finished
//trajectory onto the history; exploration bursts are simply dropped.
void QPICollector::end(double reward)
{
	printf("end is called and exp is %d\n", expCounter); 
	//finish the trajectory if not exploring 
	if (expCounter == -1)
	{
		//update current trajectory based on data from last step 
		Transition t; 
		t.state = duplicateRLStructToPointer(& lastObservation); 
		t.action = duplicateRLStructToPointer(& piAction); 
		t.reward = reward; 
		t.nextstate = 0; 	//no successor state: this is a terminal transition
		t.piAction = duplicateRLStructToPointer(getActionFromPi(&lastObservation, &tmpAction));  
		currentTrajectory->theList.push_back(t); 
		
//		printf("pushing to history of size %d\n", history.size()); 
		history.push_back(currentTrajectory); 
		currentTrajectory = new Trajectory(); 
	}
}

//Final teardown: flushes any partially collected trajectory into the
//history, writes everything to disk, and closes the raw-history log.
void QPICollector::cleanup()
{
	printf("cleanup\n"); 
	if (! currentTrajectory->theList.empty())
	{
		history.push_back(currentTrajectory); 
		currentTrajectory = new Trajectory(); 
	}

	save(); 
	fclose(rawHistoryFile); 
	//TODO: do we care about memory leaks? 
//	for (list<Trajectory*>::iterator it = history.begin(); it != history.end(); it++)
//		delete (*it); 
}

//Handles RL-Glue text messages: answers the identity query, and performs an
//on-demand save (after backing up the previous history file via cp).
//Unrecognized messages get a fixed fallback reply.
const char* QPICollector::message(const char* inMessage)
{
	if (strcmp(inMessage, "what is your name?") == 0)
	{
		return "my name is QPI data collector!";
	}

	if (strcmp(inMessage, "save") == 0)
	{
		printf("saving automatically because asked for it. \n"); 
		system("cp history.txt history.txt.bak"); 
		save(); 
		return "done"; 
	}

	return "I don't know how to respond to your message";
}


//Returns the testing partition of the collected history, lazily performing
//the random train/test split the first time either partition is requested.
const list<Trajectory*>* QPICollector::getTestingData()
{
	const bool notYetSplit = testingData.empty(); 
	if (notYetSplit)
		splitData(); 

	return &testingData; 
}


//Returns the training partition of the collected history, lazily performing
//the random train/test split the first time either partition is requested.
const list<Trajectory*>* QPICollector::getTrainingData()
{
	const bool notYetSplit = trainingData.empty(); 
	if (notYetSplit)
		splitData(); 

	return &trainingData; 
}

//splits the history into training and testing trajectories 
void QPICollector::splitData()
{
	printf("splitting data into testing and training sets\n"); 

	for(list<Trajectory*>::iterator it = history.begin(); it != history.end(); it ++)
	{
		if (frand() < testingPercentage ) 
			testingData.push_back( *it); 
		else
			trainingData.push_back (*it); 
	}
}


//Flushes the raw-history log and writes the full trajectory history to
//history.txt via the two-argument overload below.
void QPICollector::save()
{
	fflush(rawHistoryFile); 
	save(&history, "history.txt"); 
//	save(getTrainingData(), "training.txt"); 
//	save(getTestingData(), "testing.txt"); 

}


//Writes every trajectory in 'l' to 'path': a format-description header
//(Transition::printInfo), then one printed transition per line, with a blank
//line separating trajectories. Does nothing if the list is empty, and now
//bails out with a message if the file cannot be opened.
void QPICollector::save(const list<Trajectory*>* l, const char* path)
{
	//nothing to be saved 
	if (l->size() == 0)
		return; 

	FILE * fout = fopen(path, "w"); 
	if (fout == NULL)	//was unchecked: a failed open crashed on the writes below
	{
		printf("Error: cannot open '%s' for writing\n", path); 
		return; 
	}

	//size() is unsigned; %zu avoids the old %d mismatch on 64-bit builds
	printf("begin of save of size %zu \n", (size_t) l->size()); 

	//write instructions to the file: 
	l->front()->theList.front().printInfo(fout); 

	for(list<Trajectory*>::const_iterator it = l->begin(); it != l->end(); ++it) //for each trajectory
	{
		Trajectory* tr = *it; 
		for(list<Transition>::iterator itt = tr->theList.begin(); itt != tr->theList.end(); ++itt)
		{
			Transition& sar = *itt; 

			sar.print(fout); 
		}
		fprintf(fout, "\n"); 
	}

	fclose(fout); 
}
