#include "SarsaLambdaAgent.h"
#include "ParamReader.h"

#include <signal.h>
#include <cstdio>
#include <vector>

#include <rlglue/utils/C/RLStruct_util.h>
#include <rlglue/utils/C/TaskSpec_Parser.h>
#include <rlglue/Environment_common.h>/* env_ function prototypes and RL-Glue types */


//debugging info: for detecting memory leaks
//#ifdef _DEBUG
//   #define new MYDEBUG_NEW
//#endif


void agent_init(const char* _ts); 
const action_t* agent_start(const observation_t* o); 
const action_t *agent_step(double reward, const observation_t *this_observation); 
void agent_end(double reward); 
void agent_cleanup(); 
const char* agent_message(const char* inMessage);



const char* env_init();
const observation_t* env_start();
const reward_observation_terminal_t* env_step(const action_t* a);
void env_cleanup();



bool quit = false; 

void signalHandler(int signum)
{
	switch(signum)
	{
	case SIGINT:
		quit = true; 
		printf("Exiting........\n");
	}
}




/*
This methods runs the algorithm for a specified number of episodes/trials
It varies one parameter and reports it. 
The function pass in the argument has to return the value of the parameter for the i-th iteration. 
*/
void runDifferentParams(double (*func)(int), int max)
{
	double* episodeReturns;		//each cell of this has the total reward collected in that episode (there are runs*episodes number of cells)
	double* runReturns;			//each cell of this has the total reward collected in that run (there are 'runs' number of cells)

	ParamReader::load("param.in"); 	

	episodeReturns = new double[ParamReader::EXP_NUM_RUNS*ParamReader::EXP_NUM_EPISODES]; 
	runReturns = new double[ParamReader::EXP_NUM_RUNS]; 



	for(int k=0; k< max && !quit; k++)				//different parameters
	{
		double param; 
#ifndef OS_WINDOWS
		srand48(266);
#endif
		srand(266); 

		for(int x=0; x <  ParamReader::EXP_NUM_RUNS && !quit; x++)				//each run
		{
			runReturns[x] = 0;		//initialize run return

			const char* ts =  env_init(); 
			agent_init(ts); 
			delete[] ts; 

			//this sets the parameter to k-th value 
			 param = func(k); 

			for(int i=0; i < ParamReader::EXP_NUM_EPISODES && !quit; i++)	//episode
			{
				episodeReturns[x*ParamReader::EXP_NUM_EPISODES + i] = 0;	//init

				const observation_t* o = env_start(); 
				const action_t* a = agent_start(o); 
				int j; 
				for(j=0; j< ParamReader::EXP_NUM_STEPS && !quit; j++)//step
				{
					const reward_observation_terminal_t* ro =  env_step(a);
					episodeReturns[x*ParamReader::EXP_NUM_EPISODES + i] += ro->reward; 
					runReturns[x] += ro->reward; 
					
					if (ro->terminal) 
					{
						agent_end(ro->reward);
						break; 
					}else
						a = agent_step(ro->reward, ro->observation); 
				}

				if (i % 10==0)
					printf("episode #%d\tr: %4.2lf\tsteps: %d\tavg reward: %4.2lf\n", i, episodeReturns[x*ParamReader::EXP_NUM_EPISODES + i] , j, runReturns[x]/i  );
			}//each episode 

			agent_cleanup(); 
			env_cleanup(); 
			
			printf("%d run reward of %d-th  value (%lf) of param is %lf\n",x, k,param , runReturns[x]/ParamReader::EXP_NUM_EPISODES); 
		}//each run 

		double avgReward = 0; 
		for(int g=0; g< ParamReader::EXP_NUM_RUNS; g++)
			avgReward += runReturns[g]; 
		avgReward /= ParamReader::EXP_NUM_RUNS; 
		printf("average reward %d-th value (%lf) of param is %lf\n", k, param ,  avgReward/ParamReader::EXP_NUM_EPISODES); 
	}//for k
}

//a dummy function that doesn't set any parameters
double voidfunc(int ind)
{
	return -1; 
}


int main()
{
//_CrtSetBreakAlloc(1678291);

	//memory leak detection under windows
#ifdef OS_WINDOWS
_CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
#endif

//	srand(time(NULL));
  
	signal(SIGINT, &signalHandler);

//	runDifferentSamples(); 



/*
	running for planning and online learning is the same. we just need to change the exp_type in param.in
*/

//	runDifferentParams(&MCLengthParam, 5); 
	runDifferentParams(&voidfunc, 1); 

	getchar(); 
	return 0; 
}
