#include "QLearningAgent.h"
#include "ParamReader.h"
#include <signal.h>
#include <rlglue/utils/C/RLStruct_util.h>
#include <rlglue/utils/C/TaskSpec_Parser.h>
#include <rlglue/Environment_common.h>/* env_ function prototypes and RL-Glue types */


//debugging info: for detecting memory leaks
#ifdef _DEBUG
   #define new MYDEBUG_NEW
#endif


void agent_init(const char* _ts);
const action_t* agent_start(const observation_t* o);
const action_t *agent_step(double reward, const observation_t *this_observation);
void agent_end(double reward);
void agent_cleanup();
const char* agent_message(const char* inMessage);



const char* env_init();
const observation_t* env_start();
const reward_observation_terminal_t* env_step(const action_t* a);
void env_cleanup();



// Global stop flag polled by the experiment loops; set by the SIGINT handler
// so a Ctrl-C terminates runs cleanly instead of killing the process mid-episode.
bool quit = false;

// Handle Ctrl-C (SIGINT): raise the global quit flag and tell the user.
// All other signal numbers are ignored.
void signalHandler(int signum)
{
	if (signum == SIGINT)
	{
		quit = true;
		printf("Exiting........\n");
	}
}




/*
This method runs 100, 66, 50, 33, 0 percent of the total specified in param.in from uniform sampling
look for 'average reward' in the output file
*/
void runDifferentSamples()
{
	// NOTE: the entire body below is intentionally commented out, so calling
	// this function is currently a no-op (its invocation in main() is also
	// disabled). The disabled code swept seven different AGNT_SOLVE_PERIOD
	// values and printed per-run and average rewards for each.
	// It references EPISODE_NUM, which is not defined in this file --
	// presumably a compile-time constant elsewhere; confirm before re-enabling.
/*	ParamReader::load("param.in");
	double values[EPISODE_NUM];

	int solvePeriod = 0;
	long sampleSize = ParamReader::FVI_DISCRETIZED_SIZE*ParamReader::FVI_DISCRETIZED_SIZE ; //warning: works only for mountain car (depends on dimension)
	for(int j=0; j< 7 && !quit; j++)				//different solve_periods
	{
#ifndef OS_WINDOWS
		srand48(266);
#endif
		srand(266);

		double avgReward = 0;
		int avgRewardCnt = 0;


		switch(j)
		{
		case 0:
			solvePeriod = 5;
			break;
		case 1:
			solvePeriod = sampleSize /10;
			break;
		case 2:
			solvePeriod = sampleSize /3;
			break;
		case 3:
			solvePeriod = sampleSize /2;
			break;
		case 4:
			solvePeriod = sampleSize * .66;
			break;
		case 5:
			solvePeriod = sampleSize * 0.9;
			break;
		case 6:
			solvePeriod = sampleSize ;
			break;
		}

		for(int x=0; x < ParamReader::EXP_NUM_RUNS && !quit; x++)				//each run
		{

			const char* ts =  env_init();
			agent_init(ts);

			ParamReader::AGNT_SOLVE_PERIOD = solvePeriod;

			bool isComputing = false;
			long trialAge =0;
			int startEpisode = 0;
			double totalReward = 0;

			double runReward = 0;
			int runRewardCnt =0;

			for(int i=0; i < EPISODE_NUM && !quit; i++)	//episode
			{
				const observation_t* o = env_start();
				trialAge++;
				if (trialAge > ParamReader::AGNT_SOLVE_PERIOD && ! isComputing)
				{
					isComputing = true;
					startEpisode = i;
				}

				if (isComputing)
				{
					avgRewardCnt++;
					runRewardCnt++;
				}

				const action_t* a = agent_start(o);
				for(int j=0; j<  ParamReader::EXP_NUM_STEPS && !quit; j++)//step
				{
					const reward_observation_terminal_t* ro =  env_step(a);
					totalReward += ro->reward;

					trialAge++;
					if (isComputing)
					{
						avgReward += ro->reward;
						runReward += ro->reward;
					}
					if (ro->terminal )
					{
						agent_end(ro->reward);
						break;
					}else
						a = agent_step(ro->reward , ro->observation );
				}

				values[i] = totalReward;


				//finish the experiment
				if (isComputing && i > startEpisode + 50)
				{
					break;
				}
			}
			agent_cleanup();

			printf("run reward for solve period %d is %lf\n", ParamReader::AGNT_SOLVE_PERIOD , runReward/runRewardCnt);
		}//each run
		printf("average reward for solve period %d is %lf\n", ParamReader::AGNT_SOLVE_PERIOD ,  avgReward/avgRewardCnt);
	}//for j
*/

}

/*
This method runs for different set of backup horizons
*/




/*
This methods runs the algorithm for a specified number of episodes/trials
It varies one parameter and reports it.
The function pass in the argument has to return the value of the parameter for the i-th iteration.
*/
/*
Runs the algorithm for EXP_NUM_RUNS runs of EXP_NUM_EPISODES episodes each,
sweeping one agent parameter through 'max' values.
func(i) must set the parameter to its i-th value and return that value
(used only for reporting). Results are printed per run and averaged per
parameter value. The global 'quit' flag (set by SIGINT) aborts all loops.
*/
void runDifferentParams(double (*func)(int), int max)
{
	double* episodeReturns;		//each cell of this has the total reward collected in that episode (there are runs*episodes number of cells)
	double* runReturns;			//each cell of this has the total reward collected in that run (there are 'runs' number of cells)

	ParamReader::load("param.in");

	episodeReturns = new double[ParamReader::EXP_NUM_RUNS*ParamReader::EXP_NUM_EPISODES];
	runReturns = new double[ParamReader::EXP_NUM_RUNS];



	for(int j=0; j< max && !quit; j++)				//different parameters
	{
		double param = 0;	//j-th parameter value; initialized in case EXP_NUM_RUNS is 0 (it is printed below)
		//reset the seeds so every parameter value sees the identical random sequence
#ifndef OS_WINDOWS
		srand48(266);
#endif
		srand(266);

		ParamReader::load("param.in");	//restore baseline parameters before func() overrides one of them
		for(int x=0; x <  ParamReader::EXP_NUM_RUNS && !quit; x++)				//each run
		{
			//this sets the parameter to j-th value
			 param = func(j);

			 runReturns[x] = 0;		//initialize run return

			//NOTE: the task-spec string is owned by the environment (RL-Glue
			//convention); it must NOT be deleted here. env_cleanup() below
			//releases any environment-side resources.
			const char* ts =  env_init();
			agent_init(ts);


			for(int i=0; i < ParamReader::EXP_NUM_EPISODES && !quit; i++)	//episode
			{
				episodeReturns[x*ParamReader::EXP_NUM_EPISODES + i] = 0;	//init

				const observation_t* o = env_start();
				const action_t* a = agent_start(o);
				int k=0;	//declared outside the loop: the step count is reported after it
				for(k=0; k< ParamReader::EXP_NUM_STEPS && !quit; k++)//step
				{
					const reward_observation_terminal_t* ro =  env_step(a);
					episodeReturns[x*ParamReader::EXP_NUM_EPISODES + i] += ro->reward;
					runReturns[x] += ro->reward;

					if (ro->terminal)
					{
						agent_end(ro->reward);
						break;
					}else
						a = agent_step(ro->reward, ro->observation);
				}

#ifdef OUTPUT_STATISTICS
				printf("episode #%d\tr: %4.2lf\tsteps: %d\tAvgR: %6.1lf\n", i,episodeReturns[x*ParamReader::EXP_NUM_EPISODES + i] , k , runReturns[x] / (i+1) );
#endif
			}//each episode

			agent_cleanup();
			env_cleanup();

			printf("%d run reward of %d-th  value (%lf) of param is %lf\n",x, j,param , runReturns[x]/ParamReader::EXP_NUM_EPISODES);
		}//each run

		double avgReward = 0;
		for(int k=0; k< ParamReader::EXP_NUM_RUNS; k++)
			avgReward += runReturns[k];
		avgReward /= ParamReader::EXP_NUM_RUNS;
		printf("average reward %d-th value (%lf) of param is %lf\n", j, param ,  avgReward/ParamReader::EXP_NUM_EPISODES);
	}//for j

	//release the statistics buffers (previously leaked on every call)
	delete[] episodeReturns;
	delete[] runReturns;
}

// Sets the tile-coding resolution to ind+1 and returns the value applied.
double varyTileResolutionParam(int ind)
{
	const int resolution = ind + 1;
	ParamReader::AGNT_TILE_RESOLUTION = resolution;
	return resolution;
}


// Sets the number of tilings to 2, 4, 6, ... for ind = 0, 1, 2, ...
// and returns the value applied.
double varyTileNumbers(int ind)
{
	const int tilings = 2 * (ind + 1);
	ParamReader::AGNT_NUMBER_OF_TILINGS = tilings;
	return tilings;
}

// Sets the eligibility-trace decay lambda to ind/10 (0.0, 0.1, ..., 1.0)
// and returns the value applied.
double varyLambda(int ind)
{
	const double lambda = ind * 0.1;
	ParamReader::AGNT_LAMBDA = lambda;
	return lambda;
}

// Sets the exploration rate epsilon to ind/10 (0.0, 0.1, ..., 1.0)
// and returns the value applied.
double varyEpsilon(int ind)
{
	const double epsilon = ind * 0.1;
	ParamReader::AGNT_EPSILON = epsilon;
	return epsilon;
}

// Sets the learning rate alpha to ind/10 (0.0, 0.1, ..., 1.0)
// and returns the value applied.
double varyAlpha(int ind)
{
	const double alpha = ind * 0.1;
	ParamReader::AGNT_ALPHA = alpha;
	return alpha;
}



//a dummy function that doesn't set any parameters
// Dummy parameter setter: leaves every agent parameter untouched and
// always reports -1 (used to run the experiment with param.in defaults).
double voidfunc(int ind)
{
	(void)ind;	// intentionally unused
	return -1;
}


// Entry point: installs the Ctrl-C handler, then runs one parameter sweep.
int main()
{
//_CrtSetBreakAlloc(1678291);

	//memory leak detection under windows
#ifdef OS_WINDOWS
_CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
#endif

//	srand(time(NULL));

	// Install the SIGINT handler so long experiments can be interrupted
	// cleanly: the loops in runDifferentParams poll the global 'quit' flag.
	signal(SIGINT, &signalHandler);

//	runDifferentSamples();



/*
	running for planning and online learning is the same. we just need to change the exp_type in param.in
*/

	// Enable exactly one of the sweeps below to choose which parameter varies.
//	runDifferentParams(&varyEpsilon, 11);
//	runDifferentParams(&varyAlpha, 11);
//	runDifferentParams(&varyLambda, 11);
	runDifferentParams(&varyTileResolutionParam, 10);
//	runDifferentParams(&varyTileNumbers, 15);
//	runDifferentParams(&voidfunc, 1);

	// Wait for a key press so a console window spawned by an IDE stays open.
	getchar();
	return 0;
}
