#include "MREAgent.h"
#include "ParamReader.h"
#include <signal.h>
#include <rlglue/utils/C/RLStruct_util.h>
#include <rlglue/utils/C/TaskSpec_Parser.h>
#include <rlglue/Environment_common.h>/* env_ function prototypes and RL-Glue types */


//debugging info: for detecting memory leaks
#include "Util.h"
#ifdef _DEBUG
   #define new MYDEBUG_NEW
#endif


void agent_init(const char* task_spec);
const action_t *agent_start(const observation_t *this_observation);
const action_t *agent_step(double reward, const observation_t *this_observation); 
void agent_end(double reward); 
void agent_cleanup(); 
const char* agent_message(const char* inMessage);



const char* env_init();
const observation_t* env_start();
const reward_observation_terminal_t* env_step(const action_t* a);
void env_cleanup();



#define EPISODE_NUM  60		//number of episodes in each run
#define	NUM_RUN		 1			//number of runs
#define	STEP_NUM	300			//number of steps in each episode

// Set asynchronously from the SIGINT handler; polled by the experiment loops.
// volatile sig_atomic_t is the only type guaranteed safe to write from a
// signal handler -- a plain non-volatile bool may be cached in a register by
// the optimizer, so the `!quit` loop conditions might never see the change.
volatile sig_atomic_t quit = 0;

// SIGINT (Ctrl-C) handler: request a graceful stop of the experiment loops
// instead of killing the process mid-episode.
void signalHandler(int signum)
{
	switch(signum)
	{
	case SIGINT:
		quit = 1;
		// NOTE(review): printf is not async-signal-safe; kept for parity
		// with the original behavior, but write(2) -- or deferring the
		// message to main -- would be strictly correct.
		printf("Exiting........\n");
	}
}


/*
This method runs 100, 66, 50, 33, 0 percent of the total specified in param.in from uniform sampling 
look for 'average reward' in the output file 
*/
void runDifferentSamples()
{
	ParamReader::load("param.in"); 
	double values[EPISODE_NUM];

	int solvePeriod = 0; 
	long sampleSize = ParamReader::NUMBER_OF_VI_SAMPLES*ParamReader::NUMBER_OF_VI_SAMPLES ; //warning: works only for mountain car (depends on dimension)
	for(int j=0; j< 7 && !quit; j++)				//different solve_periods
	{
#ifndef OS_WINDOWS
		srand48(266);
#endif
		srand(266); 

		double avgReward = 0; 
		int avgRewardCnt = 0; 


		switch(j)
		{
		case 0: 
			solvePeriod = 5; 
			break; 
		case 1: 
			solvePeriod = sampleSize /10;  
			break; 
		case 2: 
			solvePeriod = sampleSize /3; 
			break; 
		case 3: 
			solvePeriod = sampleSize /2; 
			break; 
		case 4: 
			solvePeriod = sampleSize * .66; 
			break; 
		case 5: 
			solvePeriod = sampleSize * 0.9; 
			break; 
		case 6: 
			solvePeriod = sampleSize ; 
			break; 
		}

		for(int x=0; x < NUM_RUN && !quit; x++)				//each run
		{

			const char* ts =  env_init(); 
			agent_init(ts); 

			ParamReader::SOLVE_PERIOD = solvePeriod; 

			bool isComputing = false;
			long trialAge =0; 
			int startEpisode = 0; 
			double totalReward = 0; 

			double runReward = 0; 
			int runRewardCnt =0; 

			for(int i=0; i < EPISODE_NUM && !quit; i++)	//episode
			{
				const observation_t* o = env_start(); 
				trialAge++; 
				if (trialAge > ParamReader::SOLVE_PERIOD && ! isComputing)
				{
					isComputing = true; 
					startEpisode = i; 
				}

				if (isComputing)
				{
					avgRewardCnt++;
					runRewardCnt++;
				}

				const action_t* a = agent_start(o); 
				for(int j=0; j< STEP_NUM && !quit; j++)//step
				{
					const reward_observation_terminal_t* ro =  env_step(a);
					totalReward += ro->reward; 
					
					trialAge++; 
					if (isComputing)
					{
						avgReward += ro->reward; 
						runReward += ro->reward;  
					}
					if (ro->terminal ) 
					{
						agent_end(ro->reward);
						break; 
					}else
						a = agent_step(ro->reward , ro->observation ); 
				}

				values[i] = totalReward; 


				//finish the experiment
				if (isComputing && i > startEpisode + 50)
				{
					break; 
				}
			}
			agent_cleanup(); 

			printf("run reward for solve period %d is %lf\n", ParamReader::SOLVE_PERIOD , runReward/runRewardCnt); 
		}//each run 
		printf("average reward for solve period %d is %lf\n", ParamReader::SOLVE_PERIOD ,  avgReward/avgRewardCnt); 
	}//for j


}

/*
Runs the experiment for different multistep backup horizons.
(The loop bounds currently cover only horizon 1; widen them to sweep more.)
NOTE(review): unlike runDifferentSamples, this function never calls
ParamReader::load, so ParamReader::SOLVE_PERIOD relies on whatever value it
already holds -- confirm that is intended.
*/
void runDifferentHorizons()
{
	for(int j=1; j< 2 && !quit; j++)				//different horizons
	{
		// Re-seed both generators so every horizon replays the same
		// random sequence (comparable results across horizons).
#ifndef OS_WINDOWS
		srand48(266);
#endif
		srand(266); 

		double avgReward = 0; 
		int avgRewardCnt = 0; 

		for(int x=0; x < NUM_RUN && !quit; x++)				//each run
		{
			const char* ts =  env_init(); 
			agent_init(ts); 
			// BUG FIX: removed 'delete[] ts' -- the task-spec string
			// returned by env_init is owned by the environment (it is
			// released in env_cleanup); freeing it here destroyed memory
			// we do not own, and was inconsistent with
			// runDifferentSamples, which never freed it.

			// NOTE(review): set after agent_init -- confirm the agent does
			// not read this parameter during initialization.
			ParamReader::MULTISTEP_BACKUP_HORIZON = j; 

			bool isComputing = false;	// true once trialAge exceeds the solve period
			long trialAge =0; 			// samples (env_start/env_step calls) seen so far
			int startEpisode = 0; 		// episode at which the computing phase began

			double runReward = 0; 
			int runRewardCnt =0; 

			for(int i=0; i < EPISODE_NUM && !quit; i++)	//episode
			{
				const observation_t* o = env_start(); 
				trialAge++; 
				// Reward is only accumulated once the agent has consumed
				// SOLVE_PERIOD samples (the "computing" phase).
				if (trialAge > ParamReader::SOLVE_PERIOD && ! isComputing)
				{
					isComputing = true; 
					startEpisode = i; 
				}

				if (isComputing)
				{
					avgRewardCnt++;
					runRewardCnt++;
				}

				const action_t* a = agent_start(o); 
				// step loop (index renamed from 'j' -- it shadowed the
				// horizon index above)
				for(int step=0; step< STEP_NUM && !quit; step++)//step
				{
					const reward_observation_terminal_t* ro =  env_step(a);

					trialAge++; 
					if (isComputing)
					{
						avgReward += ro->reward; 
						runReward += ro->reward;
					}
					if (ro->terminal) 
					{
						agent_end(ro->reward);
						break; 
					}else
						a = agent_step(ro->reward, ro->observation); 
				}
				// (dead per-episode bookkeeping removed: the old
				// totalReward/values[] were written but never read)

				//finish the experiment 50 episodes after computing started
				if (isComputing && i > startEpisode + 50)
				{
					break; 
				}
			}
			agent_cleanup(); 
			env_cleanup(); 

			// Guard: the run may be interrupted (Ctrl-C) before the
			// computing phase ever starts, leaving the counter at zero.
			if (runRewardCnt > 0)
				printf("run reward for multistep backup of %d is %lf\n", ParamReader::MULTISTEP_BACKUP_HORIZON , runReward/runRewardCnt); 
		}//each run 
		if (avgRewardCnt > 0)
			printf("average reward for multistep backup of %d is %lf\n", ParamReader::MULTISTEP_BACKUP_HORIZON ,  avgReward/avgRewardCnt); 
		 
	}//for j
}


/*
Experiment entry point: installs a Ctrl-C handler so a run can be stopped
gracefully, executes the selected experiment, then waits for a key press
before exiting (keeps the console window open).
*/
int main()
{
	// (Uncomment _CrtSetBreakAlloc to break on a specific allocation id.)
//_CrtSetBreakAlloc(3977);

	// Enable CRT heap diagnostics / leak reporting when building on Windows.
#ifdef OS_WINDOWS
_CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
#endif

	// The experiments re-seed deterministically themselves; enable this
	// instead for non-reproducible runs.
//	srand(time(NULL));

	// Let Ctrl-C request a graceful stop of the experiment loops.
	signal(SIGINT, signalHandler);

	// Select which experiment to run.
//	runDifferentSamples(); 
	runDifferentHorizons(); 

	getchar();	// hold the console open until a key is pressed
	return 0; 
}
