#include <time.h>

#include "ril_debug.h"
#include "ctdlearner.h"
#include "cpolicies.h"
#include "cagent.h"
#include "cagentlogger.h"
#include "crewardmodel.h"
#include "canalyzer.h"
#include "ctaxidomain.h"
#include "chierarchiccontroller.h"
#include "cvfunctionlearner.h"
#include "cstate.h"
#include "caction.h"

#include <stdio.h>

// This is the entry point for this application
// Entry point: trains a Q(lambda) agent on the 10x10 taxi grid world and
// reports average episode length every 50 episodes.
int main(void)
{
	// Seed the RNG so start-state sampling and the softmax policy vary per run.
	srand((unsigned int)time((time_t *)NULL));

	// Mutable buffer (not a string-literal pointer): binding a literal to a
	// non-const char* is ill-formed in C++11+, and the domain ctor may take char*.
	static char grid_file[] = "../Taxi_10x10.txt";

	CTaxiDomain *taxi = NULL;
	CTransitionFunctionEnvironment *model = NULL;
	CAgent *agent = NULL;
	CRewardFunction *rewardFunction = NULL;

	printf ("-=<   Reinforcement Learning Benchmark - TaxiDomain   >=-\n\n");

	// Build the taxi domain and its reward structure:
	// -1 per step, -5 for bouncing into a wall, +100 for a successful delivery.
	taxi = new CTaxiDomain(grid_file);
	taxi->setRewardStandard(-1);
	taxi->setRewardBounce(-5.0);
	taxi->setRewardSuccess(100.0);

	model = new CTransitionFunctionEnvironment(taxi);

	// Sample 50 random reset states to serve as the episode start-state pool.
	CStateList *stateList = new CStateList(model->getStateProperties());
	CState *state = new CState(model->getStateProperties());

	for (int i = 0; i < 50; i++)
	{
		taxi->getResetState(state);
		stateList->addState(state);
	}
	model->setStartStates(stateList);

	// The taxi domain doubles as the reward function.
	rewardFunction = taxi;
	agent = new CAgent(model);

	// Primitive actions: four compass moves plus passenger pickup/putdown.
	CPrimitiveAction *left = new CGridWorldAction(-1,0);
	CPrimitiveAction *right = new CGridWorldAction(1,0);
	CPrimitiveAction *up = new CGridWorldAction(0,-1);
	CPrimitiveAction *down = new CGridWorldAction(0,1);
	CPrimitiveAction *pickup = new CPickupAction();
	CPrimitiveAction *drop = new CPutdownAction();

	agent->addAction(left);
	agent->addAction(right);
	agent->addAction(up);
	agent->addAction(down);
	agent->addAction(pickup);
	agent->addAction(drop);

	CAbstractStateDiscretizer *discretizer = NULL;

	CFeatureQFunction *qTable1 = NULL;
	CTDLearner *learner1 = NULL;

	CAgentController *policy = NULL;

	// Discretize over state dimensions 0, 1, 3, 4 (taxi x/y and passenger info).
	int dim[] = {0, 1, 3, 4};
	discretizer = new CModelStateDiscretizer(model->getStateProperties(), dim, 4);

	agent->addStateModifier(discretizer);

	// Init learner: tabular Q-learning with replacing eligibility traces.
	qTable1 = new CFeatureQFunction(agent->getActions(), discretizer);
	learner1 = new CQLearner(rewardFunction, qTable1);

	learner1->setParameter("QLearningRate", 0.1);

	learner1->setParameter("ReplacingETraces", 1.0);
	learner1->setParameter("Lambda", 0.9);

	// The learner observes every (s, a, r, s') transition the agent generates.
	agent->addSemiMDPListener(learner1);

	// Softmax (Boltzmann) exploration with temperature parameter 20.
	policy = new CQStochasticPolicy(agent->getActions(), new CSoftMaxDistribution(20), qTable1);

	agent->setController(policy);

	agent->setKeyboardBreak(false);
	agent->setLogEpisode(true);

	int steps = 0;
	int totalSteps = 0;
	int ges_failed = 0;

	// Start learning: 1000 episodes, reporting stats every 50.
	for (int i = 0; i < 1000; i++)
	{
		// Start a new episode; the agent is reset to one of the start states.
		agent->startNewEpisode();
		// Learn 1 episode with at most 1000 steps.
		steps = agent->doControllerEpisode(1, 1000);

		totalSteps += steps;

		// The episode failed if max_bounces was reached (model->isFailed())
		// or the step limit was hit.
		if (model->isFailed() || steps >= 1000)
		{
			ges_failed++;
		}

		if (i % 50 == 0 && i > 0)
		{
			printf("Episode %d - %d finished on average with %d steps, (%d failed)\n", i - 50, i, totalSteps / 50,  ges_failed);

			ges_failed = 0;
			totalSteps = 0;
		}

	}

	// Teardown. NOTE(review): stateList, state, and the six action objects are
	// never deleted here — if CAgent / the environment model do not take
	// ownership of them, this leaks; confirm against the library's ownership
	// contract before adding deletes (deleting owned objects would double-free).
	delete policy;
	delete learner1;
	delete agent;
	delete qTable1;
	delete model;
	delete taxi;

	delete discretizer;

	printf("\n\n<< Press Enter >>\n");
	getchar();

	return 0;
}


