#include <time.h>

#include "ril_debug.h"
#include "ctdlearner.h"
#include "cpolicies.h"
#include "cagent.h"
#include "cagentlogger.h"
#include "crewardmodel.h"
#include "canalyzer.h"
#include "ctaxidomain.h"
#include "chierarchiccontroller.h"
#include "cstate.h"
#include "caction.h"

#include <stdio.h>

// This is the entry point for this application
// Entry point: sets up the hierarchical taxi-domain benchmark, trains for
// 1000 episodes with Q-learning on two levels (root task + per-target
// navigation behaviours), and reports average steps / failures every 50
// episodes.
int main(void)
{
	srand((unsigned int)time((time_t *)NULL));

	// String literals are const in C++; binding to non-const char* is
	// ill-formed since C++11.
	const char *grid_file = "../Taxi_10x10.txt";

	// Model-dependent variables
	CTaxiDomain *taxi = NULL;
	CTransitionFunctionEnvironment *model = NULL;
	CAgent *agent = NULL;
	CRewardFunction *rewardFunction = NULL;

	printf ("-=<   Reinforcement Learning Benchmark - TaxiDomain   >=-\n\n");

	// The taxi domain doubles as the reward function: -1 per step,
	// -5 for bouncing off a wall, +100 for a successful delivery.
	taxi = new CTaxiDomain(grid_file);
	taxi->setRewardStandard(-1);
	taxi->setRewardBounce(-5.0);
	taxi->setRewardSuccess(100.0);

	model = new CTransitionFunctionEnvironment(taxi);

	// In order to make learning runs comparable, we use 50 fixed start states.
	CStateList *stateList = new CStateList(model->getStateProperties());
	CState *state = new CState(model->getStateProperties());

	for (int i = 0; i < 50; i++)
	{
		taxi->getResetState(state);
		stateList->addState(state);
	}
	model->setStartStates(stateList);

	rewardFunction = taxi;
	agent = new CAgent(model);

	// Primitive actions: 4 grid moves plus pickup/putdown.
	CPrimitiveAction *left = new CGridWorldAction(-1,0);
	CPrimitiveAction *right = new CGridWorldAction(1,0);
	CPrimitiveAction *up = new CGridWorldAction(0,-1);
	CPrimitiveAction *down = new CGridWorldAction(0,1);
	CPrimitiveAction *pickup = new CPickupAction();
	CPrimitiveAction *drop = new CPutdownAction();

	agent->addAction(left);
	agent->addAction(right);
	agent->addAction(up);
	agent->addAction(down);
	agent->addAction(pickup);
	agent->addAction(drop);

	// Accumulated (discounted) reward over a semi-MDP step, gamma = 0.95.
	CSemiMDPLastNRewardFunction *semiMDPRewardFunction = new CSemiMDPLastNRewardFunction(rewardFunction, 0.95);
	agent->addSemiMDPListener(semiMDPRewardFunction);


	CHierarchicalSemiMarkovDecisionProcess *hierarchicalRoot = new CHierarchicalSemiMarkovDecisionProcess(agent->getCurrentEpisode());


	/// Create 1st hierarchical Level (Pickup, Drop)

	hierarchicalRoot->addAction(pickup);
	hierarchicalRoot->addAction(drop);

	// State abstraction for the navigation behaviours: only the taxi's
	// x/y position (dimensions 0 and 1).
	int dim1[] = {0,1};
	CAbstractStateDiscretizer *xyState = new CModelStateDiscretizer(model->getStateProperties(), dim1, 2);
	agent->addStateModifier(xyState);


	// One navigation behaviour (+ Q-function, learner, policy) per target
	// location; the pointer arrays just index them for later wiring.
	CTDLearner **learners = new CTDLearner*[taxi->getNumTargets()];
	CTaxiHierarchicalBehaviour **behaviours = new CTaxiHierarchicalBehaviour*[taxi->getNumTargets()];
	CQFunction **qfunctions = new CQFunction*[taxi->getNumTargets()];
	CAgentController **policies = new CAgentController*[taxi->getNumTargets()];

	for (int i = 0; i < taxi->getNumTargets(); i ++)
	{
		// Behaviour i: drive the taxi to target location i using the
		// four movement primitives.
		CTaxiHierarchicalBehaviour *behaviour = new CTaxiHierarchicalBehaviour(agent->getCurrentEpisode(), i, taxi);
		behaviour->addAction(left);
		behaviour->addAction(right);
		behaviour->addAction(up);
		behaviour->addAction(down);
		behaviour->addStateModifier(xyState);
		hierarchicalRoot->addAction(behaviour);
		behaviour->sendIntermediateSteps = false;

		CQFunction *qFunc = new CFeatureQFunction(behaviour->getActions(), xyState);
		CTDLearner *tdLearner = new CQLearner(behaviour, qFunc);
		tdLearner->setParameter("QLearningRate", 0.1);
		behaviour->addSemiMDPListener(tdLearner);
		CAgentController *policy = new CQStochasticPolicy(behaviour->getActions(), new CGreedyDistribution(), qFunc);
		behaviour->setController(policy);

		behaviours[i] = behaviour;
		policies[i] = policy;
		qfunctions[i] = qFunc;
		learners[i] = tdLearner;
	}

	// The hierarchical controller needs the full action set: all
	// primitive actions of the agent plus the navigation behaviours.
	CActionSet *allActions = new CActionSet();

	for (int i = 0; i < agent->getActions()->size(); i++)
	{
		allActions->add(agent->getActions()->get(i));
	}

	for (int i = 0; i < taxi->getNumTargets(); i++)
	{
		allActions->add(behaviours[i]);
	}



	CHierarchicalController *hierarchicalController = new CHierarchicalController(agent->getActions(), allActions, hierarchicalRoot);
	/// The controller must be added to the listener list of the agent (as the only hierarchical object)!
	agent->addSemiMDPListener(hierarchicalController);

	hierarchicalController->addHierarchicalStackListener(hierarchicalRoot);

	for (int i = 0; i < taxi->getNumTargets(); i++)
	{
		hierarchicalController->addHierarchicalStackListener(behaviours[i]);
	}

	// Root-level state abstraction: taxi x/y plus passenger location and
	// destination (dimensions 0, 1, 3, 4).
	int dim2[] = {0, 1, 3, 4};

	CAbstractStateDiscretizer *discretizer = new CModelStateDiscretizer(model->getStateProperties(), dim2, 4);

	agent->addStateModifier(discretizer);
	hierarchicalRoot->addStateModifier(discretizer);

	// Q-learning on the root task, softmax (beta = 100) exploration.
	CFeatureQFunction *qTable1= new CFeatureQFunction(hierarchicalRoot->getActions(), discretizer);
	CTDLearner *learner1 = new CQLearner(semiMDPRewardFunction, qTable1);

	learner1->setParameter("QLearningRate", 0.1);

	hierarchicalRoot->addSemiMDPListener(learner1);

	CAgentController *policy = new CQStochasticPolicy(hierarchicalRoot->getActions(), new CSoftMaxDistribution(100), qTable1);
	hierarchicalRoot->setController(policy);

	agent->setController(hierarchicalController);

	agent->setKeyboardBreak(false);
	agent->setLogEpisode(true);

	int steps = 0;
	int totalSteps = 0;
	int ges_failed = 0;

	// Start Learning, learn 1000 episodes
	for (int i = 0; i < 1000; i++)
	{
		// Start a new episode; the agent gets reset to one of the start states
		agent->startNewEpisode();
		// Learn 1 episode with at most 1000 steps
		steps = agent->doControllerEpisode(1, 1000);

		totalSteps += steps;

		// Check if the episode failed.
		// The episode has failed if max_bounces has been reached (indicated
		// through environmentModel->isFailed()), or max_steps has been reached.

		if (model->isFailed() || steps >= 1000)
		{
			ges_failed++;
		}

		// Report a 50-episode rolling summary.
		if (i % 50 == 0 && i > 0)
		{
			printf("Episode %d - %d finished on average with %d steps, (%d failed)\n", i - 50, i, totalSteps / 50,  ges_failed);

			ges_failed = 0;
			totalSteps = 0;
		}

	}

	// Free the bookkeeping pointer arrays (the pointed-to objects are owned
	// by the agent/controller graph and freed below or at process exit).
	delete [] learners;
	delete [] behaviours;
	delete [] qfunctions;
	delete [] policies;

	delete policy;
	delete learner1;
	delete agent;
	delete qTable1;
	delete model;
	delete taxi;


	printf("\n\n<< Press Enter >>\n");
	getchar();

	return 0;
}


