// Copyright (C) 2003
// Gerhard Neumann (gneumann@gmx.net)
// Stephan Neumann (sneumann@gmx.net) 
//                
// This file is part of RL Toolbox.
// http://www.igi.tugraz.at/ril_toolbox
//
// All rights reserved.
// 
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
// 3. The name of the author may not be used to endorse or promote products
//    derived from this software without specific prior written permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


/*****************************************************************************************************
Tutorial for the Reinforcement Learning Toolbox. In this example the pole-balancing task is learned
with a Q-Learning algorithm. For discretization, the built-in single-state discretizers are used.
Training runs until the agent manages to balance the pole for over 100000 steps, or for at most 500 episodes.
******************************************************************************************************/

#include <time.h>
#include "ril_debug.h"
#include "cmultipolemodel.h"
#include "ctdlearner.h"
#include "cpolicies.h"
#include "cagent.h"
#include "cactorcritic.h"
#include "cqfunction.h"
#include "cqetraces.h"


// Angle constants in radians, used below for the pole-angle partitions.
// Typed constants instead of preprocessor macros: same names and values,
// but scoped, typed, and visible to the debugger.
static const double one_degree = 0.0174532;	/* 2*pi/360 */
static const double six_degrees = 0.1047192;
static const double twelve_degrees = 0.2094384;
static const double fifty_degrees = 0.87266;


int main(void)
{
	// initialize the random generator
	//srand((unsigned int)time((time_t *)NULL));
	srand(1000);

	printf ("-=<   Reinforcement Learning Benchmark - Pole Balancing with Q-Function Learning >=-\n\n");

	// variable declaration
	CMultiPoleModel *model = NULL;
	CAgentController *policy = NULL;
	CAbstractStateDiscretizer *discState = NULL;

	CRewardFunction *rewardFunction = NULL;
	CAgent *agent;

	// create the model
	model = new CMultiPoleModel();
	// initialize the reward function
	/* Our environment model also implements the reward function interface */
	rewardFunction = model;

	// create the agent
	agent = new CAgent(model); 
	
	// create the discretizer with the build in classes
	// create the partition arrays
	double partitions1[] = { -0.8, 0.8}; // partition for x
	double partitions2[] = {-0.5, 0.5}; // partition for x_dot
	double partitions3[] = {-six_degrees, -one_degree, 0, one_degree, six_degrees}; // partition for theta
	double partitions4[] = {-fifty_degrees, fifty_degrees}; // partition for theta_dot

	// Create the discretizer for the state variables
	CAbstractStateDiscretizer *disc1 = new CSingleStateDiscretizer(0, 2, partitions1);
	CAbstractStateDiscretizer *disc2 = new CSingleStateDiscretizer(1, 2, partitions2);
	CAbstractStateDiscretizer *disc3 = new CSingleStateDiscretizer(2, 5, partitions3);
	CAbstractStateDiscretizer *disc4 = new CSingleStateDiscretizer(3, 2, partitions4);

	// Merge the 4 discretizer
	CDiscreteStateOperatorAnd *andCalculator = new CDiscreteStateOperatorAnd();

	andCalculator->addStateModifier(disc1);
	andCalculator->addStateModifier(disc2);
	andCalculator->addStateModifier(disc3);
	andCalculator->addStateModifier(disc4);

	discState = andCalculator;

	// add the discrete state to the agent's state modifier
	// discState must not be modified (e.g. with a State-Substitution) by now
	agent->addStateModifier(discState);

	
	// create the 2 actions for accelerating the cart and add them to the agent's action set
	CPrimitiveAction *primAction1 = new CMultiPoleAction(10.0);
	CPrimitiveAction *primAction2 = new CMultiPoleAction(-10.0);

	agent->addAction(primAction1);
	agent->addAction(primAction2);


	// Create the learner and the Q-Function
	CFeatureQFunction *qTable = new CFeatureQFunction(agent->getActions(), discState);
	
	CTDLearner *learner = new CQLearner(rewardFunction, qTable);
	// initialise the learning algorithm parameters
	learner->setParameter("QLearningRate", 0.1);
	learner->setParameter("DiscountFactor", 0.99);
	learner->setParameter("ReplacingETraces", 0.0);
	learner->setParameter("Lambda", 1.0);

	// Set the minimum value of a etrace, we need very small values
	learner->setParameter("ETraceTreshold", 0.00001);
	// Set the maximum size of the etrace list, standard is 100
	learner->setParameter("ETraceMaxListSize", 163);

	// add the Q-Learner to the listener list
	agent->addSemiMDPListener(learner);

	// Create the learners controller from the Q-Function, we use a SoftMaxPolicy
	policy = new CQStochasticPolicy(agent->getActions(), new CEpsilonGreedyDistribution(0.1), qTable);

	// set the policy as controller of the agent
	agent->setController(policy);
	
	// disable automatic logging of the current episode from the agent
	agent->setLogEpisode(false);

	int steps = 0;

	int max_Steps = 100000;
	// Learn for 500 Episodes
	for (int i = 0; i < 500; i++)
	{
		// set adaptive Epsilon
		policy->setParameter("EpsilonGreedy", 0.1 / (i + 1));
		// Do one training trial, with max max_Steps steps
		steps = agent->doControllerEpisode(1, max_Steps);

		printf("Episode %d %s with %d steps\n", i, model->isFailed() ? "failed" : "succeded", steps);
	
		if (steps >= max_Steps)
		{
			printf("Learned to balance the pole after %d Episodes\n", i);
			break;
		}
	}


	delete policy;
	delete learner;
	delete agent;
	delete qTable;
	delete model;
	
	printf("\n\n<< Press Enter >>\n");
	getchar();
}

