
#include "SimAgent.h"

// Implementation of SimAgent: builds the RL agent, its action set, the
// function approximator, the learner and the exploration policy for each
// experiment, and runs the learning loop.

/// Construct an agent for one experiment.
/// @param ploadQFileName  Q-table file to restore in init(), or the sentinel
///                        `nofile` string to start from scratch.
/// @param pexptNum        Experiment selector (EXPERIMENT_* constants).
/// @param pSimEnv         Simulation environment; NOTE the destructor deletes
///                        it, so ownership is assumed to transfer here.
SimAgent::SimAgent(string ploadQFileName, int pexptNum, SimEnvironment *pSimEnv)
	: exptNum(pexptNum),
	  simEnv(pSimEnv),
	  loadQFileName(ploadQFileName),
	  functionApproxType(FN_APPROX_TILING),  // defaults; may be overridden before init()
	  learner(LEARNER_SARSA)
{
}

/// Record the learning hyper-parameters consumed later by init() and
/// startLearning().
/// @param pepsilon   Initial epsilon for epsilon-greedy exploration.
/// @param pepisodes  Number of learning episodes to run.
void SimAgent::setParams(double pepsilon, int pepisodes){
	episodes = pepisodes;
	epsilon = pepsilon;
}

/// Register one SimAction per action vector with the agent and echo the
/// registered action values to stdout for debugging.
/// @param acts  One entry per action; each inner vector is the action's
///              parameter values (passed to the SimAction constructor).
void SimAgent::action_add(std::vector<std::vector<double> > acts){

	for(int i = 0; i < (int)acts.size(); i++){
		agent->addAction(new SimAction(acts[i]));
	}

	std::cout<<" Available actions: "<<std::endl;
	// NOTE: this assumes the freshly added actions occupy indices
	// 0..acts.size()-1 of the agent's action set (i.e. no actions were
	// registered before this call).
	for(int i = 0; i < (int)acts.size(); i++){
		CAction* act = agent->getActions()->get(i);
		SimAction *myAct = dynamic_cast<SimAction *>(act);
		if(myAct == NULL){
			// Guard: dereferencing a failed dynamic_cast would crash.
			continue;
		}
		for(int j = 0; j < myAct->getNumActionSizeVector(); j++){
			std::cout<<myAct->getActionVal(j)<<" ";
		}
		std::cout<<std::endl;
	}
}

/// Build the Q-function for the currently selected function-approximation
/// scheme and, for feature-based schemes, register the feature calculator as
/// a state modifier on the agent.
///
/// @param numdimensions    Number of continuous state dimensions used.
/// @param tile_dimensions  Per-dimension state-variable indices (tiling).
/// @param tile_partitions  Per-dimension partition counts (tiling).
/// @param rbf_dimensions   Per-dimension state-variable indices (RBF).
/// @param rbf_partitions   Per-dimension partition counts (RBF).
/// @param offsets          Per-dimension grid offsets.
/// @param sigma            Per-dimension RBF widths.
/// @param num_hidden       Hidden-layer size for the neural approximator.
/// @param numActionTypes   Number of actions (one MLP each in neural mode).
void SimAgent::applyFnApprox(int numdimensions,
		unsigned int *tile_dimensions,
		unsigned int *tile_partitions,
		unsigned int *rbf_dimensions,
		unsigned int *rbf_partitions,
		double *offsets,
		double *sigma,
		int num_hidden,
		int numActionTypes
		){

	CFeatureCalculator *featureCalc = NULL;  // only used by tiling/RBF branches

	switch(functionApproxType){
		default:
			// Unknown approximation type: warn and fall back to tiling so
			// that qFunction is always initialized (callers dereference it).
			std::cerr<<"SimAgent::applyFnApprox: unknown approximation type "
					<<functionApproxType<<", falling back to tiling"<<std::endl;
			// fall through
		case FN_APPROX_TILING: {
			// Tile-coding features over the selected state dimensions.
			featureCalc = new CTilingFeatureCalculator(numdimensions, tile_dimensions, tile_partitions, offsets);
			std::cout<<" using tiling"<<std::endl;
			agent->addStateModifier(featureCalc);
			qFunction = new CFeatureQFunction(agent->getActions(), featureCalc);
			break;
		}
		case FN_APPROX_RBF: {
			// Radial-basis-function features over the selected dimensions.
			featureCalc = new CRBFFeatureCalculator(numdimensions, rbf_dimensions, rbf_partitions, offsets, sigma);
			std::cout<<" using rbf"<<std::endl;
			agent->addStateModifier(featureCalc);
			qFunction = new CFeatureQFunction(agent->getActions(), featureCalc);
			break;
		}
		case FN_APPROX_NEURAL: {
			// One 3-layer MLP (linear -> sigmoid -> linear, scalar output)
			// per action, each wrapped as a value function over the state.
			int n_inputs = simEnv->getNumContinuousStates();
			int n_hidden = num_hidden;

			qFunction = new CQFunction(agent->getActions());

			for(int i=0; i< numActionTypes ; i++){
				Torch::MLP *mlp = new Torch::MLP(3, n_inputs, "linear", n_hidden, "sigmoid", n_hidden, "linear", 1);
				CTorchGradientFunction *gradientFn = new CTorchGradientFunction(mlp);
				CAbstractVFunction *fnFromGradientFn = new CVFunctionFromGradientFunction(gradientFn, simEnv->getStateProperties());
				CAction* act = agent->getActions()->get(i);
				qFunction->setVFunction(act, fnFromGradientFn);
			}
			std::cout<<" using neural network"<<std::endl;
			break;
		}
	}
}

void SimAgent::init()
{
	// Initialize the random generator 
	//srand((unsigned int) time(NULL));

	//printf ("-=<   Reinforcement Learning Example - Learning the Shortest Path Problem in a Gridworld   >=-\n\n");

	// Create the Gridworld, set max_bounces to 50. The model will reset the episode if "max_bounces" 
	// bounces (walk into the wall) are reached.
	
	//	CGridWorldModel *gridworldModel = new CGridWorldModel(gridworldFileName, 50);



	rewardFunction = new SimRewardFunction(simEnv, exptNum);
	rewardFunction->rewardType = simEnv->rewardType;

	rewardLogger= new CRewardLogger(rewardFunction);

	std::cout<<"SimAgent;: init() "<< simEnv->getStateProperties()->getNumDiscreteStates()<<std::endl;

	// Create the environment for the agent, the environment saves the current state of the agent.
	environmentModel = new CTransitionFunctionEnvironment(simEnv);
	
	std::cout<<"created environmentModel"<<std::endl;

	// the gridworld model implements the reward function too, so we can use this
//	CRewardFunction *rewardFunction = gridworldModel;
	
	// Create the agent in our environmentModel.

	agent = new CAgent(environmentModel);

	CFeatureCalculator *featureCalc;
	 std::cout<<" about to input actions"<<std::endl;

	switch(exptNum){
		case EXPERIMENT_1_SPHERE_GOAL:
		{
			int numActionTypes = 4;
			int actionVectorSize = 3;
			double a[4][3]
			                = { {0.0 , 0.0, 10.0},
								{0.0 , 0.0, -10.0},
								{10.0 , 0.0, 0.0},
								{-10.0 , 0.0, 0.0}
							};

			std::vector<std::vector<double> > acts;
			for(int i=0; i< numActionTypes; i++){
				acts.push_back(
						std::vector<double>(
								&(a[i][0]), &(a[i][actionVectorSize])
								));
			}

			action_add(acts);

			int numdimensions = 2;

			unsigned int tile_dimensions[] = {0,1};
			unsigned int tile_partitions[] = {25, 25};
			unsigned int rbf_dimensions[] = {0,1};
			unsigned int rbf_partitions[] = {25, 25};
			double offsets[] = {0.0 ,0.0};
			double sigma[] = {0.02, 0.02};
			int n_hidden = 10;

			applyFnApprox(numdimensions,
					tile_dimensions, tile_partitions,
					rbf_dimensions, rbf_partitions,
					offsets, sigma, n_hidden, numActionTypes);

			//std::cout<<" set tile features"<<std::endl;

//			agent->addStateModifier(featureCalc);
			break;
		}
		case EXPERIMENT_2_PUSH_BALL:
		{
			int numActionTypes = 4;
			int actionVectorSize = 3;

			double a[4][3]
							= { {0 , 0, 10},
								{0 , 0, -10},
								{10 , 0, 0},
								{-10 , 0, 0}
							};
			std::vector<std::vector<double> > acts;
			for(int i=0; i< numActionTypes; i++){
				acts.push_back(
						std::vector<double>(
								&(a[i][0]), &(a[i][actionVectorSize])
								));
			}

			action_add(acts);

			//std::cout<<" setting tile features"<<std::endl;

			int numdimensions = 4;
			unsigned int tile_dimensions[] = {0,1,2,3};
			unsigned int tile_partitions[] = {25, 25, 25, 25};
			unsigned int rbf_dimensions[] = {0,1,2,3};
			unsigned int rbf_partitions[] = {25, 25, 25, 25};
			double offsets[] = {0.0 ,0.0, 0.0 ,0.0};
			double sigma[] = {0.02, 0.02, 0.02, 0.02};
			int n_hidden = 10;

			applyFnApprox(numdimensions,
					tile_dimensions, tile_partitions,
					rbf_dimensions, rbf_partitions,
					offsets, sigma, n_hidden, numActionTypes);
			break;
		}
		case EXPERIMENT_3_JOINT_ACTION_PUSH:
		{

			int numActionTypes = 24;
			int actionVectorSize = 6;
			double a[24][6]
							= {
								{0 , 0, 20, 0 , 0, 0},
								{0 , 0, -20, 0 , 0, 0 },
								{20 , 0, 0, 0 , 0, 0},
								{-20 , 0, 0, 0 , 0, 0 },

								{0 , 0, 0, 0 , 0, 20},
								{0 , 0, 0, 0 , 0, -20 },
								{0 , 0, 0, 20 , 0, 0},
								{0 , 0, 0, -20 , 0, 0 },

								{0 , 0, 20, 0 , 0, 20},
								{0 , 0, 20, 0 , 0, -20 },
								{0 , 0, 20, 20 , 0, 0},
								{0 , 0, 20, -20 , 0, 0 },

								{0 , 0, -20, 0 , 0, 20},
								{0 , 0, -20, 0 , 0, -20 },
								{0 , 0, -20, 20 , 0, 0},
								{0 , 0, -20, -20 , 0, 0 },

								{20 , 0, 0, 0 , 0, 20},
								{20 , 0, 0, 0 , 0, -20 },
								{20 , 0, 0, 20 , 0, 0},
								{20 , 0, 0, -20 , 0, 0 },

								{-20 , 0, 0, 0 , 0, 20},
								{-20 , 0, 0, 0 , 0, -20 },
								{-20 , 0, 0, 20 , 0, 0},
								{-20 , 0, 0, -20 , 0, 0 },

							};

			std::vector<std::vector<double> > acts;
			for(int i=0; i< numActionTypes; i++){
				acts.push_back(
						std::vector<double>(
								&(a[i][0]), &(a[i][actionVectorSize])
								));
			}

			action_add(acts);

			//std::cout<<" setting tile features"<<std::endl;
			int numdimensions = 6;

			unsigned int tile_dimensions[] = {0, 1, 2, 3, 4, 5 };
			unsigned int tile_partitions[] = {12, 12, 12, 12, 12, 12};
			unsigned int rbf_dimensions[] = {0, 1, 2, 3, 4, 5 };
			unsigned int rbf_partitions[] = {12, 12, 12, 12, 12, 12};
			double offsets[] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
			double sigma[] = {0.04, 0.04, 0.04, 0.04, 0.04, 0.04};
			int n_hidden = 10;

			applyFnApprox(numdimensions,
					tile_dimensions, tile_partitions,
					rbf_dimensions, rbf_partitions,
					offsets, sigma, n_hidden, numActionTypes);

			//std::cout<<" set tile features"<<std::endl;

			break;
		}

		case EXPERIMENT_4_COOPERATION_OBSTACLE:
			{

				int numActionTypes = 24;
				int actionVectorSize = 6;
				double a[24][6]
								= {
									{0 , 0, 10, 0 , 0, 0},
									{0 , 0, -10, 0 , 0, 0 },
									{10 , 0, 0, 0 , 0, 0},
									{-10 , 0, 0, 0 , 0, 0 },

									{0 , 0, 0, 0 , 0, 10},
									{0 , 0, 0, 0 , 0, -10 },
									{0 , 0, 0, 10 , 0, 0},
									{0 , 0, 0, -10 , 0, 0 },

									{0 , 0, 10, 0 , 0, 10},
									{0 , 0, 10, 0 , 0, -10 },
									{0 , 0, 10, 10 , 0, 0},
									{0 , 0, 10, -10 , 0, 0 },

									{0 , 0, -10, 0 , 0, 10},
									{0 , 0, -10, 0 , 0, -10 },
									{0 , 0, -10, 10 , 0, 0},
									{0 , 0, -10, -10 , 0, 0 },

									{10 , 0, 0, 0 , 0, 10},
									{10 , 0, 0, 0 , 0, -10 },
									{10 , 0, 0, 10 , 0, 0},
									{10 , 0, 0, -10 , 0, 0 },

									{-10 , 0, 0, 0 , 0, 10},
									{-10 , 0, 0, 0 , 0, -10 },
									{-10 , 0, 0, 10 , 0, 0},
									{-10 , 0, 0, -10 , 0, 0 },

								};

				std::vector<std::vector<double> > acts;
				for(int i=0; i< numActionTypes; i++){
					acts.push_back(
							std::vector<double>(
									&(a[i][0]), &(a[i][actionVectorSize])
									));
				}

				action_add(acts);

				int numdimensions = 4;
				unsigned int tile_dimensions[] = {0,1,2,3};
				unsigned int tile_partitions[] = {25, 25, 25, 25};
				unsigned int rbf_dimensions[] = {0,1,2,3};
				unsigned int rbf_partitions[] = {25, 25, 25, 25};
				double offsets[] = {0.0, 0.0, 0.0, 0.0};
				double sigma[] = {0.02, 0.02, 0.02, 0.02};
				int n_hidden = 10;

				applyFnApprox(numdimensions,
						tile_dimensions, tile_partitions,
						rbf_dimensions, rbf_partitions,
						offsets, sigma, n_hidden, numActionTypes);

				//std::cout<<" set tile features"<<std::endl;
				break;
			}
		case EXPERIMENT_5_CAPTURE_HUMAN:
			{
				int numActionTypes = 4;
				int actionVectorSize = 3;
				double a[4][3]
								= { {0 , 0, 10},
									{0 , 0, -10},
									{10 , 0, 0},
									{-10 , 0, 0}
								};

				std::vector<std::vector<double> > acts;
				for(int i=0; i< numActionTypes; i++){
					acts.push_back(
							std::vector<double>(
									&(a[i][0]), &(a[i][actionVectorSize])
									));
				}

				action_add(acts);

				//std::cout<<" setting tile features"<<std::endl;

				int numdimensions = 4;
				unsigned int tile_dimensions[] = {0,1,2,3};
				unsigned int tile_partitions[] = {25, 25, 25, 25};
				unsigned int rbf_dimensions[] = {0, 1, 2, 3 };
				unsigned int rbf_partitions[] = {25, 25, 25, 25};
				double offsets[] = {0.0, 0.0, 0.0, 0.0 };
				double sigma[] = {0.02, 0.02, 0.02, 0.02};
				int n_hidden = 10;

				applyFnApprox(numdimensions,
						tile_dimensions, tile_partitions,
						rbf_dimensions, rbf_partitions,
						offsets, sigma, n_hidden, numActionTypes);
				//std::cout<<" set tile features"<<std::endl;

				break;
			}
		case EXPERIMENT_6_MULTI_AGENT_PUSH:
			{

				int numActionTypes = 4;
				int actionVectorSize = 4;
				double a[4][4]
								= { {0 , 0, 20, id},
									{0 , 0, -20, id},
									{20 , 0, 0, id },
									{-20 , 0, 0, id}
								};

				std::vector<std::vector<double> > acts;
				for(int i=0; i< numActionTypes; i++){
					acts.push_back(
							std::vector<double>(
									&(a[i][0]), &(a[i][actionVectorSize])
									));
				}

				action_add(acts);

				std::cout<<" setting tile features"<<std::endl;
				int numdimensions = 6;

				unsigned int tile_dimensions[] = {0, 1, 2, 3, 4, 5 };
				unsigned int tile_partitions[] = {12, 12, 12, 12, 12, 12};
				unsigned int rbf_dimensions[] = {0, 1, 2, 3, 4, 5 };
				unsigned int rbf_partitions[] = {12, 12, 12, 12, 12, 12};
				double offsets[] = {0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
				double sigma[] = {0.04, 0.04, 0.04, 0.04, 0.04, 0.04};
				int n_hidden = 10;

				applyFnApprox(numdimensions,
						tile_dimensions, tile_partitions,
						rbf_dimensions, rbf_partitions,
						offsets, sigma, n_hidden, numActionTypes);

				std::cout<<" set tile features"<<std::endl;

				break;
			}
		case EXPERIMENT_7_JOINTS:
			{
				int numActionTypes = 6;
				int actionVectorSize = 4;

				double a[6][4]
								= { {0, 0 , 0, 10},
									{0, 0 , 0, -10},
									{0, 10 , 0, 0},
									{0, -10 , 0, 0},
									{1, 90.0, 0, 0},
									{1, -90.0, 0, 0}
								};

				std::vector<std::vector<double> > acts;
				for(int i=0; i< numActionTypes; i++){
					acts.push_back(
							std::vector<double>(
									&(a[i][0]), &(a[i][actionVectorSize])
									));
				}

				action_add(acts);

				//std::cout<<" setting tile features"<<std::endl;

				int numdimensions = 4;
				unsigned int tile_dimensions[] = {0,1,2,3};
				unsigned int tile_partitions[] = {25, 25, 25, 25};
				unsigned int rbf_dimensions[] = {0,1,2,3};
				unsigned int rbf_partitions[] = {12, 12, 12, 12};
				double offsets[] = {0.0 ,0.0, 0.0 ,0.0};
				double sigma[] = {0.04, 0.04, 0.04, 0.04};
				int n_hidden = 10;

				applyFnApprox(numdimensions,
						tile_dimensions, tile_partitions,
						rbf_dimensions, rbf_partitions,
						offsets, sigma, n_hidden, numActionTypes);

				break;
			}
		case EXPERIMENT_8_MULTI_IL_PUSH:
				{

					int numActionTypes = 4;
					int actionVectorSize = 4;
					double a[4][4]
									= { {0 , 0, 20, id},
										{0 , 0, -20, id},
										{20 , 0, 0, id },
										{-20 , 0, 0, id}
									};

					std::vector<std::vector<double> > acts;
					for(int i=0; i< numActionTypes; i++){
						acts.push_back(
								std::vector<double>(
										&(a[i][0]), &(a[i][actionVectorSize])
										));
					}

					action_add(acts);

					std::cout<<" setting tile features"<<std::endl;
					int numdimensions = 4;

					unsigned int tile_dimensions[] = {0, 1, 2, 3};
					unsigned int tile_partitions[] = {12, 12, 12, 12};
					unsigned int rbf_dimensions[] = {0, 1, 2, 3};
					unsigned int rbf_partitions[] = {12, 12, 12, 12};
					double offsets[] = {0.0, 0.0, 0.0, 0.0};
					double sigma[] = {0.04, 0.04, 0.04, 0.04};
					int n_hidden = 10;

					applyFnApprox(numdimensions,
							tile_dimensions, tile_partitions,
							rbf_dimensions, rbf_partitions,
							offsets, sigma, n_hidden, numActionTypes);

					std::cout<<" set tile features"<<std::endl;

					break;
				}
	}
	

  //  std::cout<<" actions done "<<std::endl;

	// Create an Agent Logger for logging the episodes
	// Our agent logger logs the gridworld model state and the actions of the agent. This logger holds all episodes in memory.

	logger = new CAgentLogger(simEnv->getStateProperties(), agent->getActions());
	// add the logger to the agent's listener list
	agent->addSemiMDPListener(logger);
	agent->addSemiMDPListener(rewardLogger);

//	std::cout<<"Created logger \n";

	// Q-Learning starts here

	// Create our Q-Function, we will use a Feature Q-Function, which is table-like representation of the Q-Function.
	// The Q-Function needs to know which actions and which state it has to use
	//qFunction = new CFeatureQFunction(agent->getActions(), featureCalc);

//	std::cout<<"qfunction created \n";

	//if (strcmp(load_q_filename, "nofile")!=0) { //If the string do not match

	if(loadQFileName.compare(nofile) != 0) {//If the string do not match
	    // Load the QFunction
		std::cout<<"agent: "<< id<<" loading data from file: "<<loadQFileName<<std::endl;
	    FILE *qFuncFile = fopen(loadQFileName.c_str(),"r");
	    qFunction->loadData(qFuncFile);
	    fclose(qFuncFile);
	}

	//std::cout<<"loading data from file \n";



	// Create the Q-Function learner, we will use a SarsaLearner
	// The Sarsa Learner needs the reward function, the Q-Function and the agent.
	// The agent is used to get the estimation policy, because Sarsa Learning is On-Policy learning.

	switch(learner){
		case LEARNER_SARSA:
			qFunctionLearner = new CSarsaLearner(rewardFunction, qFunction, agent);
			std::cout<<" Learner: SARSA"<<std::endl;
			break;
		case LEARNER_Q:
			qFunctionLearner = new CQLearner(rewardFunction, qFunction);
			std::cout<<" Learner: Q "<<std::endl;
			break;
	}

	//std::cout<<"qfunction learner created \n";

	// Create the Controller for the agent from the QFunction. We will use a EpsilonGreedy-Policy for exploration.
	//epsilon = 1.0;
	qLearnerPolicy = new CQStochasticPolicy(agent->getActions(), new CEpsilonGreedyDistribution(epsilon), qFunction);
		
	//std::cout<<"qfunction learner policy created \n";

	// Set some options of the Etraces which are not default
	qFunctionLearner->setParameter("ReplacingETraces", 1.0);
	qFunctionLearner->setParameter("Lambda", 0.95);
	
	//std::cout<<"set parameters \n";

	// Add the learner to the agent listener list, so he can learn from the agent's steps.
	agent->addSemiMDPListener(qFunctionLearner);
	
	// Set the controller of the agent
	agent->setController(qLearnerPolicy);
	
	// Disable logging of the current Episode
	agent->setLogEpisode(false);
}

void SimAgent::addActions(double **a, int numActionTypes, int actionVectorSize){

	std::vector<std::vector<double> > acts;
	for(int i=0; i< numActionTypes; i++){
		acts.push_back(
				std::vector<double>(
						&(a[i][0]), &(a[i][actionVectorSize])
						));
	}

	for(int i=0;i< acts.size(); i++){
		agent->addAction(new SimAction(acts[i]));
	}

	for(int i=0; i< acts.size(); i++){
		CAction* act =agent->getActions()->get(i);
		SimAction *myAct = dynamic_cast<SimAction *>(act);
		for(int j=0; j <  myAct->getNumActionSizeVector(); j++) {
			std::cout<<myAct->getActionVal(j)<<" ";
		}
		std::cout<<std::endl;
	}
}

/// Run `episodes` learning episodes with a linearly decaying epsilon, then
/// save the Q-table and the per-episode reward log to files whose names
/// encode the experiment number, agent id, learner and approximator type.
void SimAgent::startLearning(){

	int steps = 0;
	int ges_failed = 0, ges_succeeded = 0, last_succeeded = 0;

	int totalSteps = 0;

	int numWins = 0;
	int numMaxSteps = 1000;  // per-episode step cap

	std::cout<<" about to start learning for "<<episodes<<" episodes "<<std::endl;

	// Linear decay so that epsilon reaches ~0 by the final episode.
	double delta = epsilon/episodes;

	for (int i = 0; i < episodes; i++)
	{
		// Synchronize with the other simulation threads before each episode.
		pthread_barrier_wait(&simEnv->simBarrier);

		double epsi = agent->getController()->getParameter("EpsilonGreedy");
		epsi -= delta;

		// Clamp tiny/negative values to exactly zero.
		if(epsi <= 0 + 0.0000000001) epsi = 0.0;
		agent->getController()->setParameter("EpsilonGreedy",epsi);

		// Start a new episode; the agent gets reset in one of the start states.
		agent->startNewEpisode();

		// Learn one episode with at most numMaxSteps steps.
		steps = agent->doControllerEpisode(1, numMaxSteps);

		totalSteps += steps;

		// The episode failed if the model reports failure or the step cap
		// was reached.
		if (environmentModel->isFailed() || steps >= numMaxSteps)
		{
			ges_failed++;
			last_succeeded = 0;
			printf("Episode %d failed with %d steps\n", i, steps);
		}
		else
		{
			ges_succeeded++;
			last_succeeded++;
			numWins++;
			printf("Episode %d succeeded with %d steps, %d Episodes succeeded in the row\n", i, steps, last_succeeded);
		}
	}

	printf(" Learned %d times\n", numWins);

	// Build the "<experiment><num>_<id><learner>[<potential>]<approx>" file
	// name prefix. snprintf (vs the previous sprintf into 4/5-byte buffers)
	// cannot overflow for large exptNum/id values.
	char exptNumStr[16];
	snprintf(exptNumStr, sizeof(exptNumStr), "%d", exptNum);
	char idStr[16];
	snprintf(idStr, sizeof(idStr), "%d", id);

	std::cout<<"Writing qtable file "<<std::endl;

	string experimentDef = experiment + exptNumStr + underscore + idStr ;
	switch(learner){
	case LEARNER_SARSA: experimentDef += sarsa;
		break;
	case LEARNER_Q: experimentDef += qlearner;
		break;
	}
	if(simEnv->rewardType == REWARD_POTENTIAL){
		experimentDef += potential;
	}
	switch(functionApproxType){
	case FN_APPROX_TILING: experimentDef += tiling;
		break;
	case FN_APPROX_RBF: experimentDef += rbf;
		break;
	case FN_APPROX_NEURAL: experimentDef += neuralNetwork;
		break;

	}

	// Save the Q-function; guard against fopen failure (previously an
	// unchecked NULL was passed to saveData/fclose).
	string qtableFileName = experimentDef + tableFile ;
	FILE *qFuncFile = fopen(qtableFileName.c_str(),"w");
	if (qFuncFile != NULL) {
		qFunction->saveData(qFuncFile);
		fclose(qFuncFile);
	} else {
		std::cerr<<"SimAgent::startLearning(): could not open "<<qtableFileName<<std::endl;
	}

	std::cout<<"Writing rewards file"<<std::endl;

	string rewardLogFileStr = experimentDef + rewardFile;
	FILE *rewardLogFile = fopen(rewardLogFileStr.c_str(), "w");
	if (rewardLogFile != NULL) {
		fprintf(rewardLogFile,"TotalEpisodes %d \n",episodes);
		rewardLogger->saveData(rewardLogFile);
		fclose(rewardLogFile);
	} else {
		std::cerr<<"SimAgent::startLearning(): could not open "<<rewardLogFileStr<<std::endl;
	}
}

SimAgent::~SimAgent(){
	// Cleaning Up
	
	// NOTE(review): qFeatureFunction is never assigned in this translation
	// unit — if it is not initialized (e.g. to NULL) in the header or other
	// setup code, this delete is undefined behavior. Verify against SimAgent.h.
	delete qFeatureFunction;
	delete qFunction;
	delete qLearnerPolicy;
	delete qFunctionLearner;
	delete logger;
	delete agent;
	delete environmentModel;
	// NOTE(review): simEnv is injected via the constructor; deleting it here
	// assumes SimAgent owns it — confirm callers neither delete it themselves
	// (double free) nor use it after this destructor runs.
	delete simEnv;
	// NOTE(review): rewardFunction and rewardLogger (allocated in init()) are
	// not deleted here — possible leak.
}
