#ifndef QPICOLLECTOR_H
#define QPICOLLECTOR_H

#include <stdio.h>  /* for printf */
#include <string.h> /* for strcmp */
#include <time.h> /* for time() */
#include <list>
using namespace std; 


#include <rlglue/utils/C/RLStruct_util.h>
#include <rlglue/utils/C/TaskSpec_Parser.h>


#include "QPIData.h"

/*
	This agent collects data for the Q-pi project. It holds two policies:
	the target policy pi that we want to collect data from, and an exploratory
	policy p_exp. We execute pi, terminating it with probability 1-gamma at
	each timestep, then execute p_exp for a fixed number of timesteps
	(EXP_STEPS) to allow it to mix, and repeat.
	We assume that the environment is non-episodic.
*/

class QPICollector
{
public:
	QPICollector(void);
	~QPICollector(void);

	//rlglue functions 
	void init(const char* task_spec); 
	const action_t * start(const observation_t *this_observation); 
	const action_t * step(double reward, const observation_t *this_observation); 
	void end(double reward); 
	void cleanup(); 
	const char* message(const char* inMessage); 


	action_t* getActionFromPi(const observation_t* state); 
	action_t* getActionFromExpPi( const observation_t* state); 
	action_t* getActionFromPi(const observation_t* state, action_t* pi); 
	action_t* getActionFromExpPi( const observation_t* state, action_t* pi); 
	action_t* randomAction(); 

	int guideToStartingPoint(const observation_t* current, const observation_t& goal); 
	const list<Trajectory*>* getTrainingData(); 
	const list<Trajectory*>* getTestingData(); 
	void save();		// saves training and testing data to files
	void save(const list<Trajectory*>* l, const char* path); 

	int EXP_STEPS;		//number of timesteps we run the exploration policy
	double gamma;		//discount factor. This is taken from the task spec string. make sure it's not 1
	double testingPercentage;	//a number between 0 and 1 that indicates how many perc. of data should be testing

	list<Trajectory*> history; 

	FILE* rawHistoryFile;		//this file will contain all the experiences including pi and pi_e trajs

private: 
	list<Trajectory*> trainingData; //these two structures only point to objs inside history. should not be deleted separately
	list<Trajectory*> testingData; 

	void splitData(); 

	action_t piAction; 
	action_t expAction;
	action_t tmpAction;					//just a temp structure to avoid creating and destroying it many times. 
	action_t* executedAction;			//this is always updated to the last action executed by the agent (it either points to piAction or expAction)

	observation_t lastObservation; 
	observation_t goalState;			//this is used in my current pi (if pi doesn't need it, it should be deleted)
	taskspec_t ts; 

	int expCounter;			//number of timesteps since we started executing pi_exp (-1 if we're not)
	Trajectory* currentTrajectory; 
	
};

#endif
