 
#ifndef		FITTED_VI_SOLVER
#define		FITTED_VI_SOLVER
#include <boost/thread/mutex.hpp>
#include <boost/unordered_map.hpp>

#include "MDPSolver.h"
#include <ANN/ANN.h>
#include "MatlabMatrix.h"
#include <list>
using namespace std; 
using namespace matlab; 

namespace planning
{

class BellmanBackuper; 

class FittedVISolver
	:public SlaveableMDPSolver
{
public: 
	// Fitted Value Iteration solver: maintains a fixed set of sample (basis)
	// points over the state space, with one BellmanBackuper per sample that
	// performs the value backup at that point.  Member definitions live in the
	// corresponding .cpp file.
	FittedVISolver(MREAgent* a, TFGenerator* tf, RFGeneralizer* rf); 
	~FittedVISolver(); 

	virtual Action getBestAction(Observation state);
	virtual void	solveModel(Observation currentState); 
	virtual double getStateValue(const Observation state);  
	virtual double getQValue(const Observation state, Action a); 
	virtual void operator()();		// functor entry point -- presumably run on a worker thread (boost::thread); confirm in .cpp
	virtual double getStateValueFast(const Observation state); 
protected:
	// Fills l with sampleSize points drawn from the state space.
	void generateUniformSamples(ANNpointArray& l, int sampleSize);
	bool getNextItem(Observation prev, Observation next, int n);		//this is a utility function
	// Converts nearest-neighbor distances into kernel (interpolation) weights.
	void computeKernels(double* dists, double* kernels);

	// Factory: builds the BellmanBackuper responsible for sample point s.
	BellmanBackuper* createBackuper(const Observation s); 
void clearSamples(); 
public:

	static int K;									// number of nearest neighbors used in value interpolation -- TODO confirm against .cpp
	int sampleSize;									//how many samples are we solving for 

	ANNpointArray samples;							//basis points for doing FVI
	ANNkd_tree* knnLearner;							// kd-tree over samples for nearest-neighbor lookup
	vector<BellmanBackuper*> solvers;				//each point has a UCTSolver object instead of the regular bellman backup
	friend class BellmanBackuper; 


	boost::mutex m_mutex;							// NOTE(review): presumably guards state shared with FittedVISolverSlave threads -- confirm locking discipline in .cpp

};


// Abstract interface for performing Bellman backups at a single state.
// Concrete strategies below: UCTBackuper, MonteCarloBackuper,
// SimpleBellmanBackuper.
class BellmanBackuper
{
public:
	BellmanBackuper(MDPSolver* p, const Observation st);
	virtual void setState(const Observation st); 
	virtual ~BellmanBackuper(); 
	virtual double update()=0;						//do one update, return the bellman residual 
	virtual double getQValue(Action a)=0; 
	virtual double getValue()=0; 
	virtual Action getBestAction()=0; 
public:
	MDPSolver* parent;		// non-owning back-pointer to the solver that created this backuper
	Observation state;		// the sample state this backuper is responsible for
}; 

// Bellman backup computed via UCT-style Monte-Carlo tree search rooted at
// the sample state: search() rolls out trajectories, selectAction() picks
// actions (UCB when greedy == false, presumably -- confirm in .cpp).
class UCTBackuper : public BellmanBackuper
{
public: 
	UCTBackuper(MDPSolver* p, const Observation st); 
	virtual ~UCTBackuper(); 
	virtual double update(); 
	virtual double getQValue(Action a); 
	virtual double getValue(); 
	virtual Action getBestAction(); 
	virtual void setState(const Observation o); 
public:

	int MAX_DEPTH;			//how deep each trajectory goes
	int MAX_SAMPLES;		//how many total samples allowed per update 

	// Tree statistics keyed by discretized state vectors (see discretize()).
	// NOTE(review): keys presumably also encode action and/or depth for Nsad/q
	// per the comments below -- confirm key construction in the .cpp.
	boost::unordered_map<std::vector<int>, int> Nsd;		//counts for each state node
	boost::unordered_map<std::vector<int>, int> Nsad;		//counts for each state/action node
	boost::unordered_map<std::vector<int>, double> q;		//q-value stored for each state/action/depth

	// Recursive rollout from obs at the given depth; samples counts the
	// simulation budget consumed.
	double search(int depth, Observation obs, int& samples);		
	Action selectAction(Observation obs, int depth, bool greedy); 

	// Maps a continuous observation to the integer key used by the maps above.
	std::vector<int> discretize(Observation obs); 

	//some stuff for ease of use
	taskspec_t* spec;		// task specification (non-owning)
	int obs_dim;			// dimensionality of the observation vector
	int action_number;		// number of discrete actions
	double VMAX;			// upper bound on state value -- TODO confirm how it is used

	//more stuff 
	vector<int> stated;		//the same as state, but discretized
}; 

// Bellman backup estimated by plain Monte-Carlo rollouts from the sample
// state (no tree statistics, unlike UCTBackuper).
class MonteCarloBackuper: public BellmanBackuper
{
public:
	MonteCarloBackuper(MDPSolver* p, const Observation st);
	virtual void setState(const Observation st); 
	virtual ~MonteCarloBackuper(); 
	virtual double update();						//do one update, return the bellman residual 
	virtual double getQValue(Action a); 
	virtual double getValue(); 
	virtual Action getBestAction(); 


	// Rollout starting with action a in state st; samples tracks the
	// simulation budget consumed.
	double search(Observation st, Action a, int depth, int& samples); 

public:

	int MAX_DEPTH;			//how deep each trajectory goes
	int MAX_SAMPLES;		//how many total samples allowed per update 
	double* q;				// per-action Q-value estimates -- NOTE(review): raw owning array, presumably sized by action count and freed in the dtor; confirm in .cpp

}; 

// Plain one-step Bellman backup (no sampling/search), the baseline strategy
// alongside the UCT and Monte-Carlo variants above.
class SimpleBellmanBackuper: public BellmanBackuper
{
public: 
	SimpleBellmanBackuper(MDPSolver* p, const Observation st); 
	virtual ~SimpleBellmanBackuper(); 
	virtual double update(); 
	virtual double getQValue(Action a); 
	virtual double getValue(); 
	virtual Action getBestAction(); 
public:
	double* qvalues;		// per-action Q-values -- NOTE(review): raw owning array, presumably allocated/freed in ctor/dtor; confirm in .cpp
};

// Worker functor that runs value-iteration sweeps on behalf of its parent
// FittedVISolver (e.g. as the body of a worker thread).
class FittedVISolverSlave
{
public:
	FittedVISolverSlave(FittedVISolver* p)
		: parent(p)
	{
	}

	// Functor entry point; simply delegates to doVI().
	void operator()()
	{
		doVI();
	}

	// Performs the value-iteration work; defined in the .cpp file.
	void doVI();

	FittedVISolver* parent;		// non-owning back-pointer to the owning solver
}; 

}//namespace


#endif
