#ifndef LEARNER_NIPS_HEADER
#define LEARNER_NIPS_HEADER

#include "types.h"
#include <list>
#include <map>
#include <vector>
#include <stdio.h>
#include "ParamReader.h"
#include "Util.h"

using namespace std; 

namespace GridSolver
{

// Type-alias and constant macros. Kept as #defines (not typedefs) to match
// the file's pre-C++11 style; they expand anywhere, including outside the
// GridSolver namespace.
#define		intPair		std::pair<StateIndex,std::pair<int,double> >	// state -> (count, reward)
#define		doublePair	std::pair<StateIndex,std::pair<double,double> >	// state -> (probability, reward)
#define		probMap		std::map<StateIndex, std::pair<double,double> > //second pair is probability/reward
#define		countMap	std::map<StateIndex, std::pair<int,double> >	// second pair is count/reward
#define		TransData	Transition*
#define	PI	3.14159265358979	// fixed: was 3.141597, a typo'd value of pi (3.1415926535...)


#define		MAX_DIMENSION			5			//warning: (make sure it doesn't go over 5, because in GridSolver.cpp we rely on this. search for 'depend5:' for locations )
#define		MAX_ACTION				10			//warning: upper bound on actions per state


// Multi-dimensional grid coordinate used as a state identifier.
// Only the first 'dimension' slots of 'index' are meaningful; the rest stay 0.
class StateIndex
{
public: 
	StateIndex(); 

	// Build an index from up to MAX_DIMENSION coordinates; unused ones default to 0.
	StateIndex(int a, int b=0, int c=0, int d=0, int e=0)
	{
		index[0] = a; 
		index[1] = b; 
		index[2] = c; 
		index[3] = d; 
		index[4] = e; 
	}

	// L1 (Manhattan) distance between two indices (defined in the .cpp).
	static double l1distance(const StateIndex& lhs, const StateIndex& rhs); 

	// Render as "[i0,i1,...,]" over the first 'dimension' coordinates.
	const string toString()const
	{
		string result = "["; 
		char tmp [80]; 
		for(int i=0; i< dimension; i++)
		{
			// snprintf instead of sprintf: bounded, cannot overflow tmp
			snprintf(tmp, sizeof(tmp), "%d,", index[i]);
			result = result.append(tmp); 
		}
		result = result.append("]"); 
		return result; 
	}

	void print()const 
	{
		// Reuse toString so the two representations can never drift apart.
		// (The old commented-out attempt passed std::string to printf, which
		// is undefined behavior; .c_str() makes it correct.)
		printf("%s\n", toString().c_str()); 
	}

	bool operator <(const StateIndex& rhs) const; 
	bool operator ==(const StateIndex& rhs) const; 
	bool operator !=(const StateIndex& rhs) const; 



	int index[MAX_DIMENSION];	// raw coordinates; slots >= dimension are padding
	static int dimension;		//this is the dimension that we use here
};

// A StateIndex paired with the index of an action taken in that state.
class StateActionIndex : public StateIndex
{
public:
	// Forward the coordinates to the base and record the action slot.
	StateActionIndex(int ai, int a, int b=0, int c=0, int d=0, int e=0)
		: StateIndex(a, b, c, d, e), actionIndex(ai)
	{
	}

	int actionIndex;	// which of the state's actions this refers to
};

class Transition 
{
public:
	Transition() { start = end = 0; } 

	Observation start; 
	Observation end; 
	double reward; 
	Action action; 
};


// Per-(state,action) learning statistics: visit counts, reward estimate,
// Q-value and an empirical transition model.
// if a stateAction is not known yet, 
// we should make sure we first estimate reward and transition function before 
// we use reward and transProbs data members
class StateAction{
public: 
	StateAction(); 

	static FILE* logFile;	// shared log sink for all StateAction instances

	double qvalue;	//q value of this state/action 
	bool isKnown; //have we tried this state/action enough?
	int totalVisits; //how many times we've tried this action
	double reward;	// estimated immediate reward (see computeReward)

	// All methods below are defined in the .cpp; descriptions inferred from
	// names/signatures — verify against the implementation.
	void makeKnown();	// presumably marks this state/action as known
	// Record a transition to state 'st' with reward 'rw' in count map 'l'.
	static void addToCountList(countMap& l, const StateIndex& st, double rw); 
	// Presumably converts raw counts in 'l' into transProbs probabilities.
	void buildTransitionProbs(countMap& l, int tvisits, double knownness); 
	bool updateTransCounts(const StateIndex& st, double rw); 
	double computeReward(); 
	double innerDistance(); 
	void print(); 

	probMap transProbs;		//transition probabilities to other states
	countMap	transCounts;		//transition counts

}; 

class State
{
public:
	State() 
	{
		actions = new StateAction [actionsNumber]; 
		value = 0; 

		policy = intRand(actionsNumber); 
		if (policy >= actionsNumber)	policy= actionsNumber -1 ; 
		isFictituous = false; 
	}

	~State(){
		delete[] actions; 
	}

	void print(); 

	static int actionsNumber; 
	static StateIndex fictituousState; //imaginary state index (with self loops of ParamReader::rmax) 
	static StateIndex sinkState; //imaginary state index(with self loops of 0)
	static long totalNumberOfStates;	

	StateIndex si;			//the index of this state in the agent
	StateAction * actions;	//an array of actions that are available for this state
	int policy; 
	bool isFictituous; 


	double value; 
};



} //namespace

#endif
