#include "MREAgent.h"

#include <time.h>

#include <cstddef>
#include <fstream>
#include <sstream>

//#include "LWPRLearner.h"
#include "OriginalTFGenerator.h"
#include "MultiPredictorTFGeneralizer.h"
#include "OriginalRewardGenerator.h"
//#include "LWRRewardGenerator.h"
//#include "KNNRewardGenerator.h"
//#include "MKNNRewardGenerator.h"
#include "FMLKRGeneralizer.h"
#include "KNNSimpleGeneralizer.h"

#include "FittedVISolver.h"
#include "DiscreteMDPSolver.h"
#include "UCTSolver.h"
#include "GridSolver.h"

#include "ParamReader.h"
//---------------------------global functions

#ifdef _DEBUG
   #define new MYDEBUG_NEW
#endif


int MREAgent::dimension; 

ENVIRONMENT MREAgent::environment;
#define		SAVEFILE_VERSION	2.0		//just a number to make sure we don't load other files
FILE* historyFile; 
#define		HISTORY_FILE_PATH	"data/history.out"


void logValueFunction(); 


// Construct the agent with all owned pointers zeroed so that the destructor
// and cleanup() can safely test them even if init() was never called.
MREAgent::MREAgent(void)
{
	historyFile = 0; 
	tfGens = 0; 
	rfGens = 0;
	mdpSolver = 0;	//BUG FIX: was left uninitialized; ~MREAgent() reads it, which is UB if init() never ran
	lastState = 0;	//same issue: cleanup() checks it before delete[]
	numberOfActions = 0; 
	stats.startingTime = time(NULL);	//wall-clock start, reported in cleanup()
}
// Release the learned-model components owned by the agent.
// tfGens/rfGens are zeroed in the constructor, so their null checks are safe.
// NOTE(review): mdpSolver is only assigned in initPlanner(); if init() was
// never called it is read uninitialized here — confirm construction order.
MREAgent::~MREAgent(void)
{
	if(tfGens)
		delete tfGens; 

	if (rfGens)
		delete rfGens; 

	if (mdpSolver)
		delete mdpSolver; 

//	delete states; 
}
// Initialize the agent for a new trial from the task-spec string `ts`
// (RL-Glue style, parsed by decode_taskspec). Resets per-trial statistics,
// loads parameters, builds the action map, and constructs the generalizers
// and planner. Exits the process if the task spec cannot be parsed.
void MREAgent::init(const char* ts)
{
	printf("init string '%s'\n", ts); 
	// Environment is selected by hand here; alternatives kept commented out.
//	environment = CARTPOLE; 
//	environment =BUMBLEBALL;  
	environment = PUDDLEWORLD; 
	// Reset per-trial statistics; `trial` itself accumulates across inits.
	stats.hasSeenShortEpisode = 0; 
	stats.trialAge=0; 
	stats.trialReward=0; 
	stats.trialEpisode = 0; 
	stats.trial++; 
	
	loadParam("param.in"); 
	history.clear(); 
	lastAction = 0; 

#ifdef USE_THREADS
	plannerThread=0; 
#endif

	//before creating any matlab related object, we close any old matlab window (to avoid cluttered graphs)
//	MatlabInterface::closeEngines(); 

	printf("init run %d...\n", stats.trial ); 
	if (decode_taskspec(&taskSpec, ts) != 0)
	{
		printf("Error: cannot parse the task spec\n"); 
		exit(1); 
	}
	
	// Observation dimensionality is shared class-wide (see static `dimension`).
	MREAgent::dimension = taskSpec.num_double_observations; 
	/*
	this part is changed if we want to use only parts of the available actions
	*/
	// Build the identity map from internal action index -> environment action id.
	int j=0; 
	for(int i= (int)taskSpec.int_actions[0].min; i <= (int)taskSpec.int_actions[0].max; i++, j++)
			mappedAction[j] = i;
	numberOfActions =taskSpec.int_actions[0].max - taskSpec.int_actions[0].min +1; //  (int) ( taskSpec.int_actions[0].max - taskSpec.int_actions[0].min+1); 

	printf("Actions are: "); 
	for(int i=0; i < numberOfActions; i++)
		printf("\t%d", mappedAction[i]); 
	printf("\n"); 

	lastState = 0; 
//	taskSpec.print();
	// Generalizers must exist before the planner, which captures them.
	initGeneralizers();
	ParamReader::print(); 


	initPlanner(); 


	if (ParamReader::LOAD_MODEL)
	{
		// NOTE: loadHistory() currently returns immediately (disabled).
		loadHistory("data/history.out"); 
	}
	if (ParamReader::SAVE_MODEL)
	{
		historyFile = fopen(HISTORY_FILE_PATH,"a"); //we append to the original file
	}

	//if the setup is planning, let's plan right now. 
	if (ParamReader::EXP_TYPE == PLANNING)
	{
		solveModel(0); //warning: MDPSolvers should be careful not to crash with null start state
	}

}

void MREAgent::loadHistory(const char* fname)
{
	return; //warning:
	printf("loading"); 
	FILE* fin = fopen(fname,"r");
	if (!fin)
	{
		printf("--------Error loading history: can't open the file\n"); 
		return;
	}
	int cnt = 0; 
try{	
	ifstream f;
	f.open(fname, ios::in); 
	Transition t; 
	t.start = new Observation_type[dimension];
	t.end  = new Observation_type[dimension];
	printf("Loading"); fflush(stdout); 
	while (f.good())
	{
		printf("."); fflush(stdout); 
		char line [35000]={0};
		f.getline(line, 35000);		//read one episode
		istringstream str(line); 

		if(strlen(line) > 0)
		{
			//read starting state 

			for(int i=0; i< dimension; i++)
				str >> t.start[i]; 

			//read steps 
			while(str.good()) {
				str >> t.action; 
				str >> t.reward; 
				t.reward -= ParamReader::rmax; 
				if (str.good())		//still in step
				{
					for(int i=0; i< dimension; i++)
						str >> t.end[i];
					//now we have the transition ready
					cnt++; 
					tfGens->learn(&t); 
					rfGens->addData(&t); 
				}else {				//reached the end of episode
					break;
				}
				Observation tmp = t.start; 
				t.start = t.end; 
				t.end = t.start ; 
			}
		}
	}
}
catch(...)
{
	printf("an error occured while reading history file\n"); 
}



/*	//read the transitions..
	long cntr =0; 
	while(! feof(fin))
	{
        double stmp; 
		int atmp; 

		//start
		Transition t; 
		t.start = new Observation_type[StateIndex::dimension]; 
		for(int i=0; i< StateIndex::dimension; i++)
		{
			fscanf(fin,"%lf ", &stmp); 

			t.start[i] = stmp; 
		}

		//action
		fscanf(fin,"%d ", &atmp); 
		t.action = atmp; 

		t.end = new Observation_type[StateIndex::dimension]; 
		for(int i=0; i< StateIndex::dimension; i++)
		{
			fscanf(fin,"%lf ", &stmp); 
			t.end[i] = stmp; 
		}

		//reward
		fscanf(fin,"%lf\n", &stmp); 
		t.reward = stmp; 

		//now we have the transition, lets add it to our model & FAs
		State& st = getStateEX(observation2stateindex(t.start)); 
		st.actions[t.action].updateTransCounts(observation2stateindex(t.end), t.reward ); 
		tfGens->learn(&t); 
		addToHistory(t);
		if (++cntr % 500 == 0)
		{
			printf("."); fflush(stdout); 
		}
	}
*/
	fclose(fin); 
	printf("done with %d samples \n", cnt); 
}



// Load runtime parameters from `fname` into the global ParamReader.
// A missing file is tolerated: we warn and fall back to defaults.
void MREAgent::loadParam(const char* fname)
{
	const bool loaded = ParamReader::load(fname);
	if (!loaded)
		printf("WARNING: could not open param file -------------\n"); 
}

// Instantiate the planner used by solveModel()/getBestAction().
// Alternative solvers are kept commented out for experimentation.
// Must run after initGeneralizers(): the solver captures tfGens/rfGens.
void MREAgent::initPlanner()
{
//	mdpSolver = new UCTSolver(this, tfGens, rfGens); 
//	mdpSolver = new FittedVISolver(this, tfGens, rfGens); //it should be called after initGen
	mdpSolver = new discreteSolver::DiscreteMDPSolver(this, tfGens, rfGens); 
//	mdpSolver = new GridSolver::GridMDPSolver(this, tfGens, rfGens); 
//	mdpSolver = new GridSolver::GridMDPForwardSolver(this, tfGens, rfGens); 
}

//initialize generalizers
void MREAgent::initGeneralizers()
{
	//building TF 

//	tfGens = new LWPRLearner(this, numberOfActions , taskSpec.obs_dim, taskSpec); 
	tfGens = new OriginalTFGenerator(this, numberOfActions , dimension, taskSpec); 
//	tfGens = new MultiPredictorTFGeneralizer(this, numberOfActions, dimension, taskSpec); 
//	tfGens = new FMLKRGeneralizer(this, numberOfActions, dimension, taskSpec); 
//	tfGens = new KNNSimpleGeneralizer(this, numberOfActions, dimension, taskSpec);
//	tfGens = new KDTreeLearner(this, numberOfActions , dimension, taskSpec); 

	rfGens = new OriginalRewardGenerator(this); 
//		rfGens = new MKNNRewardGenerator(this); 
//		rfGens = new LWRRewardGenerator(this); 
}

// Append transition `t` to the sliding experience window. Once the window
// exceeds HISTORY_WINDOW_SIZE the oldest transition is freed and dropped.
// The list owns the observation buffers of the transitions it stores.
void MREAgent::addToHistory(Transition& t)
{
	//param: window bound; size_t because history.size() is unsigned
	//(the old `int` made the comparison below signed/unsigned)
	const size_t HISTORY_WINDOW_SIZE = 100000;
//	double samplePercentage = 1; //how many percent is our experience vs. fake uniform samples (only used by the FittedVI code below)

	history.push_back(t); 

/*	//warning: if we use fittedVI, we should uncomment these 
	FittedVISolver* fvs = (FittedVISolver*) mdpSolver; //should remove this if we're using another mdpSolver
	if (drand48() < samplePercentage)
	{
		fvs->samples.push_back(KData(t.start,fvs->getStateValue(t.start ))); 
		
		//create fake observation
		if (t.end)
		{
			const int fakeSamples = 0;	//how many new points do we want?

			for(int i=1; i<= fakeSamples; i++)
			{
				double interval = fakeSamples + 1; 

				Observation tmp = new Observation_type[dimension]; 
				for(int j=0; j< dimension; j++)
				{
					tmp[j] = ( i*t.start[j] + (interval-i)*t.end[j])/interval; 
				}

				fvs->samples.push_back(KData(tmp,fvs->getStateValue(tmp)));	//warning: we should be careful. this object is never deleted! memory leak 
			}
		}
	}
*/

	if (history.size() > HISTORY_WINDOW_SIZE)
	{
		//the window owns these buffers; release them before dropping the entry
		if (history.front().start )
			delete[] history.front().start;

		if (history.front().end )
			delete[] history.front().end;

		history.pop_front(); 
//		fvs->samples.pop_front(); 
	}

}

void MREAgent::clearHistory()
{
	for(list<Transition>::iterator it = history.begin(); it != history.end(); it++)
	{
		if ( (*it).start)
			delete[] (*it).start ; 

		if ( (*it).end)
			delete[] (*it).end; 
	}
	history.clear(); 
}





// Episode start: called with the first observation `o` of a new episode.
// Logs the episode separator + start state to the history file, prints the
// PREVIOUS episode's statistics, resets per-episode counters, and returns the
// first (greedy) action.
Action MREAgent::start(Observation o)
{
//	printf("start\n"); 
//	printObservation(o); 
	if (ParamReader::SAVE_MODEL && historyFile)
	{
		//write this to a file: a newline separates episodes, then the start state
		fflush(historyFile); 
		fprintf(historyFile, "\n"); 
		for(int i=0; i< dimension; i++)
			fprintf(historyFile, "%lf ", o[i]); 
	}
//	printf("episode %d\tr: %4.2lf\tsteps: %d\tprEr: %4.3lf\rprEr: %4.3lf\tAvgR: %6.1lf\t age: %ld \n", stats.episode, stats.episodeReward , stats.episodeAge, predictionError, rewardPredictionError , stats.trialReward / stats.episode  , stats.age); 

	//BUG FIX: on the very first episode of a trial, trialEpisode is still 0
	//(reset in init()), so the unconditional division printed nan/inf.
	double avgTrialReward = (stats.trialEpisode != 0) ? stats.trialReward / stats.trialEpisode : 0.0;
	printf("episode #%d\tr: %4.2lf\tsteps: %d\tAvgR: %6.1lf\tage: %ld \n", stats.trialEpisode, stats.episodeReward , stats.episodeAge , avgTrialReward, stats.age  );
	fflush(stdout); 
	stats.trialEpisode++; 
	stats.episodeAge = 0; //starting a new episode
	stats.age ++; 
	stats.trialAge++; 
	stats.episodeReward=0; 
	copyObservation(o,lastState); 
	lastAction = getBestAction(lastState); //epsilon=0
	return mapAction(lastAction);
}
// One environment step: reward `r` for the previous action, new observation
// `o`. Logs to the history file, updates statistics, records the transition,
// feeds the generalizers, periodically replans, and returns the next action.
Action MREAgent::step(Reward r, Observation o)
{ 
//	usleep(100000); 


//	printf("step %d\n", stats.episodeAge); 
	if (ParamReader::SAVE_MODEL && historyFile)
	{
		//write this to a file: previous action, its reward, then the new state
		fprintf(historyFile,"%d %lf ", mapAction(lastAction), r); 
		for(int i=0; i< dimension; i++)
			fprintf(historyFile, "%lf ", o[i]); 
		fflush(historyFile); 
	}
//	printf("step %d\t\t", stats.episodeAge); 
//	printObservation(o); 
	//update statistics
	stats.overalReward+= r; 
	stats.trialReward += r; 
	stats.episodeReward += r; 
	stats.age++; 
	stats.trialAge++;
	stats.episodeAge++;
	//end statistics
	//add the transition to the history
	//(copyObservation allocates fresh buffers; the history list owns them)
	Transition t; 
	copyObservation(lastState, t.start ) ; 
	t.action = lastAction; 
	copyObservation(o, t.end);  
	t.reward = r; 

	//just testing: how good are we predicting? 
/*	Observation tmpObs = tfGens->predict(lastState, t.action); 
	if (tmpObs && (tmpObs[0] != t.end[0] || tmpObs[1] != t.end[1]))
		printf("prediction is wrong [%lf,%lf] -> [%lf,%lf]\n", t.end[0], t.end[1], tmpObs[0], tmpObs[1]); 
*/

	//just testing reward: compare the true reward against the generalizer's
	//prediction (with both the true end state and the start state as the target)
	double tmpR = rfGens->predict(t.start, t.action, t.end)  ; 
	double ttmpR = rfGens->predict(t.start, t.action, t.start) ; 
	if ( tmpR != t.reward)
		printf("different rewards-------- was %lf predicted: %lf or %lf\n", t.reward, tmpR, ttmpR); 
	//history takes ownership of t's buffers; learn from the stored copy
	addToHistory(t); 
	learnGeneralization(&history.back()); 
/*
	//warning: updating \eta of the KDTreeLearner
	const double A = 3000; 
	KDTreeLearner* ktl = (KDTreeLearner*) tfGens; 
for(int i=0; i< this->numberOfActions ; i++)
	for(int j=0; j< dimension; j++)	//set the parameters of each learner(each output)
	{
		double tmp = A / (A+stats.trialAge ); 
		if (tmp < ktl->learners[i][j].knownMaxLength ) 
			tmp = ktl->learners[i][j].knownMaxLength;
		ktl->learners[i][j].eta = tmp; 
	}	
*/


	//should we replan? (every AGNT_SOLVE_PERIOD steps, unless pure planning mode)
	if (stats.trialAge % ParamReader::AGNT_SOLVE_PERIOD == 0 && ParamReader::EXP_TYPE != PLANNING)
//	if (stats.trialAge ==ParamReader::AGNT_SOLVE_PERIOD  )
	{
#ifdef USE_THREADS 
		//threaded mode: kick off the solver once in the background
		if (! plannerThread)
			plannerThread = new boost::thread(boost::ref(*mdpSolver)); 
#else

		//"shouldRun.in" is an external kill-switch: if it exists and contains 1,
		//planning is skipped for this period
		FILE* stest = fopen("shouldRun.in", "r"); 
		if (stest)
		{
			int rbool =0;
			fscanf(stest, "%d", &rbool); 
			if (rbool != 1)
			{
				solveModel(o); 
//				logValueFunction(); 
			}else
				printf("skipping solvemodel because of shouldRun file\n"); 
			fclose(stest); 
		}else
		{
			solveModel(o); 
//			logValueFunction(); 
		}
#endif
	}
	Action result = getBestAction(o); 
	copyObservation(o, lastState);			//update lastState for next step
	lastAction = result; 
	
//	printf("action is: %d\n", lastAction); 
	return mapAction(result);
}
// Episode terminated: log the closing (action, reward) pair to the history
// file and fold the final reward into the running statistics.
void MREAgent::end(Reward r)
{

	if (ParamReader::SAVE_MODEL && historyFile)
	{
		//record the last action and its terminal reward
		fprintf(historyFile,"%d %lf ", mapAction(lastAction), r); 
	}

	//fold the terminal reward into every accumulator
	stats.overalReward += r;
	stats.trialReward += r;
	stats.episodeReward += r;

	//warning: update reward generalizer if we don't have access to the original one


}

// Clamp each coordinate of `o` (in place) into the [min, max] range the task
// spec declares for that dimension.
void MREAgent::bringObservation2bound(Observation o)
{
	for (int d = 0; d < dimension; d++)
	{
		if (o[d] > taskSpec.double_observations[d].max)
			o[d] = taskSpec.double_observations[d].max;
		else if (o[d] < taskSpec.double_observations[d].min)
			o[d] = taskSpec.double_observations[d].min;
	}
}

// Translate an internal action index in [0, numberOfActions) to the
// environment action id stored in mappedAction (built in init()).
Action MREAgent::mapAction(Action a)
{
	if (a < 0 || a > numberOfActions -1 )
	{
		printf("impossible------------------------------action is %d eage: %d\n", a, stats.episodeAge); 
		//BUG FIX: previously we warned but still indexed mappedAction[a] out of
		//bounds (and never checked a < 0); clamp into the valid range instead.
		a = (a < 0) ? 0 : numberOfActions - 1;
	}

	Action result = mappedAction[a]; 
	return result; 
}

// End-of-trial teardown: report final statistics, flush/close the history
// file and save the model if requested, stop the planner thread (threaded
// builds), and release the agent's owned memory.
void MREAgent::cleanup()
{
	//final episode summary (same format as start())
	printf("episode #%d\tr: %4.2lf\tsteps: %d\tAvgR: %6.1lf\tage: %ld \n", stats.trialEpisode, stats.episodeReward , stats.episodeAge , stats.trialReward / stats.trialEpisode, stats.age  );

	printf("-------total time spent is: %ld\n", time(NULL) - stats.startingTime); 

	if(ParamReader::SAVE_MODEL)
	{
		if (historyFile)
		{
			fclose(historyFile); 
		}
		tfGens->save(); 
//		saveHistory("data/history.out"); 
	}

//wait for planner to shut down if using thread
#ifdef USE_THREADS 
		if (plannerThread)
		{
			mdpSolver->stop(); 
			printf("waiting for planner to end\n"); 
			plannerThread->join();  
			printf("done\n"); 
		}
#endif




	//this code will write out the reward prediction at some grid points (used for drawing in matlab)

/*	FILE* tmp = fopen("data/rewardEstimates.out","w"); 
	Observation o = new double[2]; 
	for(double i=0; i<1; i+= 0.04) 
		for(double j=0; j<1; j+= 0.04)
		{
			o[0] = i; 
			o[1] = j; 
			double val = rfGens->predict(0,0,o); 
			fprintf(tmp, "%lf\t%lf\t%lf\n", i,j,val); 
		}
	fclose(tmp); 
*/

	//free up some memory
	if (lastState)
		delete[] lastState; 

	free_taskspec_struct(&taskSpec );

	clearHistory(); 	


#ifdef USE_THREADS 
		if ( plannerThread)
			delete plannerThread; 
#endif

}

// Agent name reported to the benchmark glue (presumably RL-Glue — confirm).
// Returns the `name` member; callers must not free it.
char* MREAgent::getName()
{
	return name; //"Nouri_Rutgers_ContinuousAgent";
}



// Delegate planning to the configured MDP solver (see initPlanner()).
// currentState may be null in pure-planning mode (see init()).
void MREAgent::solveModel(Observation currentState)
{
	mdpSolver->solveModel(currentState); 
}


// Feed one transition to the function approximators. Reward data is always
// recorded (when generalization is enabled at all); transition-model learning
// can be time-limited via ParamReader and is periodically complemented by a
// batch pass over the whole history window.
void MREAgent::learnGeneralization( Transition* t)
{
	if ( ! ParamReader::AGNT_LEARN_GENERALIZATION )
		return; 

	rfGens->addData(t); //warning: no end time for learning rewards

	//stop refining the transition model after the configured cutoff (-1 = never stop)
	if (stats.trialAge > ParamReader::AGNT_LEARN_GENERALIZATION_END_TIME && ParamReader::AGNT_LEARN_GENERALIZATION_END_TIME != -1)
		return; 

	//every AGNT_BATCH_LEARNING_TIME steps (until AGNT_BATCH_LEARNING_END_TIME,
	//-1 = no end), re-fit the transition model on the full history window
	if (stats.trialAge  % ParamReader::AGNT_BATCH_LEARNING_TIME == ParamReader::AGNT_BATCH_LEARNING_TIME - 1
		&& (stats.trialAge  < ParamReader::AGNT_BATCH_LEARNING_END_TIME || ParamReader::AGNT_BATCH_LEARNING_END_TIME ==-1) )	
	{
		printf("batch learning...\n");
		tfGens->batchLearn(history); 
		printf("done bl\n");
	}

	tfGens->learn(t); 
}





//this function makes some sample for this state using function appx. 


// Uniformly pick an internal action index in [0, numberOfActions).
Action MREAgent::randomAction()
{
	int pick = intRand(numberOfActions);

	// guard against intRand returning its (inclusive) upper bound
	if (pick == numberOfActions)
		pick = numberOfActions - 1;

	return pick;
}

// Render `o` as "[v0, v1, ...]" into a heap buffer the caller must delete[].
// BUG FIX: the buffer used to be a fixed 100 bytes; each field can take up to
// ~30 characters ("%4.4lf, "), so any dimension > 3 overflowed it. The buffer
// is now sized from `dimension`, and snprintf bounds each field write.
char* MREAgent::sprintObservation(Observation o)
{
	const int perField = 30;	//worst-case characters per formatted coordinate
	char* result = new char[dimension * perField + 3];	//+3 for '[', ']', NUL
	result[0] = 0; 
	strcat(result,"[");
	for(int i=0; i<dimension ; i++)
	{
		char tmp [perField];
		snprintf(tmp, sizeof(tmp), "%4.4lf, ", o[i]); 
		strcat(result, tmp);
	}
	strcat(result,"]");
	return result; 

//	return "N/A";
}//

// Debug-print an observation vector to stdout as "[v0, v1, ...]".
void MREAgent::printObservation(Observation o)
{
//	printf("observation[%d]: ", o); 
	printf("[");	//BUG FIX: the closing ']' below had no matching opening bracket
	for(int i=0; i< MREAgent::dimension; i++)
		printf("%lf, ",o[i]); 

	printf("]\n"); 
}//

// Allocating copy: returns a fresh buffer holding the contents of `o`.
// The caller owns (and must eventually delete[]) the result.
Observation MREAgent::copyObservation(const Observation o)
{
	Observation duplicate = 0;
	copyObservation(o, duplicate);
	return duplicate;
}

// Deep-copy `o` into `destination`, replacing whatever buffer `destination`
// previously held (it must be null or a heap array from new[]).
void MREAgent::copyObservation(const Observation o, Observation& destination)
{
	if (destination != 0)
		delete [] destination;

	destination = new Observation_type[MREAgent::dimension];

	for (int d = 0; d < MREAgent::dimension; d++)
		destination[d] = o[d];
}
/*
void task_spec_struct::print()
{
		printf(" version: %lf \n", version ); 
		printf(" episodic: %c \n", episodic); 
		printf(" obs_dim: %d \n", obs_dim ); 
		printf(" num_descrete_obs_dim: %d \n", num_discrete_obs_dims); 
		printf(" num_continuous_obs_dims: %d \n", num_continuous_obs_dims); 
		printf(" obs_types: '%s' \n", obs_types); 
		for(int i=0; i < obs_dim; i++)
			printf(" obs[%d] (min,max) : (%lf,%lf) \n", i ,  obs_mins[i], obs_maxs[i]); 

		printf(" action_dim: %d \n",action_dim ); 
		printf("action_types : '%s' \n",action_types ); 
		for(int i=0; i < action_dim; i++)
			printf(" action[%d] (min,max) : (%lf,%lf) \n", i ,  action_mins[i], action_maxs[i]); 


		printf("num_continuous_action_dims: %d \n", num_continuous_action_dims); 
		printf("num_discrete_actino_dims: %d \n", num_discrete_action_dims); 

//		printf(" Rmin: %f \n", Rmin); 
//		printf(" Rmax: %f \n",Rmax ); 
}
*/

// Return the greedy action for `state` by delegating to the planner.
// Several hand-coded warm-start policies for specific domains are kept
// commented out above the delegation.
Action MREAgent::getBestAction(Observation state)
{

	//hard-coded policy for acrobot
/*	if (stats.age < 1000)
	{
		if (state[1]>0)
			return 0; 
		else
			return 2;
	}
*/

	
//return randomAction();
 

/*
	//hard-coded policy for plasmacar 

	double pi = 3.141597;
	if (stats.age < 1000)
	{
		if (state[6] > pi/4)	return 0;
		else if (state[6] < -pi/4)	return 2;
		return lastAction; 
	}

*/
	return	mdpSolver->getBestAction(state); 
}

const char* MREAgent::message(const char* msg)
{
	string s = "TO=0 FROM=4 CMD=1 VALTYPE=0 VALS=100:" ; 
	for(int i=0; i< 100; i++)
		s.append("0.000:"); 
	printf("responding with:[%d]\n%s\n",s.length(), s.data()); 

	return s.data();  


/*

	ValueFunctionRequestMessage vmsg(msg); 

//	printf("Message summary: \n%s\n", vmsg.toString());  
	vmsg.toString(); 
	if (vmsg.cmd != KAgentQueryValuesForObs)
		return ""; 

	vector<Observation> os = vmsg.parse(); 

	ValueFunctionResponseMessage rmsg; 
	for(int i=0; i< vmsg.values.size(); i++)
	{
		double v = mdpSolver->getStateValue(vmsg.values[i]); 
		rmsg.values.push_back(v); 
	}

	string txt = rmsg.makeMessage(); 
	printf("responding with:\n%s\n", txt.data()); 

	return txt.data(); 
*/

}

