#include "StdAfx.h"
#include ".\robotcontroller.h"
#include <stdlib.h>
#include <memory.h>

#define DEBUG_ACTION_SELECTION 0

#if DEBUG_ACTION_SELECTION
static FILE * fDebugTrain = NULL;
#endif

// Constructs a controller that is initially at rest, with default
// speed/turn-rate limits applied through the clamping setters.
RobotController::RobotController(void)
{
	m_nCurrSpeed = 0;
	m_nCurrTurnRate = 0;

	SetMaxSpeed(2);
	SetMaxTurnRate(1);
}

RobotController::~RobotController(void)
{
	// Nothing to release here; members clean up after themselves.
	// (The shared debug FILE*, when enabled, is intentionally left open
	// for the process lifetime.)
}

// Records the action chosen at training step nTrainingIter and one-hot
// encodes it into the action-knob slots of that step's input vector
// (the slots immediately after the sensor inputs).
void RobotController::SetActionForTraining(int nTrainingIter, int nAction)
{
	double * pLfStepInputs = GetArrayForStep(nTrainingIter);
	double * pLfKnobs = pLfStepInputs + NUM_SENSORS_FOR_CONTROLLER;

	// Remember which action was taken at this step (used when training
	// per-action networks).
	m_arrNumActionSelected[nTrainingIter] = nAction;

	// Zero every knob slot, then light up the speed knob and the turn
	// knob for this action. The turn knobs start at offset 3-SIMPLE_MODEL.
	for(int i = 0; i < NUM_ACTION_KNOB_SETTINGS; ++i)
	{
		pLfKnobs[i] = 0.0;
	}
	pLfKnobs[ActionValToSpeedKnob(nAction)] = 1.0;
	pLfKnobs[3-SIMPLE_MODEL+ActionValToTurnKnob(nAction)] = 1.0;
}


#if USE_NET_PER_ACTION

// Evaluates each per-action network on the inputs recorded for step
// nTrainingIter and caches the scalar result in m_arrLfActionValues.
void RobotController::GetRewardsFromState(int nTrainingIter) 
{
	double * pLfInputs = GetArrayForStep(nTrainingIter);
	int iAction = 0;
	while(iAction < NUM_ACTIONS)
	{
		// First (and only relevant) output of this action's net.
		m_arrLfActionValues[iAction] = m_arrNetsPerAction[iAction].Eval(pLfInputs)[0];
		++iAction;
	}
}

#else // worry about debugging for per-action nets later
#if DEBUG_ACTION_SELECTION
// Debug variant: evaluates the shared network once per candidate action
// (one-hot encoding each action into the knob inputs) and logs every
// input vector plus the network output to a trace file.
// FIX: fopen can fail (e.g. C:\ not writable); the original then passed a
// NULL FILE* to fprintf/fflush, which is undefined behavior. Logging is
// now skipped when the file is unavailable, but the action values are
// still computed so callers are unaffected.
void RobotController::GetRewardsFromState(int nTrainingIter) 
{
	// Lazily open the shared trace file on first use.
	if(!fDebugTrain)
	{
		fDebugTrain = fopen("C:\\debug_learner.txt", "w");
	}

	double * arrLfInputs = GetArrayForStep(nTrainingIter);

	// Header row naming each input column, written per call as before.
	// (NUM_INTERNAL_SENSORS+NUM_GOAL_SENSORS+NUM_RANGEFINDERS)
	if(fDebugTrain)
	{
		int iDbg = 0;
		for(iDbg=0; iDbg < NUM_RANGEFINDERS; ++iDbg)
		{
			fprintf(fDebugTrain, "Range|");
		}
		for(; iDbg < NUM_RANGEFINDERS+NUM_GOAL_SENSORS; ++iDbg)
		{
			fprintf(fDebugTrain, "Goal|");
		}
		for(; iDbg < NUM_SENSORS_FOR_CONTROLLER; ++iDbg)
		{
			fprintf(fDebugTrain, "State|");
		}
		for(iDbg = 0; iDbg < NUM_ACTION_KNOB_SETTINGS; ++iDbg)
		{
			fprintf(fDebugTrain, "ActionKnob|");
		}
		fprintf(fDebugTrain, "NnetResult\n");
		fflush(fDebugTrain);
	}

	for(int iAction = 0; iAction < NUM_ACTIONS; ++iAction)
	{
		// One-hot encode this candidate action into the knob slots.
		for(int iKnob = 0; iKnob < NUM_ACTION_KNOB_SETTINGS; ++iKnob)
		{
			arrLfInputs[NUM_SENSORS_FOR_CONTROLLER+iKnob] = 0.0;
		}
		arrLfInputs[NUM_SENSORS_FOR_CONTROLLER+ActionValToSpeedKnob(iAction)] = 1.0;
		arrLfInputs[NUM_SENSORS_FOR_CONTROLLER+3-SIMPLE_MODEL+ActionValToTurnKnob(iAction)] = 1.0;

		m_arrLfActionValues[iAction] = m_Nnet.Eval(arrLfInputs)[0];

		if(fDebugTrain)
		{
			for(int iDbg = 0; iDbg < NUM_SENSORS_FOR_CONTROLLER + NUM_ACTION_KNOB_SETTINGS; ++iDbg)
			{
				fprintf(fDebugTrain, "%lf|", arrLfInputs[iDbg]);
			}
			fprintf(fDebugTrain, "%lf\n", m_arrLfActionValues[iAction]);
			fflush(fDebugTrain);
		}
	}
}

#else

// Evaluates the shared network once per candidate action: each action is
// one-hot encoded into the knob slots of the step's input vector before
// the evaluation, and the scalar result is cached in m_arrLfActionValues.
void RobotController::GetRewardsFromState(int nTrainingIter) 
{
	double * pLfInputs = GetArrayForStep(nTrainingIter);
	double * pLfKnobs = pLfInputs + NUM_SENSORS_FOR_CONTROLLER;

	for(int iAction = 0; iAction < NUM_ACTIONS; ++iAction)
	{
		// Clear every knob, then set this action's speed and turn knobs.
		// Turn knobs begin at offset 3-SIMPLE_MODEL within the knob slots.
		for(int i = 0; i < NUM_ACTION_KNOB_SETTINGS; ++i)
		{
			pLfKnobs[i] = 0.0;
		}
		pLfKnobs[ActionValToSpeedKnob(iAction)] = 1.0;
		pLfKnobs[3-SIMPLE_MODEL+ActionValToTurnKnob(iAction)] = 1.0;

		m_arrLfActionValues[iAction] = m_Nnet.Eval(pLfInputs)[0];
	}
}

#endif

#endif

// Applies the chosen action to the controller's motion state. The knob
// value is shifted by -1 to center it around zero before being applied
// as a speed / turn-rate delta.
void RobotController::UpdateStateFromAction(int nAction)
{
	const int nSpeedDelta = ActionValToSpeedKnob(nAction) - 1;
	const int nTurnDelta = ActionValToTurnKnob(nAction) - 1;

	BoostSpeed(nSpeedDelta);
	BoostTurnRate(nTurnDelta);
}

// Copies rangefinder readings into the front of the step's input vector,
// squashing each raw range into (0,1] via 1/(1+r) so large distances
// don't overpower learning. The #if 0 branch keeps the raw-copy
// alternative available for experiments.
void RobotController::SetRangeSensorData(int nTrainingIter, const double * arrLfSensorData)
{
	double * pLfInputs = GetArrayForStep(nTrainingIter);
#if 0
	memcpy(static_cast<void *>(pLfInputs), static_cast<const void *>(arrLfSensorData), NUM_RANGEFINDERS*sizeof(double));
#else
	for(int iSensor = 0; iSensor < NUM_RANGEFINDERS; ++iSensor)
	{
		// magic scale so these don't overpower learning
		pLfInputs[iSensor] = 1.0 / (1.0 + arrLfSensorData[iSensor]);
	}
#endif
}

// Writes the goal sensors for training step nTrainingIter: a squashed
// closeness measure 1/(1+dist^2) and the bearing to the goal, stored
// right after the rangefinder inputs.
void RobotController::SetGoalData(int nTrainingIter, double lfGoalParrallel, double lfGoalperp)
{
	double * pLfInputs = GetArrayForStep(nTrainingIter);
	const double lfDistSqrd =
		lfGoalParrallel*lfGoalParrallel + lfGoalperp*lfGoalperp;

	pLfInputs[NUM_RANGEFINDERS] = 1.0 / (1.0 + lfDistSqrd);
	pLfInputs[NUM_RANGEFINDERS+1] = atan2(lfGoalperp, lfGoalParrallel);
}

// Exposes the controller's own motion state (current speed and turn
// rate) as network inputs, placed after the rangefinder and goal slots.
// The simple model has no internal sensors, so it is a no-op there.
void RobotController::SetInternalSensors(int nTrainingIter)
{
#if SIMPLE_MODEL
	return;
#else
	double * pLfInputs = GetArrayForStep(nTrainingIter);
	pLfInputs[NUM_RANGEFINDERS+2] = m_nCurrSpeed;
	pLfInputs[NUM_RANGEFINDERS+3] = m_nCurrTurnRate;
#endif
}



// Stub: the discounted-sequence training loop below was commented out
// (its role is now covered by Train / TrainQLearning). Kept so existing
// callers still link; all parameters are currently unused.
void RobotController::TrainOnSequence(int m_nNumIters, double lfTrainRate, double lfDiscoutFactor)
{
	//double lfAdjust = lfValue - lfNeutralValue;
	//for(int i = m_nNumIters -1; i >= 0; --i)
	//{
	//	double lfIntendedOutput = lfNeutralValue + lfAdjust;
	//	double lfScratch;
	//	m_Nnet.Train(GetArrayForStep(i), &lfIntendedOutput, &lfScratch);
	//	lfAdjust *= lfDiscoutFactor;
	//}
}
// Serializes the neural network(s) to szFileName.
// Returns false when the file cannot be opened for writing, true otherwise.
bool RobotController::SaveNeuralNet(const char * szFileName)
{
	FILE * fOut = fopen(szFileName, "w");
	if(fOut == NULL)
	{
		return false;
	}

#if USE_NET_PER_ACTION
	// One network per action: written consecutively in action order.
	for(int i = 0; i < NUM_ACTIONS; ++i)
	{
		m_arrNetsPerAction[i].Serialize(fOut);
	}
#else
	m_Nnet.Serialize(fOut);
#endif

	fclose(fOut);
	return true;
}

// Restores the neural network(s) from szFileName (the mirror of
// SaveNeuralNet: nets are read back in the same order they were written).
// Returns false when the file cannot be opened for reading, true otherwise.
bool RobotController::LoadNeuralNet(const char * szFileName)
{
	FILE * fIn = fopen(szFileName, "r");
	if(fIn == NULL)
	{
		return false;
	}

#if USE_NET_PER_ACTION
	for(int i = 0; i < NUM_ACTIONS; ++i)
	{
		m_arrNetsPerAction[i].DeSerialize(fIn);
	}
#else
	m_Nnet.DeSerialize(fIn);
#endif

	fclose(fIn);
	return true;
}

// Trains the value network(s) over a recorded episode of m_nNumIters
// steps (parameter name kept for interface compatibility; it is a plain
// local). The final step is trained toward lfValue; walking backwards,
// each earlier step is trained toward lfNeutralValue plus the reward
// adjustment discounted by lfDiscoutFactor per step.
// FIXES vs. original:
//  - In the USE_NET_PER_ACTION loop, training used the FINAL step's
//    inputs (GetArrayForStep(m_nNumIters-1)) for every iteration; it now
//    uses step i's inputs, matching the single-net branch.
//  - SetLearnRate is now applied to the per-action nets BEFORE the first
//    Train call, so the final step no longer trains with a stale rate.
void RobotController::Train(int m_nNumIters, double lfTrainRate, double lfDiscoutFactor, double lfValue, double lfNeutralValue)
{
	double lfAdjust = lfValue - lfNeutralValue;
	double lfIntendedOutput = lfNeutralValue + lfAdjust;
	double lfScratch;

	int i;
#if USE_NET_PER_ACTION
	// Set the learn rate on every net that participated in this episode
	// before any training happens.
	for(i = 0; i < m_nNumIters; ++i)
	{
		m_arrNetsPerAction[m_arrNumActionSelected[i]].SetLearnRate(lfTrainRate);
	}
	m_arrNetsPerAction[m_arrNumActionSelected[m_nNumIters-1]].Train(
		GetArrayForStep(m_nNumIters -1), &lfIntendedOutput, &lfScratch);
#else
	m_Nnet.SetLearnRate(lfTrainRate);
	m_Nnet.Train(GetArrayForStep(m_nNumIters -1), &lfIntendedOutput, &lfScratch);
#endif

	// Back up through the episode, discounting the adjustment each step.
	for(i = m_nNumIters -2; i >= 0; --i)
	{
		lfAdjust *= lfDiscoutFactor;
		lfIntendedOutput = lfNeutralValue + lfAdjust;
#if USE_NET_PER_ACTION
		// Train on step i's inputs (was: the final step's inputs).
		m_arrNetsPerAction[m_arrNumActionSelected[i]].Train(
			GetArrayForStep(i), &lfIntendedOutput, &lfScratch);
#else
		m_Nnet.Train(GetArrayForStep(i), &lfIntendedOutput, &lfScratch);
#endif
	}
}

// Q-learning-style backup over a recorded episode of m_nNumIters steps:
// the final step is trained toward lfValue, and each earlier step i is
// trained toward the discounted value the net(s) predict for step i+1.
// FIX vs. original: in the USE_NET_PER_ACTION loop, training used the
// FINAL step's inputs (GetArrayForStep(m_nNumIters-1)) for every
// iteration; it now uses step i's inputs, matching the single-net branch.
// NOTE(review): lfTrainRate is accepted but never applied here — the
// nets train with whatever learn rate was set previously. Confirm
// whether a SetLearnRate(lfTrainRate) call is intended, as in Train().
void RobotController::TrainQLearning(int m_nNumIters, double lfTrainRate, double lfDiscoutFactor, double lfValue)
{
	double lfIntendedOutput = lfValue;
	double lfScratch;
#if USE_NET_PER_ACTION
	m_arrNetsPerAction[m_arrNumActionSelected[m_nNumIters-1]].Train(
		GetArrayForStep(m_nNumIters -1), &lfIntendedOutput, &lfScratch);
#else
	m_Nnet.Train(GetArrayForStep(m_nNumIters -1), &lfIntendedOutput, &lfScratch);
#endif

	for(int i = m_nNumIters -2; i >= 0; --i)
	{
#if USE_NET_PER_ACTION
		// Bootstrap target: discounted prediction for the NEXT step,
		// from the net of the action actually taken at that step.
		const double * pLfPrevResult =
			m_arrNetsPerAction[m_arrNumActionSelected[i+1]].Eval(GetArrayForStep(i+1));
		lfIntendedOutput = pLfPrevResult[0] * lfDiscoutFactor;
		// Train on step i's inputs (was: the final step's inputs).
		m_arrNetsPerAction[m_arrNumActionSelected[i]].Train(
			GetArrayForStep(i), &lfIntendedOutput, &lfScratch);
#else
		lfIntendedOutput = m_Nnet.Eval(GetArrayForStep(i+1))[0] * lfDiscoutFactor;
		m_Nnet.Train(GetArrayForStep(i), &lfIntendedOutput, &lfScratch);
#endif
	}
}

// Samples an action with probability proportional to
// value^lfMaxAnneal + lfBias (exp(log(v)*anneal) == v^anneal): builds a
// cumulative weight table, draws a uniform sample scaled to the total,
// and returns the first action whose cumulative weight exceeds it.
// lfBias adds a floor so low-valued actions keep some probability;
// lfMaxAnneal sharpens (>1) or flattens (<1) the distribution.
// NOTE(review): log() of a non-positive action value yields -inf/NaN,
// which would corrupt the table — presumably values are positive net
// outputs; confirm. If NaNs make every comparison fail, the loop falls
// through to the greedy SelectMaxAction() below.
int RobotController::SelectActionProbabalistically(double lfBias, double lfMaxAnneal) const
{

#if DEBUG_ACTION_SELECTION
	// Lazily open the shared debug trace file on first use.
	if(!fDebugTrain)
	{
		fDebugTrain = fopen("C:\\debug_learner.txt", "w");
	}
#endif

	// arrLfSelectHelper[i] = cumulative (annealed value + bias) up to i.
	double arrLfSelectHelper[NUM_ACTIONS];
	double lfToAdd = exp(log(m_arrLfActionValues[0])*lfMaxAnneal);
	arrLfSelectHelper[0] = lfToAdd+lfBias;
	for(int i = 1; i < NUM_ACTIONS; ++i)
	{
		double lfToAdd = exp(log(m_arrLfActionValues[i])*lfMaxAnneal);	
		arrLfSelectHelper[i] = arrLfSelectHelper[i-1]+lfBias+lfToAdd;
	}
	// Uniform draw in [0, total weight).
	double lfRand = rand()/((double)RAND_MAX);
	lfRand *= arrLfSelectHelper[NUM_ACTIONS-1];
	// Return the first action whose cumulative weight covers the draw.
	for(int i = 0; i < NUM_ACTIONS; ++i)
	{
		if(arrLfSelectHelper[i] > lfRand)
		{
#if DEBUG_ACTION_SELECTION
			fprintf(fDebugTrain, "Selecting action %d\n", i);
			fflush(fDebugTrain);
#endif
			return i;
		}
	}
	// Should be unreachable with a well-formed table; fall back to greedy.
	// ASSERT(0);
	return SelectMaxAction();
}

// Greedy action selection: returns the index of the highest-valued entry
// in m_arrLfActionValues (ties resolve to the lowest index).
int RobotController::SelectMaxAction() const
{
#if DEBUG_ACTION_SELECTION
	// Lazily open the shared debug trace file on first use.
	if(!fDebugTrain)
	{
		fDebugTrain = fopen("C:\\debug_learner.txt", "w");
	}
#endif
	int nBest = 0;
	for(int i = 1; i < NUM_ACTIONS; ++i)
	{
		if(m_arrLfActionValues[i] > m_arrLfActionValues[nBest])
		{
			nBest = i;
		}
	}
#if DEBUG_ACTION_SELECTION
	fprintf(fDebugTrain, "MaxSelecting action %d\n", nBest);
	fflush(fDebugTrain);
#endif
	return nBest;
}

// Clamps m_nCurrSpeed into [-m_nMaxSpeed, m_nMaxSpeed].
// Replaces the original branch-free bit-twiddling (sign extraction plus
// (x+|x|)>>1 as max(0,x)) with the equivalent readable comparison.
// Assumes m_nMaxSpeed >= 0 (the ctor sets it via SetMaxSpeed(2)).
void RobotController::ClampSpeed()
{
	if(m_nCurrSpeed > m_nMaxSpeed)
	{
		m_nCurrSpeed = m_nMaxSpeed;
	}
	else if(m_nCurrSpeed < -m_nMaxSpeed)
	{
		m_nCurrSpeed = -m_nMaxSpeed;
	}
}

void RobotController::ClampTurnRate()
{
	int nSign = 1-2*(m_nCurrTurnRate < 0);
	int nAbsTurnRate = abs(m_nCurrTurnRate);
	int nDeltaTurnRate = nAbsTurnRate - m_nMaxTurnRate;
	nDeltaTurnRate = (nDeltaTurnRate+abs(nDeltaTurnRate))>>1;
	m_nCurrTurnRate -= nSign*nDeltaTurnRate;
}