package com.evolved.automata.experimental;
import com.evolved.automata.*;
import com.evolved.automata.experimental.BehaviorDistribution.InferenceMethod;
import com.evolved.automata.experimental.bots.SRV1Robot;


import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.util.*;

import com.evolved.automata.experimental.tests.*;

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.*;

public class ProbabilisticTrainingAgent implements Serializer,Agent {

	
	// ******************************************
	//Begin of State Control Methods and Objects
	// ******************************************
	
	// States of the agent's decision state machine.  Each value keys into
	// j_StateMap; the handlers and their transitions are wired up in
	// InitializeStates().
	private enum StateDesc
	{
		INITIAL_ENTRY,
		LOADING_GENOME,
		INITIALIZING_DATASTRUCTURES,
		RELOAD_ENTRY,
		GETTING_CURRENT_TOTAL_STATE,
		UPDATING_AVOIDANCE_MAP_WITH_ACTION_QUEUE,
		UPDATING_ACTION_MAP_WITH_ACTION_QUEUE,
		CHECKING_IF_ACTION_QUEUE_AT_MAX_LENGTH,
		CLEARNING_ACTION_QUEUE,
		SELECTING_NEXT_ACTION,
		ENTRY_EXIT
	}
	
	// Maps each StateDesc to its handler; populated once by InitializeStates().
	HashMap<StateDesc, AgentState> j_StateMap;
	
	// Returns the handler registered for the given state (null if unregistered).
	private AgentState GetState(StateDesc desc)
	{
		return j_StateMap.get(desc);
	}
	
	// Registers (or replaces) the handler for the given state.
	private void SetState(StateDesc desc, AgentState state)
	{
		j_StateMap.put(desc, state);
	}
	
	
	// One node of the decision state machine: executes its work and returns
	// the next state to run.  The loop in ExecuteDecisionProcess drives this
	// until the ENTRY_EXIT state is reached.
	private interface AgentState
	{
		public AgentState GetNextState();
	}
	
	// *****************************************
	// End of State control methods and objects
	// *****************************************
	
	
	// ********************************
	// Begin of Genetic Related Fields
	// ********************************
	
	
	// Indices into the j_AdditionalGeneticInfo parameter array.  The values are
	// accessed via ordinal() (see unpackGeneticMaterial and CheckActionQueueAtMax),
	// so the declaration order is part of the serialized genome layout — do NOT
	// reorder these constants.
	private enum AddGeneticInfo
	{
		ACTION_HISTORY_LENGTH,
		STATE_HISTORY_LENGTH,
		ACTION_QUEUE_MAX_LENGTH,
		INITIAL_ACTION,
		UTILITY_TYPE
	}
	
	// Offset of the utility-type entry inside the genome's parameter block.
	// NOTE(review): not referenced within this chunk — confirm its users elsewhere.
	final int  j_TypeUtilityDataOffset=5;
	// Inhibitory/excitatory response distributions supplied by the genome.
	BehaviorDistribution j_InnateNegativeResponses=null;
	BehaviorDistribution j_InnatePositiveResponses=null;
	
	// Response distributions accumulated during this agent's lifetime.
	BehaviorDistribution j_LearnedNegativeResponses=null;
	BehaviorDistribution j_LearnedPositiveResponses=null;
	// Decay policy applied to the learned distributions; installed in
	// InitializeBehavioralDistributions().
	BehaviorDistribution.ExtinctionPolicy j_ExtinctionPolicy;
	
	// Bit-flag inference policies (StaticTools.INF_* constants) for the
	// inhibitory and excitatory lookups respectively.
	int j_NegInferencePolicy;
	int j_PosInferencePolicy;
	// Weight assigned to the previously chosen action when it is reused as a
	// fallback choice (see GetActionDistributionFromExcitations).
	double j_PastActionSelectionWeight=10.0;
	
	// Rolling buffer combining sensory states with executed actions.
	SequenceMaker j_StateActionHistory;
	SensoryMotorDataProvider j_Provider=null;
	
	// Action options supplied to ExecuteDecisionProcess for the current cycle.
	LinkedList<WeightedValue<Integer>> j_PossibleActions;

	// Multiplier that skews inference toward repeating the prior action.
	double j_PriorActionFavoritismSkewFraction=3.0;
	double j_ExtinctionWeight=.75;
	
	// Genome-supplied scalar parameters, indexed by AddGeneticInfo.ordinal().
	int[] j_AdditionalGeneticInfo;
	// Per-channel inference freedoms; the aggregate expands them across the
	// full state/action history window (see GetAggregateInferenceFreedom).
	double[] j_BaseInferentialFreedom;
	double[] j_AggregateInferentialFreedom;
	
	ProbabilisticGenome j_GeneticInfo;
	
	String j_UtilityType;
	// Flag read and cleared by SelectActionMethod to mark inferred decisions;
	// public static, so presumably set from outside this class — TODO confirm.
	public static boolean j_InferredDecision=false;
	
	// ******************************
	// End of Genetic Related Fields
	// ******************************
	
	// Selector telling WriteBehaviorDetailsToFile which distribution to dump.
	public enum BehaviorDistributions
	{
		LEARNED_POSITIVE,
		LEARNED_NEGATIVE,
		INNATE_POSITIVE,
		INNATE_NEGATIVE
	}
	
	
	// Coarse classification of the current utility, derived from the
	// UtilityAndPerferenceModule result in ExecuteDecisionProcess.
	private enum UtilityState
	{
		POSITIVE,
		NEGATIVE,
		NEUTRAL
	}
	
	// Optional delegate agent consulted when an external -2 action is injected.
	SubAgent j_SimpleAgent;
	// When true, the sub-agent ignores learned inhibitions when selecting.
	boolean j_SubAgentOverideP=true;
	
	UtilityState j_CurrentUtilityState;
	
	// Pending (state index, action index) pairs awaiting a reward/punishment
	// event; consumed by the UPDATING_* states and trimmed by
	// CHECKING_IF_ACTION_QUEUE_AT_MAX_LENGTH.
	LinkedList j_StateActionQueue;
	
	// NOTE(review): appears unused in this chunk — the set of available actions?
	LinkedList j_ActionList; // the set of available
	
	UtilityAndPerferenceModule j_UtilityInterpreter;
	UtilityAndPerferenceModule.Utility j_BaseUtility;
	// Output of the current decision cycle and the action of the previous one.
	Integer j_ActionChosen;
	Integer j_PreviousCompositeActionChosen;
	
	int[] j_CurrentStateData;
	int[] j_CurrentTotalState;
	int[] j_PreviousTotalState;
	// NOTE(review): appears unused in this chunk — verify before removal.
	double[] j_BaseClusterMaxSize;
	boolean j_ClearActionHistoryP;
	// Resume point for the state machine across decision cycles.
	AgentState j_PreviousState;
	int j_ActionIndexUpdateStep;
	WeightedValue<Integer> j_PastActionSelection;
	int j_PreviousStateIndex=-1;
	// Capacity-1 queue: SetActionExplicit blocks until SelectActionMethod
	// drains it with take() each cycle.
	LinkedBlockingQueue<Integer> j_ActionQueue;
	
	// When loading from a file, you must call the method
	// UpdateBaseInferentialFreedoms(provider.GetBaseSensoryInferentialFreedom());
	// manually
	// No-arg constructor used when the agent is to be Load()ed from a file.
	// Field initialization order matters: InitializeStates() must run before
	// GetState(INITIAL_ENTRY) can resolve.
	public ProbabilisticTrainingAgent()
	{
		j_SerializerHelper= new SerializerAdapter(this);
		j_ActionQueue = new LinkedBlockingQueue<Integer>(1);
		j_UtilityInterpreter=null;
		j_GeneticInfo=null;
		j_StateMap= new HashMap<StateDesc, AgentState>();
		
		j_ActionIndexUpdateStep=100;
		j_ExtinctionPolicy = new SimpleExtinctionPolicy(j_ActionIndexUpdateStep, j_ExtinctionWeight);
		// Need to make this defined in the genome function of the genome
		//interpreter = new 
		InitializeStates();
		j_PreviousState=GetState(StateDesc.INITIAL_ENTRY);
		j_NegInferencePolicy=StaticTools.INF_IF_MISSING_DATA_THEN_NO_MATCH|StaticTools.INF_IF_NO_MATCH_THEN_USE_PRIOR|StaticTools.INF_IF_NO_MATCH_THEN_USE_10_NEIBORHO0D;
		j_PosInferencePolicy=StaticTools.INF_IF_MATCH_FAVOR_PREVIOUS|StaticTools.INF_IF_MISSING_DATA_THEN_NO_MATCH|StaticTools.INF_IF_NO_MATCH_THEN_CLUSTER_10|StaticTools.INF_IF_NO_MATCH_THEN_USE_PRIOR;
		
	}
	
	/**
	 * Builds a new agent together with a freshly generated genome.
	 *
	 * @param name unique serialization name for this agent
	 * @param provider source of sensory/motor metadata (radii, action options)
	 * @param interpreter module classifying raw state into utility values
	 * @param actionHistoryLength number of past actions kept in the total state
	 * @param stateHistoryLength number of past sensory states kept in the total state
	 * @param actionQueueMaxLength cap on the pending state-action queue
	 * @param lastActionExecuted action id assumed to have been executed last
	 * @param stateBufferSize buffer size handed to the genome
	 */
	public ProbabilisticTrainingAgent(String name, SensoryMotorDataProvider provider, UtilityAndPerferenceModule interpreter, int actionHistoryLength, int stateHistoryLength, int actionQueueMaxLength, int lastActionExecuted, int stateBufferSize )
	{
		j_UniqueName=name;
		j_StateMap= new HashMap<StateDesc, AgentState>();
		j_SerializerHelper= new SerializerAdapter(this);
		// Integer.valueOf replaces the deprecated Integer(int) constructor.
		j_PreviousCompositeActionChosen=Integer.valueOf(lastActionExecuted);
		j_UtilityInterpreter=interpreter;
		j_Provider=provider;
		// InitializeStates() must run before GetState(INITIAL_ENTRY) can resolve.
		InitializeStates();
		j_PreviousState=GetState(StateDesc.INITIAL_ENTRY);
		j_ActionIndexUpdateStep=100;
		j_ExtinctionPolicy = new SimpleExtinctionPolicy(j_ActionIndexUpdateStep, j_ExtinctionWeight);
		// Capacity-1 queue: SetActionExplicit blocks until the decision loop drains it.
		j_ActionQueue = new LinkedBlockingQueue<Integer>(1);
		j_NegInferencePolicy=StaticTools.INF_IF_MISSING_DATA_THEN_NO_MATCH|StaticTools.INF_IF_NO_MATCH_THEN_USE_PRIOR|StaticTools.INF_IF_NO_MATCH_THEN_USE_10_NEIBORHO0D;
		j_PosInferencePolicy=StaticTools.INF_IF_MATCH_FAVOR_PREVIOUS|StaticTools.INF_IF_MISSING_DATA_THEN_NO_MATCH|StaticTools.INF_IF_NO_MATCH_THEN_CLUSTER_10|StaticTools.INF_IF_NO_MATCH_THEN_USE_PRIOR;

		// Radii of the total state span the full sensory and action history windows.
		int[] sensoryBaseRadii = provider.GetStateBaseRadii();
		int[] actionBaseRadii = provider.GetActionBaseRadii();
		int[] totalRadii = GetTotalSensoryRadii(sensoryBaseRadii,stateHistoryLength,actionBaseRadii,actionHistoryLength);

		j_GeneticInfo=new ProbabilisticGenome(totalRadii,provider.GetActionOptions(),stateBufferSize);
		PackClassVariablesIntoGenome(interpreter,actionHistoryLength, stateHistoryLength,actionQueueMaxLength, lastActionExecuted );
		UpdateBaseInferentialFreedoms(provider.GetBaseSensoryInferentialFreedom());
	}
	
	
	
	

	/**
	 * Builds an agent around an existing genome (e.g. an offspring produced by
	 * GetTotalGenome).
	 *
	 * @param name unique serialization name for this agent
	 * @param provider source of sensory/motor metadata
	 * @param genome pre-built genome to adopt
	 * @param interpreter module classifying raw state into utility values
	 * @param actionHistoryLength number of past actions kept in the total state
	 * @param stateHistoryLength number of past sensory states kept in the total state
	 * @param actionQueueMaxLength cap on the pending state-action queue
	 * @param lastActionExecuted action id assumed to have been executed last
	 */
	public ProbabilisticTrainingAgent(String name, SensoryMotorDataProvider provider, ProbabilisticGenome genome, UtilityAndPerferenceModule interpreter, int actionHistoryLength, int stateHistoryLength, int actionQueueMaxLength, int lastActionExecuted )
	{
		j_UniqueName=name;
		j_StateMap= new HashMap<StateDesc, AgentState>();
		j_SerializerHelper= new SerializerAdapter(this);
		// Integer.valueOf replaces the deprecated Integer(int) constructor.
		j_PreviousCompositeActionChosen=Integer.valueOf(lastActionExecuted);
		j_UtilityInterpreter=interpreter;
		j_GeneticInfo=genome;
		// InitializeStates() must run before GetState(INITIAL_ENTRY) can resolve.
		InitializeStates();
		j_PreviousState=GetState(StateDesc.INITIAL_ENTRY);
		j_ActionIndexUpdateStep=100;
		j_ExtinctionPolicy = new SimpleExtinctionPolicy(j_ActionIndexUpdateStep, j_ExtinctionWeight);
		PackClassVariablesIntoGenome(interpreter,actionHistoryLength, stateHistoryLength,actionQueueMaxLength, lastActionExecuted );
		// Capacity-1 queue: SetActionExplicit blocks until the decision loop drains it.
		j_ActionQueue = new LinkedBlockingQueue<Integer>(1);
		j_NegInferencePolicy=StaticTools.INF_IF_MISSING_DATA_THEN_NO_MATCH|StaticTools.INF_IF_NO_MATCH_THEN_USE_10_NEIBORHO0D;
		j_PosInferencePolicy=StaticTools.INF_IF_MATCH_FAVOR_PREVIOUS|StaticTools.INF_IF_MISSING_DATA_THEN_NO_MATCH|StaticTools.INF_IF_NO_MATCH_THEN_CLUSTER_10|StaticTools.INF_IF_NO_MATCH_THEN_USE_PRIOR;
		j_Provider=provider;
		UpdateBaseInferentialFreedoms(provider.GetBaseSensoryInferentialFreedom());
	}
	
	// Replaces the default INF_* bit-flag policies and base inference freedoms
	// that the constructors installed.
	public void SetInferenceParameters(int inhibitoryStateGapPolicy, int excitoryStateGapPolicy,double[] freedom)
	{
		j_NegInferencePolicy=inhibitoryStateGapPolicy;
		j_PosInferencePolicy=excitoryStateGapPolicy;
		UpdateBaseInferentialFreedoms(freedom);
	}
	
	// Controls whether the sub-agent may ignore learned inhibitions when it is
	// delegated to (see the -2 branch of SelectActionMethod).
	public void SetSubAgentOverideStatus(boolean overideLearnedBehaviorsP)
	{
		j_SubAgentOverideP = overideLearnedBehaviorsP;
	}
	// Installs the per-channel base inference freedoms.  Must be called manually
	// after Load()ing from a file (see the note above the no-arg constructor).
	public void UpdateBaseInferentialFreedoms(double[] freedom)
	{
		j_BaseInferentialFreedom=freedom;
	}

	// Installs the optional delegate agent used by the -2 external action code.
	public void SetSubAgent(SubAgent sub)
	{
		j_SimpleAgent=sub;
	}
	
	// After a positive stimulus is consumed, also clear the state-action queue
	// (see UpdatingActionMapWithActionQueue).  One-way switch: there is no
	// corresponding method to turn this behavior back off.
	public void ClearActionHistoryAfterStim()
	{
		j_ClearActionHistoryP=true;
	}
	
	// Installs the sensory/motor provider and refreshes the base inference
	// freedoms from it.
	public void SetSensoryMotorProvider(SensoryMotorDataProvider provider)
	{
		j_Provider=provider;
		UpdateBaseInferentialFreedoms(provider.GetBaseSensoryInferentialFreedom());
	}
	
	/**
	 * Dumps the requested behavior distribution to the given file.  Silently a
	 * no-op when that distribution has not been initialized yet.
	 *
	 * @param filefullname full path of the output file
	 * @param distrib which of the four distributions to write
	 */
	public void WriteBehaviorDetailsToFile(String filefullname, BehaviorDistributions distrib)
	{
		// Resolve the selected distribution first, then write it if present.
		BehaviorDistribution target;
		switch (distrib)
		{
			case LEARNED_POSITIVE:
				target = j_LearnedPositiveResponses;
				break;
			case LEARNED_NEGATIVE:
				target = j_LearnedNegativeResponses;
				break;
			case INNATE_POSITIVE:
				target = j_InnatePositiveResponses;
				break;
			case INNATE_NEGATIVE:
				target = j_InnateNegativeResponses;
				break;
			default:
				target = null;
		}
		if (target != null)
			target.WriteDetailedBehaviorsToFile(filefullname);
	}
	// Forwards a utility listener to the interpreter.  NOTE(review): throws
	// NullPointerException if called before an interpreter is installed (the
	// no-arg constructor leaves j_UtilityInterpreter null).
	public void SetUtilityListener(UtilityListener listener)
	{
		j_UtilityInterpreter.SetUtilityListener(listener);
	}
	/*
	 * Main process method
	 * 
	 */
	/**
	 * Main decision entry point.  Updates the utility interpreter with the new
	 * sensory data, then runs the internal state machine (resuming from where
	 * the previous call stopped) until it reaches ENTRY_EXIT, and returns the
	 * chosen action.
	 *
	 * @param currentStateData raw sensory state for this cycle
	 * @param actionOptions weighted actions the agent may choose from
	 * @return single-element array holding the chosen action id (element may be
	 *         null if no state set j_ActionChosen — TODO confirm intended)
	 */
	public Integer[] ExecuteDecisionProcess(int[] currentStateData, LinkedList<WeightedValue<Integer>> actionOptions)
	{
		j_ActionChosen = null;
		j_PossibleActions=actionOptions;
		// Update the utility interpreter with the new state, including the
		// previously executed action when one exists.
		j_CurrentStateData = currentStateData;
		if (j_PreviousCompositeActionChosen!=null)
		{
			j_BaseUtility = j_UtilityInterpreter.UpdateState(currentStateData, new int[]{j_PreviousCompositeActionChosen.intValue()});
		}
		else
		{
			j_BaseUtility = j_UtilityInterpreter.UpdateState(currentStateData, null);
		}
		// Map the interpreter's utility onto this class's coarse classification.
		switch (j_BaseUtility)
		{
			case POSITIVE:
				j_CurrentUtilityState = UtilityState.POSITIVE;
				break;
			case NEGATIVE:
				j_CurrentUtilityState = UtilityState.NEGATIVE;
				break;
			default:
				j_CurrentUtilityState = UtilityState.NEUTRAL;
		}
		// NOTE(review): the message assumes currentStateData[0] is a distance —
		// true for the robot use case, presumably; confirm for other providers.
		String dataLine = String.format("Current distance is: %1$s, utility is: %2$s", currentStateData[0],j_CurrentUtilityState.name());
		
		EvolutionaryEnvironment.DisplayTextLine(dataLine);
		// Run the state machine until it reaches ENTRY_EXIT.  'entering' forces
		// at least one transition so a resumed ENTRY_EXIT does not end the loop
		// immediately.
		AgentState currentState = j_PreviousState;
		boolean entering=true;
		while ((entering)||(currentState!=GetState(StateDesc.ENTRY_EXIT)))
		{
			entering=false;
			currentState=currentState.GetNextState();
		}
		j_PreviousState=currentState;
		return new Integer[]{j_ActionChosen};
	}
	
	// Adopts a new genome in-place: swaps in its innate distributions, resets the
	// learned ones, rebuilds the utility interpreter, and restarts the state
	// machine at INITIALIZING_DATASTRUCTURES.
	public void GeneSwap(Genome genome)
	{
		j_GeneticInfo=(ProbabilisticGenome)genome;
		j_InnateNegativeResponses = j_GeneticInfo.GetNegativeDistribution();
		j_InnatePositiveResponses = j_GeneticInfo.GetPositiveDistribution();
		InitializeLearnedResponses();
		// NOTE(review): this reads j_AdditionalGeneticInfo BEFORE
		// UnpackClassVariablesFromGenome runs; unlike LoadingGenome() it never
		// refreshes the array from the new genome — confirm that is intended.
		j_UtilityInterpreter = StaticTools.UtilityTypeNameToObject(j_AdditionalGeneticInfo[AddGeneticInfo.UTILITY_TYPE.ordinal()]);
		UnpackClassVariablesFromGenome(false);
		
		j_StateActionQueue = new LinkedList();
		j_PreviousState=GetState(StateDesc.INITIALIZING_DATASTRUCTURES);
	}

	/**
	 * Returns a two-element array: the parent genome (this agent's current one)
	 * followed by an offspring genome whose innate distributions merge in this
	 * agent's learned behavior.
	 *
	 * @see com.evolved.automata.experimental.Agent#GetTotalGenome()
	 */
	public Genome[] GetTotalGenome()
	{
		// Clone the parameter array so the offspring does not alias the parent's.
		int[] offspringParams = j_AdditionalGeneticInfo.clone();
		ProbabilisticGenome offspring = new ProbabilisticGenome();
		offspring.SetAdditionalParameters(offspringParams);
		offspring.SetInnateNegativeDistrib(j_InnateNegativeResponses.MergeClone(j_LearnedNegativeResponses, j_PossibleActions));
		offspring.SetInnatePositiveDistrib(j_InnatePositiveResponses.MergeClone(j_LearnedPositiveResponses, j_PossibleActions));
		offspring.SetFitness(j_GeneticInfo.GetFitness());
		return new Genome[]{j_GeneticInfo, offspring};
	}
	
	/**
	 * Externally injects the next action for the agent.  Blocks until the
	 * single-slot action queue has room (SelectActionMethod drains it via
	 * take() every decision cycle).
	 *
	 * @param actionId action code to enqueue; per SelectActionMethod, -1 lets
	 *                 the agent choose its own action and -2 delegates to the
	 *                 sub-agent
	 */
	public void SetActionExplicit(int actionId)
	{
		try
		{
			// Integer.valueOf replaces the deprecated Integer(int) constructor.
			j_ActionQueue.put(Integer.valueOf(actionId));
		}
		catch (InterruptedException e)
		{
			// put() only throws InterruptedException; restore the interrupt
			// status instead of silently swallowing it.
			Thread.currentThread().interrupt();
			System.out.println("Error");
		}
	}
	
	
	// Creates empty learned distributions shaped like the innate ones (same
	// state radii and allocation sets).  Requires the innate distributions to
	// have been loaded first.
	private void InitializeLearnedResponses()
	{
		j_LearnedNegativeResponses = new BehaviorDistribution(j_InnateNegativeResponses.GetStateRadii(),j_InnateNegativeResponses.GetAllocationSet() );
		j_LearnedPositiveResponses = new BehaviorDistribution(j_InnatePositiveResponses.GetStateRadii(),j_InnatePositiveResponses.GetAllocationSet());
	}
	
	
	/* Expands the per-channel base freedoms across the full history window,
	 * mirroring the slot layout of GetTotalSensoryRadii: for each sensory
	 * channel i, stateHistoryLength consecutive slots holding baseFreedom[i];
	 * then for each action channel i, actionHistoryLength slots holding
	 * baseFreedom[stateRadius+i].
	 *
	 * Currently this only supports a single action (ExpandAction(0)); the
	 * original author notes it should be generalized like GetTotalSensoryRadii.
	 * NOTE(review): assumes baseFreedom has length stateRadius + actionRadius —
	 * confirm against the provider's GetBaseSensoryInferentialFreedom().
	 */
	private double[] GetAggregateInferenceFreedom(double[] baseFreedom)
	{
		int stateRadius = j_CurrentStateData.length;
		// Without a provider, assume a single one-component action.
		int[] actions = (j_Provider!=null)?j_Provider.ExpandAction(0):new int[1];
		int actionRadius = actions.length;
		int stateHistoryIndex=AddGeneticInfo.STATE_HISTORY_LENGTH.ordinal();
		int actionHistoryIndex=AddGeneticInfo.ACTION_HISTORY_LENGTH.ordinal();
		int stateHistoryLength=j_AdditionalGeneticInfo[stateHistoryIndex];
		int actionHistoryLength=j_AdditionalGeneticInfo[actionHistoryIndex];
		double[] aggregate = new double[stateHistoryLength*stateRadius+actionRadius*actionHistoryLength];
		
		// Sensory channels: channel i occupies slots [i*len, (i+1)*len).
		for (int i=0;i<stateRadius;i++)
			for (int j=0;j<stateHistoryLength;j++)
				aggregate[j+stateHistoryLength*i]=baseFreedom[i];
		// Action channels follow after all sensory slots.
		for (int i=0;i<actionRadius;i++)
			for (int j=0;j<actionHistoryLength;j++)
				aggregate[stateRadius*stateHistoryLength+j+actionHistoryLength*i]=baseFreedom[stateRadius+i];

		return aggregate;
	}
	
	/**
	 * Builds the combined radius array for the total state.  Layout: for each
	 * sensory channel, {@code sensoryHistoryLength} consecutive slots holding
	 * that channel's radius, followed by {@code actionHistoryLength} slots per
	 * action channel.
	 */
	private int[] GetTotalSensoryRadii(int[] sensoryBaseRadii,int sensoryHistoryLength, int[] actionBaseRadii,int actionHistoryLength )
	{
		int sensoryCount = sensoryBaseRadii.length;
		int actionCount = actionBaseRadii.length;
		// All action slots start after the last sensory slot.
		int actionOffset = sensoryCount * sensoryHistoryLength;
		int[] combined = new int[actionOffset + actionCount * actionHistoryLength];
		for (int channel = 0; channel < sensoryCount; channel++)
		{
			int base = sensoryHistoryLength * channel;
			for (int step = 0; step < sensoryHistoryLength; step++)
				combined[base + step] = sensoryBaseRadii[channel];
		}
		for (int channel = 0; channel < actionCount; channel++)
		{
			int base = actionOffset + actionHistoryLength * channel;
			for (int step = 0; step < actionHistoryLength; step++)
				combined[base + step] = actionBaseRadii[channel];
		}
		return combined;
	}
	
	// Wires up the decision state machine: one anonymous AgentState per
	// StateDesc.  Inline states simply chain to another state; the rest
	// delegate to the named private methods below.
	private void InitializeStates()
	{
		// Fresh agents start here, then load the genome.
		SetState(StateDesc.INITIAL_ENTRY, new AgentState()
			{
				public AgentState GetNextState()
				{
					//StaticTools.TraceLog("INITIAL_ENTRY");
					return GetState(StateDesc.LOADING_GENOME);
				}
				
			}
		
		);
		
		
		SetState(StateDesc.LOADING_GENOME, new AgentState()
			{
				public AgentState GetNextState()
				{
					return LoadingGenome();
				}
				
			}
		);
		
		
		SetState(StateDesc.INITIALIZING_DATASTRUCTURES, new AgentState()
			{
				public AgentState GetNextState()
				{
					return InitializingDataStructures();
				}
				
			}
		);
		
		// Entry point after deserialization (see ReloadInitialization).
		SetState(StateDesc.RELOAD_ENTRY, new AgentState()
			{
				public AgentState GetNextState()
				{
					return ReloadInitialization();
				}
				
			}
		);
		
		
		SetState(StateDesc.GETTING_CURRENT_TOTAL_STATE, new AgentState()
			{
				public AgentState GetNextState()
				{
					return GettingCurrentTotalState();
				}
				
			}
		);
	
		// Reward/punishment bookkeeping states.
		
		SetState(StateDesc.UPDATING_AVOIDANCE_MAP_WITH_ACTION_QUEUE, new AgentState()
			{
				public AgentState GetNextState()
				{
					return UpdateAvoidanceMethod();
				}
				
			}
		);
		
		SetState(StateDesc.UPDATING_ACTION_MAP_WITH_ACTION_QUEUE, new AgentState()
			{
				public AgentState GetNextState()
				{
					return UpdatingActionMapWithActionQueue();
				}
				
			}
		);
	
		SetState(StateDesc.CHECKING_IF_ACTION_QUEUE_AT_MAX_LENGTH, new AgentState()
			{
				public AgentState GetNextState()
				{
					return CheckActionQueueAtMax();
				}
				
			}
		);
		
		// Discards all pending state-action pairs.
		SetState(StateDesc.CLEARNING_ACTION_QUEUE, new AgentState()
			{
				public AgentState GetNextState()
				{
					//StaticTools.TraceLog("CLEARNING_ACTION_QUEUE");
					j_StateActionQueue = new LinkedList();
					return GetState(StateDesc.SELECTING_NEXT_ACTION);
				}
				
			}
		);
		
		SetState(StateDesc.SELECTING_NEXT_ACTION, new AgentState()
			{
				public AgentState GetNextState()
				{
					return SelectActionMethod();
				}
				
			}
		);
		
		// ENTRY_EXIT ends one decision cycle; the next cycle resumes from it,
		// so it chains straight to GETTING_CURRENT_TOTAL_STATE.
		SetState(StateDesc.ENTRY_EXIT, new AgentState()
			{
				public AgentState GetNextState()
				{
					//StaticTools.TraceLog("ENTRY_EXIT");
					return GetState(StateDesc.GETTING_CURRENT_TOTAL_STATE);
				}
				
			}
		);
	}
	
	// Pulls the scalar parameters and innate distributions out of the genome,
	// then moves on to data-structure initialization.
	private AgentState LoadingGenome()
	{
		//StaticTools.TraceLog("LOADING_GENOME");
		j_AdditionalGeneticInfo=j_GeneticInfo.GetAdditionalParameters();
		j_InnateNegativeResponses = j_GeneticInfo.GetNegativeDistribution();
		j_InnatePositiveResponses = j_GeneticInfo.GetPositiveDistribution();
		
		return GetState(StateDesc.INITIALIZING_DATASTRUCTURES);
	}
	
	
	// 
	// One-time setup after the genome is loaded: unpack genome parameters,
	// create empty learned distributions, and configure all distributions.
	private AgentState InitializingDataStructures()
	{
		unpackGeneticMaterial();
		InitializeLearnedResponses();
		
		InitializeBehavioralDistributions();
		return GetState(StateDesc.GETTING_CURRENT_TOTAL_STATE);
	}
	
	// Resets the state-action queue and builds the rolling state/action history
	// buffer sized by the genome's history-length parameters.
	private void unpackGeneticMaterial()
	{
		// Without a provider, assume a single one-component action.
		int[] actions = (j_Provider!=null)?j_Provider.ExpandAction(0):new int[1];
		j_StateActionQueue = new LinkedList();
		int stateHistoryIndex=AddGeneticInfo.STATE_HISTORY_LENGTH.ordinal();
		int actionHistoryIndex=AddGeneticInfo.ACTION_HISTORY_LENGTH.ordinal();
		int stateHistoryLength=j_AdditionalGeneticInfo[stateHistoryIndex];
		int actionHistoryLength=j_AdditionalGeneticInfo[actionHistoryIndex];
		j_StateActionHistory = new SequenceMaker(j_CurrentStateData.length,stateHistoryLength,actions.length,actionHistoryLength,SequenceMaker.InitializationPolicy.EXTEND_FIRST_POINT);

	}
	
	// Pushes the aggregate inference freedoms into all four distributions and
	// attaches the extinction (decay) policy to the learned ones only.
	private void InitializeBehavioralDistributions()
	{
		j_AggregateInferentialFreedom = GetAggregateInferenceFreedom(j_BaseInferentialFreedom);
		j_InnateNegativeResponses.SetBaseInferentialFreedom(j_AggregateInferentialFreedom);
		j_InnatePositiveResponses.SetBaseInferentialFreedom(j_AggregateInferentialFreedom);
		j_LearnedNegativeResponses.SetBaseInferentialFreedom(j_AggregateInferentialFreedom);
		j_LearnedNegativeResponses.SetExtinctionPolicy(j_ExtinctionPolicy);
		j_LearnedPositiveResponses.SetBaseInferentialFreedom(j_AggregateInferentialFreedom);
		j_LearnedPositiveResponses.SetExtinctionPolicy(j_ExtinctionPolicy);
	}
	
	
	// Like InitializingDataStructures, but keeps the learned distributions that
	// were restored by deserialization instead of recreating them empty.
	private AgentState ReloadInitialization()
	{
		unpackGeneticMaterial();
		InitializeBehavioralDistributions();
		return GetState(StateDesc.GETTING_CURRENT_TOTAL_STATE);
	}
	
	
	// Appends the current sensory data and the previously executed action to the
	// rolling history, then branches on the current utility: negative utility
	// punishes the queued pairs, positive rewards them, neutral just trims the
	// queue.
	private AgentState GettingCurrentTotalState()
	{
		//StaticTools.TraceLog("GETTING_CURRENT_TOTAL_STATE")
		// NOTE(review): j_PreviousCompositeActionChosen is dereferenced without a
		// null check (ExecuteDecisionProcess tolerates null) — confirm it can
		// never be null by the time this state runs.
		j_CurrentTotalState=j_StateActionHistory.AddStateActionGroups(j_CurrentStateData, (j_Provider!=null)?j_Provider.ExpandAction(j_PreviousCompositeActionChosen.intValue()):new int[]{j_PreviousCompositeActionChosen.intValue()});
		
		switch (j_CurrentUtilityState)
		{
			case NEGATIVE:
				return GetState(StateDesc.UPDATING_AVOIDANCE_MAP_WITH_ACTION_QUEUE);
			case POSITIVE:
				return GetState(StateDesc.UPDATING_ACTION_MAP_WITH_ACTION_QUEUE);
			default:
				return GetState(StateDesc.CHECKING_IF_ACTION_QUEUE_AT_MAX_LENGTH);
		}
	}
	
	/**
	 * Trims the pending state-action queue down to the genetically-defined cap
	 * before action selection.
	 *
	 * The original version removed at most one entry per cycle (the author's
	 * own "Need to do this much better" note), which let the queue stay over
	 * the cap when several entries had accumulated; a while-loop enforces the
	 * cap fully, dropping the oldest entries first.
	 */
	private AgentState CheckActionQueueAtMax()
	{
		//StaticTools.TraceLog("CHECKING_IF_ACTION_QUEUE_AT_MAX_LENGTH");
		int stateActionQueueMaxLengthIndex=AddGeneticInfo.ACTION_QUEUE_MAX_LENGTH.ordinal();
		int stateActionQueueMaxLength=j_AdditionalGeneticInfo[stateActionQueueMaxLengthIndex];
		
		// Drop oldest entries until the queue respects its cap.
		while (j_StateActionQueue.size()>stateActionQueueMaxLength)
		{
			j_StateActionQueue.removeFirst();
		}
		return GetState(StateDesc.SELECTING_NEXT_ACTION);
	}
	
	/**
	 * Positive-utility handler: reinforces the learned excitatory distribution
	 * with every queued (state index, action index) pair, then either clears
	 * the queue (when configured via ClearActionHistoryAfterStim) or proceeds
	 * straight to action selection.
	 */
	private AgentState UpdatingActionMapWithActionQueue()
	{
		//StaticTools.TraceLog("UPDATING_ACTION_MAP_WITH_ACTION_QUEUE");
		for (Object entry : j_StateActionQueue)
		{
			int[] stateAction = (int[]) entry;
			j_LearnedPositiveResponses.IncrementActionDistribution(stateAction[0], stateAction[1], j_ActionIndexUpdateStep);
		}
		
		return j_ClearActionHistoryP
				? GetState(StateDesc.CLEARNING_ACTION_QUEUE)
				: GetState(StateDesc.SELECTING_NEXT_ACTION);
	}
	
	/**
	 * Negative-utility handler: strengthens the learned inhibitory distribution
	 * with every queued (state index, action index) pair, then always clears
	 * the queue.
	 */
	private AgentState UpdateAvoidanceMethod()
	{
		//StaticTools.TraceLog("UPDATING_AVOIDANCE_MAP_WITH_ACTION_QUEUE");
		for (Object entry : j_StateActionQueue)
		{
			int[] stateAction = (int[]) entry;
			j_LearnedNegativeResponses.IncrementActionDistribution(stateAction[0], stateAction[1], j_ActionIndexUpdateStep);
		}
		return GetState(StateDesc.CLEARNING_ACTION_QUEUE);
	}
	
	// Core action selection: combines innate/learned inhibitions and
	// excitations into a weighted distribution, samples an action, then lets an
	// externally injected action (via SetActionExplicit) override the choice.
	// Finally records the (state, action) pair in the pending queue.
	private AgentState SelectActionMethod()
	{
		//StaticTools.TraceLog("SELECTING_NEXT_ACTION");
		LinkedList<WeightedValue<Integer>> innatePositiveActionSet=null, totalPositiveActionSet=null, totalChoices=null, effectiveChoices=null;
		LinkedList<WeightedValue<Integer>> innateNegativeActionSet=null, learnedNegativeActionSet=null;
		// NOTE(review): 'out' is written but never read — it looks like a
		// debugger-inspection hook for AITools.DistribViewer.
		int[][] out=null;
		// 1: Get innate inhibitions 

		innateNegativeActionSet= GetActionDistributionFromInhibitions(j_InnateNegativeResponses, j_NegInferencePolicy);
		// 2: Get learned inhibitions
		out = AITools.DistribViewer(innateNegativeActionSet); 
		learnedNegativeActionSet = GetActionDistributionFromInhibitions(j_LearnedNegativeResponses, j_NegInferencePolicy);
		out = AITools.DistribViewer(learnedNegativeActionSet); 
		// 3: add learned inhibitions to innate inhibitions if possible
		effectiveChoices=AITools.FirstNonNull(
				AITools.MergeLists(innateNegativeActionSet, learnedNegativeActionSet),
				innateNegativeActionSet,
				learnedNegativeActionSet);
			
		out = AITools.DistribViewer(effectiveChoices); 
		// 4 Get Innate Excitatory responses
		innatePositiveActionSet = GetActionDistributionFromExcitations(j_InnatePositiveResponses, j_PosInferencePolicy,true);
		out = AITools.DistribViewer(innatePositiveActionSet); 
		// 5 Combine with learned excitatory responses
		totalPositiveActionSet=AITools.MergeLists(
				innatePositiveActionSet, 
				GetActionDistributionFromExcitations(j_LearnedPositiveResponses, j_PosInferencePolicy, false));
		
		
		out = AITools.DistribViewer(totalPositiveActionSet); 
		// 6: Net responses = excitations minus inhibitions (falling back to the
		// raw excitations when the subtraction yields nothing).
		totalChoices = AITools.FirstNonNull(
				AITools.MergeSubtractLists(
					totalPositiveActionSet, 
					effectiveChoices,0.0)
				,
				totalPositiveActionSet);
		
		out = AITools.DistribViewer(totalChoices); 
		
		// Restrict to the actions the caller actually offered this cycle.
		totalChoices=AITools.ListOveride(j_PossibleActions, totalChoices);
		out = AITools.DistribViewer(totalChoices); 
		WeightedValue<Integer> action;
		action = AITools.ChooseWeightedRandom(totalChoices, true);
		j_PastActionSelection=action;
		// Block until an external action code arrives (SetActionExplicit).
		Integer externalAction=new Integer(0);
		try
		{
			externalAction = j_ActionQueue.take();
		}
		catch (Exception e)
		{
			
		}
		
		switch (externalAction.intValue())
		{
			case -2:
				// Delegate to the sub-agent when present; with no sub-agent
				// this intentionally falls through to the -1 (self-select) case.
				if (j_SimpleAgent!=null)
				{	
					if (j_SubAgentOverideP)
					{
						j_ActionChosen =j_SimpleAgent.SelectAction(j_CurrentTotalState,j_BaseUtility ,j_PossibleActions);
					}
					else
					{
						// Sub-agent must respect the learned/innate inhibitions.
						j_ActionChosen =j_SimpleAgent.SelectAction(j_CurrentTotalState,j_BaseUtility ,AITools.FirstNonNull(AITools.MergeSubtractLists(j_PossibleActions,effectiveChoices,0.0), j_PossibleActions));
					}
					break;
				}
			case -1: // allowing robot to chose next action
				j_ActionChosen=action.GetValue();
				break;
			default:
				// Any other code is taken literally as the action to execute.
				j_ActionChosen=externalAction;
		}
		
		
		j_PreviousCompositeActionChosen=j_ActionChosen;
		if (j_InferredDecision)
			StaticTools.TraceLog(String.format("**DECISION: %1$s -> %2$s", StaticTools.WriteArrayAsStringList(j_CurrentTotalState, " "),j_ActionChosen.intValue() ));
		else
			StaticTools.TraceLog(String.format("DECISION: %1$s -> %2$s", StaticTools.WriteArrayAsStringList(j_CurrentTotalState, " "),j_ActionChosen.intValue() ));
		j_InferredDecision=false;
		// Record the (state index, chosen action) pair for later reward or
		// punishment; -1 means the state could not be indexed.
		int index = GetCurrentStateIndex(); 
		
		if (index!=-1)
		{
			LinkedList<Integer> choices;
			if ((j_PosInferencePolicy & StaticTools.INF_IF_NO_MATCH_THEN_CLUSTER_10)>0)
			{
				// Under cluster inference, only record states that fall inside
				// an inference cluster of the innate positive distribution.
				if (j_InnatePositiveResponses.CheckInferenceCluster(j_CurrentTotalState,.1))
				{
					j_StateActionQueue.addLast(new int[]{index, j_PreviousCompositeActionChosen.intValue()});
				}
				
			}
			else
				j_StateActionQueue.addLast(new int[]{index, j_PreviousCompositeActionChosen.intValue()});
			
			j_PreviousStateIndex=index;
			j_PreviousTotalState=j_CurrentTotalState;
		}
		
		return GetState(StateDesc.ENTRY_EXIT);
	}
	
	/**
	 * Maps the current total state to its distribution index.  Components equal
	 * to -1 (missing data) are backfilled in place from the previous total
	 * state; returns -1 when data is missing and no previous state exists.
	 */
	private int GetCurrentStateIndex()
	{
		if (MissingDataInTotalStateP())
		{
			// Nothing to backfill from — the state cannot be indexed yet.
			if (j_PreviousTotalState == null)
				return -1;
			// Patch every missing component with its previous value
			// (mutates j_CurrentTotalState, matching the original behavior).
			for (int pos = 0; pos < j_CurrentTotalState.length; pos++)
			{
				if (j_CurrentTotalState[pos] == -1)
					j_CurrentTotalState[pos] = j_PreviousTotalState[pos];
			}
		}
		/* The negative-response BehaviorDistribution is used purely for
		 * convenience; the positive distribution has identical state radii and
		 * would yield the same index.
		 */
		return j_InnateNegativeResponses.MapIndexRaw(j_CurrentTotalState);
	}


	
	
	/**
	 * Returns true when any component of the current total state is the
	 * missing-data sentinel (-1).
	 */
	private boolean MissingDataInTotalStateP()
	{
		for (int component : j_CurrentTotalState)
		{
			if (component == -1)
				return true;
		}
		return false;
	}
	

	
	/**
	 * Looks up the inhibitory action distribution for the current total state:
	 * an exact (non-inferential) match is tried first when the state is
	 * complete, then cluster-based inference; may return null when neither
	 * strategy matches.
	 *
	 * NOTE(review): infParameters is currently unused here; kept for signature
	 * symmetry with GetActionDistributionFromExcitations.
	 */
	private LinkedList<WeightedValue<Integer>> GetActionDistributionFromInhibitions(BehaviorDistribution distrib, int infParameters)
	{
		BehaviorDistribution.InferenceStrategy clusterStrategy =
				new BehaviorDistribution.InferenceStrategy(InferenceMethod.CLUSTER, .1);

		if (!MissingDataInTotalStateP())
		{
			LinkedList<WeightedValue<Integer>> exact =
					distrib.SelectActionDistribution(j_CurrentTotalState, new BehaviorDistribution.InferenceStrategy(InferenceMethod.NONE, 0));
			if (exact != null)
				return exact;
		}

		// Cluster inference fallback; its null result is passed through.
		return distrib.SelectActionDistribution(j_CurrentTotalState, clusterStrategy);
	}
	
	
	
	// Looks up the excitatory action distribution for the current total state.
	// Fallback chain: exact match (optionally skewed toward the prior action
	// when INF_IF_MATCH_FAVOR_PREVIOUS is set) -> cluster inference -> the
	// prior action alone (unless ignorePriorAction) -> all offered actions.
	// Unlike the inhibitory lookup, this never returns null.
	private LinkedList<WeightedValue<Integer>> GetActionDistributionFromExcitations(BehaviorDistribution distrib, int infParameters, boolean ignorePriorAction)
	{
		
		// NOTE(review): 'currentState' is just a local alias for the policy
		// bit-flags — the name is misleading.
		int currentState=infParameters;
		boolean missingData=MissingDataInTotalStateP(), priorActionDefinedP;
		WeightedValue<Integer> previousActionValue=null;
		if (j_PreviousCompositeActionChosen!=null)
		{
			previousActionValue = new WeightedValue<Integer>(j_PreviousCompositeActionChosen,j_PriorActionFavoritismSkewFraction);
			priorActionDefinedP=true;
		}
		else
			priorActionDefinedP=false;
		
		LinkedList<WeightedValue<Integer>> choices=null;
		BehaviorDistribution.InferenceStrategy iMethod = new BehaviorDistribution.InferenceStrategy(InferenceMethod.CLUSTER, .1);
			
		// Stage 1: exact lookup, only meaningful when no data is missing.
		if (!missingData)
		{
			if ((priorActionDefinedP)&&((currentState & StaticTools.INF_IF_MATCH_FAVOR_PREVIOUS)>0))
				choices = distrib.SelectActionDistribution(j_CurrentTotalState, previousActionValue, new BehaviorDistribution.InferenceStrategy(InferenceMethod.NONE, 0));
			else
				choices = distrib.SelectActionDistribution(j_CurrentTotalState, new BehaviorDistribution.InferenceStrategy(InferenceMethod.NONE, 0));
			
			if (choices!=null)
			{
				return choices;
			}
		}
		
		// Stage 2: cluster-based inference.
		if ((priorActionDefinedP)&&((currentState & StaticTools.INF_IF_MATCH_FAVOR_PREVIOUS)>0))
			choices = distrib.SelectActionDistribution(j_CurrentTotalState,previousActionValue,iMethod);
		else
			choices = distrib.SelectActionDistribution(j_CurrentTotalState, iMethod);

			
		if (choices!=null)
		{
			return choices;
		}
		// Stage 3: fall back to repeating the prior action with boosted weight,
		// if it is among the currently offered actions.
		if ((priorActionDefinedP)&&(!ignorePriorAction))
		{
			choices = new LinkedList<WeightedValue<Integer>>();
			j_PastActionSelection = AITools.FindValueInList(j_PossibleActions, new WeightedValue<Integer>(j_PreviousCompositeActionChosen, j_PastActionSelectionWeight));
			if (j_PastActionSelection!=null)
			{	j_PastActionSelection.SetWeight(j_PriorActionFavoritismSkewFraction*j_PastActionSelection.GetWeight());
				choices.add(j_PastActionSelection);
			}
			else
				choices = j_PossibleActions;
			return choices;
		}
		
		// Stage 4: no information at all — every offered action is equally valid.
		return j_PossibleActions;
		
	}
	
	/****************************************
	 *  Serializer Related Fields and Methods
	 ****************************************
	 */
	
	// Steps of the SaveData state machine; the order defines the on-disk record
	// order and must mirror LoadState.
	private enum SaveState
	{
		SAVING_INNATE_POS_RESPONSES,
		SAVING_LEARNED_POS_RESPONSES,
		SAVING_INNATE_NEG_RESPONSES,
		SAVING_LEARNED_NEG_RESPONSES,
		SAVING_GENOME,
		EXIT
	}
	
	// Steps of the streaming load protocol driven by AddData; must mirror the
	// record order written by SaveData.
	private enum LoadState
	{
		LOADING_INNATE_POS_RESPONSE,
		LOADING_LEARNED_POS_RESPONSES,
		LOADING_INNATE_NEG_RESPONSES,
		LOADING_LEARNED_NEG_RESPONSES,
		LOADING_GENOME,
		EXIT
	}
	
	// Progress marker for the streaming load protocol (advanced by AddData).
	private LoadState j_CurrentState;
	// Unique serialization name; mirrored into j_SerializerHelper by SetName().
	private String j_UniqueName;
	private SerializerAdapter j_SerializerHelper;
	
	// Type tag used by the serialization framework to identify this class.
	public String GetTypeName()
	{
		return "ProbabilisticTrainingAgent";
	}
	
	// Resets the load-state machine to its first step before deserialization.
	public void PrepareSerialization()
	{
		j_CurrentState=LoadState.LOADING_INNATE_POS_RESPONSE;
	}
	
	// Sets the unique serialization name, keeping the helper in sync.
	public void SetName(String uniqueName)
	{
		j_UniqueName=uniqueName;
		j_SerializerHelper.SetUniqueName(uniqueName);
	}
	
	// Delegates stream-based loading to the serializer helper.
	public void Load(BufferedReader reader)
	{
		j_SerializerHelper.Load(reader);
	}
	
	
	// Delegates file-based loading to the serializer helper.
	public void Load(String fileFullName)
	{
		j_SerializerHelper.Load(fileFullName);
		
	}
	

	
	// Delegates file-based saving to the serializer helper.
	public void Save(String fileFullName)
	{
		j_SerializerHelper.Save(fileFullName);
	}
	
	
	// Delegates stream-based saving to the serializer helper.
	public  void Save(BufferedWriter writer)
	{
		j_SerializerHelper.Save(writer);
	}
	
	
	
	/**
	 * Serializes the four behavior distributions and the genome to the writer,
	 * in the fixed order innate-positive, learned-positive, innate-negative,
	 * learned-negative, genome (the order LoadState/AddData expects back).  A
	 * bare ":" line is written as a placeholder for any component that is null.
	 *
	 * Removed the unused locals 'data' and 'first' from the original.
	 *
	 * @param writer destination stream; not closed by this method
	 * @throws RuntimeException wrapping any failure, with the original stack
	 *         trace rendered into the message
	 */
	public void SaveData(BufferedWriter writer)
	{
		SaveState currentSaveState = SaveState.SAVING_INNATE_POS_RESPONSES;
		
		try
		{
			while (currentSaveState!=SaveState.EXIT)
			{
				switch (currentSaveState)
				{
					case SAVING_INNATE_POS_RESPONSES:
						if (j_InnatePositiveResponses!=null)
						{
							j_InnatePositiveResponses.SetName(j_SerializerHelper.ConstructUniqueName(j_InnatePositiveResponses.GetTypeName(),"innate_pos"));
							j_InnatePositiveResponses.Save(writer);
						}
						else
						{
							// Placeholder record so the loader stays in sync.
							writer.write(":");
							writer.newLine();
						}
						currentSaveState=SaveState.SAVING_LEARNED_POS_RESPONSES;
						break;
					case SAVING_LEARNED_POS_RESPONSES:
						if (j_LearnedPositiveResponses!=null)
						{
							j_LearnedPositiveResponses.SetName(j_SerializerHelper.ConstructUniqueName(j_LearnedPositiveResponses.GetTypeName(),"learned_pos"));
							j_LearnedPositiveResponses.Save(writer);
						}
						else
						{
							writer.write(":");
							writer.newLine();
						}
						currentSaveState=SaveState.SAVING_INNATE_NEG_RESPONSES;
						break;
					case SAVING_INNATE_NEG_RESPONSES:
						if (j_InnateNegativeResponses!=null)
						{
							j_InnateNegativeResponses.SetName(j_SerializerHelper.ConstructUniqueName(j_InnateNegativeResponses.GetTypeName(),"innate_neg"));
							j_InnateNegativeResponses.Save(writer);
						}
						else
						{
							writer.write(":");
							writer.newLine();
						}
						currentSaveState=SaveState.SAVING_LEARNED_NEG_RESPONSES;
						break;
					case SAVING_LEARNED_NEG_RESPONSES:
						if (j_LearnedNegativeResponses!=null)
						{
							j_LearnedNegativeResponses.SetName(j_SerializerHelper.ConstructUniqueName(j_LearnedNegativeResponses.GetTypeName(),"learned_neg"));
							j_LearnedNegativeResponses.Save(writer);
						}
						else
						{
							writer.write(":");
							writer.newLine();
						}
						currentSaveState=SaveState.SAVING_GENOME;
						break;
					case SAVING_GENOME:
						if (j_GeneticInfo!=null)
						{
							j_GeneticInfo.SetName(j_SerializerHelper.ConstructUniqueName(j_GeneticInfo.GetTypeName(),"genome"));
							j_GeneticInfo.Save(writer);
						}
						else
						{
							writer.write(":");
							writer.newLine();
						}
						currentSaveState=SaveState.EXIT;
						break;
					
				}
				
			}
		}
		catch (Exception e)
		{
			// Rethrow unchecked, embedding the full stack trace in the message.
			java.io.StringWriter traceText = new java.io.StringWriter();
			java.io.PrintWriter pWriter = new java.io.PrintWriter(traceText,true);
			e.printStackTrace(pWriter);
			pWriter.close();
			throw new RuntimeException(traceText.toString());
		}


	}
	
	/**
	 * Consumes one raw data line during deserialization.  This agent stores
	 * no scalar data of its own, so the line content is ignored; the call
	 * simply advances the load state machine one step (this is the path
	 * taken for the ":" placeholder lines written by SaveData for null
	 * components).
	 *
	 * @param dataLine raw serialized line; unused
	 */
	public void AddData(String dataLine)
	{
		if (j_CurrentState==LoadState.LOADING_INNATE_POS_RESPONSE)
			j_CurrentState=LoadState.LOADING_LEARNED_POS_RESPONSES;
		else if (j_CurrentState==LoadState.LOADING_LEARNED_POS_RESPONSES)
			j_CurrentState=LoadState.LOADING_INNATE_NEG_RESPONSES;
		else if (j_CurrentState==LoadState.LOADING_INNATE_NEG_RESPONSES)
			j_CurrentState=LoadState.LOADING_LEARNED_NEG_RESPONSES;
		else if (j_CurrentState==LoadState.LOADING_LEARNED_NEG_RESPONSES)
			j_CurrentState=LoadState.LOADING_GENOME;
		else if (j_CurrentState==LoadState.LOADING_GENOME)
			j_CurrentState=LoadState.EXIT;
	}
	
	
	/**
	 * Receives one deserialized child component and stores it in the field
	 * selected by the current load state, then advances the state machine.
	 * Components arrive in the same fixed order SaveData wrote them: innate
	 * positive, learned positive, innate negative, learned negative
	 * responses, then the genome.
	 *
	 * @param inner      the freshly deserialized component
	 * @param instanceId identifier of the component instance (unused here)
	 */
	public void AddData(Serializer inner, String instanceId)
	{
		switch (j_CurrentState)
		{
			case LOADING_INNATE_POS_RESPONSE:
				j_InnatePositiveResponses=(BehaviorDistribution)inner;
				j_CurrentState=LoadState.LOADING_LEARNED_POS_RESPONSES;
				break;
			case LOADING_LEARNED_POS_RESPONSES:
				j_LearnedPositiveResponses=(BehaviorDistribution)inner;
				j_CurrentState=LoadState.LOADING_INNATE_NEG_RESPONSES;
				break;
			case LOADING_INNATE_NEG_RESPONSES:
				j_InnateNegativeResponses=(BehaviorDistribution)inner;
				j_CurrentState=LoadState.LOADING_LEARNED_NEG_RESPONSES;
				break;
			case LOADING_LEARNED_NEG_RESPONSES:
				j_LearnedNegativeResponses=(BehaviorDistribution)inner;
				j_CurrentState=LoadState.LOADING_GENOME;
				break;
			case LOADING_GENOME:
				// The genome is the last component: store it, finish the load,
				// reset the behavior state machine, and rebuild derived state.
				j_GeneticInfo=(ProbabilisticGenome)inner;
				j_CurrentState=LoadState.EXIT;
				j_PreviousState=GetState(StateDesc.RELOAD_ENTRY);
				UnpackClassVariablesFromGenome();
				break;
			
		}
	}
	
	/**
	 * Rebuilds the derived class state (utility interpreter, utility
	 * parameters) from j_GeneticInfo, including resetting the previously
	 * chosen composite action to the genome's INITIAL_ACTION value.
	 *
	 * This was a verbatim copy of the boolean overload's {@code true} path;
	 * it now delegates to avoid the duplication.
	 */
	private void UnpackClassVariablesFromGenome()
	{
		UnpackClassVariablesFromGenome(true);
	}
	

	
	
	/**
	 * Rebuilds the derived class state (utility interpreter, utility
	 * parameters) from j_GeneticInfo.
	 *
	 * @param setPreviousP when true, also resets the previously chosen
	 *        composite action to the genome's INITIAL_ACTION value
	 */
	private void UnpackClassVariablesFromGenome(boolean setPreviousP)
	{
		j_AdditionalGeneticInfo=j_GeneticInfo.GetAdditionalParameters();
		j_UtilityInterpreter = StaticTools.UtilityTypeNameToObject(j_AdditionalGeneticInfo[AddGeneticInfo.UTILITY_TYPE.ordinal()]);
		if (setPreviousP)
			// Integer.valueOf replaces the deprecated new Integer(int) constructor.
			j_PreviousCompositeActionChosen=Integer.valueOf(j_AdditionalGeneticInfo[AddGeneticInfo.INITIAL_ACTION.ordinal()]);
		// Everything after the fixed header slots is utility-module data.
		int[] utilityParameters = Arrays.copyOfRange(j_AdditionalGeneticInfo, j_TypeUtilityDataOffset, j_AdditionalGeneticInfo.length);
		j_UtilityInterpreter.UpdateData(utilityParameters);
	}
	
	/**
	 * Encodes the agent's configuration into j_GeneticInfo's additional
	 * parameter array.  Layout: the AddGeneticInfo header slots (history
	 * lengths, queue length, initial action, utility type) followed by the
	 * utility module's own genetic parameters, starting at
	 * j_TypeUtilityDataOffset.
	 *
	 * @param interpreter          utility/preference module supplying its type
	 *                             id and genetic parameters
	 * @param actionHistoryLength  length of the action history window
	 * @param stateHistoryLength   length of the state history window
	 * @param actionQueueMaxLength maximum length of the action queue
	 * @param initialAction        composite action used before any is chosen
	 */
	private void PackClassVariablesIntoGenome(UtilityAndPerferenceModule interpreter, int actionHistoryLength, int stateHistoryLength, int actionQueueMaxLength, int initialAction)
	{
		int[] utilityParameters = interpreter.GetGeneticParameters();
		j_AdditionalGeneticInfo = new int[j_TypeUtilityDataOffset+utilityParameters.length];
		
		j_AdditionalGeneticInfo[AddGeneticInfo.ACTION_HISTORY_LENGTH.ordinal()]=actionHistoryLength;
		j_AdditionalGeneticInfo[AddGeneticInfo.STATE_HISTORY_LENGTH.ordinal()]=stateHistoryLength;
		j_AdditionalGeneticInfo[AddGeneticInfo.ACTION_QUEUE_MAX_LENGTH.ordinal()]=actionQueueMaxLength;
		j_AdditionalGeneticInfo[AddGeneticInfo.INITIAL_ACTION.ordinal()]=initialAction;
		j_AdditionalGeneticInfo[AddGeneticInfo.UTILITY_TYPE.ordinal()]=interpreter.GetUtilityType();
		// Bulk-copy the utility parameters rather than an element-by-element loop.
		System.arraycopy(utilityParameters, 0, j_AdditionalGeneticInfo, j_TypeUtilityDataOffset, utilityParameters.length);
		j_GeneticInfo.SetAdditionalParameters(j_AdditionalGeneticInfo);
	}
	
	
	// end of serializer methods

}
