package com.evolved.automata.experimental;
import com.evolved.automata.*;
import com.evolved.automata.experimental.BehaviorDistribution.InferenceMethod;


import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.util.*;

public class ProbabilisticLearningAgent implements Serializer,Agent {
	
	
	
	
	// ******************************************
	//Begin of State Control Methods and Objects
	// ******************************************
	
	/**
	 * Identifiers for the states of the agent's internal decision state
	 * machine.  Each StateDesc maps to an AgentState instance (registered in
	 * InitializeStates) that performs one step of the decision process and
	 * returns the next state to run.
	 */
	private enum StateDesc
	{
		INITIAL_ENTRY,
		LOADING_GENOME,
		INITIALIZING_DATASTRUCTURES,
		GETTING_CURRENT_TOTAL_STATE,
		UPDATING_AVOIDANCE_MAP_WITH_ACTION_QUEUE,
		UPDATING_ACTION_MAP_WITH_ACTION_QUEUE,
		CHECKING_IF_ACTION_QUEUE_AT_MAX_LENGTH,
		CLEARNING_ACTION_QUEUE,
		SELECTING_NEXT_ACTION,
		ENTRY_EXIT
	}
	
	// Registry mapping each state identifier to its behavior object.
	HashMap<StateDesc, AgentState> j_StateMap;
	
	// Looks up the behavior registered for the given state id; returns null
	// if InitializeStates has not yet populated the map.
	private AgentState GetState(StateDesc desc)
	{
		return j_StateMap.get(desc);
	}
	
	// Registers the behavior object implementing the given state id.
	private void SetState(StateDesc desc, AgentState state)
	{
		j_StateMap.put(desc, state);
	}
	
	
	// One step of the decision state machine: performs its work (via side
	// effects on the enclosing agent) and returns the state to run next.
	private interface AgentState
	{
		public AgentState GetNextState();
	}
	
	// *****************************************
	// End of State control methods and objects
	// *****************************************
	
	
	// ********************************
	// Begin of Genetic Related Fields
	// ********************************
	
	
	/**
	 * Index layout (via ordinal()) of the fixed header slots inside the
	 * genome's additional-parameter int array; utility-module parameters
	 * follow starting at j_TypeUtilityDataOffset.
	 */
	private enum AddGeneticInfo
	{
		ACTION_HISTORY_LENGTH,
		STATE_HISTORY_LENGTH,
		ACTION_QUEUE_MAX_LENGTH,
		INITIAL_ACTION,
		UTILITY_TYPE
	}
	
	// Number of header slots preceding the utility-module parameters in
	// j_AdditionalGeneticInfo (matches the AddGeneticInfo constant count).
	final int  j_TypeUtilityDataOffset=5;
	// Inherited (genome-supplied) response distributions.
	BehaviorDistribution j_InnateNegativeResponses=null;
	BehaviorDistribution j_InnatePositiveResponses=null;
	
	// Distributions accumulated from this agent's own experience.
	BehaviorDistribution j_LearnedNegativeResponses=null;
	BehaviorDistribution j_LearnedPositiveResponses=null;
	
	// Bit-flag policies (StaticTools.INF_* constants) controlling fallback
	// inference for inhibitory and excitatory action selection.
	int j_NegInferencePolicy;
	int j_PosInferencePolicy;
	// Weight assigned to the previously chosen action when it is offered as a
	// fallback or favored choice.
	double j_PastActionSelectionWeight=1.0;
	
	// Builds the aggregate state/action-history vector ("total state").
	SequenceMaker j_StateActionHistory;
	
	// Caller-supplied action options for the current cycle, and the choice
	// set remembered from the previous cycle (used by the USE_PRIOR policy).
	LinkedList<WeightedValue<Integer>> j_PossibleActions;
	LinkedList<WeightedValue<Integer>> j_PriorPossibilities;
	// NOTE(review): declared but not referenced in this file's visible code.
	double j_PriorActionFavoritismSkewFraction=3.0;
	int[] j_AdditionalGeneticInfo;
	// Per-group freedoms supplied by the caller, and their per-element
	// expansion over the total-state vector.
	double[] j_BaseInferentialFreedom;
	double[] j_AggregateInferentialFreedom;
	
	ProbabilisticGenome j_GeneticInfo;
	
	String j_UtilityType;
	
	
	// ******************************
	// End of Genetic Related Fields
	// ******************************
	
	
	// Three-valued utility classification of the most recent observation.
	private enum UtilityState
	{
		POSITIVE,
		NEGATIVE,
		NEUTRAL
	}
	
	UtilityState j_CurrentUtilityState;
	
	// Pending state-action decisions awaiting a utility signal.  Each entry
	// is an int[]{stateActionIndex, actionId} (see GETTING_CURRENT_TOTAL_STATE).
	
	LinkedList j_StateActionQueue;
	
	LinkedList j_ActionList; // the set of available
	
	// Scores observed states as positive/negative/neutral utility.
	UtilityAndPerferenceModule j_UtilityInterpreter;
	// Action selected this cycle and on the previous cycle (null before the
	// first selection in the default-constructed case).
	Integer j_ActionChosen;
	Integer j_PreviousActionChosen;
	
	// Raw observation for the current cycle, and the aggregated
	// state/action-history vector derived from it.
	int[] j_CurrentStateData;
	int[] j_CurrentTotalState;
	
	// State the decision machine resumes from on the next cycle.
	AgentState j_PreviousState;
	// Increment applied when reinforcing an action distribution entry.
	int j_ActionIndexUpdateStep;
	
	
	
	
	/**
	 * Creates an uninitialized agent.  The genome and utility interpreter are
	 * expected to be supplied later (e.g. via GeneSwap or deserialization).
	 */
	public ProbabilisticLearningAgent()
	{
		j_SerializerHelper= new SerializerAdapter(this);
		
		j_UtilityInterpreter=null;
		j_GeneticInfo=null;
		j_StateMap= new HashMap<StateDesc, AgentState>();
		// Need to make this defined in the genome function of the genome
		//interpreter = new 
		InitializeStates();
		// BUGFIX: the initial state must be looked up AFTER InitializeStates()
		// has populated j_StateMap; previously this lookup ran first and
		// j_PreviousState was always null.
		j_PreviousState=GetState(StateDesc.INITIAL_ENTRY);
		j_ActionIndexUpdateStep=1;
		j_NegInferencePolicy=StaticTools.INF_IF_MISSING_DATA_THEN_NO_MATCH|StaticTools.INF_IF_NO_MATCH_THEN_USE_PRIOR;
		j_PosInferencePolicy=StaticTools.INF_IF_MATCH_FAVOR_PREVIOUS|StaticTools.INF_IF_MISSING_DATA_THEN_NO_MATCH|StaticTools.INF_IF_NO_MATCH_THEN_USE_25_NEIBORHO0D|StaticTools.INF_IF_NO_MATCH_THEN_USE_PRIOR;
		
	}
	
	/**
	 * Creates a fully configured agent.
	 *
	 * @param name unique serialization name for this agent
	 * @param genome genetic parameters driving behavior
	 * @param interpreter utility module that scores observed states
	 * @param actionHistoryLength number of past actions kept in the total state
	 * @param stateHistoryLength number of past states kept in the total state
	 * @param actionQueueMaxLength cap on the pending state/action queue
	 * @param lastActionExecuted action id treated as the previously chosen action
	 */
	public ProbabilisticLearningAgent(String name, ProbabilisticGenome genome, UtilityAndPerferenceModule interpreter, int actionHistoryLength, int stateHistoryLength, int actionQueueMaxLength, int lastActionExecuted )
	{
		j_UniqueName=name;
		j_StateMap= new HashMap<StateDesc, AgentState>();
		j_SerializerHelper= new SerializerAdapter(this);
		// Integer.valueOf avoids the deprecated Integer(int) constructor.
		j_PreviousActionChosen=Integer.valueOf(lastActionExecuted);
		j_UtilityInterpreter=interpreter;
		j_GeneticInfo=genome;
		InitializeStates();
		j_PreviousState=GetState(StateDesc.INITIAL_ENTRY);
		j_ActionIndexUpdateStep=1;
		PackClassVariablesIntoGenome(interpreter,actionHistoryLength, stateHistoryLength,actionQueueMaxLength, lastActionExecuted );
		
		j_NegInferencePolicy=StaticTools.INF_IF_MISSING_DATA_THEN_NO_MATCH|StaticTools.INF_IF_NO_MATCH_THEN_USE_PRIOR;
		j_PosInferencePolicy=StaticTools.INF_IF_MATCH_FAVOR_PREVIOUS|StaticTools.INF_IF_MISSING_DATA_THEN_NO_MATCH|StaticTools.INF_IF_NO_MATCH_THEN_USE_25_NEIBORHO0D|StaticTools.INF_IF_NO_MATCH_THEN_USE_PRIOR;
		
	}
	
	/**
	 * Overrides the default inference configuration: the bit-flag policies for
	 * inhibitory and excitatory matching, and the per-group base freedoms.
	 */
	public void SetInferenceParameters(int inhibitoryStateGapPolicy, int excitoryStateGapPolicy,double[] freedom)
	{
		UpdateBaseInferentialFreedoms(freedom);
		j_PosInferencePolicy = excitoryStateGapPolicy;
		j_NegInferencePolicy = inhibitoryStateGapPolicy;
	}
	
	/**
	 * Replaces the per-group base inferential freedoms.  Note: the array is
	 * stored by reference, not copied, so later caller-side mutation is
	 * visible to the agent.
	 */
	public void UpdateBaseInferentialFreedoms(double[] freedom)
	{
		j_BaseInferentialFreedom=freedom;
	}
	/**
	 * Allocates fresh (empty) learned response distributions whose state radii
	 * mirror those of the corresponding innate distributions.
	 */
	private void InitializeLearnedResponses()
	{
		j_LearnedPositiveResponses = new BehaviorDistribution(j_InnatePositiveResponses.GetStateRadii());
		j_LearnedNegativeResponses = new BehaviorDistribution(j_InnateNegativeResponses.GetStateRadii());
	}
	
	/*
	 * Main process method.
	 *
	 * Runs one decision cycle: feeds the new observation (and the previously
	 * executed action, if any) to the utility module, records the resulting
	 * utility sign in j_CurrentUtilityState, then steps the internal state
	 * machine from j_PreviousState until it reaches ENTRY_EXIT.  The action
	 * selected by the SELECTING_NEXT_ACTION state is returned.
	 *
	 * currentStateData - raw sensor/state vector for this time step
	 * actionOptions    - weighted fallback action set used when no innate or
	 *                    learned distribution yields a choice
	 * returns a single-element array holding the chosen action id
	 */
	public Integer[] ExecuteDecisionProcess(int[] currentStateData, LinkedList<WeightedValue<Integer>> actionOptions)
	{
		j_ActionChosen = null;
		j_PossibleActions=actionOptions;
		// (1) Updates the current state of the agent
		j_CurrentStateData = currentStateData;
		UtilityAndPerferenceModule.Utility utility;
		// On the very first cycle of a default-constructed agent there is no
		// prior action to report to the utility module.
		if (j_PreviousActionChosen!=null)
			utility = j_UtilityInterpreter.UpdateState(currentStateData, new int[]{j_PreviousActionChosen.intValue()});
		else
			utility = j_UtilityInterpreter.UpdateState(currentStateData, null);
		
		// Collapse the utility result to the agent's three-valued state.
		switch (utility)
		{
			case POSITIVE:
				j_CurrentUtilityState = UtilityState.POSITIVE;
				break;
			case NEGATIVE:
				j_CurrentUtilityState = UtilityState.NEGATIVE;
				break;
			default:
				j_CurrentUtilityState = UtilityState.NEUTRAL;
		}
		
		
		// Main Process loop: resume from j_PreviousState and run until the
		// machine reports ENTRY_EXIT (ENTRY_EXIT itself loops back to
		// GETTING_CURRENT_TOTAL_STATE on the next cycle).
		// NOTE(review): j_PreviousState is not advanced after this loop, so
		// every cycle restarts from the same state — confirm this is intended.
		AgentState currentState = j_PreviousState;
		boolean entering=true;
		while ((entering)||(currentState!=GetState(StateDesc.ENTRY_EXIT)))
		{
			entering=false;
			currentState=currentState.GetNextState();
		}
		
		return new Integer[]{j_ActionChosen};
	}
	
	/**
	 * Replaces this agent's genome and rebuilds every genome-derived field:
	 * innate distributions, fresh learned distributions, the utility module
	 * (reconstructed from the UTILITY_TYPE header slot and fed the trailing
	 * utility parameters), and an empty state/action queue.  The decision
	 * machine is reset to INITIALIZING_DATASTRUCTURES.
	 */
	public void GeneSwap(Genome genome)
	{
		j_GeneticInfo=(ProbabilisticGenome)genome;
		j_AdditionalGeneticInfo=j_GeneticInfo.GetAdditionalParameters();
		j_InnateNegativeResponses = j_GeneticInfo.GetNegativeDistribution();
		j_InnatePositiveResponses = j_GeneticInfo.GetPositiveDistribution();
		InitializeLearnedResponses();
		j_UtilityInterpreter = StaticTools.UtilityTypeNameToObject(j_AdditionalGeneticInfo[AddGeneticInfo.UTILITY_TYPE.ordinal()]);
		
		// Everything past the fixed-size header belongs to the utility module.
		int[] utilityParameters = Arrays.copyOfRange(j_AdditionalGeneticInfo, j_TypeUtilityDataOffset, j_AdditionalGeneticInfo.length);
		
		j_UtilityInterpreter.UpdateData(utilityParameters);
		j_StateActionQueue = new LinkedList();
		j_PreviousState=GetState(StateDesc.INITIALIZING_DATASTRUCTURES);
	}

	/*
	 * Returns {parent, offspring}: the first Genome is the parent genome
	 * whereas the second is a new genome whose distributions merge the innate
	 * responses with everything learned during this agent's lifetime.
	 * 
	 * (non-Javadoc)
	 * @see com.evolved.automata.experimental.Agent#GetTotalGenome()
	 */
	public Genome[] GetTotalGenome()
	{
		// clone() on an int[] already returns int[]; the old (int[]) cast
		// was redundant.
		int[] newAdditional = j_AdditionalGeneticInfo.clone();
		ProbabilisticGenome newGenome = new ProbabilisticGenome();
		newGenome.SetAdditionalParameters(newAdditional);
		newGenome.SetInnateNegativeDistrib(j_InnateNegativeResponses.MergeClone(j_LearnedNegativeResponses));
		newGenome.SetInnatePositiveDistrib(j_InnatePositiveResponses.MergeClone(j_LearnedPositiveResponses));
		newGenome.SetFitness(j_GeneticInfo.GetFitness());
		return new Genome[]{j_GeneticInfo,newGenome};
	}
	
	
	/* Currently, this function only allows one action to be defined
	 * (actionRadius is fixed at 1).
	 *
	 * Expands the per-group base freedoms into one freedom value per element
	 * of the aggregate total-state vector: stateHistoryLength*stateRadius
	 * state slots followed by actionHistoryLength action slots.
	 */
	private double[] GetAggregateInferenceFreedom(double[] baseFreedom)
	{
		int stateRadius = j_CurrentStateData.length;
		int actionRadius = 1;
		int stateHistoryIndex=AddGeneticInfo.STATE_HISTORY_LENGTH.ordinal();
		int actionHistoryIndex=AddGeneticInfo.ACTION_HISTORY_LENGTH.ordinal();
		int stateHistoryLength=j_AdditionalGeneticInfo[stateHistoryIndex];
		int actionHistoryLength=j_AdditionalGeneticInfo[actionHistoryIndex];
		double[] aggregate = new double[stateHistoryLength*stateRadius+actionRadius*actionHistoryLength];
		int freedomIndex=0;
		int stateCounter=0;
		for (int i=0;i<aggregate.length;i++)
		{
			if (i<stateHistoryLength*stateRadius)
			{
				// NOTE(review): freedomIndex advances every stateHistoryLength
				// elements rather than every stateRadius elements — confirm
				// baseFreedom's layout makes this grouping intentional.
				if (stateCounter==stateHistoryLength)
				{
					freedomIndex++;
					stateCounter=0;
				}
				aggregate[i]=baseFreedom[freedomIndex];
				stateCounter++;
			}
			else
				// NOTE(review): the action slots reuse whatever freedomIndex
				// the state portion ended on; verify this is correct.
				aggregate[i]=baseFreedom[freedomIndex];
		}
		return aggregate;
	}
	
	
	/**
	 * Registers the behavior for every StateDesc in j_StateMap.  Each state
	 * performs one step of the decision cycle by mutating agent fields and
	 * returning the next state to execute (see ExecuteDecisionProcess).
	 */
	private void InitializeStates()
	{
		// Entry point for a freshly constructed agent: go load the genome.
		SetState(StateDesc.INITIAL_ENTRY, new AgentState()
			{
				public AgentState GetNextState()
				{
					return GetState(StateDesc.LOADING_GENOME);
				}
				
			}
		
		);
		
		
		// Pulls the innate distributions and additional parameters out of the
		// genome, allocates empty learned distributions and the pending
		// state/action queue.
		SetState(StateDesc.LOADING_GENOME, new AgentState()
			{
				public AgentState GetNextState()
				{
					j_AdditionalGeneticInfo=j_GeneticInfo.GetAdditionalParameters();
					j_InnateNegativeResponses = j_GeneticInfo.GetNegativeDistribution();
					j_InnatePositiveResponses = j_GeneticInfo.GetPositiveDistribution();
					j_StateActionQueue = new LinkedList();
					InitializeLearnedResponses();
					return GetState(StateDesc.INITIALIZING_DATASTRUCTURES);
				}
				
			}
		);
		
		// Builds the state/action history tracker and pushes the inferential
		// freedoms into all four distributions.
		SetState(StateDesc.INITIALIZING_DATASTRUCTURES, new AgentState()
			{
				public AgentState GetNextState()
				{
					int stateHistoryIndex=AddGeneticInfo.STATE_HISTORY_LENGTH.ordinal();
					int actionHistoryIndex=AddGeneticInfo.ACTION_HISTORY_LENGTH.ordinal();
					
					int stateHistoryLength=j_AdditionalGeneticInfo[stateHistoryIndex];
					int actionHistoryLength=j_AdditionalGeneticInfo[actionHistoryIndex];
					
					j_StateActionHistory = new SequenceMaker(j_CurrentStateData.length,stateHistoryLength,1,actionHistoryLength,SequenceMaker.InitializationPolicy.EXTEND_FIRST_POINT);
					j_AggregateInferentialFreedom = GetAggregateInferenceFreedom(j_BaseInferentialFreedom);
					j_InnateNegativeResponses.SetBaseInferentialFreedom(j_BaseInferentialFreedom);
					j_InnatePositiveResponses.SetBaseInferentialFreedom(j_BaseInferentialFreedom);
					j_LearnedNegativeResponses.SetBaseInferentialFreedom(j_BaseInferentialFreedom);
					j_LearnedPositiveResponses.SetBaseInferentialFreedom(j_BaseInferentialFreedom);
					return GetState(StateDesc.GETTING_CURRENT_TOTAL_STATE);
				}
				
			}
		);
		
		// Appends the new observation and previous action to the history,
		// records the pending decision, then branches on the utility sign.
		// NOTE(review): j_PreviousActionChosen.intValue() assumes a prior
		// action exists — null only via the no-arg constructor path; confirm.
		SetState(StateDesc.GETTING_CURRENT_TOTAL_STATE, new AgentState()
			{
				public AgentState GetNextState()
				{
					j_CurrentTotalState=j_StateActionHistory.AddStateActionGroups(j_CurrentStateData, new int[]{j_PreviousActionChosen.intValue()});
					
					/* Note: I using the negative response BehaviorDistribution to determine the state-action index for convenience.
					 * I could just as well have used the positive response distribution since they both will have identical state 
					 * radii.
					 */
					int index = j_InnateNegativeResponses.MapIndexRaw(j_CurrentStateData);
					
					j_StateActionQueue.add(new int[]{index, j_PreviousActionChosen.intValue()});
					
					// Route positive/negative utility to the corresponding
					// reinforcement state; neutral just checks queue length.
					switch (j_CurrentUtilityState)
					{
						case NEGATIVE:
							return GetState(StateDesc.UPDATING_AVOIDANCE_MAP_WITH_ACTION_QUEUE);
						case POSITIVE:
							return GetState(StateDesc.UPDATING_ACTION_MAP_WITH_ACTION_QUEUE);
						default:
							return GetState(StateDesc.CHECKING_IF_ACTION_QUEUE_AT_MAX_LENGTH);
					}
				
				}
				
			}
		);
	
		// Reinforcement states
		
		// Negative utility: credit every queued {stateIndex, action} pair to
		// the learned NEGATIVE (avoidance) distribution, then clear the queue.
		SetState(StateDesc.UPDATING_AVOIDANCE_MAP_WITH_ACTION_QUEUE, new AgentState()
			{
				public AgentState GetNextState()
				{
					int[] decision;
					int actionDistribIndex;
					int actionIndex;
					for (Object oDecision: j_StateActionQueue)
					{
						decision=(int[])oDecision;
						actionDistribIndex=decision[0];
						actionIndex=decision[1];
						j_LearnedNegativeResponses.IncrementActionDistribution(actionDistribIndex, actionIndex, j_ActionIndexUpdateStep);
					}
					return GetState(StateDesc.CLEARNING_ACTION_QUEUE);
				}
				
			}
		);
		
		// Positive utility: same as above but credits the learned POSITIVE
		// distribution.
		SetState(StateDesc.UPDATING_ACTION_MAP_WITH_ACTION_QUEUE, new AgentState()
			{
				public AgentState GetNextState()
				{
					int[] decision;
					int actionDistribIndex;
					int actionIndex;
					for (Object oDecision: j_StateActionQueue)
					{
						decision=(int[])oDecision;
						actionDistribIndex=decision[0];
						actionIndex=decision[1];
						j_LearnedPositiveResponses.IncrementActionDistribution(actionDistribIndex, actionIndex, j_ActionIndexUpdateStep);
					}
					return GetState(StateDesc.CLEARNING_ACTION_QUEUE);
				}
				
			}
		);
	
		// Neutral utility: drop the queue if it has grown past the genome's
		// configured cap, otherwise proceed directly to action selection.
		SetState(StateDesc.CHECKING_IF_ACTION_QUEUE_AT_MAX_LENGTH, new AgentState()
			{
				public AgentState GetNextState()
				{
					int stateActionQueueMaxLengthIndex=AddGeneticInfo.ACTION_QUEUE_MAX_LENGTH.ordinal();
					int stateActionQueueMaxLength=j_AdditionalGeneticInfo[stateActionQueueMaxLengthIndex];
					
					
					if (j_StateActionQueue.size()>stateActionQueueMaxLength)
						return GetState(StateDesc.CLEARNING_ACTION_QUEUE);
					else
						return GetState(StateDesc.SELECTING_NEXT_ACTION);
				}
				
			}
		);
		
		// Empties the pending decision queue.
		SetState(StateDesc.CLEARNING_ACTION_QUEUE, new AgentState()
			{
				public AgentState GetNextState()
				{
					j_StateActionQueue = new LinkedList();
					return GetState(StateDesc.SELECTING_NEXT_ACTION);
				}
				
			}
		);
		
		// Chooses the next action: inhibitory distributions narrow the option
		// set, excitatory distributions weight it, and a weighted random draw
		// picks the winner.
		SetState(StateDesc.SELECTING_NEXT_ACTION, new AgentState()
			{
				public AgentState GetNextState()
				{
					
					LinkedList<WeightedValue<Integer>> innatePositiveActionSet, totalPositiveActionSet, totalChoices, innateChoices;
					innateChoices= GetActionDistributionFromInhibitions(j_InnateNegativeResponses, j_NegInferencePolicy);
					totalChoices = AITools.MergeLists(
							innateChoices, 
							GetActionDistributionFromInhibitions(j_LearnedNegativeResponses, j_NegInferencePolicy));
					innatePositiveActionSet = GetActionDistributionFromExcitations(j_InnatePositiveResponses, totalChoices, j_PosInferencePolicy);
					totalPositiveActionSet=AITools.MergeLists(
							innatePositiveActionSet, 
							GetActionDistributionFromExcitations(j_LearnedPositiveResponses, totalChoices, j_PosInferencePolicy));
					WeightedValue<Integer> action;
					action = AITools.ChooseWeightedRandomFair(totalPositiveActionSet);
					
					j_ActionChosen=action.GetValue();
					j_PreviousActionChosen=j_ActionChosen;
					return GetState(StateDesc.ENTRY_EXIT);
				}
				
			}
		);
		
		// Terminal state of each cycle; the next cycle re-enters at
		// GETTING_CURRENT_TOTAL_STATE.
		SetState(StateDesc.ENTRY_EXIT, new AgentState()
			{
				public AgentState GetNextState()
				{
					return GetState(StateDesc.GETTING_CURRENT_TOTAL_STATE);
				}
				
			}
		);
	}
	

	// True if any slot of the aggregated state/action vector holds the
	// "unknown" sentinel value (-1).
	private boolean MissingDataInTotalStateP()
	{
		for (int slot : j_CurrentTotalState)
		{
			if (slot == -1)
				return true;
		}
		return false;
	}
	
	/**
	 * Computes the set of actions NOT ruled out by an inhibitory (avoidance)
	 * distribution for the current total state.
	 *
	 * Fallback ladder: (1) with complete data, try an exact match; (2) retry
	 * with the fallback strategy encoded in the policy flags; (3) if the
	 * policy allows, reuse the previous cycle's choices; (4) otherwise return
	 * the caller-supplied option set.  Successful lookups are cached in
	 * j_PriorPossibilities for step (3) on later cycles.
	 */
	private LinkedList<WeightedValue<Integer>> GetActionDistributionFromInhibitions(BehaviorDistribution distrib, int infParameters)
	{
		boolean missingData=MissingDataInTotalStateP();
		boolean priorPossibilitiesExistP = (j_PriorPossibilities!=null);
		
		LinkedList<WeightedValue<Integer>> choices;
		BehaviorDistribution.InferenceStrategy iMethod = SelectFallbackInferenceStrategy(infParameters);
		
		if (!missingData)
		{
			choices = distrib.GetLowerActions(j_CurrentTotalState, new BehaviorDistribution.InferenceStrategy(InferenceMethod.NONE, 0));
			if (choices!=null)
			{
				j_PriorPossibilities=choices;
				return choices;
			}
		}
		
		// Retry with the (possibly NONE) fallback strategy.  The old
		// "iMethod != null" guard was dead code: iMethod is never null.
		choices = distrib.GetLowerActions(j_CurrentTotalState, iMethod);
		if (choices!=null)
		{
			j_PriorPossibilities=choices;
			return choices;
		}
		
		if ((priorPossibilitiesExistP)&&((infParameters & StaticTools.INF_IF_NO_MATCH_THEN_USE_PRIOR)>0))
		{
			return j_PriorPossibilities;
		}
		
		return j_PossibleActions;
	}
	
	/**
	 * Decodes the inference-policy bit flags into the fallback strategy used
	 * when an exact state match fails.  Flag precedence mirrors the original
	 * if/else chain: neighborhood sizes 10/25/33/50/100 percent, then
	 * Bayesian, then NONE.
	 */
	private BehaviorDistribution.InferenceStrategy SelectFallbackInferenceStrategy(int infParameters)
	{
		if ((infParameters& StaticTools.INF_IF_NO_MATCH_THEN_USE_10_NEIBORHO0D)>0)
			return new BehaviorDistribution.InferenceStrategy(InferenceMethod.NEIGHBORHOOD, .1);
		else if ((infParameters& StaticTools.INF_IF_NO_MATCH_THEN_USE_25_NEIBORHO0D)>0)
			return new BehaviorDistribution.InferenceStrategy(InferenceMethod.NEIGHBORHOOD, .25);
		else if ((infParameters& StaticTools.INF_IF_NO_MATCH_THEN_USE_33_NEIBORHO0D)>0)
			return new BehaviorDistribution.InferenceStrategy(InferenceMethod.NEIGHBORHOOD, .33);
		else if ((infParameters& StaticTools.INF_IF_NO_MATCH_THEN_USE_50_NEIBORHO0D)>0)
			return new BehaviorDistribution.InferenceStrategy(InferenceMethod.NEIGHBORHOOD, .5);
		else if ((infParameters& StaticTools.INF_IF_NO_MATCH_THEN_USE_100_NEIBORHO0D)>0)
			return new BehaviorDistribution.InferenceStrategy(InferenceMethod.NEIGHBORHOOD, 1);
		else if ((infParameters & StaticTools.INF_IF_MISSING_DATA_THEN_USE_BAYESIAN)>0)
			return new BehaviorDistribution.InferenceStrategy(InferenceMethod.BAYESIAN, 1);
		return new BehaviorDistribution.InferenceStrategy(BehaviorDistribution.InferenceMethod.NONE, 0);
	}
	
	/**
	 * Computes the excitatory (preferred) action distribution for the current
	 * total state, restricted to the actions permitted by {@code mask}.
	 *
	 * Fallback ladder: (1) with complete data, try an exact match (optionally
	 * favoring the previous action per INF_IF_MATCH_FAVOR_PREVIOUS); (2) retry
	 * with the fallback strategy decoded from the policy flags; (3) fall back
	 * to just the previous action; (4) finally the caller-supplied options.
	 */
	private LinkedList<WeightedValue<Integer>> GetActionDistributionFromExcitations(BehaviorDistribution distrib,LinkedList<WeightedValue<Integer>> mask, int infParameters)
	{
		
		// NOTE(review): "currentState" is just an alias of infParameters — the
		// name is misleading; it never holds agent state.
		int currentState=infParameters;
		boolean missingData=MissingDataInTotalStateP(), priorActionDefinedP;
		WeightedValue<Integer> previousActionValue=null;
		if (j_PreviousActionChosen!=null)
		{
			previousActionValue = new WeightedValue<Integer>(j_PreviousActionChosen,j_PastActionSelectionWeight);
			priorActionDefinedP=true;
		}
		else
			priorActionDefinedP=false;
		
		LinkedList<WeightedValue<Integer>> choices=null;
		// Decode the fallback strategy from the policy flags; precedence is
		// neighborhood 10/25/33/50/100 percent, then Bayesian, else NONE.
		BehaviorDistribution.InferenceStrategy iMethod =  new BehaviorDistribution.InferenceStrategy(BehaviorDistribution.InferenceMethod.NONE, 0);
		if ((infParameters& StaticTools.INF_IF_NO_MATCH_THEN_USE_10_NEIBORHO0D)>0)
			iMethod=new BehaviorDistribution.InferenceStrategy(InferenceMethod.NEIGHBORHOOD, .1);
		else if ((infParameters& StaticTools.INF_IF_NO_MATCH_THEN_USE_25_NEIBORHO0D)>0)
			iMethod=new BehaviorDistribution.InferenceStrategy(InferenceMethod.NEIGHBORHOOD, .25);
		else if ((infParameters& StaticTools.INF_IF_NO_MATCH_THEN_USE_33_NEIBORHO0D)>0)
			iMethod=new BehaviorDistribution.InferenceStrategy(InferenceMethod.NEIGHBORHOOD, .33);
		else if ((infParameters& StaticTools.INF_IF_NO_MATCH_THEN_USE_50_NEIBORHO0D)>0)
			iMethod=new BehaviorDistribution.InferenceStrategy(InferenceMethod.NEIGHBORHOOD, .5);
		else if ((infParameters& StaticTools.INF_IF_NO_MATCH_THEN_USE_100_NEIBORHO0D)>0)
			iMethod=new BehaviorDistribution.InferenceStrategy(InferenceMethod.NEIGHBORHOOD, 1);
		else if ((infParameters & StaticTools.INF_IF_MISSING_DATA_THEN_USE_BAYESIAN)>0)
			iMethod=new BehaviorDistribution.InferenceStrategy(InferenceMethod.BAYESIAN, 1);
		
		//j_PosInferencePolicy=StaticTools.INF_IF_MATCH_FAVOR_PREVIOUS|StaticTools.INF_IF_MISSING_DATA_THEN_NO_MATCH|StaticTools.INF_IF_NO_MATCH_THEN_USE_25_NEIBORHO0D|StaticTools.INF_IF_NO_MATCH_THEN_USE_PRIOR;
		
		if (!missingData)
		{
			if ((priorActionDefinedP)&&((currentState & StaticTools.INF_IF_MATCH_FAVOR_PREVIOUS)>0))
				choices = distrib.SelectActionDistribution(j_CurrentTotalState, mask, previousActionValue, new BehaviorDistribution.InferenceStrategy(InferenceMethod.NONE, 0));
			else
				choices = distrib.SelectActionDistribution(j_CurrentTotalState, mask, new BehaviorDistribution.InferenceStrategy(InferenceMethod.NONE, 0));
			
			if (choices!=null)
			{
				return choices;
			}
		}
		
		// NOTE(review): iMethod is never null here — this guard is dead code.
		if (iMethod!=null)
		{
			if ((priorActionDefinedP)&&((currentState & StaticTools.INF_IF_MATCH_FAVOR_PREVIOUS)>0))
				choices = distrib.SelectActionDistribution(j_CurrentTotalState, mask, previousActionValue,iMethod);
			else
				choices = distrib.SelectActionDistribution(j_CurrentTotalState, mask,iMethod);

		}
			
		if (choices!=null)
		{
			return choices;
		}
		// No distribution matched: fall back to repeating the previous action.
		if (priorActionDefinedP)
		{
			choices = new LinkedList<WeightedValue<Integer>>();
			choices.add(new WeightedValue<Integer>(j_PreviousActionChosen, j_PastActionSelectionWeight));
			return choices;
		}
		
		return j_PossibleActions;
		
	}
	
	
	
	/****************************************
	 *  Serializer Related Fields and Methods
	 ****************************************
	 */
	
	// Order in which components are written by SaveData; must mirror the
	// LoadState order used by AddData.
	private enum SaveState
	{
		SAVING_INNATE_POS_RESPONSES,
		SAVING_LEARNED_POS_RESPONSES,
		SAVING_INNATE_NEG_RESPONSES,
		SAVING_LEARNED_NEG_RESPONSES,
		SAVING_GENOME,
		EXIT
	}
	
	// Order in which components are expected during deserialization.
	private enum LoadState
	{
		LOADING_INNATE_POS_RESPONSE,
		LOADING_LEARNED_POS_RESPONSES,
		LOADING_INNATE_NEG_RESPONSES,
		LOADING_LEARNED_NEG_RESPONSES,
		LOADING_GENOME,
		EXIT
	}
	
	// Progress of the current load; reset by PrepareSerialization().
	private LoadState j_CurrentState;
	private String j_UniqueName;
	// Delegate handling the mechanics of the Serializer protocol.
	private SerializerAdapter j_SerializerHelper;
	
	// Serializer type tag identifying this class in saved data.
	public String GetTypeName()
	{
		return "ProbabilisticLearningAgent";
	}
	
	// Resets the load state machine to expect the first component.
	public void PrepareSerialization()
	{
		j_CurrentState=LoadState.LOADING_INNATE_POS_RESPONSE;
	}
	
	// Assigns this agent's unique serialization name and propagates it to the
	// serializer helper.
	public void SetName(String uniqueName)
	{
		j_UniqueName=uniqueName;
		j_SerializerHelper.SetUniqueName(uniqueName);
	}
	
	// Deserialization entry points: delegate to the helper, which calls back
	// into AddData(...) for each component/line.
	public void Load(BufferedReader reader)
	{
		j_SerializerHelper.Load(reader);
	}
	
	
	public void Load(String fileFullName)
	{
		j_SerializerHelper.Load(fileFullName);
		
	}
	
	
	// Serialization entry points: delegate to the helper, which calls back
	// into SaveData(...).
	public void Save(String fileFullName)
	{
		j_SerializerHelper.Save(fileFullName);
	}
	
	
	public  void Save(BufferedWriter writer)
	{
		j_SerializerHelper.Save(writer);
	}
	
	
	
	/**
	 * Writes this agent's persistent components in the fixed order that
	 * AddData(Serializer, String) expects on load: innate positive, learned
	 * positive, innate negative, learned negative, then the genome.  A lone
	 * ":" line is written as a placeholder for any missing component so the
	 * load-side state machine stays in sync.
	 *
	 * @throws RuntimeException wrapping any I/O or serialization failure
	 */
	public void SaveData(BufferedWriter writer)
	{
		SaveState currentSaveState = SaveState.SAVING_INNATE_POS_RESPONSES;
		
		try
		{
			while (currentSaveState!=SaveState.EXIT)
			{
				switch (currentSaveState)
				{
					case SAVING_INNATE_POS_RESPONSES:
						if (j_InnatePositiveResponses!=null)
						{
							j_InnatePositiveResponses.SetName(j_SerializerHelper.ConstructUniqueName(j_InnatePositiveResponses.GetTypeName(),"innate_pos"));
							j_InnatePositiveResponses.Save(writer);
						}
						else
						{
							writer.write(":");
							writer.newLine();
						}
						currentSaveState=SaveState.SAVING_LEARNED_POS_RESPONSES;
						break;
					case SAVING_LEARNED_POS_RESPONSES:
						if (j_LearnedPositiveResponses!=null)
						{
							j_LearnedPositiveResponses.SetName(j_SerializerHelper.ConstructUniqueName(j_LearnedPositiveResponses.GetTypeName(),"learned_pos"));
							j_LearnedPositiveResponses.Save(writer);
						}
						else
						{
							writer.write(":");
							writer.newLine();
						}
						currentSaveState=SaveState.SAVING_INNATE_NEG_RESPONSES;
						break;
					case SAVING_INNATE_NEG_RESPONSES:
						if (j_InnateNegativeResponses!=null)
						{
							j_InnateNegativeResponses.SetName(j_SerializerHelper.ConstructUniqueName(j_InnateNegativeResponses.GetTypeName(),"innate_neg"));
							j_InnateNegativeResponses.Save(writer);
						}
						else
						{
							writer.write(":");
							writer.newLine();
						}
						currentSaveState=SaveState.SAVING_LEARNED_NEG_RESPONSES;
						break;
					case SAVING_LEARNED_NEG_RESPONSES:
						if (j_LearnedNegativeResponses!=null)
						{
							j_LearnedNegativeResponses.SetName(j_SerializerHelper.ConstructUniqueName(j_LearnedNegativeResponses.GetTypeName(),"learned_neg"));
							j_LearnedNegativeResponses.Save(writer);
						}
						else
						{
							writer.write(":");
							writer.newLine();
						}
						currentSaveState=SaveState.SAVING_GENOME;
						break;
					case SAVING_GENOME:
						if (j_GeneticInfo!=null)
						{
							j_GeneticInfo.SetName(j_SerializerHelper.ConstructUniqueName(j_GeneticInfo.GetTypeName(),"genome"));
							j_GeneticInfo.Save(writer);
						}
						else
						{
							writer.write(":");
							writer.newLine();
						}
						currentSaveState=SaveState.EXIT;
						break;
					
				}
				
			}
		}
		catch (Exception e)
		{
			// Chain the original exception as the cause instead of flattening
			// it to a stack-trace string (which lost the cause entirely).
			throw new RuntimeException("Failed to save ProbabilisticLearningAgent state", e);
		}
	}
	
	/**
	 * Consumes one line of serialized data.  The payload itself is not
	 * interpreted here; the call simply advances the load state machine one
	 * step so that placeholder (":") lines written by SaveData keep the
	 * component order aligned with AddData(Serializer, String).
	 */
	public void AddData(String dataLine)
	{
		if (j_CurrentState==LoadState.LOADING_INNATE_POS_RESPONSE)
			j_CurrentState=LoadState.LOADING_LEARNED_POS_RESPONSES;
		else if (j_CurrentState==LoadState.LOADING_LEARNED_POS_RESPONSES)
			j_CurrentState=LoadState.LOADING_INNATE_NEG_RESPONSES;
		else if (j_CurrentState==LoadState.LOADING_INNATE_NEG_RESPONSES)
			j_CurrentState=LoadState.LOADING_LEARNED_NEG_RESPONSES;
		else if (j_CurrentState==LoadState.LOADING_LEARNED_NEG_RESPONSES)
			j_CurrentState=LoadState.LOADING_GENOME;
		else if (j_CurrentState==LoadState.LOADING_GENOME)
			j_CurrentState=LoadState.EXIT;
	}
	
	
	/**
	 * Receives a deserialized child component and stores it according to the
	 * current load state.  Components arrive in the same order SaveData wrote
	 * them: innate positive, learned positive, innate negative, learned
	 * negative, then the genome (which triggers unpacking of derived fields).
	 */
	public void AddData(Serializer inner, String instanceId)
	{
		switch (j_CurrentState)
		{
			case LOADING_INNATE_POS_RESPONSE:
				j_InnatePositiveResponses=(BehaviorDistribution)inner;
				j_CurrentState=LoadState.LOADING_LEARNED_POS_RESPONSES;
				break;
			case LOADING_LEARNED_POS_RESPONSES:
				j_LearnedPositiveResponses=(BehaviorDistribution)inner;
				j_CurrentState=LoadState.LOADING_INNATE_NEG_RESPONSES;
				break;
			case LOADING_INNATE_NEG_RESPONSES:
				j_InnateNegativeResponses=(BehaviorDistribution)inner;
				j_CurrentState=LoadState.LOADING_LEARNED_NEG_RESPONSES;
				break;
			case LOADING_LEARNED_NEG_RESPONSES:
				j_LearnedNegativeResponses=(BehaviorDistribution)inner;
				j_CurrentState=LoadState.LOADING_GENOME;
				// BUGFIX: a missing break here fell through into LOADING_GENOME
				// and cast the BehaviorDistribution to ProbabilisticGenome,
				// throwing ClassCastException at load time.
				break;
			case LOADING_GENOME:
				j_GeneticInfo=(ProbabilisticGenome)inner;
				j_CurrentState=LoadState.EXIT;
				UnpackClassVariablesFromGenome();
				break;
			
		}
	}
	
	/**
	 * Rebuilds the transient fields derived from the genome's
	 * additional-parameter array after deserialization: the utility module,
	 * the initial previous action, and the utility module's parameters.
	 */
	private void UnpackClassVariablesFromGenome()
	{
		j_AdditionalGeneticInfo=j_GeneticInfo.GetAdditionalParameters();
		j_UtilityInterpreter = StaticTools.UtilityTypeNameToObject(j_AdditionalGeneticInfo[AddGeneticInfo.UTILITY_TYPE.ordinal()]);
		// Integer.valueOf avoids the deprecated Integer(int) constructor.
		j_PreviousActionChosen= Integer.valueOf(j_AdditionalGeneticInfo[AddGeneticInfo.INITIAL_ACTION.ordinal()]);
		// Everything past the fixed-size header belongs to the utility module.
		int[] utilityParameters = Arrays.copyOfRange(j_AdditionalGeneticInfo, j_TypeUtilityDataOffset, j_AdditionalGeneticInfo.length);
		
		j_UtilityInterpreter.UpdateData(utilityParameters);
	}
	
	/**
	 * Writes the constructor-supplied configuration into the genome's
	 * additional-parameter array: the fixed header slots (history lengths,
	 * queue cap, initial action) followed by the utility module's own genetic
	 * parameters.
	 *
	 * NOTE(review): the UTILITY_TYPE header slot is never assigned here and is
	 * left at 0 — confirm that 0 maps to the intended utility module in
	 * StaticTools.UtilityTypeNameToObject.
	 */
	private void PackClassVariablesIntoGenome(UtilityAndPerferenceModule interpreter, int actionHistoryLength, int stateHistoryLength, int actionQueueMaxLength, int initialAction)
	{
		int[] utilityparameters = interpreter.GetGeneticParameters();
		int utilityParameterLength=utilityparameters.length;
		j_AdditionalGeneticInfo = new int[j_TypeUtilityDataOffset+utilityParameterLength];
		
		j_AdditionalGeneticInfo[AddGeneticInfo.ACTION_HISTORY_LENGTH.ordinal()]=actionHistoryLength;
		j_AdditionalGeneticInfo[AddGeneticInfo.STATE_HISTORY_LENGTH.ordinal()]=stateHistoryLength;
		j_AdditionalGeneticInfo[AddGeneticInfo.ACTION_QUEUE_MAX_LENGTH.ordinal()]=actionQueueMaxLength;
		j_AdditionalGeneticInfo[AddGeneticInfo.INITIAL_ACTION.ordinal()]=initialAction;
		// Bulk-copy the utility parameters into the tail of the array.
		System.arraycopy(utilityparameters, 0, j_AdditionalGeneticInfo, j_TypeUtilityDataOffset, utilityParameterLength);
		j_GeneticInfo.SetAdditionalParameters(j_AdditionalGeneticInfo);
	}

	
	// end of serializer methods
}
