package agentFramework.strategy;

import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Hashtable;
import java.util.Random;

import agentFramework.action.Action;
import agentFramework.agent.Agent;
import agentFramework.core.env.Environment;
import agentFramework.utils.Debug;
import agentFramework.utils.Pair;

/**
 * A strategy that picks actions via per-neighbor Q-learning-style value tables.
 *
 * Each neighbor gets a {@link QLStruct}: a table mapping (ownAction, neighborAnnouncedAction)
 * pairs (keyed by the pair's string form) to a learned value. Decisions maximize the summed
 * table values over the neighbors' announced decisions; after each round the tables are
 * blended toward the observed reward with learning rate {@code alpha}, with a penalty when
 * a neighbor's announced decision differed from its final action.
 */
public class QLStrategy extends Strategy
{
	private Hashtable<Integer, QLStruct> 	QLAgentValues;		// per-neighbor QL value tables, keyed by neighbor id
	private double 							defaultPenalty;		// penalty mixed into the reward when a neighbor "lied"
	private double							alpha;				// learning rate / blending factor in [0, 1]
	private final Random					random = new Random();	// reused RNG for tie-breaking (was re-created per tie)

	/**
	 * @param env            the environment this strategy queries for neighbors, actions and rewards
	 * @param owner          the agent this strategy decides for
	 * @param alpha          learning rate used in the QL update
	 * @param defaultPenalty penalty applied when a neighbor's announced action differs from its final one
	 */
	public QLStrategy(Environment env, Agent owner, double alpha, double defaultPenalty) 
	{
		super(env, owner);
		
		this.defaultPenalty = defaultPenalty;
		this.alpha = alpha;
		this.QLAgentValues = new Hashtable<Integer, QLStruct>();
	}
	
	
	/**
	 * Builds one QL value table per current neighbor, initialized to the default value
	 * for every ordered action pair.
	 */
	public void init()
	{
		for (Integer neighbor : environment.getNeighbors(agent))
		{
			QLAgentValues.put(neighbor, new QLStruct(neighbor, environment.getPossibleActionList()));
		}
	}


	/**
	 * Chooses the action whose summed QL value over all announced neighbor decisions is
	 * maximal. Exact ties are broken by a fair coin flip so equally-valued actions are
	 * not always resolved in list order.
	 *
	 * @return the selected action, or {@code null} if there are no possible actions
	 */
	// NOTE(review): reviseStrategy() carries @Override but this does not — presumably this
	// also overrides Strategy.makeDecision(); confirm against the Strategy base class.
	public Action makeDecision ()
	{
		Action bestAction = null;
		double bestGain = Double.NEGATIVE_INFINITY;	// proper -INF sentinel (was -1.0 * Double.MAX_VALUE)
		
		for (Action action : this.environment.getPossibleActionList())
		{
			double gain = 0;
			
			// sum this action's value against every neighbor's announced decision
			for (Pair<Integer, Action> decision : this.agentDecisions)
			{
				QLStruct ql = QLAgentValues.get(decision.first);
				Pair<Action, Action> p = new Pair<Action, Action>(action, decision.second);
				gain += ql.getQLValue(p.toString());
			}

			// strictly better always wins; an exact tie wins on a coin flip
			if (gain > bestGain || (gain == bestGain && random.nextInt(2) == 1))
			{
				bestGain = gain;
				bestAction = action;
			}
		}
		
		return bestAction;
	}

	/**
	 * Updates every neighbor's QL entry for the (currentAction, announcedDecision) pair:
	 * blends the stored value toward this round's reward with rate {@code alpha}, after
	 * discounting the reward when the neighbor's announced decision differs from its
	 * final action (i.e. the neighbor lied).
	 */
	@Override
	public void reviseStrategy() 
	{
		double 							reward = environment.computeReward(agent);
		Hashtable<Integer, Action> 		neighborFinalActions = environment.getNeighborsFinalActions(this.agent);
		
		this.agent.addEarnings(reward);
		
		for (Pair <Integer, Action> decision : this.agentDecisions)
		{
			double 						r = reward;
			Pair<Action, Action> 		p = new Pair<Action, Action> (agent.getCurrentAction(), 
																	  decision.second);
			QLStruct					ql = QLAgentValues.get(decision.first);	// hoisted: was looked up three times
			double 						currValue = ql.getQLValue(p.toString());
			
			// penalize the reward if this neighbor's announced decision was not its final action
			if (!decision.second.equals(neighborFinalActions.get(decision.first)) )
			{
				r = (1 - alpha)*r - alpha*defaultPenalty;
			}
			
			// exponential blend toward the (possibly penalized) reward
			currValue = alpha*r + (1 - alpha)*currValue;
			
			// put() replaces any existing mapping; the previous explicit remove() was redundant
			ql.QLValues.put(p.toString(), currValue);
		}
	}

	
	/**
	 * Per-neighbor QL value table, keyed by the string form of an
	 * (ownAction, neighborAction) pair. Static: it never reads the enclosing
	 * QLStrategy instance, so it should not retain a hidden reference to it.
	 */
	static class QLStruct
	{
		public Integer										neighborId;
		public Hashtable<String, Double>				  	QLValues; 
		public final static double							DEFAULT_QL_VALUE = 0.0;
		
		/**
		 * @param neighborId      id of the neighbor this table belongs to
		 * @param possibleActions actions from which every ordered pair is pre-seeded
		 */
		public QLStruct(Integer neighborId, ArrayList<Action> possibleActions)
		{
			this.neighborId = neighborId;
			
			QLValues = new Hashtable<String, Double >();
			
			// seed one entry per ordered (own, neighbor) action pair
			for (Action a : possibleActions)
			{
				for (Action b : possibleActions)
				{
					Pair<Action, Action> p = new Pair<Action, Action>(a, b);
					QLValues.put(p.toString(), DEFAULT_QL_VALUE);
				}
			}
		}
		
		
		/**
		 * @return the stored value for the pair key, or {@code null} if the key
		 *         was never seeded (callers rely on init() covering all pairs)
		 */
		public Double getQLValue(String key)
		{
			return QLValues.get(key);
		}
	}
}
