import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Map.Entry;
import java.util.TreeMap;

public class Node {

	/** Size of the action set: ACQUIRE, DROP, NOTHING. */
	public static final int NUMBER_OF_POSSIBLE_ACTIONS = 3;
	/** Gamma in the Q-learning update: weight given to estimated future value. */
	public static final double DISCOUNT_FACTOR = 0.8;
	/** Decay horizon: learningRate and epsilonDecreasing each lose 1/EST_NUM_1 of their current value per iteration. */
	public static final double EST_NUM_1 = 100000.0;
	// Probability of exploring (random action) instead of exploiting; decays each iteration.
	public double epsilonDecreasing;
	// Alpha in the Q-learning update; decays each iteration.
	public double learningRate;

	public Random randomNumberGenerator;

	// Neighbouring nodes — populated externally; not read inside this class.
	public List<Node> nodesWithinRange;

	// The fixed action set the agent chooses from.
	List<Action> possibleActions;

	// NOTE(review): read in calculateReward but never written in this class;
	// presumably maintained by the environment — confirm against callers.
	public int currentInterferences;

	public String name;

	public Environment environment;

	public State currentState;

	public Action currentAction;

	// Reward of the most recent action; set by calculateReward.
	public Double currentReward;

	public List<Channel> acquiredChannels;

	/** Learned action-value table: estimated value of taking an action in a state. */
	public HashMap<StateAction, Double> Q;

	/**
	 * Creates a node with an empty Q-table, no acquired channels, and the
	 * default exploration (0.2) and learning (0.8) rates.
	 *
	 * @param nodeName human-readable identifier used in printouts
	 */
	public Node(String nodeName) {
		name = nodeName;
		// Initialise the fixed set of possible actions
		possibleActions = new ArrayList<Action>();
		possibleActions.add(Action.ACQUIRE);
		possibleActions.add(Action.DROP);
		possibleActions.add(Action.NOTHING);
		// Initialise list of acquired channels
		acquiredChannels = new ArrayList<Channel>();
		// Initialise state of the node
		currentState = new State();
		randomNumberGenerator = new Random();
		// Initialise Q and learning parameters
		nodesWithinRange = new ArrayList<Node>();
		Q = new HashMap<StateAction, Double>();
		epsilonDecreasing = 0.2;
		learningRate = 0.8;
	}

	/**
	 * Carries out one epsilon-greedy Q-learning step: choose an action
	 * (explore with probability epsilonDecreasing, otherwise exploit the
	 * Q-table), compute its reward, execute it, update Q, then decay the
	 * learning and exploration rates.
	 */
	public void iterate() {
		currentState.resetInterferences();
		// Reset the environment's per-iteration interference bookkeeping
		environment.interferingChannels = new ArrayList<Channel>();

		double randomDouble = randomNumberGenerator.nextDouble();
		Action actionToConduct;
		// Exploit only once the Q-table has at least one entry to exploit.
		if (randomDouble > epsilonDecreasing && Q.size() > 0) {
			actionToConduct = exploit();
		} else {
			actionToConduct = explore();
		}
		currentAction = actionToConduct;
		// Snapshot the pre-action state: the rewarded Q entry is keyed on
		// the state the action was chosen in, not on the successor state.
		State stateToReward = new State(currentState);
		calculateReward(stateToReward, actionToConduct);
		currentState = conductAction(currentState, actionToConduct);

		updateQ(currentReward, new StateAction(stateToReward, actionToConduct));

		// Geometric decay so step size and exploration shrink over time.
		learningRate = learningRate - (learningRate / EST_NUM_1);
		epsilonDecreasing = epsilonDecreasing - (epsilonDecreasing / EST_NUM_1);
	}

	/**
	 * Attempts to acquire a uniformly random channel from the environment.
	 * On success the channel is marked busy, recorded in
	 * {@link #acquiredChannels} and the transmitted-channel count grows; on
	 * collision (channel already busy) the interference counters grow instead.
	 *
	 * @param aState the state to mutate on a successful acquisition
	 * @return the same state instance, possibly updated
	 */
	public State acquireChannel(State aState) {
		int randomChannelNumber = randomNumberGenerator.nextInt(environment.channels.size());
		Channel randomChannel = environment.channels.get(randomChannelNumber);
		if (!randomChannel.isBusy) {
			randomChannel.isBusy = true;
			randomChannel.occupiedNode = this;
			acquiredChannels.add(randomChannel);
			aState.increaseTransmittedChannels();
			environment.increaseChannelUsage();
		} else {
			// Collision: another node holds this channel.
			currentState.increaseInterferences();
			environment.numberOfInterferences += 1;
		}
		return aState;
	}

	/**
	 * Releases a uniformly random channel from {@link #acquiredChannels}.
	 * Callers must ensure the list is non-empty (see
	 * {@link #conductAction}); otherwise {@code nextInt(0)} throws.
	 *
	 * @param aState the state whose transmitted-channel count is decreased
	 * @return the same state instance, updated
	 */
	public State dropChannel(State aState) {
		int randomChannelNumber = randomNumberGenerator.nextInt(acquiredChannels.size());
		Channel randomChannel = acquiredChannels.get(randomChannelNumber);
		randomChannel.isBusy = false;
		randomChannel.occupiedNode = null;
		acquiredChannels.remove(randomChannel);
		aState.decreaseTransmittedChannels();
		environment.decreaseChannelUsage();
		return aState;
	}

	/**
	 * No-op action: returns the state unchanged.
	 */
	public State doNothing(State aState) {
		return aState;
	}

	/**
	 * Computes the reward for taking {@code anAction} in {@code aState} and
	 * stores it in {@link #currentReward}. The reward favours spare
	 * capacity and transmissions while penalising interference.
	 *
	 * @param aState   the (pre-action) state being rewarded
	 * @param anAction the action being rewarded
	 * @return the computed reward (also kept in {@link #currentReward})
	 */
	public double calculateReward(State aState, Action anAction) {
		int potentialTransmissionIncrease = 0;
		int potentialInterferenceDecrease = 0;
		if (anAction == Action.ACQUIRE) {
			potentialTransmissionIncrease = 1;
		} else if (anAction == Action.DROP) {
			potentialTransmissionIncrease = -1;
		}
		if (currentInterferences > 0) {
			if (anAction == Action.DROP) {
				// Dropping relieves interference pressure.
				potentialInterferenceDecrease = 1;
			} else if (anAction == Action.NOTHING) {
				potentialInterferenceDecrease = -1;
			} else if (anAction == Action.ACQUIRE) {
				// BUG FIX: this branch originally re-tested Action.DROP
				// (already handled above) and was unreachable dead code.
				// Acquiring while interfered is penalised hardest.
				potentialInterferenceDecrease = -2;
			}
		}
		int remainingCapacity = Environment.MAXIMUM_ALLOWED_CONNECTIONS - currentState.numberOfTransmittedChannels;
		int transmittedChannels = aState.numberOfTransmittedChannels;
		currentReward = remainingCapacity * 2.0 + potentialInterferenceDecrease  * 2.0 + potentialTransmissionIncrease * 2.0 + transmittedChannels - (transmittedChannels * currentInterferences);
		return currentReward;
	}

	/**
	 * Executes the given action on the given state, guarding against
	 * impossible transitions: ACQUIRE only below the connection cap, DROP
	 * only when at least one channel is held; anything else is a no-op.
	 *
	 * @return the (possibly updated) state
	 */
	public State conductAction(State stateToChange, Action actionToConduct) {
		if (actionToConduct == Action.ACQUIRE && stateToChange.numberOfTransmittedChannels < Environment.MAXIMUM_ALLOWED_CONNECTIONS) {
			stateToChange = acquireChannel(stateToChange);
		} else if (actionToConduct == Action.DROP && stateToChange.numberOfTransmittedChannels > 0) {
			stateToChange = dropChannel(stateToChange);
		} else {
			stateToChange = doNothing(stateToChange);
		}
		return stateToChange;
	}

	/**
	 * Returns the action with the highest Q-value for {@code aState},
	 * restricted to actions that are feasible there (no ACQUIRE at the
	 * cap, no DROP at zero). Falls back to NOTHING when no feasible
	 * action has a Q entry yet.
	 */
	public Action getBestAction(State aState) {
		Double maxValue = - Double.MAX_VALUE;
		Action bestAction = null;
		List<Action> possibleActionsForCurrentState = new ArrayList<Action>(possibleActions);
		if (aState.numberOfTransmittedChannels == Environment.MAXIMUM_ALLOWED_CONNECTIONS) {
			possibleActionsForCurrentState.remove(Action.ACQUIRE);
		} else if (aState.numberOfTransmittedChannels == 0) {
			possibleActionsForCurrentState.remove(Action.DROP);
		}
		for (Action possibleAction : possibleActionsForCurrentState) {
			StateAction singleStateAction = new StateAction(aState, possibleAction);
			Double possibleReward = Q.get(singleStateAction);
			if (possibleReward != null && possibleReward > maxValue) {
				maxValue = possibleReward;
				bestAction = possibleAction;
			}
		}
		if (bestAction == null) {
			bestAction = Action.NOTHING;
		}
		return bestAction;
	}

	/**
	 * Greedy policy: the best known action for the current state.
	 */
	public Action exploit() {
		return getBestAction(currentState);
	}

	/**
	 * Random policy: a uniformly drawn action, degraded to NOTHING when
	 * the drawn action is not feasible (no spare capacity to acquire, or
	 * no channel held to drop).
	 */
	public Action explore() {
		// nextInt(3) already yields 0..2, so no modulo is needed.
		int randomInt = randomNumberGenerator.nextInt(NUMBER_OF_POSSIBLE_ACTIONS);
		if (randomInt == 0 && acquiredChannels.size() < Environment.MAXIMUM_ALLOWED_CONNECTIONS) {
			return Action.ACQUIRE;
		} else if (randomInt == 1 && acquiredChannels.size() > 0) {
			return Action.DROP;
		} else {
			return Action.NOTHING;
		}
	}

	/**
	 * One-step Q-learning update:
	 * Q(s,a) += alpha * (r + gamma * max_a' Q(s',a') - Q(s,a)),
	 * where s' is the node's current (post-action) state. Missing table
	 * entries are treated as 0.
	 *
	 * @param reward              the reward r observed for the pair
	 * @param stateActionToReward the (s, a) pair being updated
	 */
	public void updateQ(Double reward, StateAction stateActionToReward) {
		// Best estimated follow-up action from the successor state.
		Action futureBestAction = getBestAction(currentState);
		StateAction futureStateAction = new StateAction(currentState, futureBestAction);

		Double oldValue = Q.containsKey(stateActionToReward) ? Q.get(stateActionToReward) : 0.0;
		Double maxQ = Q.containsKey(futureStateAction) ? Q.get(futureStateAction) : 0.0;

		Double newValue = oldValue + learningRate * (reward  + DISCOUNT_FACTOR * maxQ - oldValue);
		Q.put(stateActionToReward, newValue);
	}

	/**
	 * Prints every Q-table entry to stdout, ordered by
	 * {@link QValuesComparator}.
	 */
	public void printQ() {
		TreeMap<StateAction, Double> orderedQ = new TreeMap<StateAction, Double>(new QValuesComparator());
		orderedQ.putAll(Q);
		for (Entry<StateAction, Double> pairs : orderedQ.entrySet()) {
			System.out.println("[State Action Pair: " + pairs.getKey().toString() + ", Q: " + pairs.getValue() + "]");
		}
	}

	@Override
	public String toString() {
		return name + "'s current state: " + currentState;
	}

}
