package marek.ai.rl;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Random;

public class ReinforcedLearning {
	public static final double LEARN_RATE = 0.5;
	public static class State {
		/** Number of discrete positions per axis of the grid world. */
		public final static int POSSIBLE_POSITIONS = 2;
		// Grid coordinates of this state. StateSpace only builds values in
		// [0, POSSIBLE_POSITIONS).
		// NOTE(review): instances are mutable but used as HashMap keys in
		// StateSpace -- safe only because they are never mutated after insertion.
		private int gY;
		private int gX;

		public void setgY(int gY) {
			this.gY = gY;
		}

		public int getgY() {
			return gY;
		}

		public void setgX(int gX) {
			this.gX = gX;
		}

		public int getgX() {
			return gX;
		}

		/** Two states are equal iff both coordinates match. */
		@Override
		public boolean equals(Object o) {
			if (!(o instanceof State)) {
				return false;
			}
			State s = (State) o;
			return (s.gX == this.gX && s.gY == this.gY);
		}

		/**
		 * Combines the coordinates arithmetically. The previous
		 * Integer.parseInt(gX + "" + gY) implementation threw
		 * NumberFormatException for negative gY and was needlessly slow.
		 */
		@Override
		public int hashCode() {
			return 31 * gX + gY;
		}

	}

	/** A directed transition between two adjacent grid states. */
	public static class Action {
		private State from;
		private State to;

		public Action(State from, State to) {
			this.setFrom(from);
			this.setTo(to);
		}

		public void setFrom(State from) {
			this.from = from;
		}

		public State getFrom() {
			return from;
		}

		public void setTo(State to) {
			this.to = to;
		}

		public State getTo() {
			return to;
		}

		/** Two actions are equal iff their endpoints are equal. */
		@Override
		public boolean equals(Object o) {
			if (!(o instanceof Action)) {
				return false;
			}
			Action a = (Action) o;
			return (a.from.equals(this.from) && a.to.equals(this.to));
		}

		/**
		 * Combines the endpoint hashes arithmetically. The previous
		 * string-concatenation + Integer.parseInt implementation could throw
		 * NumberFormatException for negative coordinates and was slow.
		 */
		@Override
		public int hashCode() {
			return 31 * from.hashCode() + to.hashCode();
		}
	}

	/** The full grid of states and the legal one-step moves out of each state. */
	public static class StateSpace {
		// Maps each state to its legal outgoing actions.
		private Map<State, List<Action>> stateToActions;
		private List<State> stateList;
		// Counts of states with exactly 2/3/4 legal moves (corners/edges/interior).
		private int nb2Actions = 0;
		private int nb3Actions = 0;
		private int nb4Actions = 0;
		// Single shared RNG. The previous code built a new Random seeded with
		// System.currentTimeMillis() on every selectRandomState() call, so calls
		// within the same millisecond always returned the same state.
		private final Random random = new Random();

		/**
		 * Builds every state of the POSSIBLE_POSITIONS x POSSIBLE_POSITIONS grid
		 * and, for each state, the actions that move one cell along an axis while
		 * staying inside the grid.
		 */
		public StateSpace() {
			stateToActions = new HashMap<State, List<Action>>();
			stateList = new ArrayList<State>();
			for (int y = 0; y < State.POSSIBLE_POSITIONS; y++) {
				for (int x = 0; x < State.POSSIBLE_POSITIONS; x++) {
					State s = new State();
					s.setgX(x);
					s.setgY(y);
					List<Action> possibleActionForS = new ArrayList<Action>();
					stateToActions.put(s, possibleActionForS);
					stateList.add(s);

					/*
					 * Possible actions for a state are: 1- x+1 2- x-1 3- y+1 4- y-1
					 * subject to staying within [0, POSSIBLE_POSITIONS).
					 */
					int nbActions = 0;
					// X-axis transitions (left/right neighbours).
					for (int offsetX = -1; offsetX <= 1; offsetX += 2) {
						if (x + offsetX > -1
								&& x + offsetX < State.POSSIBLE_POSITIONS) {
							State to = new State();
							to.setgX(x + offsetX);
							to.setgY(y);
							Action a = new Action(s, to);
							possibleActionForS.add(a);
							nbActions++;
						}
					}

					// Y-axis transitions (up/down neighbours).
					for (int offsetY = -1; offsetY <= 1; offsetY += 2) {
						if (y + offsetY > -1
								&& y + offsetY < State.POSSIBLE_POSITIONS) {
							State to = new State();
							to.setgX(x);
							to.setgY(y + offsetY);
							Action a = new Action(s, to);
							possibleActionForS.add(a);
							nbActions++;
						}
					}
					switch (nbActions) {
					case 2:
						nb2Actions++;
						break;
					case 3:
						nb3Actions++;
						break;
					case 4:
						nb4Actions++;
						break;
					}
				}
			}
		}

		/** Returns the legal actions from s, or null if s is not a grid state. */
		public List<Action> getPossibleActions(State s) {
			return stateToActions.get(s);
		}

		/**
		 * Number of deterministic policies: the product over all states of the
		 * number of actions available in each state.
		 * NOTE(review): int arithmetic overflows for larger grids -- fine for the
		 * current 2x2 world; consider long/BigInteger if POSSIBLE_POSITIONS grows.
		 */
		public int getNbPolicies() {
			int total = (int) Math.pow(2, nb2Actions);
			if (nb3Actions > 0)
				total *= (int) Math.pow(3, nb3Actions);
			if (nb4Actions > 0)
				total *= (int) Math.pow(4, nb4Actions);
			return total;
		}

		public int getNbStates() {
			return stateList.size();
		}

		/** Returns a uniformly random state of the grid. */
		public State selectRandomState() {
			return stateList.get(random.nextInt(getNbStates()));
		}
	}
	
	public static class Policy {
		// Maps each state to the single action this policy chooses in it.
		private Map<State, Action> stateToAction;
		// Value function estimating discounted return under this policy.
		private V v;

		// NOTE(review): unfinished stub -- no constructor, accessors, or logic yet,
		// so the fields above are never assigned. TODO: implement policy
		// representation/evaluation.

	}

	/**
	 * Discounted reward (value) function: V(s) accumulates rewards discounted by
	 * LEARN_RATE raised to the iteration count.
	 */
	public static class V {
		// Accumulated value per state; states never seen are treated as 0.
		Map<State, Integer> stateValue = new HashMap<State, Integer>();

		/**
		 * Returns the current value of s, initialising unseen states to 0
		 * (the 0 entry is inserted into the map, as before).
		 */
		public int value(State s) {
			Integer i = stateValue.get(s);
			if (i == null) {
				i = Integer.valueOf(0); // valueOf over deprecated new Integer(0)
				stateValue.put(s, i);
			}
			return i.intValue();
		}

		/**
		 * Adds the discounted reward LEARN_RATE^iterationCount * reward to V(s).
		 * Fix: the previous version computed the update into a local variable and
		 * never stored it back, so the value function never changed.
		 * NOTE(review): values are kept as ints, so the sum truncates toward zero
		 * exactly as the original compound assignment did -- confirm an int-valued
		 * V is intended.
		 */
		public void compute(State s, int reward, int iterationCount) {
			int updated = (int) (value(s) + Math.pow(LEARN_RATE, iterationCount) * reward);
			stateValue.put(s, updated);
		}
	}

	/**
	 * Returns the state reached by executing {@code action} from
	 * {@code currentState}. An Action encodes its own destination, so the
	 * transition is simply the action's target state. (Previously an
	 * unimplemented stub that always returned null.)
	 *
	 * @param currentState the state the agent is in (unused: the action's
	 *            endpoints fully determine the transition)
	 * @param action the action to execute; may be null
	 * @return the action's destination state, or null if action is null
	 */
	public State transitState(State currentState, Action action) {
		return action == null ? null : action.getTo();
	}

	/**
	 * Immediate reward for executing an action from a state. Only defined for
	 * the 2x2 grid; every other configuration yields 0.
	 *
	 * @param currentState the state the action was executed from
	 * @param executedAction the action taken (its destination is inspected)
	 * @return +1 for the A-to-B move, -1 for the B-to-A move, 0 otherwise
	 */
	public int reward(State currentState, Action executedAction) {
		if (State.POSSIBLE_POSITIONS != 2) {
			return 0;
		}
		/*
		 *    0   1   X axis
		 *  +-------+
		 * 0| - | - |
		 *  |---+---|
		 * 1| B | A |
		 *  +---+---+
		 * Y axis
		 */
		State dest = executedAction.getTo();
		boolean stayedOnBottomRow = currentState.getgY() == 1 && dest.getgY() == 1;
		if (stayedOnBottomRow) {
			// Reward moving the arm from A (1,1) to B (0,1)...
			if (currentState.getgX() == 1 && dest.getgX() == 0) {
				return 1;
			}
			// ...and penalise moving it back from B to A.
			if (currentState.getgX() == 0 && dest.getgX() == 1) {
				return -1;
			}
		}
		return 0;
	}

	/** Entry point: builds the state space and prints its size and policy count. */
	public static void main(String[] args) {
		StateSpace space = new StateSpace();
		int states = space.getNbStates();
		int policies = space.getNbPolicies();
		System.out.println("Nb states: " + states + " - Nb policies: " + policies);
	}
}
