import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Random;

import org.rlcommunity.rlglue.codec.AgentInterface;
import org.rlcommunity.rlglue.codec.taskspec.TaskSpec;
import org.rlcommunity.rlglue.codec.taskspec.ranges.DoubleRange;
import org.rlcommunity.rlglue.codec.taskspec.ranges.IntRange;
import org.rlcommunity.rlglue.codec.types.Action;
import org.rlcommunity.rlglue.codec.types.Observation;
import org.rlcommunity.rlglue.codec.util.AgentLoader;

/**
 * A Wumpus World agent built on the RL-Glue codec. In each episode the agent
 * first runs in "eagle" mode, searching the 12x12 grid for the gold with one
 * of five search strategies, and then switches to "agent" mode to replay the
 * planned action sequence: grab the gold and climb back out at cell (0,0).
 * 
 * @author Brian Tanner
 */
public class SkeletonAgent implements AgentInterface {

	Random randGenerator = new Random();
	Action lastAction; // most recent action (cleared in agent_cleanup)
	Observation lastObservation; // most recent observation (cleared in agent_cleanup)

	final int EAGLE = 1; // Mode: searching / planning ("eagle" view)
	final int AGENT = 2; // Mode: executing the planned path
	static int agenda; // Current mode: EAGLE or AGENT

	// The available search strategies; one is used per episode, in order.
	final String[] strategies = { "DFS", "BFS", "IDS", "UCS", "ASS" };

	static int strategyIndex = -1; // Index into strategies; advanced each episode in agent_start

	// Frontier queue holding the nodes still to be explored.
	LinkedList<Node> q = new LinkedList<Node>();

	// Nodes whose (gold, pit, wumpus) state is still to be filled in from
	// the next observation.
	static HashSet<Node> partialStateNodes = new HashSet<Node>();

	static char[] pathToGoal; // Sequence of actions constructed by the eagle

	static int pathToGoalIndex; // An index into the pathToGoal array

	// Current depth limit used by Iterative Deepening Search.
	static int depthCounter;

	static HashSet<Node> expandedNodes; // All nodes expanded so far this episode

	int expNodes = 0; // Running count of expanded nodes, for reporting
	// Coordinates of the current goal cell: the gold's cell while searching,
	// then (0,0) once the gold is grabbed. Used by the A* heuristic.
	int goalX;
	int goalY;
	Node goalNode; // The node at which the gold was grabbed (search restart point)
	// True once the agent has grabbed the gold and is on its way back to
	// cell (0,0).
	boolean returnAgent = false;
	boolean hasShot = false; // True once the agent's single arrow has been used

	// Episode start timestamp (millis); agent_end reports the elapsed time.
	long episodeStartTime;

	/**
	 * Decides whether the given node is a goal node. While returning (after
	 * grabbing the gold) the goal is the climb action 'c' at cell (0,0);
	 * while still searching, the goal is the grab action 'g' on a cell whose
	 * isGold bit is 1.
	 * 
	 * @param node the node to test
	 * @return true if the node satisfies the current goal condition
	 */
	public boolean isGoal(Node node) {
		State state = node.getState();
		if (returnAgent) {
			// Way back: must be standing on (0,0) performing the climb.
			return state.getX() == 0 && state.getY() == 0
					&& node.getAction() == 'c';
		}
		// Way in: must be grabbing on the cell that holds the gold.
		return state.isGold() == 1 && node.getAction() == 'g';
	}

	/**
	 * Takes a node (supposedly the goal node) and returns the series of
	 * action characters the agent must perform to reach it, obtained by
	 * backtracking through the node's parents. Example result: "lfrffg".
	 * If a cell on the path contains the wumpus and the arrow has not been
	 * used yet, a shoot action 's' is inserted before that cell's action.
	 * 
	 * @param node the goal node to backtrack from
	 * @return the action sequence from the start node to {@code node}
	 */
	public String CreatePathToGoal(Node node) {
		// Collect ancestors front-first so the list runs start -> goal.
		LinkedList<Node> path = new LinkedList<Node>();
		Node parent = node.getParent();
		while (parent != null) {
			path.addFirst(parent);
			parent = parent.getParent();
		}
		path.add(node);

		// Build the action string; StringBuilder avoids the O(n^2) cost of
		// repeated String concatenation in a loop.
		StringBuilder pathActions = new StringBuilder();
		for (int i = 0; i < path.size(); i++) {
			Node currNode = path.get(i);
			// If the wumpus sits on the path and the arrow is unused, shoot
			// before stepping onto that cell.
			if (currNode.getState().isWumpus() == 1 && !hasShot) {
				pathActions.append('s');
				hasShot = true;
			}
			pathActions.append(currNode.getAction());
		}

		System.out.println("One way path " + pathActions);

		return pathActions.toString();
	}

	/**
	 * Builds the full action sequence: the previously computed path to the
	 * gold ({@code pathToGoal}) followed by the return path to cell (0,0)
	 * obtained by backtracking from the given node.
	 * 
	 * @param node the goal node of the return search (climb at (0,0))
	 * @return the complete action sequence to grab the gold and climb out
	 */
	public char[] addReturnJourney(Node node) {
		// Backtrack only until the grab-gold node so the path to the gold is
		// not duplicated (it is already stored in pathToGoal).
		LinkedList<Node> path = new LinkedList<Node>();
		Node parent = node.getParent();
		while (parent != null && parent.getAction() != 'g') {
			path.addFirst(parent);
			parent = parent.getParent();
		}
		path.add(node);

		char[] pathActions = new char[path.size()];
		for (int i = 0; i < pathActions.length; i++) {
			pathActions[i] = path.get(i).getAction();
		}

		// Concatenate: actions to the gold, then actions back to (0,0).
		// System.arraycopy replaces the original hand-written copy loops.
		char[] wholePath = new char[pathToGoal.length + pathActions.length];
		System.arraycopy(pathToGoal, 0, wholePath, 0, pathToGoal.length);
		System.arraycopy(pathActions, 0, wholePath, pathToGoal.length,
				pathActions.length);

		System.out.println("Full path to grab the gold and climb back safely:"
				+ new String(wholePath));

		return wholePath;

	}

	/**
	 * Enqueues nodes for Depth First Search: the whole batch is pushed onto
	 * the front of the queue while preserving its iteration order, so a
	 * batch A,B,C ends up at the head of the queue as A,B,C.
	 * 
	 * @param nodes the successor nodes to push onto the frontier
	 */
	public void enqueue_DFS(HashSet<Node> nodes) {
		// Drain a copy from its tail and prepend each element: the last
		// node is pushed first, so the original order is kept at the head.
		LinkedList<Node> pending = new LinkedList<Node>(nodes);
		while (!pending.isEmpty()) {
			q.addFirst(pending.removeLast());
		}
	}

	/**
	 * Enqueues nodes for Iterative Deepening Search: the batch is enqueued
	 * DFS-style only if its depth is within the current depth limit
	 * ({@code depthCounter}, which grows from 0 up to 12*12*BF).
	 * 
	 * @param nodes the successor nodes to push onto the frontier
	 */
	public void enqueue_IDS(HashSet<Node> nodes) {
		// Bug fix: an empty batch used to throw ArrayIndexOutOfBoundsException
		// when reading the first element's depth. Empty batches are possible
		// when every successor was already expanded.
		if (nodes.isEmpty()) {
			return;
		}
		// All nodes in a batch are children of the same parent, so any
		// element's depth is representative of the whole batch.
		int depth = nodes.iterator().next().getDepth();
		if (depth <= depthCounter) {
			enqueue_DFS(nodes);
		}
	}

	/**
	 * Enqueues nodes for Breadth First Search: successors always join the
	 * tail of the queue (FIFO discipline).
	 * 
	 * @param nodes the successor nodes to append to the frontier
	 */
	public void enqueue_BFS(HashSet<Node> nodes) {
		for (Node node : nodes) {
			q.add(node);
		}
	}

	/**
	 * Enqueues nodes for Uniform Cost Search: each node is inserted so the
	 * queue stays sorted by non-decreasing path cost.
	 * 
	 * @param nodes the successor nodes to insert into the frontier
	 */
	public void enqueue_UCS(HashSet<Node> nodes) {
		for (Node candidate : nodes) {
			boolean inserted = false;
			// Scan from the tail and insert just after the first element
			// whose path cost does not exceed the candidate's.
			for (int j = q.size() - 1; j >= 0; j--) {
				Node queued = q.get(j);
				if (candidate.getPathCost() >= queued.getPathCost()) {
					q.add(j + 1, candidate);
					inserted = true;
					break;
				}
			}
			// Bug fix: a candidate cheaper than every queued element (and
			// any candidate arriving at an empty queue) used to be silently
			// dropped; it belongs at the front of the queue.
			if (!inserted) {
				q.addFirst(candidate);
			}
		}
	}

	/**
	 * Enqueues nodes for the A* algorithm: the batch is appended and the
	 * whole queue is re-sorted by f(n) = heuristic + path cost.
	 * 
	 * @param nodes the successor nodes to add to the frontier
	 */
	public void enqueue_ASS(HashSet<Node> nodes) {
		q.addAll(nodes);
		// Collections.sort is a stable merge sort, matching the stability of
		// the original hand-written bubble sort at O(n log n) instead of
		// O(n^2) per call.
		Collections.sort(q, new Comparator<Node>() {
			public int compare(Node a, Node b) {
				int fa = a.getHeuristic() + a.getPathCost();
				int fb = b.getHeuristic() + b.getPathCost();
				// Explicit comparison avoids subtraction-overflow pitfalls.
				return fa < fb ? -1 : (fa == fb ? 0 : 1);
			}
		});
	}

	/**
	 * Completes the given nodes' state from an observation: each node in
	 * iteration order consumes one (gold, pit, wumpus) triple from the
	 * observation's intArray. For A* (strategyIndex 4) the heuristic of
	 * each updated node is also computed.
	 * 
	 * @param nodes the partially known nodes to fill in (updated in place)
	 * @param obs the observation carrying one int triple per node
	 * @return the set of updated nodes, or null if {@code nodes} is null or empty
	 */
	public HashSet<Node> updateWorkingNodeSet(HashSet<Node> nodes,
			Observation obs) {
		int[] intArray = obs.intArray;
		if (nodes == null || nodes.isEmpty()) {
			return null;
		}
		HashSet<Node> updatedNodes = new HashSet<Node>();
		int offset = 0;
		for (Node node : nodes) {
			if (offset >= intArray.length) {
				break;
			}
			node.getState().setGold(intArray[offset]);
			node.getState().setPit(intArray[offset + 1]);
			node.getState().setWumpus(intArray[offset + 2]);
			// Only A* uses heuristics; compute it once the state is known.
			if (strategyIndex == 4) {
				node.setHeuristic(calculateNodeHueristic(node));
			}
			updatedNodes.add(node);
			offset += 3;
		}
		return updatedNodes;
	}

	/**
	 * Computes the A* heuristic for a node: the Manhattan distance between
	 * the node's position and the current goal cell (goalX, goalY). A cell
	 * containing a pit gets a penalty of 100 so it sorts to the back of the
	 * frontier and is effectively never chosen.
	 * 
	 * @param n the node to evaluate
	 * @return Manhattan distance to the goal, plus 100 if the cell has a pit
	 */
	public int calculateNodeHueristic(Node n) {
		State state = n.getState();
		int pitPenalty = (state.isPit() == 1) ? 100 : 0;
		return Math.abs(state.getX() - goalX) + Math.abs(state.getY() - goalY)
				+ pitPenalty;
	}

	/**
	 * Scans the whole-grid observation (one (gold, pit, wumpus) triple per
	 * cell) for the first cell whose gold bit is 1 and stores its grid
	 * coordinates in goalX / goalY.
	 * NOTE(review): if no gold bit is set, the computed goal lands outside
	 * the 12x12 grid — presumably the environment always reports the gold;
	 * verify against the environment.
	 * 
	 * @param intArray the observation values for every cell of the grid
	 */
	public void findGoal(int[] intArray) {
		int i = 0;
		while (i < intArray.length && intArray[i] != 1) {
			i += 3;
		}
		// Convert the flat triple offset into a cell index, then into
		// (x, y) on the 12-wide grid.
		int cell = i / 3;
		goalX = cell / 12;
		goalY = cell % 12;

		System.out.println(goalX + " , " + goalY);
	}

	/**
	 * Returns true if the given node occurs in the set of already expanded
	 * nodes. Equality is decided by Node.isEqual, i.e. same state (position
	 * and orientation) — which is why HashSet.contains cannot be used here.
	 * 
	 * @param node the node to look for
	 * @return true if an equal node was already expanded
	 */
	public boolean isRepeated(Node node) {
		for (Node expanded : expandedNodes) {
			if (node.isEqual(expanded)) {
				return true;
			}
		}
		return false;
	}

	/**
	 * Creates the root node of the search — cell (0,0), orientation 0, no
	 * parent, placeholder action '.' — and returns it wrapped in a set.
	 * 
	 * @return a set containing only the root node
	 */
	public HashSet<Node> constructNewStateNodes() {
		HashSet<Node> rootSet = new HashSet<Node>();
		rootSet.add(new Node(new State(0, 0, 0), null, '.', 0));
		return rootSet;
	}

	/**
	 * Returns the set of all possible successors of a node. A node whose
	 * cell contains a pit has no successors (the agent cannot continue from
	 * a pit), so null is returned in that case.
	 * 
	 * @param node the node to expand
	 * @return the successor set, or null when the node's cell holds a pit
	 */
	public HashSet<Node> getSuccessorStates(Node node) {
		State state = node.getState();
		HashSet<Node> succ = new HashSet<Node>();
		if (node.getState().isPit() == 1)
			return null;
		// Grab is only legal on the cell that still holds the gold. There is
		// no need to check whether the gold was already grabbed: once taken,
		// the cell's isGold flag no longer reports 1.
		// (Fix: short-circuit && instead of the non-idiomatic bitwise &.)
		if (node.getState().isGold() == 1 && !returnAgent) {
			succ.add(new Node(state, node, 'g', node.getPathCost() + 1));
		}
		// Climb is only legal at the start cell (0,0) and only once the
		// agent is on its way back out.
		if (node.getState().getX() == 0 && node.getState().getY() == 0
				&& returnAgent) {
			succ.add(new Node(state, node, 'c', node.getPathCost() + 1));
		}

		// Movement actions (turn left/right, move forward) always apply.
		succ.addAll(getAllPossibleNodes(node));
		return succ;
	}

	/**
	 * Returns the set of movement successors of a node: turn right, move
	 * forward, and turn left. Successors whose state was already expanded
	 * are filtered out.
	 * 
	 * @param node the node to expand
	 * @return the set of non-repeated movement successors
	 */
	public HashSet<Node> getAllPossibleNodes(Node node) {
		HashSet<Node> succ = new HashSet<Node>();
		State state = node.getState();
		int x = state.getX();
		int y = state.getY();
		int orientation = state.getOrientation();

		// Turn right: position unchanged, orientation rotated clockwise.
		addIfNotRepeated(succ, new Node(new State(x, y, changeOrientation(
				orientation, 'r')), node, 'r'));

		// Move forward: orientation 0=north(+y), 1=east(+x), 2=south(-y),
		// 3=west(-x), clamped to the 12x12 grid (indices 0..11).
		int newX = x;
		int newY = y;
		if (x < 11 && orientation == 1)
			newX++;
		if (x > 0 && orientation == 3)
			newX--;
		if (y < 11 && orientation == 0)
			newY++;
		if (y > 0 && orientation == 2)
			newY--;
		addIfNotRepeated(succ, new Node(new State(newX, newY, orientation),
				node, 'f'));

		// Turn left: position unchanged, orientation rotated counter-clockwise.
		addIfNotRepeated(succ, new Node(new State(x, y, changeOrientation(
				orientation, 'l')), node, 'l'));
		return succ;
	}

	/**
	 * Sets the candidate's path cost and adds it to the successor set unless
	 * an equal node was already expanded. Factors out the sequence that was
	 * repeated three times in the original implementation.
	 * 
	 * @param succ the successor set being built
	 * @param candidate the freshly created successor node
	 */
	private void addIfNotRepeated(HashSet<Node> succ, Node candidate) {
		candidate.setPathCost();
		if (!isRepeated(candidate)) {
			succ.add(candidate);
		}
	}

	/**
	 * Dispatches the batch of successor nodes to the enqueue method matching
	 * the active strategy ("DFS", "BFS", "IDS", "UCS", "ASS"). A null batch
	 * is ignored.
	 * 
	 * @param nodes the successor nodes to enqueue, may be null
	 */
	public void addNodes(HashSet<Node> nodes) {
		if (nodes == null) {
			return;
		}
		switch (strategyIndex) {
		case 0:
			enqueue_DFS(nodes);
			break;
		case 1:
			enqueue_BFS(nodes);
			break;
		case 2:
			enqueue_IDS(nodes);
			break;
		case 3:
			enqueue_UCS(nodes);
			break;
		default:
			// Index 4 (and anything unexpected) falls through to A*.
			enqueue_ASS(nodes);
			break;
		}
	}

	/**
	 * Rotates an orientation by one step. Orientations are encoded as
	 * 0 = north, 1 = east, 2 = south, 3 = west; 'r' rotates clockwise and
	 * any other action character rotates counter-clockwise.
	 * 
	 * @param oldOrientation the current orientation (0..3)
	 * @param action 'r' for right, anything else treated as left
	 * @return the new orientation
	 */
	public int changeOrientation(int oldOrientation, char action) {
		// A counter-clockwise step of -1 is expressed as +3 so the modulo
		// result stays non-negative.
		int step = (action == 'r') ? 1 : 3;
		return (oldOrientation + step) % 4;
	}

	/**
	 * Builds a query action for a set of nodes: charArray is {'q'} and
	 * intArray holds the node count followed by each node's (x, y) pair.
	 * A null set produces the no-op action {'.'} instead.
	 * 
	 * @param nodes the cells whose contents should be observed, may be null
	 * @return the query (or no-op) Action to send to the environment
	 */
	public Action getCellsNeededForDiscovery(HashSet<Node> nodes) {
		Action action = new Action();
		if (nodes == null) {
			action.charArray = new char[] { '.' };
			return action;
		}

		int[] cells = new int[nodes.size() * 2 + 1];
		cells[0] = nodes.size();
		int c = 1;
		for (Node node : nodes) {
			cells[c++] = node.getState().getX();
			cells[c++] = node.getState().getY();
		}

		action.charArray = new char[] { 'q' };
		action.intArray = cells;
		return action;
	}

	/**
	 * Parses the RL-Glue task specification and logs the observation,
	 * action and reward ranges it declares.
	 * 
	 * @param taskSpecification the task-spec string supplied by RL-Glue
	 */
	public void agent_init(String taskSpecification) {
		TaskSpec spec = new TaskSpec(taskSpecification);
		System.out.println("Skeleton agent parsed the task spec.");
		System.out.println("Observation have " + spec.getNumDiscreteObsDims()
				+ " integer dimensions");
		System.out.println("Actions have " + spec.getNumDiscreteActionDims()
				+ " integer dimensions");

		IntRange obsRange = spec.getDiscreteObservationRange(0);
		System.out.println("Observation (state) range is: " + obsRange.getMin()
				+ " to " + obsRange.getMax());

		IntRange actRange = spec.getDiscreteActionRange(0);
		System.out.println("Action range is: " + actRange.getMin() + " to "
				+ actRange.getMax());

		DoubleRange rewardRange = spec.getRewardRange();
		System.out.println("Reward range is: " + rewardRange.getMin() + " to "
				+ rewardRange.getMax());
	}

	/**
	 * Resets the per-episode search state. Called at the start of every
	 * episode (the strategy index itself is advanced in agent_start, not
	 * here).
	 */
	public void reset() {
		q = new LinkedList<Node>();
		expandedNodes = new HashSet<Node>();
		partialStateNodes = new HashSet<Node>();
		depthCounter = 0;
		returnAgent = false;
		hasShot = false;
	}

	/**
	 * Starts an episode: records the start time, advances to the next search
	 * strategy (staying on A* once reached), resets per-episode state and
	 * issues the first query action. For A* the first query covers the whole
	 * 12x12 grid so the gold can be located for the heuristic; for all other
	 * strategies only the start cell (0,0) is queried.
	 * 
	 * @param observation the initial observation (unused here)
	 * @return the first action of the episode (a 'q' query)
	 */
	public Action agent_start(Observation observation) {
		episodeStartTime = System.currentTimeMillis();
		// Advance to the next strategy; once ASS (index 4) is reached, stay.
		if (strategyIndex < 4)
			strategyIndex = strategyIndex + 1;

		System.out.println("STARTING STRATEGY : " + strategies[strategyIndex]);
		agenda = EAGLE;
		reset();
		if (strategyIndex == 4) {
			// A* queries every cell of the grid up front: 144 (x,y) pairs
			// plus the leading count = 289 ints.
			int[] intArray = new int[289];
			intArray[0] = 144;

			char[] charArray = { 'q' };
			int arrayIndex = 1;
			for (int i = 0; i < 12; i++)
				for (int j = 0; j < 12; j++) {
					intArray[arrayIndex] = i;
					intArray[arrayIndex + 1] = j;
					arrayIndex += 2;
				}

			Action action = new Action();
			action.charArray = charArray;
			action.intArray = intArray;
			return action;

		}
		// Other strategies: seed the search with the root node and query
		// only the starting cell (count 1, cell (0,0)).
		partialStateNodes = constructNewStateNodes();

		pathToGoalIndex = -1;
		int[] intArray = { 1, 0, 0 };
		char[] charArray = new char[1];
		charArray[0] = 'q';
		Action returnAction = new Action();
		returnAction.charArray = charArray;
		returnAction.intArray = intArray;
		return returnAction;

	}

	/**
	 * Handles an exhausted IDS iteration: increments the depth limit and
	 * restarts the search. While searching for the gold the restart point is
	 * the root cell (0,0); while returning it is the node where the gold was
	 * grabbed (the new starting point).
	 * 
	 * @return the query action that restarts the deeper iteration
	 */
	public Action IDSHandler() {
		Action returnAction = new Action();
		char[] charArray = new char[1];
		// Accumulate the nodes expanded in the finished iteration before the
		// expanded set is cleared for the next depth limit.
		expNodes += expandedNodes.size();
		if (returnAgent == false) {
			// Still searching for the gold: deepen the limit and restart
			// from the root, querying cell (0,0) again.
			depthCounter++;
			expandedNodes = new HashSet<Node>();
			partialStateNodes = constructNewStateNodes();
			int[] intArray = { 1, 0, 0 };
			charArray[0] = 'q';
			returnAction.charArray = charArray;
			returnAction.intArray = intArray;
			return returnAction;
		} else {
			// Returning with the gold: deepen the limit and restart from the
			// node where the gold was grabbed.
			depthCounter++;
			q = new LinkedList<Node>();
			expandedNodes = new HashSet<Node>();
			partialStateNodes = new HashSet<Node>();
			partialStateNodes.add(goalNode);
			return getCellsNeededForDiscovery(partialStateNodes);

		}

	}

	/**
	 * Main per-step state machine. In EAGLE mode each step consumes one
	 * observation (answering the previous query), expands one frontier node
	 * and emits the next query; reaching the gold triggers the return search,
	 * and completing the return switches to AGENT mode. In AGENT mode the
	 * precomputed action path is replayed one character per step.
	 * 
	 * @param reward the reward for the previous action (unused)
	 * @param observation the environment's answer to the previous action
	 * @return the next action to send to the environment
	 */
	public Action agent_step(double reward, Observation observation) {
		Action returnAction = new Action();
		char[] charArray = new char[1];
		// A*'s very first step receives an observation covering the whole
		// 12x12 grid (3 ints per cell). Use it to locate the gold for the
		// Manhattan-distance heuristic, then restart the search from the
		// root by querying cell (0,0).
		if (strategyIndex == 4 && observation.intArray.length == 12 * 12 * 3) {
			findGoal(observation.intArray);
			partialStateNodes = constructNewStateNodes();
			agenda = EAGLE;
			pathToGoalIndex = -1;
			int[] intArray = { 1, 0, 0 };
			charArray[0] = 'q';
			returnAction.charArray = charArray;
			returnAction.intArray = intArray;
			return returnAction;

		}

		if (agenda == EAGLE) {
			// Planning phase: complete the pending nodes with the freshly
			// observed (gold, pit, wumpus) data and enqueue them according
			// to the active strategy.
			HashSet<Node> succesorStates = updateWorkingNodeSet(
					partialStateNodes, observation);
			if (succesorStates != null)
				addNodes(succesorStates);

			// IDS exhausts its frontier once per depth limit; restart with a
			// deeper limit until the cap (12*12*4) is reached.
			if (q.isEmpty() && strategies[strategyIndex].equals("IDS")
					&& depthCounter < 12 * 12 * 4) {
				return IDSHandler();

			}
			// If the queue is empty and the goal was never reached, there is
			// no way for the agent to reach the gold; report it and end the
			// episode with the 'x' action.
			if (q.isEmpty()) {
				if (pathToGoal == null || pathToGoal.length == 0)
					System.out
							.println("It is impossible for the agent to reach the gold");
				System.out.println("End of Episode using "
						+ strategies[strategyIndex] + ".");
				charArray[0] = 'x';
				returnAction.charArray = charArray;
				returnAction.intArray = new int[0];
				return returnAction;
			}
			Node first = q.removeFirst();
			expandedNodes.add(first);
			if (isGoal(first)) {

				if (!returnAgent) {
					// The gold has been located: remember the path to it and
					// restart the search — from the gold's node — for the way
					// back to cell (0,0).
					System.out.println("GOLD FOUND");
					returnAgent = true;
					goalX = 0;
					goalY = 0;
					goalNode = first;
					if (strategies[strategyIndex].equals("IDS"))
						depthCounter = 0;
					pathToGoal = CreatePathToGoal(first).toCharArray();
					System.out.println("GOLD IS IN: " + first.getState().getX()
							+ " " + first.getState().getY());
					expNodes += expandedNodes.size();
					System.out
							.println("Number Of expanded Nodes in path to gold :"
									+ expNodes);
					expNodes = 0;
					q = new LinkedList<Node>();
					expandedNodes = new HashSet<Node>();
					partialStateNodes = new HashSet<Node>();
					partialStateNodes.add(first);
					return getCellsNeededForDiscovery(partialStateNodes);
				} else {
					// The return path is complete: append it to the gold path
					// and switch to execution (AGENT) mode with a no-op '.'.
					System.out.println("RETURNED TO CELL "
							+ first.getState().getX() + " "
							+ first.getState().getY());
					expNodes += expandedNodes.size();
					System.out
							.println("Number Of expanded Nodes in return path :"
									+ expNodes);
					expNodes = 0;
					pathToGoal = addReturnJourney(first);
					agenda = AGENT;
					charArray[0] = '.';
					returnAction.charArray = charArray;
					returnAction.intArray = new int[0];
					return returnAction;
				}
			}
			// Not a goal: expand the node and query the environment about
			// the cells of its successors.
			partialStateNodes = getSuccessorStates(first);
			return getCellsNeededForDiscovery(partialStateNodes);
		}
		if (agenda == AGENT) {
			// Execution phase: replay the planned actions one per step.
			pathToGoalIndex = pathToGoalIndex + 1;
			charArray[0] = pathToGoal[pathToGoalIndex];
			returnAction.charArray = charArray;
			returnAction.intArray = new int[0];
			return returnAction;
		}

		// Unreachable while agenda is EAGLE or AGENT.
		return null;

	}

	/**
	 * Called when the episode ends; reports the elapsed wall-clock time for
	 * the strategy used this episode.
	 * 
	 * @param reward the final reward (unused)
	 */
	public void agent_end(double reward) {
		long elapsedMillis = System.currentTimeMillis() - episodeStartTime;
		System.out.println("Total time for " + strategies[strategyIndex]
				+ " is : " + elapsedMillis);
	}

	/**
	 * Releases the references held between steps so they can be collected.
	 */
	public void agent_cleanup() {
		lastObservation = null;
		lastAction = null;
	}

	/**
	 * Answers RL-Glue benchmark messages; only the standard name query is
	 * recognized.
	 * 
	 * @param message the incoming message text
	 * @return the reply string
	 */
	public String agent_message(String message) {
		if ("what is your name?".equals(message)) {
			return "my name is skeleton_agent, Java edition!";
		}
		return "I don't know how to respond to your message";
	}

	/**
	 * Entry point: wraps this agent in an AgentLoader so it can connect to
	 * an RL-Glue core and run.
	 * 
	 * @param args command-line arguments (unused)
	 */
	public static void main(String[] args) {
		new AgentLoader(new SkeletonAgent()).run();
	}

}
