import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Random;
import org.rlcommunity.rlglue.codec.AgentInterface;
import org.rlcommunity.rlglue.codec.types.Action;
import org.rlcommunity.rlglue.codec.types.Observation;
import org.rlcommunity.rlglue.codec.util.AgentLoader;
import org.rlcommunity.rlglue.codec.taskspec.TaskSpec;
import org.rlcommunity.rlglue.codec.taskspec.ranges.IntRange;
import org.rlcommunity.rlglue.codec.taskspec.ranges.DoubleRange;

import dataStructures.Node;
import dataStructures.State;

/**
 * RL-Glue agent for the Wumpus world.
 *
 * The agent runs in two phases per episode:
 * 1. EAGLE mode — plans offline: it queries every grid cell once to learn where the
 *    gold, pits and wumpuses are, then runs one of five search strategies (DFS, BFS,
 *    IDS, UCS, A*) to build an action path from home (0,0) to the gold, and a second
 *    search for the way back home.
 * 2. AGENT mode — replays the two planned paths action by action.
 *
 * Each episode exercises the next strategy in {@link #strategies} (the index is
 * advanced in {@link #agent_start}), so per-strategy expansion counts and costs
 * accumulate across episodes in {@link #numberOfexpandedNodes} / {@link #totalCost}.
 */
public class WumpusAgent implements AgentInterface {

	/** The grid is GRID_SIZE x GRID_SIZE cells. */
	static final int GRID_SIZE = 12;
	/** Hard cap on the IDS depth limit before declaring the target unreachable. */
	static final int IDS_MAX_LIMIT = 600;

	// Agenda modes.
	final int EAGLE = 1; // planning mode: the eagle searches the grid
	final int AGENT = 2; // execution mode: the agent replays the planned path

	// Orientations.
	final char NORTH = 'n';
	final char SOUTH = 's';
	final char EAST = 'e';
	final char WEST = 'w';

	// Step costs used by the cost-sensitive strategies (UCS and A*).
	final int FORWARD_COST = 2;
	final int RIGHT_COST = 1;
	final int LEFT_COST = 1;
	final int ARROW_SHOOT_COST = 10;

	// Action characters understood by the environment.
	final char FORWARD = 'f';
	final char LEFT = 'l';
	final char RIGHT = 'r';
	final char ARROW_SHOOT = 'a';
	final char CLIMB = 'c';
	final char GRAB = 'g';
	final char END_EPISODE = 'x';
	final char NO_ACTION = '.';
	final char QUERY = 'q';

	Random randGenerator = new Random();
	Action lastAction;
	Observation lastObservation;

	static int agenda; // current mode: EAGLE or AGENT
	final String[] strategies = {"DFSearch", "BFSearch", "IDSearch", "UCSearch", "ASSearch"}; // the search strategies
	static int strategyIndex = -1; // index into strategies; advanced once per episode

	static ArrayList<State> q = new ArrayList<State>(); // the search frontier (queue)
	static ArrayList<State> partialStateNodes = new ArrayList<State>(); // states awaiting the next observation update
	static ArrayList<State> checkedStates = new ArrayList<State>(); // already-generated states, to avoid repeats

	static ArrayList<Node> allNodes = new ArrayList<Node>(); // every cell on the grid

	static ArrayList<Character> pathToGoal = new ArrayList<Character>(); // planned actions: home -> gold
	static ArrayList<Character> pathToHome = new ArrayList<Character>(); // planned actions: gold -> home

	static int pathToGoalIndex = 0; // replay cursor into pathToGoal
	static int pathToHomeIndex = 0; // replay cursor into pathToHome

	static int[] numberOfexpandedNodes = {0, 0, 0, 0, 0}; // expansions per strategy
	static int[] totalCost = {0, 0, 0, 0, 0}; // accumulated path cost per strategy

	static int IDSlimit = 0; // current depth limit for IDSearch
	static Node goalNode = new Node(0, 0); // cell holding the gold; set by observeAllCells
	static boolean foundGoal; // true once the outbound search has succeeded
	static State first = null; // most recently dequeued state

	/** Parses the task spec and prints the observation/action/reward ranges. */
	public void agent_init(String taskSpecification) {
		TaskSpec theTaskSpec = new TaskSpec(taskSpecification);
		System.out.println("Skeleton agent parsed the task spec.");
		System.out.println("Observation have "
				+ theTaskSpec.getNumDiscreteObsDims() + " integer dimensions");
		System.out.println("Actions have "
				+ theTaskSpec.getNumDiscreteActionDims()
				+ " integer dimensions");
		IntRange theObsRange = theTaskSpec.getDiscreteObservationRange(0);
		System.out.println("Observation (state) range is: "
				+ theObsRange.getMin() + " to " + theObsRange.getMax());
		IntRange theActRange = theTaskSpec.getDiscreteActionRange(0);
		System.out.println("Action range is: " + theActRange.getMin() + " to "
				+ theActRange.getMax());
		DoubleRange theRewardRange = theTaskSpec.getRewardRange();
		System.out.println("Reward range is: " + theRewardRange.getMin()
				+ " to " + theRewardRange.getMax());
	}

	/**
	 * Starts an episode: advances to the next strategy, resets all search state,
	 * and issues the initial whole-grid query.
	 */
	public Action agent_start(Observation observation) {
		strategyIndex = strategyIndex + 1;
		partialStateNodes.clear();
		partialStateNodes.add(constructNewStateNodes());
		q.clear();

		agenda = EAGLE;
		pathToGoalIndex = -1; // replay cursors are pre-incremented before use
		pathToHomeIndex = -1;

		foundGoal = false;
		pathToGoal.clear();
		pathToHome.clear();
		checkedStates.clear();

		return discoverAllcells();
	}

	/**
	 * Builds a single query action covering every cell of the grid, so the next
	 * observation reveals the gold, pit and wumpus contents of all cells at once.
	 */
	private Action discoverAllcells() {
		Action action = new Action(GRID_SIZE * GRID_SIZE * 2 + 1, 0, 1);
		action.charArray[0] = QUERY;
		action.intArray[0] = GRID_SIZE * GRID_SIZE;

		// intArray layout after the count: (x, y) coordinate pairs for every cell.
		for (int i = 0; i < GRID_SIZE; i++) {
			for (int j = 0; j < GRID_SIZE; j++) {
				action.intArray[(i * 2 * GRID_SIZE) + 2 * j + 1] = i;
				action.intArray[(i * 2 * GRID_SIZE) + 2 * j + 2] = j;
			}
		}

		return action;
	}

	/**
	 * Consumes the observation answering {@link #discoverAllcells}: each queried
	 * cell contributes a (hasGold, hasPit, hasWumpus) triple. Fills allNodes in
	 * column-major order (y varies fastest) and records the gold cell in goalNode.
	 */
	private void observeAllCells(Observation observation) {
		int x = 0;
		int y = 0;

		for (int i = 0; i < observation.intArray.length / 3; i++) {
			Node node = new Node(x, y);
			node.hasGold = observation.intArray[0 + i * 3];
			node.hasPit = observation.intArray[1 + i * 3];
			node.hasWumpus = observation.intArray[2 + i * 3];
			if (node.hasGold == 1) goalNode = node;
			allNodes.add(node);
			// Advance to the next cell: walk up the column, then step to the next column.
			if (y < GRID_SIZE - 1) y++;
			else if (x < GRID_SIZE - 1) {
				x++;
				y = 0;
			}
		}
	}

	/**
	 * Builds an eagle action: the given character followed by the count and
	 * (x, y) coordinates of the cells to query.
	 */
	private Action action(char character, ArrayList<State> actionStates) {
		Action action = new Action(actionStates.size() * 2 + 1, 0, 1);
		action.charArray[0] = character;
		action.intArray[0] = actionStates.size();

		for (int i = 0; i < actionStates.size(); i++) {
			action.intArray[(i * 2) + 1] = actionStates.get(i).location.x;
			action.intArray[(i * 2) + 2] = actionStates.get(i).location.y;
		}

		return action;
	}

	/**
	 * Builds an agent action: a single character command with no coordinates.
	 * The actionStates parameter is unused but kept for signature symmetry with
	 * {@link #action}.
	 */
	private Action agentAction(char character, ArrayList<State> actionStates) {
		Action action = new Action(0, 0, 1);
		action.charArray[0] = character;
		System.out.println("Action: " + character);
		return action;
	}

	/** Constructs the root node of the search tree: home (0,0), facing north. */
	private State constructNewStateNodes() {
		Node location = new Node(0, 0);
		State state = new State(NORTH, location, null, ' ', 1, 0, 0, generateHeuristic(location));
		return state;
	}

	/**
	 * Main step dispatch. In EAGLE mode, runs one expansion of the current search
	 * (outbound or homeward); in AGENT mode, prints the per-strategy statistics
	 * and replays the next planned action.
	 */
	public Action agent_step(double reward, Observation observation) {

		ArrayList<State> actionStates = new ArrayList<State>();

		if (agenda == EAGLE) {
			if (allNodes.size() == 0) observeAllCells(observation); // first step: ingest the whole-grid query result

			if (!foundGoal) {
				ArrayList<State> successorStates = updateWorkingNodeSet(partialStateNodes, observation);
				return findGoal(successorStates);
			}
			else { // goal already found: search the way back home
				ArrayList<State> successorStates = updateWorkingNodeSet(partialStateNodes, observation);
				return findHome(successorStates);
			}
		}

		else if (agenda == AGENT) {

			for (int i = 0; i < strategies.length; i++) {
				System.out.println("Number of Expanded Nodes in " + strategies[i] + ": "
						+ numberOfexpandedNodes[i] + " with cost: " + totalCost[i]);
			}

			if (pathToGoalIndex < pathToGoal.size() - 1) {
				pathToGoalIndex = pathToGoalIndex + 1;
				return agentAction(pathToGoal.get(pathToGoalIndex), actionStates);
			}

			else if (pathToHomeIndex < pathToHome.size() - 1) {
				pathToHomeIndex = pathToHomeIndex + 1;
				return agentAction(pathToHome.get(pathToHomeIndex), actionStates);
			}
		}

		return null;
	}

	/**
	 * Performs one expansion of the outbound (home -> gold) search.
	 * On success, records the path and resets the search state for the homeward
	 * leg; if the frontier empties under IDS, deepens the limit and restarts.
	 */
	private Action findGoal(ArrayList<State> successorStates) {

		// Expand unless running IDS (index 2) with successors beyond the current depth limit.
		if (strategyIndex != 2 || (!successorStates.isEmpty() && successorStates.get(0).level <= IDSlimit))
			searchStrategy(successorStates);

		if (q.size() != 0) {
			numberOfexpandedNodes[strategyIndex]++;
			first = q.remove(0);

			if (isGoal(first) == 1) {
				System.out.println("Found Gold");
				totalCost[strategyIndex] += first.costToState;
				pathToGoal = createPathToGoal(first);
				foundGoal = true;

				// Reset everything for the way back home; the goal state becomes the new root.
				partialStateNodes.clear();
				State state = new State(first.orientation, first.location, null, ' ', 1, 0, 0, generateHeuristic(first.location));
				partialStateNodes.add(state);
				q.clear();
				checkedStates.clear();
				IDSlimit = 0;
				return action(NO_ACTION, new ArrayList<State>());
			}

			// Not the goal: queue this state's children for the next observation round.
			partialStateNodes.clear();
			partialStateNodes = first.children;

			return action(QUERY, partialStateNodes);
		}

		// Frontier exhausted. Under IDS, deepen the limit and restart from the root.
		if (strategyIndex == 2 && IDSlimit < IDS_MAX_LIMIT) {

			if (first != null) // defensive: null until the first node has been expanded
				totalCost[strategyIndex] += first.costToState;
			partialStateNodes.clear();
			Node location = new Node(0, 0);
			State state = new State(NORTH, location, null, ' ', 1, 0, 0, generateHeuristic(location));
			partialStateNodes.add(state);
			q.clear();
			checkedStates.clear();
			System.out.println("IDS Limit: " + IDSlimit);
			IDSlimit++;
			return action(NO_ACTION, partialStateNodes);
		}

		// All options exhausted: the gold cannot be reached.
		System.out.println("Gold is unreachable");
		return action(END_EPISODE, partialStateNodes);
	}

	/**
	 * Performs one expansion of the homeward (gold -> home) search; mirrors
	 * {@link #findGoal} with home (0,0) as the target. On success, switches the
	 * agenda to AGENT mode so the planned paths get replayed.
	 */
	private Action findHome(ArrayList<State> successorStates) {

		// Expand unless running IDS (index 2) with successors beyond the current depth limit.
		if (strategyIndex != 2 || (!successorStates.isEmpty() && successorStates.get(0).level <= IDSlimit))
			searchStrategy(successorStates);

		if (q.size() != 0) {
			numberOfexpandedNodes[strategyIndex]++;
			first = q.remove(0);

			if (first.location.x == 0 && first.location.y == 0) { // home reached
				System.out.println("Found Home");
				totalCost[strategyIndex] += first.costToState;
				pathToHome = createPathToHome(first);
				agenda = AGENT;
				return action(NO_ACTION, new ArrayList<State>());
			}

			// Not home yet: queue this state's children for the next observation round.
			partialStateNodes.clear();
			partialStateNodes = first.children;

			return action(QUERY, partialStateNodes);
		}

		// Frontier exhausted. Under IDS, deepen the limit and restart from the goal state.
		if (strategyIndex == 2 && IDSlimit < IDS_MAX_LIMIT) {

			if (first != null) // defensive: guards against an unexpanded frontier
				totalCost[strategyIndex] += first.costToState;
			partialStateNodes.clear();
			State state = new State(first.orientation, first.location, null, ' ', 1, 0, 0, generateHeuristic(first.location));
			partialStateNodes.add(state);
			q.clear();
			checkedStates.clear();
			System.out.println("IDS Limit: " + IDSlimit);
			IDSlimit++;
			return action(NO_ACTION, partialStateNodes);
		}

		// All options exhausted: home cannot be reached.
		System.out.println("Home is unreachable");
		return action(END_EPISODE, partialStateNodes);
	}

	/**
	 * Reconstructs the action sequence from home to the gold by walking parent
	 * links from the goal state back to the root, then reversing. The earliest
	 * recorded action (index 0 of the backward list) is deliberately skipped and
	 * replaced by the explicit FORWARD + GRAB suffix, since the agent stops at
	 * the cell before the gold while facing it.
	 */
	private ArrayList<Character> createPathToGoal(State first) {

		ArrayList<Character> goalPathTemp = new ArrayList<Character>();
		ArrayList<Character> goalPath = new ArrayList<Character>();

		// Collect actions goal -> root (i.e. in reverse execution order).
		while (first.parent != null) {
			goalPathTemp.add(first.previousAction);
			first = first.parent;
		}

		// Reverse into execution order, skipping index 0 (the final step into the gold cell).
		for (int i = (goalPathTemp.size() - 1); i > 0; i--) {
			goalPath.add(goalPathTemp.get(i));
		}

		goalPath.add(FORWARD);
		goalPath.add(GRAB);

		return goalPath;
	}

	/**
	 * Reconstructs the action sequence from the gold back to home; same scheme
	 * as {@link #createPathToGoal}, ending with FORWARD + CLIMB.
	 */
	private ArrayList<Character> createPathToHome(State first) {

		ArrayList<Character> homePathTemp = new ArrayList<Character>();
		ArrayList<Character> homePath = new ArrayList<Character>();

		// Collect actions home-state -> root (reverse execution order).
		while (first.parent != null) {
			homePathTemp.add(first.previousAction);
			first = first.parent;
		}

		// Reverse into execution order, skipping index 0 (the final step into home).
		for (int i = (homePathTemp.size() - 1); i > 0; i--)
			homePath.add(homePathTemp.get(i));

		homePath.add(FORWARD);
		homePath.add(CLIMB);

		return homePath;
	}

	/** Dispatches the successor states to the strategy selected by strategyIndex. */
	private void searchStrategy(ArrayList<State> successorStates) {

		switch (strategyIndex) {
			case 0: DFSearch(successorStates); break;
			case 1: BFSearch(successorStates); break;
			case 2: IDSearch(successorStates); break;
			case 3: UCSearch(successorStates); break;
			case 4: ASSearch(successorStates); break;
			default: break;
		}

	}

	/** A*: keeps the frontier sorted ascending by f(n) = path cost + heuristic. */
	private void ASSearch(ArrayList<State> successorStates) {
		q.addAll(successorStates);

		Collections.sort(q, new Comparator<State>() {
			public int compare(State state1, State state2) {
				return Integer.compare(state1.costToState + state1.heuristicValue,
						state2.costToState + state2.heuristicValue);
			}
		});

	}

	/** Uniform-cost search: keeps the frontier sorted ascending by path cost. */
	private void UCSearch(ArrayList<State> successorStates) {
		q.addAll(successorStates);

		Collections.sort(q, new Comparator<State>() {
			public int compare(State state1, State state2) {
				return Integer.compare(state1.costToState, state2.costToState);
			}
		});

	}

	/** Breadth-first search: enqueues successors at the back (FIFO). */
	private void BFSearch(ArrayList<State> successorStates) {
		q.addAll(successorStates);
	}

	/** Depth-first search: enqueues successors at the front (LIFO). */
	private void DFSearch(ArrayList<State> successorStates) {
		q.addAll(0, successorStates);
	}

	/**
	 * Iterative deepening: identical to DFS here; the depth limit is enforced in
	 * findGoal/findHome before successors are enqueued.
	 */
	private void IDSearch(ArrayList<State> successorStates) {
		DFSearch(successorStates);
	}

	/**
	 * Updates each pending state with the observed contents of its cell, then
	 * generates its children (LEFT turn, RIGHT turn, and FORWARD move or
	 * ARROW_SHOOT if a wumpus blocks the way), skipping duplicates and cells
	 * that would leave the grid or fall into a pit. Returns the same (updated)
	 * list it was given.
	 */
	private ArrayList<State> updateWorkingNodeSet(ArrayList<State> partialStateNodes2, Observation observation) {

		for (int i = 0; i < partialStateNodes2.size(); i++) {

			State currentState = partialStateNodes2.get(i);

			// Copy the observed cell contents onto the state's location (single grid lookup).
			Node hereNode = getNode(currentState.location.x, currentState.location.y);
			currentState.location.hasGold = hereNode.hasGold;
			currentState.location.hasPit = hereNode.hasPit;
			currentState.location.hasWumpus = hereNode.hasWumpus;

			// A forward move is illegal when it would step off the grid edge the agent faces.
			boolean leftBound = currentState.location.x == 0 && currentState.orientation == WEST;
			boolean rightBound = currentState.location.x == GRID_SIZE - 1 && currentState.orientation == EAST;
			boolean upperBound = currentState.location.y == GRID_SIZE - 1 && currentState.orientation == NORTH;
			boolean lowerBound = currentState.location.y == 0 && currentState.orientation == SOUTH;

			// Derive the orientations after LEFT/RIGHT turns and the cell a FORWARD move reaches.
			char left = 0;
			char right = 0;
			Node forwardNode = null;
			switch (currentState.orientation) {
				case NORTH: left = WEST; right = EAST;
					forwardNode = new Node(currentState.location.x, currentState.location.y + 1); break;
				case SOUTH: left = EAST; right = WEST;
					forwardNode = new Node(currentState.location.x, currentState.location.y - 1); break;
				case EAST: left = NORTH; right = SOUTH;
					forwardNode = new Node(currentState.location.x + 1, currentState.location.y); break;
				case WEST: left = SOUTH; right = NORTH;
					forwardNode = new Node(currentState.location.x - 1, currentState.location.y); break;

				default: System.out.println("Didn't Enter Orientation Switch"); break;
			}

			State state;
			ArrayList<State> children = new ArrayList<State>();

			// LEFT turn child, unless the resulting (location, orientation) was generated before.
			if (!checkStateDuplicate(currentState.location, left)) {
				state = new State(left, currentState.location, currentState, LEFT,
						currentState.numArrows, currentState.costToState + LEFT_COST,
						currentState.level + 1, generateHeuristic(currentState.location));
				children.add(state);
			}

			// RIGHT turn child, unless the resulting (location, orientation) was generated before.
			if (!checkStateDuplicate(currentState.location, right)) {
				state = new State(right, currentState.location, currentState, RIGHT,
						currentState.numArrows, currentState.costToState + RIGHT_COST,
						currentState.level + 1, generateHeuristic(currentState.location));
				children.add(state);
			}

			// FORWARD child, unless duplicate, off-grid, or blocked by a pit.
			if (!checkStateDuplicate(forwardNode, currentState.orientation)) {

				// Null when forwardNode is off-grid; guarded by the bounds check below.
				Node targetNode = getNode(forwardNode.x, forwardNode.y);

				if (!(lowerBound || upperBound || leftBound || rightBound)
						&& targetNode.hasPit == 0) {
					if (targetNode.hasWumpus == 0) {

						state = new State(currentState.orientation, forwardNode, currentState, FORWARD,
								currentState.numArrows, currentState.costToState + FORWARD_COST,
								currentState.level + 1, generateHeuristic(forwardNode));
						children.add(state);
					}

					// A wumpus blocks the way: shoot it (agent stays put) and clear the cell.
					else if (targetNode.hasWumpus == 1) {
						state = new State(currentState.orientation, currentState.location, currentState, ARROW_SHOOT,
								currentState.numArrows - 1, currentState.costToState + ARROW_SHOOT_COST,
								currentState.level + 1, generateHeuristic(forwardNode));
						children.add(state);
						targetNode.hasWumpus = 0;
					}
				}
			}

			currentState.children = children;
			checkedStates.addAll(children);

		}
		return partialStateNodes2;
	}

	/**
	 * City-block (Manhattan) distance heuristic: to the gold while searching
	 * outbound, to home (0,0) on the return leg. NOTE(review): before
	 * observeAllCells runs, goalNode defaults to (0,0), so heuristics computed
	 * in agent_start use that placeholder — confirm this is intended.
	 */
	private int generateHeuristic(Node node) {

		if (!foundGoal) // home -> gold
			return Math.abs(goalNode.x - node.x) + Math.abs(goalNode.y - node.y);

		else // gold -> home
			return Math.abs(0 - node.x) + Math.abs(0 - node.y);
	}

	/** Linear lookup of the grid cell at (x, y); null if off-grid or unobserved. */
	private Node getNode(int x, int y) {
		for (int i = 0; i < allNodes.size(); i++) {
			if (allNodes.get(i).x == x && allNodes.get(i).y == y)
				return allNodes.get(i);
		}
		return null;
	}

	/**
	 * Returns true when a state with the same location and orientation has
	 * already been generated, so the caller can skip creating it again.
	 */
	private boolean checkStateDuplicate(Node node, char Orientation) {
		for (int i = 0; i < checkedStates.size(); i++) {
			State checkedState = checkedStates.get(i);
			if (node.x == checkedState.location.x && node.y == checkedState.location.y
					&& Orientation == checkedState.orientation)
				return true;
		}
		return false;
	}

	/** Returns 1 when the state's cell holds the gold, 0 otherwise. */
	private int isGoal(State first) {
		return first.location.hasGold;
	}

	//-----------------------------------------------------------------------------------------------------------------------

	public void agent_end(double reward) {
	}

	public void agent_cleanup() {
		lastAction = null;
		lastObservation = null;
	}

	public String agent_message(String message) {
		if (message.equals("what is your name?"))
			return "my name is skeleton_agent, Java edition!";

		return "I don't know how to respond to your message";
	}

	/**
	 * This is a trick we can use to make the agent easily loadable.
	 *
	 * @param args
	 */
	public static void main(String[] args) {
		AgentLoader theLoader = new AgentLoader(new WumpusAgent());
		theLoader.run();
	}

}
