package edu.rutgers.rl3.comp;

import org.rlcommunity.rlglue.codec.AgentInterface;
import org.rlcommunity.rlglue.codec.types.Action;
import org.rlcommunity.rlglue.codec.types.Observation;
import org.rlcommunity.rlglue.codec.util.AgentLoader;


import java.util.*;
/**
 * A simple SARSA agent
 */
public class ExMarioAgent implements AgentInterface {
	
	 // RNG used for epsilon-greedy exploration (default-seeded).
	 private Random randGenerator = new Random();
	 // Size of the discrete action set: 3 directions x 2 jump x 2 speed.
	 private int numActions = 12;
	 // NOTE(review): never read or updated after initialization — appears dead.
	 private int numStates = 0;
         // Current discretized state (5x5 tile window around Mario).
         private StateType state;
         // State observed on the previous step; null before the first action.
         private StateType lastState;
         // Reward accumulated over the current episode (printed at episode end).
         private double episodeReward; 
         
         //using template to avoid any type casting issues
         // Q table: state -> array of numActions action values.
         private HashMap<StateType,double[]> m = null;
         // Maps each enumerated Action object to its flat index in [0, numActions).
         private HashMap<Action,Integer> actionMap = null;
         // Action returned for never-before-seen states (set in the constructor).
         private Action defaultAction;
         // Action taken on the previous step; null before the first action.
         private Action lastAction; 
         // SARSA hyper-parameters: learning rate, exploration rate, discount.
         private double sarsa_stepsize = 0.1;
         private double sarsa_epsilon = 0.1;
         private double sarsa_gamma = 1.0;
	 

        
        //private class wrapper...have written my own version of equals().
        private class StateType{
            public char[][] st=null;
            public int dim;
            public StateType(int n){
                st=new char[n][n];
                dim=n;
            }
            @Override
            public boolean equals(Object oth){
                if(this == oth) {
                 return true;
                }
                if (!(oth instanceof StateType)) {
                       return false;
                }
                StateType other = (StateType)oth;
                if(other.dim!=this.dim)return false;
                
                for(int i=0;i<dim;i++){
                    for(int j=0;j<dim;j++){
                        if(other.st[i][j]!=this.st[i][j]){
                            return false;
                        }

                    }
                }
                return true;


            }
            @Override
            public int hashCode () {
                 return st.hashCode();
            }
        }
	 
	/**
	 * Returns the char representing the tile at the given location.
	 * If unknown, returns '\0'.
	 *
	 * Valid tiles:
	 * M - the tile mario is currently on. there is no tile for a monster.
	 * $ - a coin
	 * b - a smashable brick
	 * ? - a question block
	 * | - a pipe. gets its own tile because often there are pirahna plants
	 *     in them
	 * ! - the finish line
	 * And an integer in [1,7] is a 3 bit binary flag
	 *  first bit is "cannot go through this tile from above"
	 *  second bit is "cannot go through this tile from below"
	 *  third bit is "cannot go through this tile from either side"
	 *
	 * @param xf the x coordinate (world units) of the tile to query
	 * @param yf the y coordinate (world units) of the tile to query
	 * @param obs the current observation
	 * @return the tile character, or '\0' if outside the visible window
	 */
	public static char getTileAt(double xf, double yf, Observation obs) {
		int col = (int) xf;
		// Anything left of the world start is treated as fully solid.
		if (col < 0) {
			return '7';
		}
		// The observation stores rows top-down; flip y into row space.
		int row = 16 - (int) yf;
		// Make the column relative to the left edge of the visible window.
		int relCol = col - obs.intArray[0];
		if (relCol < 0 || relCol > 21 || row < 0 || row > 15) {
			return '\0';
		}
		// The 22-wide window is stored row-major in charArray.
		return obs.charArray[row * 22 + relCol];
	}
	
	/**
	 * All you need to know about a monster.
	 
	 */
	static class Monster {
		/** Horizontal position as reported in the observation. */
		double x;
		/** Vertical position as reported in the observation. */
		double y;
		/**
		 * The instantaneous change in x per step
		 */
		double sx;
		/**
		 * The instantaneous change in y per step
		 */
		double sy;
		/**
		 * The monster type
		 * 0 - Mario
		 * 1 - Red Koopa
		 * 2 - Green Koopa
		 * 3 - Goomba
		 * 4 - Spikey
		 * 5 - Pirahna Plant
		 * 6 - Mushroom
		 * 7 - Fire Flower
		 * 8 - Fireball
		 * 9 - Shell
		 * 10 - Big Mario
		 * 11 - Fiery Mario
		 */
		int type;
		/**
		 * A human recognizable title for the monster
		 */
		String typeName;
		/**
		 * Winged monsters bounce up and down
		 */
		boolean winged;
	}
	
	/**
	 * Gets all the monsters from the observation. Mario is included in this list.
	 * 
	 * @param obs
	 * @return
	 */
	public static Monster[] getMonsters(Observation obs) {
		List<Monster> monsters = new ArrayList<Monster>();
		// intArray layout: [0] = window offset, then (type, winged) pairs.
		for (int i = 0; 1 + 2 * i < obs.intArray.length; i++) {
			Monster m = new Monster();
			m.type = obs.intArray[1 + 2 * i];
			m.winged = obs.intArray[2 + 2 * i] != 0;
			// Table lookup replaces the original 12-case switch. Unknown
			// type codes previously left typeName null; give them a
			// placeholder instead so callers never see null.
			m.typeName = (m.type >= 0 && m.type < MONSTER_NAMES.length)
					? MONSTER_NAMES[m.type]
					: "Unknown";
			// doubleArray holds (x, y, sx, sy) quadruples, one per monster.
			m.x = obs.doubleArray[4 * i];
			m.y = obs.doubleArray[4 * i + 1];
			m.sx = obs.doubleArray[4 * i + 2];
			m.sy = obs.doubleArray[4 * i + 3];
			monsters.add(m);
		}
		return monsters.toArray(new Monster[0]);
	}

	/** Human-recognizable names indexed by the type codes listed on Monster.type. */
	private static final String[] MONSTER_NAMES = {
		"Mario", "Red Koopa", "Green Koopa", "Goomba", "Spikey",
		"Piranha Plant", "Mushroom", "Fire Flower", "Fireball",
		"Shell", "Big Mario", "Fiery Mario"
	};
	/**
	 * Gets just mario's information.
	 * 
	 * @param obs
	 * @return
	 */
	public static Monster getMario(Observation obs) {
		// Mario appears in the monster list as type 0, 10 (big) or 11 (fiery).
		for (Monster candidate : getMonsters(obs)) {
			if (candidate.type == 0 || candidate.type == 10 || candidate.type == 11) {
				return candidate;
			}
		}
		return null;
	}
	
	// Wall-clock-seeded RNG created in the constructor.
	// NOTE(review): overlaps in purpose with randGenerator above — only one is needed.
	Random rand;
	
	/**
	 * How many steps since the beginning of this trial
	 */
	int step_number;
	/**
	 * How many steps since the beginning of this run
	 */
	int total_steps;
	/**
	 * The time that the current trial began
	 */
	long trial_start;

	/**
	 * The sequence of actions taken during the last trial
	 */
	//Vector<Action> last_actions;
	/**
	 * The sequence of actions taken so far during the current trial
	 */
	//Vector<Action> this_actions;
	
	/**
	 * Builds the agent: seeds the RNG, allocates the Q table, and
	 * enumerates the 12 discrete actions (direction x jump x speed) into
	 * actionMap, keyed by the Action object with its flat index as value.
	 * The action (right, jump, no speed) is kept as defaultAction for
	 * newly-seen states.
	 *
	 * Fixes: the loop variable previously named "m" shadowed the Q-table
	 * field "m"; the deprecated new Integer(i) is replaced by autoboxing;
	 * new java.util.Date().getTime() by System.currentTimeMillis().
	 */
	ExMarioAgent() {
		rand = new Random(System.currentTimeMillis());
		m = new HashMap<StateType,double[]>();

		lastAction = null;
		lastState = null;
		actionMap = new HashMap<Action,Integer>();
		int index = 0;
		// direction in {-1,0,1}, jump in {0,1}, speed in {0,1} => 12 actions.
		for (int dir = -1; dir <= 1; dir++) {
			for (int jump = 0; jump <= 1; jump++) {
				for (int speed = 0; speed <= 1; speed++, index++) {
					Action temp = new Action(3, 0);
					temp.intArray[0] = dir;
					temp.intArray[1] = jump;
					temp.intArray[2] = speed;
					// Remember "run right and jump" as the default action.
					if (dir == 1 && jump == 1 && speed == 0) {
						defaultAction = temp;
					}
					actionMap.put(temp, index);
				}
			}
		}
	}

	/** RL-Glue hook: called once per run before any episode; resets the lifetime step counter. */
	public void agent_init(String task) {
		total_steps = 0;
	}
	
	/** RL-Glue hook: called once at the end of a run; this agent holds nothing to release. */
	public void agent_cleanup() {

	}
	
	/**
	 * RL-Glue hook: called at the start of each episode. Resets the
	 * per-episode counters and returns the first action for the initial
	 * observation (no reward has been received yet, so 0 is passed).
	 *
	 * Fix: new Date().getTime() replaced by System.currentTimeMillis().
	 */
	public Action agent_start(Observation o) {
		trial_start = System.currentTimeMillis();
		step_number = 0;
		episodeReward = 0;
		return getAction(0, o);
	}

	/**
	 * RL-Glue hook for one interaction step: accumulates the reward into
	 * the episode total, advances both step counters, and delegates the
	 * action choice (and SARSA update) to getAction.
	 */
	public Action agent_step(double reward, Observation obs) {
		step_number += 1;
		total_steps += 1;
		episodeReward += reward;
		return getAction(reward, obs);
	}

	/**
	 * RL-Glue hook: called when the episode terminates. Performs the final
	 * SARSA backup — the successor state is terminal, so the target is just
	 * the terminal reward — then prints the episode's total reward.
	 *
	 * Fix: null guards added so a degenerate episode (no action taken, or
	 * stale bookkeeping) cannot throw a NullPointerException.
	 *
	 * @param r reward received on the terminating transition
	 */
	public void agent_end(double r) {
		if (lastState != null && lastAction != null) {
			Integer lAction = actionMap.get(lastAction);
			double[] Q_sValues = m.get(lastState);
			if (lAction != null && Q_sValues != null) {
				double Q_sa = Q_sValues[lAction];
				// Terminal update: Q(s,a) += alpha * (r - Q(s,a)).
				Q_sValues[lAction] = Q_sa + sarsa_stepsize * (r - Q_sa);
				// Q_sValues is the array already stored in the map; the
				// re-put is redundant but harmless and kept for clarity.
				m.put(lastState, Q_sValues);
			}
		}
		System.out.println((episodeReward + r));
	}

	/**
	 * RL-Glue hook for free-form messages from the experiment; this agent
	 * only logs the message and replies with nothing.
	 */
	public String agent_message(String msg) {
		System.out.println("message asked:" + msg);
		return null;
	}
	
	
	/**
	 * Epsilon-greedy action selection: with probability sarsa_epsilon (or
	 * whenever the state has no Q values yet) returns a uniformly random
	 * action index; otherwise returns the index of the highest-valued
	 * action. Ties are broken in favor of the LAST maximal index.
	 *
	 * Fix: the original javadoc had the probability inverted ("random with
	 * probability 1-sarsa_epsilon"); the code explores with probability
	 * sarsa_epsilon, as documented here.
	 *
	 * @param theState the state whose Q values drive the choice
	 * @return an action index in [0, numActions)
	 */
	private int egreedy(StateType theState) {
		double[] actionArray = m.get(theState);
		// Explore — also the only option when the state is unseen (null row).
		if (randGenerator.nextDouble() <= sarsa_epsilon || actionArray == null) {
			return randGenerator.nextInt(numActions);
		}

		// Exploit: arg-max over the stored Q values (>= keeps the last max,
		// matching the original behavior).
		int maxIndex = 0;
		for (int a = 1; a < numActions; a++) {
			if (actionArray[a] >= actionArray[maxIndex]) {
				maxIndex = a;
			}
		}
		return maxIndex;
	}
	
	
	/**
	 * Core SARSA step. Builds the 5x5 tile window around Mario as the
	 * discrete state, looks it up in the Q table, updates
	 * Q(lastState, lastAction) toward reward + gamma * Q(state, nextAction),
	 * and returns the chosen action.
	 *
	 * Fix: the original computed newQ_sa (the successor pair's value) but
	 * never used it — the update target was "reward + gamma*Q_sa - Q_sa",
	 * i.e. Q(s,a) bootstrapping on itself instead of on Q(s',a'). The
	 * commented-out line in the original shows the intended formula, which
	 * is applied here.
	 *
	 * @param reward reward received since the previous action
	 * @param o      current observation
	 * @return the action to execute this step
	 */
	Action getAction(double reward, Observation o) {
		state = new StateType(5);
		Action act = new Action(3, 0);

		// Discretize: 5x5 grid of tiles centered on Mario's position.
		Monster mario = ExMarioAgent.getMario(o);
		int mario_x = (int) mario.x;
		int mario_y = (int) mario.y;
		int k = 0;
		for (int i = mario_x - 2; i <= mario_x + 2; i++, k++) {
			int l = 0;
			for (int j = mario_y - 2; j <= mario_y + 2; j++) {
				state.st[k][l++] = getTileAt((double) i, (double) j, o);
			}
		}

		// Linear scan for an equal key: StateType.hashCode() is not
		// guaranteed consistent with equals() here, so HashMap.get()
		// cannot be trusted for lookup. On a hit, adopt the canonical
		// key object already stored in the map.
		boolean known = false;
		for (StateType existing : m.keySet()) {
			if (existing.equals(state)) {
				known = true;
				state = existing;
				break;
			}
		}

		if (!known) {
			// First visit: zero-initialize the Q row and take the default
			// action (run right and jump), as in the original policy.
			m.put(state, new double[numActions]);
			act = defaultAction;
		} else {
			int newActionInt = egreedy(state);

			Integer lAction = actionMap.get(lastAction);
			double[] Q_sValues = (lastState == null) ? null : m.get(lastState);
			if (lAction != null && Q_sValues != null) {
				double Q_sa = Q_sValues[lAction];
				double newQ_sa = m.get(state)[newActionInt];
				// SARSA: Q(s,a) += alpha * (r + gamma * Q(s',a') - Q(s,a)).
				Q_sValues[lAction] = Q_sa
						+ sarsa_stepsize * (reward + sarsa_gamma * newQ_sa - Q_sa);
				m.put(lastState, Q_sValues);
			}

			// Reverse-lookup the Action object whose index was chosen.
			for (Map.Entry<Action, Integer> entry : actionMap.entrySet()) {
				if (entry.getValue() == newActionInt) {
					act = entry.getKey();
					break;
				}
			}
		}

		lastState = state;
		lastAction = act;
		return act;
	}

	public static void main(String[] args) {
		// Register this agent with the RL-Glue runtime and start serving.
		AgentLoader loader = new AgentLoader(new ExMarioAgent());
		loader.run();
	}
}
