package org.chalmers.rl;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Queue;
import java.util.Random;
import java.util.Set;
import java.util.TreeSet;

import org.rlcommunity.rlglue.codec.AgentInterface;
import org.rlcommunity.rlglue.codec.taskspec.TaskSpec;
import org.rlcommunity.rlglue.codec.types.Action;
import org.rlcommunity.rlglue.codec.types.Observation;
import org.rlcommunity.rlglue.codec.util.AgentLoader;

/**
 * DJAgent: a model-based RL-Glue agent for tasks with a single discrete
 * observation dimension and a single discrete action dimension. It learns a
 * tabular model through the project classes {@code Actions} / {@code StateAction},
 * plans with prioritized sweeping, and acts epsilon-greedily with a
 * per-episode decaying epsilon and optimistic initial values.
 */
public class DJAgent implements AgentInterface {
	private final static boolean DEBUG_QMAP = false;
	private final static boolean DEBUG_MOVES = false;
	private final static boolean DEBUG_REWARD = true;
	private final static boolean DEBUG_ALL_REWARD = false;

	// Reward accumulated during the current episode (reset in agent_end).
	private double totalReward = 0;

	private int numSteps = 0;
	private int numActions;  // number of discrete actions, parsed from the task spec
	private double gamma;    // discount factor, parsed from the task spec

	private double eps = 0.5;           // current exploration probability
	private double eps_ini = 0.5;       // eps is restored to this in agent_cleanup
	private double epsMin = 0.01;       // lower bound for the eps decay
	private double exp_discount = 0.9;  // exploration decay, applied once per episode

	private int ep_n = 0;  // episode counter

	// Optimistic initial Q-value; also used as the estimate for unseen states.
	private double initVal = 1.0;


	private double theta = 0.01; // priority threshold (overwritten to 0.001 in agent_init)
//	private double lambda = 0.9; // TD learning

//	private double alpha = 0.1; // step size

	// Cap on sweep iterations per real step, keeps planning time bounded.
	private int maxQueueSteps = 50;

	private Random rand = new Random();

	private Action lastAction;
	private Observation lastObservation;

	// State id -> per-state action statistics / Q-values (project class).
	private Map<Integer, Actions> Qmap = new HashMap<Integer, Actions>();

	// Prioritized-sweeping queue; the highest-priority entry is popped first
	// (see SAQueueEntry.compareTo).
	private PriorityQueue<SAQueueEntry> queue = new PriorityQueue<SAQueueEntry>();

	/**
	 * Parse the task spec, make sure it is only 1 integer observation and
	 * action, and read the action count and discount factor.
	 *
	 * @param taskSpecification RL-Glue task specification string
	 */
	@Override
	public void agent_init(String taskSpecification) {
		TaskSpec theTaskSpec = new TaskSpec(taskSpecification);

		/* Lots of assertions to make sure that we can handle this problem. */
		assert (theTaskSpec.getNumDiscreteObsDims() == 1);
		assert (theTaskSpec.getNumContinuousObsDims() == 0);
		assert (!theTaskSpec.getDiscreteObservationRange(0).hasSpecialMinStatus());
		assert (!theTaskSpec.getDiscreteObservationRange(0).hasSpecialMaxStatus());
		//numStates = theTaskSpec.getDiscreteObservationRange(0).getMax() + 1;

		assert (theTaskSpec.getNumDiscreteActionDims() == 1);
		assert (theTaskSpec.getNumContinuousActionDims() == 0);
		assert (!theTaskSpec.getDiscreteActionRange(0).hasSpecialMinStatus());
		assert (!theTaskSpec.getDiscreteActionRange(0).hasSpecialMaxStatus());
		numActions = theTaskSpec.getDiscreteActionRange(0).getMax() + 1;

		gamma = theTaskSpec.getDiscountFactor();

		theta = 0.001;
		System.out.println("Theta: " + theta + " max rew: " + theTaskSpec.getRewardMax());

	}

	/**
	 * Start of an episode: pick an epsilon-greedy action for the initial
	 * observation and remember the (observation, action) pair so the first
	 * agent_step can record the transition.
	 */
	@Override
	public Action agent_start(Observation observation) {
		ep_n++;
		int newActionInt = egreedy(observation.getInt(0));
		if (DEBUG_QMAP) {
			System.out.println(Qmap.toString());
			System.out.println("Epsilon: " + eps);
		}
		if (DEBUG_MOVES) {
			System.out.println("go s: " + observation.getInt(0) + " act: " + newActionInt);
		}
		/**
		 * Create a structure to hold 1 integer action
		 * and set the value
		 */
		Action returnAction = new Action(1, 0, 0);
		returnAction.intArray[0] = newActionInt;

		lastAction = returnAction.duplicate();
		lastObservation = observation.duplicate();

		return returnAction;
	}

	/**
	 * One environment step: record the observed transition in the model,
	 * run a bounded prioritized sweep, then act epsilon-greedily in the
	 * new state.
	 */
	@Override
	public Action agent_step(double reward, Observation observation) {
		numSteps++;

		if (reward != 0) {
			totalReward += reward;
			if (DEBUG_ALL_REWARD) {
				System.out.println("Got a reward " + reward + " The total is " + totalReward);
			}
		}

		int newStateInt = observation.getInt(0);
		int lastStateInt = lastObservation.getInt(0);
		int lastActionInt = lastAction.getInt(0);

		// Record the transition (lastState, lastAction) -> newState and the
		// predecessor link used by the sweep.
		Actions last = get(lastStateInt);
		StateAction sa = last.observe(lastActionInt, newStateInt, reward);
		get(newStateInt).addBactrack(sa);

		// BUGFIX: pass the *undiscounted* successor value. updateModel applies
		// gamma itself (reward + gamma*nextQAValue); the old call passed
		// gamma*maxVal(...), discounting the successor value twice.
		updateModel(reward, maxVal(newStateInt));

		Action returnAction = new Action();
		returnAction.intArray = new int[]{egreedy(newStateInt)};

		if (DEBUG_MOVES) {
			System.out.println("go last: " + lastStateInt + " new: " + newStateInt + " act: " + returnAction.intArray[0]);
		}

		lastAction = returnAction.duplicate();
		lastObservation = observation.duplicate();

		if (DEBUG_QMAP) {

			System.out.println("\n------\n" + Qmap.toString() + "\n---------\n");
		}

		return returnAction;
	}

	/**
	 * Prioritized sweeping: if the TD error of the last state-action exceeds
	 * theta, enqueue it; then repeatedly back up the highest-priority
	 * state-action and propagate large changes to its predecessors, for at
	 * most maxQueueSteps iterations.
	 *
	 * @param reward      reward just received for the last transition
	 * @param nextQAValue undiscounted value estimate of the successor state
	 *                    (0 on episode end)
	 */
	private void updateModel(double reward, double nextQAValue) {
		int lastStateInt = lastObservation.getInt(0);
		int lastActionInt = lastAction.getInt(0);

		StateAction lastSA = get(lastStateInt).byAction(lastActionInt);

		// Priority = absolute one-step TD error of the last state-action.
		double p = Math.abs(lastSA.val() - (reward + gamma * nextQAValue));
		if (p > theta) {

			queue.add(new SAQueueEntry(lastSA, p));
		}

		int steps = 0;

		while (!queue.isEmpty()) {
			steps++;
			if (steps > maxQueueSteps) {
				break;
			}
			SAQueueEntry first = queue.poll();
			StateAction sa = first.sa;
			//double delta = backupSA(sa) - sa.val;

			// Remove/re-add around the value change so the sorted TreeSet of
			// moves is not corrupted by mutating its ordering key in place.
			TreeSet<StateAction> moves = Qmap.get(sa.myState).moves();
			double tmp = backupSA(sa);
			moves.remove(sa);
			sa.setVal(tmp);
			moves.add(sa);

			// Propagate to predecessors of sa's state, enqueueing those whose
			// error exceeds theta (capped per popped entry).
			StateAction[] back = get(sa.myState).backTrack();

			int maxbackups = 0;
			for (StateAction backSA : back) {
				p = Math.abs(backSA.val() - backupSA(backSA));
				if (p > theta) {
					maxbackups++;
					queue.add(new SAQueueEntry(backSA, p));
				}
				if (maxbackups > 10) {
					break;
				}
			}
		}
	}

//	private void updateETraces() {
//		for (Actions a : Qmap.values()) {
//			for (StateAction sa : a.moves()) {
//				sa.eligTrace = sa.eligTrace*lambda*alpha;
//			}
//		}
//	}

//	private void nullETraces() {
//		for (Actions a : Qmap.values()) {
//			for (StateAction sa : a.moves()) {
//				sa.eligTrace = 0.0;
//			}
//		}
//	}

	/**
	 * Full backup of a state-action from the learned model: average over all
	 * observed successor states of (average reward for the transition plus
	 * the discounted, eps-blended max/min successor value). Unvisited
	 * state-actions get the optimistic initVal.
	 *
	 * @param s the state-action to back up
	 * @return the new value estimate for s
	 */
	private double backupSA(StateAction s) {
		Queue<Integer> states = s.transitions();

		if (states.size() == 0) {
			return initVal;
		}

		double newVal = 0;
		for (Integer sprim : states) {
			// eps-blend of best and worst successor action values: a
			// pessimism term that shrinks as exploration decays.
			newVal += s.avgRewardForTransition(sprim) +
				gamma * ((1 - eps) * maxVal(sprim) + eps * minVal(sprim));
		}

		return newVal / states.size();
	}

	/**
	 * End of episode: record the terminal transition (successor id
	 * Integer.MIN_VALUE marks "terminal"), run a final sweep with successor
	 * value 0, decay epsilon, and reset per-episode state.
	 */
	@Override
	public void agent_end(double reward) {

		if (reward != 0) {
			totalReward += reward;
		}
		if (DEBUG_REWARD) {
			System.out.println(" The total is " + totalReward + " Ep: " + ep_n);
		}

		int lastStateInt = lastObservation.getInt(0);
		int lastActionInt = lastAction.getInt(0);

		Actions last = get(lastStateInt);
		last.observe(lastActionInt, Integer.MIN_VALUE, reward);

		updateModel(reward, 0);
		queue.clear();
		eps = Math.max(epsMin, eps * exp_discount); //reduce exploration
		lastObservation = null;
		lastAction = null;
		numSteps = 0;
		totalReward = 0;
	}

	/** Release all learned state and restore exploration to its initial value. */
	@Override
	public void agent_cleanup() {
		Qmap.clear();
		queue.clear();
		lastObservation = null;
		lastAction = null;
		eps = eps_ini; //initial value
		ep_n = 0;
	}

	/** Answer the standard RL-Glue name query; ignore everything else. */
	@Override
	public String agent_message(String message) {
		System.out.println(message);
		if (message.equals("what is your name?"))
			return "my name is DJAgent by Team 6, Java edition!";
		return "DJAgent(Java) doesn't hear your messages - the music is too loud";
	}

	/**
	 * Fetch the Actions record for a state, lazily creating (and optimistically
	 * initializing) it on first access.
	 */
	private Actions get(Integer state) {
		if (Qmap.containsKey(state)) {
			return Qmap.get(state);
		} else {
			Actions a = new Actions(state);
			a.initVal = initVal;
			Qmap.put(state, a);
			return a;
		}
	}

	/** Maximum action value in a state; optimistic initVal for unseen states. */
	private double maxVal(Integer state) {
		if (!Qmap.containsKey(state)) {
			return initVal;
		} else {
			return Qmap.get(state).maxVal();
		}
	}

	/** Minimum action value in a state; optimistic initVal for unseen states. */
	private double minVal(Integer state) {
		if (!Qmap.containsKey(state)) {
			return initVal;
		} else {
			return Qmap.get(state).minVal();
		}
	}




	/**
	 * Epsilon-greedy action selection with optimistic exploration.
	 * With probability eps, or when the state has no recorded moves yet, a
	 * uniformly random action is returned. Otherwise the greedy action is
	 * taken once every action has been tried or some value exceeds the
	 * optimistic initVal; until then, random actions are drawn, skipping
	 * ones already tried whose value is still at or below initVal, to push
	 * the agent toward untried actions. No tie-breaking among greedy actions.
	 *
	 * @param theState the current state id
	 * @return the chosen action index in [0, numActions)
	 */
	private int egreedy(int theState) {
		if ((rand.nextDouble() <= eps) || !Qmap.containsKey(theState)
				|| Qmap.get(theState).moves().isEmpty()) {
			return rand.nextInt(numActions);
		}

		double maxVal = Qmap.get(theState).maxVal();

		if ((maxVal > initVal) || (Qmap.get(theState).moves().size() >= numActions)) {
			return Qmap.get(theState).greedy();
		} else {
			int act = rand.nextInt(numActions);
			Actions acts = Qmap.get(theState);
			StateAction current = acts.byAction(act);

			// Resample until an untried action (byAction == null) or one with a
			// value above initVal is hit; terminates because fewer than
			// numActions actions exist in this branch. //enforce exploration
			while ((current != null) && (current.val() <= initVal)) {
				act = rand.nextInt(numActions);
				current = acts.byAction(act);
			}

			return act; //explore
		}
	}

	/**
	 * Queue entry pairing a state-action with its sweep priority.
	 * Orders DESCENDING by priority so the min-heap PriorityQueue pops the
	 * highest-priority backup first, as prioritized sweeping requires.
	 */
	private static class SAQueueEntry implements Comparable<SAQueueEntry> {
		public StateAction sa;
		public double priority;

		public SAQueueEntry(StateAction sa, double priority) {
			this.sa = sa;
			this.priority = priority;
		}

		@Override
		public int compareTo(SAQueueEntry o) {
			// BUGFIX: reversed Double.compare gives max-heap behaviour (the old
			// "(priority > o.priority) ? 1 : -1" popped the LOWEST priority
			// first and never returned 0, violating the Comparable contract).
			return Double.compare(o.priority, priority);
		}
	}

	/**
	 * Entry point: repeatedly connect a fresh DJAgent to RL-Glue, 70 runs in
	 * total. Host and port come from the RLGLUE_HOST / RLGLUE_PORT system
	 * properties (null falls back to the AgentLoader defaults).
	 */
	public static void main(String[] args) {
		String port = System.getProperty("RLGLUE_PORT");
		String host = System.getProperty("RLGLUE_HOST");

		for (int i = 0; i < 10 * 7; i++) {
			AgentLoader theAgentLoader = new AgentLoader(host, port, new DJAgent());
			theAgentLoader.run();
			System.out.println("--------------------------------");
		}
	}
}
