

from game import Directions, Agent, Actions

import random,util,time

class ValueEstimationAgent(Agent):
    """
    Abstract agent that assigns values to (state, action) pairs.

    Stores the standard reinforcement-learning hyperparameters and
    declares the estimation interface that concrete subclasses must
    implement.
    """

    def __init__(self, alpha=1.0, epsilon=0.05, gamma=0.8, numTraining=10):
        """
        alpha       - learning rate
        epsilon     - exploration rate
        gamma       - discount factor
        numTraining - number of training episodes
        """
        self.alpha, self.epsilon = float(alpha), float(epsilon)
        self.discount = float(gamma)
        self.numTraining = int(numTraining)

    def getQValue(self, state, action):
        """Return Q(state, action).  Must be overridden by subclasses."""
        util.raiseNotDefined()

    def getValue(self, state):
        """
        Return the value of this state under the best available action:

            V(s) = max_{a in actions} Q(s, a)
        """
        util.raiseNotDefined()

    def getPolicy(self, state):
        """
        Return the best action in the given state:

            policy(s) = arg_max_{a in actions} Q(s, a)

        If several actions tie for the maximum Q-value, it does not
        matter which one is returned.
        """
        util.raiseNotDefined()

    def getAction(self, state):
        """Choose an action to take in the given state.  Must be overridden."""
        util.raiseNotDefined()

class ReinforcementAgent(ValueEstimationAgent):
    """
    Abstract reinforcement-learning agent: tracks episodes, accumulates
    rewards, and turns environment observations into calls to
    self.update(state, action, nextState, reward), which subclasses
    must implement.
    """

    def update(self, state, action, nextState, reward):
        """
        Called (via observeTransition) after each observed transition
        and reward; subclasses implement their learning rule here.
        """
        util.raiseNotDefined()

    ####################################
    #    Read These Functions          #
    ####################################

    def getLegalActions(self, state):
        """
        Return the actions available in the given state.  Subclasses
        should use this, not the state directly, to obtain legal actions.
        """
        return self.actionFn(state)

    def observeTransition(self, state, action, nextState, deltaReward):
        """
        Inform the agent that a transition has been observed.  Adds the
        reward to the running episode total, then forwards the
        transition to self.update with the same arguments.
        """
        self.episodeRewards += deltaReward
        self.update(state, action, nextState, deltaReward)

    def startEpisode(self):
        """Called by the environment when a new episode is starting."""
        self.lastState = None
        self.lastAction = None
        self.episodeRewards = 0.0

    def stopEpisode(self):
        """Called by the environment when the episode has finished."""
        if self.episodesSoFar < self.numTraining:
            self.accumTrainRewards += self.episodeRewards
        else:
            self.accumTestRewards += self.episodeRewards
        self.episodesSoFar += 1
        if self.episodesSoFar >= self.numTraining:
            # Training is over: stop exploring and stop learning.
            self.epsilon = 0.0
            self.alpha = 0.0

    def isInTraining(self):
        """Return True while the agent is still within its training episodes."""
        return self.episodesSoFar < self.numTraining

    def isInTesting(self):
        """Return True once the training episodes have been used up."""
        return not self.isInTraining()

    def __init__(self, actionFn=None, numTraining=100, epsilon=0.5, alpha=0.5, gamma=1):
        """
        actionFn    - function mapping a state to its legal actions
                      (defaults to state.getLegalActions())
        numTraining - number of training episodes
        epsilon     - exploration rate
        alpha       - learning rate
        gamma       - discount factor
        """
        # 'is None' rather than '== None': identity test for the singleton.
        if actionFn is None:
            actionFn = lambda state: state.getLegalActions()
        self.actionFn = actionFn
        self.episodesSoFar = 0
        self.accumTrainRewards = 0.0
        self.accumTestRewards = 0.0
        self.numTraining = int(numTraining)
        self.epsilon = float(epsilon)
        self.alpha = float(alpha)
        self.discount = float(gamma)

    def setEpsilon(self, epsilon):
        self.epsilon = epsilon

    def setLearningRate(self, alpha):
        self.alpha = alpha

    def setDiscount(self, discount):
        self.discount = discount

    def doAction(self, state, action):
        """
        Record the (state, action) pair taken, so the next observation
        can report the resulting transition.
        """
        self.lastState = state
        self.lastAction = action

    def observationFunction(self, state):
        """
        Called by the simulation after each step.  Reports the score
        difference since the last recorded state as the transition
        reward, then returns the (unchanged) state.
        """
        if self.lastState is not None:
            reward = state.getScore() - self.lastState.getScore()
            self.observeTransition(self.lastState, self.lastAction, state, reward)
        return state

    def registerInitialState(self, state):
        """Called at the start of each game; begins a new episode."""
        self.startEpisode()
        if self.episodesSoFar == 0:
            # print() with a single argument is valid in both Python 2 and 3.
            print('Iniciando %d episodios de entrenamiento' % (self.numTraining))

    def final(self, state):
        """
        Called by the game at the terminal state.  Reports the final
        transition, closes the episode, and periodically prints
        progress statistics.
        """
        deltaReward = state.getScore() - self.lastState.getScore()
        self.observeTransition(self.lastState, self.lastAction, state, deltaReward)
        self.stopEpisode()

        # Lazily create the bookkeeping attributes on first use.
        if not hasattr(self, 'episodeStartTime'):
            self.episodeStartTime = time.time()
        if not hasattr(self, 'lastWindowAccumRewards'):
            self.lastWindowAccumRewards = 0.0
        self.lastWindowAccumRewards += state.getScore()

        NUM_EPS_UPDATE = 100
        if self.episodesSoFar % NUM_EPS_UPDATE == 0:
            print('Estado del aprendizaje por refuerzo:')
            windowAvg = self.lastWindowAccumRewards / float(NUM_EPS_UPDATE)
            if self.episodesSoFar <= self.numTraining:
                trainAvg = self.accumTrainRewards / float(self.episodesSoFar)
                print('\tCompletado %d de  %d episodios de entrenamiento' % (
                       self.episodesSoFar, self.numTraining))
                print('\tPromedio de recompensas de todo el entrenamiento: %.2f' % (
                        trainAvg))
            else:
                testAvg = float(self.accumTestRewards) / (self.episodesSoFar - self.numTraining)
                print('\tCompletados %d episodios de entrenamiento' % (self.episodesSoFar - self.numTraining))
                print('\tPromedio de recompensa: %.2f' % testAvg)
            print('\tRecompensa promedio en los ultimos %d episodios: %.2f' % (
                    NUM_EPS_UPDATE, windowAvg))
            print('\tEpisodio  tomo %.2f segundos' % (time.time() - self.episodeStartTime))
            self.lastWindowAccumRewards = 0.0
            self.episodeStartTime = time.time()

        if self.episodesSoFar == self.numTraining:
            msg = 'Entrenamiento finalizado'
            print('%s\n%s' % (msg, '-' * len(msg)))