from game import *
from learningAgents import ReinforcementAgent
from featureExtractors import *

import random,util,math
          
class QLearningAgent(ReinforcementAgent):
  """
    Q-Learning Agent

    Implements tabular Q-learning:
      - getQValue: Q(s,a) lookup (0.0 for unseen pairs)
      - getValue:  V(s) = max_a Q(s,a)
      - getPolicy: argmax_a Q(s,a), random tie-break
      - getAction: epsilon-greedy action selection
      - update:    TD(0) Q-value update

    Instance variables available:
      - self.epsilon (exploration prob)
      - self.alpha (learning rate)
      - self.gamma (discount rate)

    Functions you should use:
      - self.getLegalActions(state)
        which returns legal actions for a state
  """
  def __init__(self, **args):
    "Initialize Q-values."
    ReinforcementAgent.__init__(self, **args)
    # Q-table keyed by (state, action); util.Counter defaults missing
    # entries to 0.0, which gives the required unseen-pair behavior.
    self.qvalues = util.Counter()

  def getQValue(self, state, action):
    """
      Returns Q(state, action).
      Returns 0.0 if we have never seen the (state, action) pair
      (Counter's default value).
    """
    return self.qvalues[(state, action)]

  def getValue(self, state):
    """
      Returns max_action Q(state, action) over the legal actions.
      Returns 0.0 for terminal states (no legal actions), which is the
      correct value of a terminal state for the TD target.
    """
    legalActions = self.getLegalActions(state)
    if not legalActions:
      return 0.0
    return max(self.getQValue(state, action) for action in legalActions)

  def getPolicy(self, state):
    """
      Returns the best action to take in the state, breaking ties
      uniformly at random. Returns None for terminal states.
    """
    legalActions = self.getLegalActions(state)
    if not legalActions:
      return None
    qvalues = [(self.getQValue(state, action), action) for action in legalActions]
    maxVal = max(q for q, _ in qvalues)
    # Random tie-break so early training does not systematically favor
    # the first action in the legal-action list.
    return random.choice([action for q, action in qvalues if q == maxVal])

  def getAction(self, state):
    """
      Epsilon-greedy action selection: with probability self.epsilon take
      a uniformly random legal action, otherwise take the greedy policy
      action. Returns None in terminal states.

      After choosing an action we inform the parent via
      self.doAction(state, action) -- do not clobber that call.
    """
    # Pick Action
    legalActions = self.getLegalActions(state)
    action = None
    # BUG FIX: guard terminal states. The original called
    # random.choice(legalActions) unconditionally on the exploration
    # branch, which raises IndexError when legalActions is empty.
    if legalActions:
      if util.flipCoin(self.epsilon):
        action = random.choice(legalActions)
      else:
        action = self.getPolicy(state)

    # Need to inform parent of action for Pacman (do not delete this line)
    self.doAction(state, action)

    return action

  def update(self, state, action, nextState, reward):
    """
      The parent class calls this to observe a
      state => action => nextState, reward transition.

      Standard Q-learning update:
        Q(s,a) <- Q(s,a) + alpha * (r + gamma * V(s') - Q(s,a))

      NOTE: never call this function yourself; it is called on your
      behalf by the training loop.
    """
    # Go through the accessor for consistency with getValue/getPolicy.
    currentQValue = self.getQValue(state, action)
    sample = reward + self.gamma * self.getValue(nextState)
    self.qvalues[(state, action)] = currentQValue + self.alpha * (sample - currentQValue)
    
class PacmanQAgent(QLearningAgent):
  "Exactly the same as QLearningAgent, but with different default parameters"

  def __init__(self, epsilon=0.05, gamma=0.8, alpha=0.2, numTraining=0, **args):
    """
    These default parameters can be changed from the pacman.py command line.
    For example, to change the exploration rate, try:
        python pacman.py -p PacmanQLearningAgent -a epsilon=0.1

    alpha    - learning rate
    epsilon  - exploration rate
    gamma    - discount factor
    numTraining - number of training episodes, i.e. no learning after these many episodes
    """
    # Fold the Pacman-specific defaults into the keyword bundle and defer
    # all real initialization to the generic Q-learning agent.
    args.update(epsilon=epsilon, gamma=gamma, alpha=alpha, numTraining=numTraining)
    QLearningAgent.__init__(self, **args)
    
class ApproximateQAgent(PacmanQAgent):
  """
     ApproximateQLearningAgent

     Linear function approximation over features:
       Q(s,a) = w . f(s,a)
     Only getQValue and update are overridden; all other QLearningAgent
     functions work as is.
  """
  def __init__(self, extractor='IdentityExtractor', **args):
    # Resolve the extractor class by name from this module's globals and
    # instantiate it.
    self.featExtractor = util.lookup(extractor, globals())()
    PacmanQAgent.__init__(self, **args)

    # Weight vector keyed by feature name; Counter defaults weights to 0.0.
    self.weights = util.Counter()

  def getQValue(self, state, action):
    """
      Returns Q(state, action) = w * featureVector,
      where * is the dot product operator (Counter.__mul__).
    """
    features = self.featExtractor.getFeatures(state, action)
    return self.weights * features

  def update(self, state, action, nextState, reward):
    """
      Updates the weights based on the observed transition:
        correction = (r + gamma * V(s')) - Q(s,a)
        w_i <- w_i + alpha * correction * f_i(s,a)
    """
    features = self.featExtractor.getFeatures(state, action)

    # BUG FIX: compute the TD correction ONCE, before touching any weight.
    # The original recomputed getQValue(state, action) inside the loop, so
    # features processed later saw partially-updated weights and each
    # feature was updated against a different TD error — no longer the
    # gradient of the squared TD error for this transition.
    correction = (reward + self.gamma * self.getValue(nextState)) - self.getQValue(state, action)

    for feature, value in features.items():
      self.weights[feature] += self.alpha * correction * value

  def final(self, state):
    "Called at the end of each game."
    # call the super-class final method
    PacmanQAgent.final(self, state)

    # did we finish training?
    if self.episodesSoFar == self.numTraining:
      # Training complete; weights could be printed here for debugging.
      pass

class BetterExtractor(FeatureExtractor):
  "Your Mini-contest 2 entry goes here.  Add features for capsuleClassic."

  def getFeatures(self, state, action):
    # Start from the baseline SimpleExtractor feature set; additional
    # capsuleClassic features would be layered on top of this Counter.
    baseline = SimpleExtractor()
    return baseline.getFeatures(state, action)

