"""
JAKE ELLOWITZ
"""

import random

class brain:
  """
  Tabular Q-learning agent with an epsilon-greedy behavior policy.

  Keeps an n_states x n_actions action-value table `q`, a per-(state,
  action) learning-rate table `alpha`, and a stochastic policy table
  `policy`.  Feed observed transitions to iterate() and it returns the
  next action to take.
  """

  def __init__(self, n_states, n_actions):
    """
    Implementation of Q-Learning algorithm.
    Requires n_states, n_actions to initialize the matrices.
    """
    # Episodes are cut off after this many steps (see iterate()).
    self.max_episode_length = 2*n_actions
    self.epsilon = 0.1  # exploration rate of the epsilon-greedy policy
    self.gamma = 0.9    # discount factor for future rewards
    self.n_states = n_states
    self.n_actions = n_actions
    # Action values start randomized in [-1, 1]; state values start at 0.
    self.q = self.init_qs(self.n_states, self.n_actions)
    self.v = self.init_vs(self.n_states)
    # Per-(state, action) learning rates, decayed harmonically in iterate()
    # to satisfy the usual stochastic-approximation step-size conditions.
    self.alpha = self.init_alphas(self.n_states, self.n_actions)
    # Initial epsilon-greedy policy derived from the random q table.
    self.policy = self.init_policy(self.n_states, self.n_actions, self.q)
    # Number of steps taken in the current episode.
    self.iteration = 0

  def init_alphas(self, n_states, n_actions):
    """Return an n_states x n_actions matrix of learning rates, all 1.0."""
    return [[1.0]*n_actions for _ in range(n_states)]

  def init_vs(self, n_states):
    """Return a length-n_states state-value vector initialized to 0.0."""
    return [0.0]*n_states

  def init_qs(self, n_states, n_actions):
    """
    Initialize the action-value table uniformly at random in [-1, 1].
    Inputs: n_states, n_actions
    Outputs: q (n_states x n_actions list of lists)
    """
    q_min, q_max = -1.0, 1.0
    # Row-major generation preserves the original random.random() call order.
    return [[q_min + (q_max - q_min)*random.random()
             for _ in range(n_actions)]
            for _ in range(n_states)]

  def init_policy(self, n_states, n_actions, q):
    """Build the initial epsilon-greedy policy table from q."""
    policy = [[0.0]*n_actions for _ in range(n_states)]
    for state in range(n_states):
      # Bug fix: the original assigned the half-built table to self.policy
      # here; rebind the local instead and let __init__ store the result.
      policy = self.update_policy_state(n_states, n_actions, q, policy, state)
    return policy

  def update_policy_state(self, n_states, n_actions, q, policy, state):
    """
    Make policy[state] epsilon-greedy with respect to q[state]: the
    greedy action gets probability 1 - epsilon + epsilon/n_actions,
    every other action gets epsilon/n_actions.  Mutates and returns
    policy.
    """
    a_max = q[state].index(max(q[state]))
    for action in range(n_actions):
      if action == a_max:
        policy[state][action] = 1.0 - self.epsilon + self.epsilon/n_actions
      else:
        policy[state][action] = self.epsilon/n_actions
    return policy

  def update_policy(self, n_states, n_actions, q, policy):
    """Refresh the epsilon-greedy policy for every state."""
    for state in range(n_states):
      policy = self.update_policy_state(n_states, n_actions, q, policy, state)
    return policy

  def update_qs(self, n_states, n_actions, q, alpha,
                old_state, old_action, reward, state):
    """
    Apply one Q-learning backup:
    q(s,a) += alpha(s,a) * (r + gamma * max_a' q(s',a') - q(s,a)).
    Mutates and returns q.
    """
    td_error = reward + self.gamma*max(q[state]) - q[old_state][old_action]
    q[old_state][old_action] += alpha[old_state][old_action]*td_error
    return q

  def update_vs(self, n_states, q, v):
    """Set v[s] to the sum of q[s] over actions, for every state s."""
    for state in range(n_states):
      v[state] = sum(q[state])
    return v

  def get_next_action(self, policy, state):
    """
    Sample the next action from the distribution policy[state] by
    inverse-CDF sampling with a single uniform draw.
    Inputs: policy, state
    Outputs: action (index into policy[state])
    """
    r = random.random()
    cumulative_prob = 0.0
    for action in range(len(policy[state])):
      cumulative_prob += policy[state][action]
      if r <= cumulative_prob:
        return action
    # Bug fix: floating-point rounding can leave the cumulative sum
    # fractionally below 1.0; fall back to the last action instead of
    # printing a warning and implicitly returning None.
    return len(policy[state]) - 1

  def iterate(self, old_state, old_action, reward, state):
    """
    Advance the learner by one observed transition.

    Give this function the old state, the old action, the resulting
    reward, and the current state; it decays the learning rate for
    (old_state, old_action), applies one Q-learning backup, and returns
    the next action sampled from the current policy.  Tracking the
    current state input and current action output is to be done
    externally to ensure they correspond.  When an episode ends
    (action 0 is chosen, or the episode exceeds max_episode_length
    steps) the policy is re-derived from q and the step counter reset.
    """
    # Harmonic decay: starting from 1, after k updates the rate is
    # 1/(k+1), so the decay happens before the rate is used below.
    self.alpha[old_state][old_action] /= (self.alpha[old_state][old_action] + 1.0)
    self.q = self.update_qs(self.n_states, self.n_actions, self.q, self.alpha,
                            old_state, old_action, reward, state)
    action = self.get_next_action(self.policy, state)
    self.iteration += 1
    # End of episode: refresh the policy against the updated q table.
    if action == 0 or self.iteration > self.max_episode_length:
      self.policy = self.update_policy(self.n_states, self.n_actions,
                                       self.q, self.policy)
      self.iteration = 0
    return action
