"""
JAKE ELLOWITZ
"""

import random

class brain:
  """
  On-policy Monte Carlo reinforcement-learning agent.

  Maintains action-value estimates Q(s, a) as the running average of
  episode returns, and derives an epsilon-greedy policy from Q. Episodes
  are delimited inside iterate(): either the iteration budget is exhausted
  or action 0 is chosen, at which point Q and the policy are refreshed.
  """

  def __init__ (self, n_states, n_actions):
    """
    Implementation of on-policy MC RL algorithm.
    Requires n_states, n_actions to initialize the matrices.
    """
    # Episode budget scales with the size of the action set.
    self.episode_length = n_actions*2
    self.epsilon = 0.1
    self.n_states = n_states
    self.n_actions = n_actions
    # Q is randomized at first so the initial greedy choice is arbitrary.
    self.q = self.init_qs (self.n_states, self.n_actions)
    self.v = self.init_vs (self.n_states)
    # return flags tell us if (s,a) has occurred. Another way to think
    # about the return flags is they allow us to start adding to Q(s,a)
    # after the first occurrence of (s,a)
    self.returns, self.return_flags = self.reset_returns (self.n_states, self.n_actions)
    # Per-row copy: a shallow list(self.returns) would alias the inner
    # rows, so mutating self.returns would silently corrupt av_returns
    # (and vice versa) for the whole of episode 1.
    self.av_returns = [row[:] for row in self.returns]
    # Get our initial policy based on initial q.
    # NOTE(review): episode_number is passed as the constant 1 here and in
    # new_episode_prep, so epsilon never actually decays — preserved as-is.
    self.policy = self.update_policy (self.n_states,
                                      self.n_actions,
                                      self.q,
                                      1,
                                      self.epsilon)
    # Hasn't iterated yet
    self.iteration = 0
    # But we have to start in some episode
    self.episode_number = 1

  def init_vs (self, n_states):
    """
    Initialize the state-value list to zeros.
    Inputs: n_states
    Outputs: v (list of n_states floats, all 0.)
    """
    return [0.] * n_states

  def init_qs (self, n_states, n_actions):
    """
    Initialize the action-value functions uniformly at random in [0, 10).
    Inputs: n_states, n_actions
    Outputs: q (n_states x n_actions nested list)
    """
    q_min = 0.0
    q_max = 10.0
    return [[q_min + (q_max - q_min) * random.random ()
             for _ in range (n_actions)]
            for _ in range (n_states)]

  def reset_returns (self, n_states, n_actions):
    """
    Used to reset the total episode returns and the return flags
    (Resetting return flags tells us first occurrence hasn't happened)
    Inputs: n_states, n_actions
    Outputs: returns, return_flags (both NULLED)
    """
    returns = [[0.] * n_actions for _ in range (n_states)]
    return_flags = [[False] * n_actions for _ in range (n_states)]
    return returns, return_flags

  def update_returns (self, n_states, n_actions, return_flags, returns, reward):
    """
    Generates an updated version of the returns based on the latest reward.
    Only (s,a) pairs whose flag is set (first occurrence seen) accumulate.
    This is typically reset after each episode, having its information
    compacted into the average returns list.
    Inputs: n_states, n_actions, return_flags, returns, reward
    Outputs: returns (list, mutated in place and returned)
    """
    for i in range (n_states):
      for j in range (n_actions):
        if return_flags[i][j]:
          returns[i][j] += reward
    return returns

  def update_average_returns (self, n_states, n_actions,
                              average_returns, next_returns, episode_number):
    """
    Generate an updated version of the average returns via the incremental
    mean recursion <R>_{n} = ((n-1)<R>_{n-1} + R_n) / n.
    Inputs: n_states, n_actions, average_returns, next episode returns,
            episode_number
    Outputs: average_returns (mutated in place and returned)
    """
    for i in range (n_states):
      for j in range (n_actions):
        average_returns[i][j] = ((episode_number - 1) * average_returns[i][j] +
                                 next_returns[i][j]) / episode_number
    return average_returns

  def update_policy (self, n_states, n_actions, q, episode_number, epsilon):
    """
    Output an epsilon-greedy policy derived from the action-value functions.
    The greedy action gets 1 - eps/k + eps/(k*n_actions), every other
    action gets eps/(k*n_actions), where k = episode_number.
    Inputs: n_states, n_actions, q, episode_number, epsilon
    Outputs: policy (n_states x n_actions list of probabilities)
    """
    policy = []
    explore = epsilon / (episode_number * n_actions)
    for i in range (n_states):
      a_max = q[i].index (max (q[i]))
      row = [explore] * n_actions
      row[a_max] = 1. - epsilon / episode_number + explore
      policy.append (row)
    return policy

  def update_qs (self, n_states, n_actions, q, av_returns):
    """
    As the MC algorithm specifies, the action-value function is nothing but
    the average of the returns (so long as this sample size is large).
    Using this we output a new q based on <R>.
    Inputs: n_states, n_actions, q, av_returns
    Outputs: q (mutated in place and returned)
    """
    for i in range (n_states):
      for j in range (n_actions):
        q[i][j] = av_returns[i][j]
    return q

  def update_vs (self, n_states, q, v):
    """
    Refresh the state values from q.
    NOTE(review): sums the whole q row rather than taking a max or a
    policy-weighted average — unusual for V(s), but preserved as-is
    (this method is currently unused; its call site is disabled).
    Inputs: n_states, q, v
    Outputs: v (mutated in place and returned)
    """
    for i in range (n_states):
      v[i] = sum (q[i])
    return v

  def get_next_action (self, policy, state):
    r"""
    Grab the next action by sampling the distribution policy[state],
    i.e. find a = \pi(s) using a random number.
    Inputs: policy, state
    Outputs: action (index into policy[state])
    """
    r = random.random ()
    cumulative_prob = 0
    for action in range (len (policy[state])):
      cumulative_prob += policy[state][action]
      if r <= cumulative_prob:
        return action
    # Float rounding can leave the cumulative sum a hair below 1.0 so
    # that no bucket matched; the last action is the correct fallback.
    return len (policy[state]) - 1

  def new_episode_prep (self):
    """
    At the end of the episode, some things need to be reset, and the policy
    needs to be updated. This does all of that, no inputs attached.
    Inputs: none
    Outputs: none
    """
    self.iteration = 0
    self.episode_number += 1
    self.returns, self.return_flags = self.reset_returns (self.n_states, self.n_actions)
    self.q = self.update_qs (self.n_states, self.n_actions, self.q, self.av_returns)
    # State values are not currently maintained:
    # self.v = self.update_vs (self.n_states, self.q, self.v)
    self.policy = self.update_policy (self.n_states, self.n_actions,
                                      self.q, 1, self.epsilon)

  def iterate (self, old_state, old_action, reward, state):
    """
    Handles all extra stuff:
    update the return with last state, last action, and reward;
    retrieve the next action; built-in episode handling.
    Essentially, give this function the old state, the old action, the
    resulting reward, and the current state, and you get back the action.
    Tracking the current state input and current action output is to be
    done externally to ensure they correspond.
    """
    self.return_flags [old_state][old_action] = True
    self.returns = self.update_returns (self.n_states, self.n_actions,
                                        self.return_flags, self.returns, reward)
    self.av_returns = self.update_average_returns (self.n_states, self.n_actions,
                                                   self.av_returns, self.returns,
                                                   self.episode_number)
    action = self.get_next_action (self.policy, state)
    self.iteration += 1
    # End of an episode: budget exhausted or the terminating action chosen.
    if self.iteration > self.episode_length or action == 0:
      self.new_episode_prep ()
    return action
