"""
JAKE ELLOWITZ -- jellowitz@clarku.edu
Simulation of simplified bot states: 4 sensors, 4 states, 4 actions

NOTES:
a = [left,right,up,down] = [[-1,0],[1,0],[0,-1],[0,1]]
policy ALWAYS gives a coefficient of 1/4, so we can just use it as
  a coefficient to the elements in v.
P, R indexed by [a][s][s']

"""

from numpy import array, resize
import random
import sys

class brain:
  """
  Policy-iteration "brain" for a bot with 4 states and 4 actions.

  Builds transition (p) and reward (r) tables indexed [a][s][s'], then
  alternates policy evaluation (build_policy) and greedy improvement
  (update_policy) until the policy is stable.  `iterate` translates the
  bot's 4-bit sensor word into an internal state, samples the learned
  policy, and maps the chosen action back to the bot's action encoding.
  """

  def __init__ (self, n_states=4, n_angles=4):
    # NOTE(review): the original ignored both parameters and hard-coded 4;
    # the p/r tables below encode a 4-state world, so the counts stay
    # fixed at 4.  Parameters kept only for interface compatibility.
    self.n_states = 4
    self.n_actions = 4
    self.gamma = 0.9     # discount factor
    # cutoff on max |delta V| per evaluation sweep
    self.delta = 0.0001

    self.init_v ()
    self.init_q ()
    self.init_pi ()
    self.construct_s ()
    self.construct_a ()
    # p, r are a x s x s' matrices
    self.construct_p ()
    self.construct_r ()

    # build the policy
    self.run ()

  def init_v (self):
    """Zero-initialise the state-value function v[s]."""
    self.v = [0.] * self.n_states

  def init_q (self):
    """Zero-initialise q[s][a] (kept for compatibility; never read)."""
    self.q = [[0.] * self.n_actions for _ in range (self.n_states)]

  def update_vs (self, n_states, q, v):
    # Stub kept from the original: ignores its arguments and returns the
    # current value function unchanged.
    return self.v

  def init_pi (self):
    """Start with a deterministic policy: action 3 ('forward') everywhere."""
    self.pi = [[0.] * self.n_actions for _ in range (self.n_states)]
    for j in range (self.n_states):
      self.pi [j][3] = 1.

  def construct_s (self):
    """The four internal states."""
    self.s = [0, 1, 2, 3]

  def construct_a (self):
    """The four actions, in internal order."""
    self.a = ['left', 'turn_around', 'right', 'forward']

  def construct_p (self):
    """Build transition probabilities p[a][s][s']."""
    n_s, n_a = len (self.s), len (self.a)
    self.p = [[[0.] * n_s for _ in range (n_s)] for _ in range (n_a)]
    for i in range (n_a):
      for j in range (n_s):
        for k in range (n_s):
          # Rotations (actions 0-2) deterministically shift the state by
          # i+1 mod 4; action 3 (forward) is the special case below.
          if (k - j) % self.n_states == (i + 1) % self.n_states and i != 3:
            self.p[i][j][k] = 1.
          # Moving forward while against the wall (states 0, 1): stay put.
          elif i == 3 and (j == 0 or j == 1):
            self.p[i][j][j] = 1.
    # Moving forward from states 2, 3 lands in state 0 or 1 with equal odds.
    self.p[3][3][0] = 0.5
    self.p[3][3][1] = 0.5
    self.p[3][2][0] = 0.5
    self.p[3][2][1] = 0.5

  def construct_r (self):
    """Build rewards r[a][s][s']: -1 for hitting the wall, +1 for progress."""
    n_s, n_a = len (self.s), len (self.a)
    self.r = [[[0.] * n_s for _ in range (n_s)] for _ in range (n_a)]
    for i in range (n_a):
      for j in range (n_s):
        for k in range (n_s):
          # penalty if we keep hitting the wall (forward, no movement)
          if j == k and i == 3:
            self.r[i][j][k] = -1.
          # reward for moving forward when not blocked by a wall
          elif i == 3:
            self.r[i][j][k] = 1.

  def next_action_from_pi (self, state_num):
    """Sample an action index from the distribution pi[state_num].

    BUG FIX: the original returned pi[state_num].index(prob), i.e. the
    FIRST position holding a duplicated probability value; we now return
    the index actually reached by the cumulative sum.
    """
    r = random.random ()
    cumulative_prob = 0.
    for idx, prob in enumerate (self.pi [state_num]):
      cumulative_prob += prob
      if r < cumulative_prob:
        return idx
    # Float round-off can leave the cumulative sum a hair below 1; fall
    # back to the last action instead of falling off the end (the
    # original printed an error and implicitly returned None here).
    return self.n_actions - 1

  def update_policy (self):
    """Greedy policy improvement; returns the policy once it is stable."""
    stable = True
    for j in range (len (self.s)):
      previous = list (self.pi [j])
      # One-step lookahead Q(s=j, a=i) under the current value function.
      q_values = []
      for i in range (len (self.a)):
        total = 0.
        for k in range (len (self.s)):
          total += self.p[i][j][k] * (
                     self.r[i][j][k] + self.gamma * self.v[k])
        q_values.append (total)
      # Make the policy deterministic on the best action (first on ties).
      best = [0.] * len (self.a)
      best [q_values.index (max (q_values))] = 1.
      self.pi [j] = best
      if previous != best:
        stable = False
    if not stable:
      # policy changed: re-evaluate v under the new policy
      return self.build_policy ()
    else:
      return self.pi

  def build_policy (self):
    """
    Evaluate v under the current policy until max |delta v| < self.delta,
    then hand off to update_policy.

    The original recursed once per evaluation sweep (hence the huge
    recursion-limit hack in run); the loop below is equivalent.  The
    remaining build_policy/update_policy mutual recursion is only as deep
    as the number of policy changes, which is small.
    """
    while True:
      cutoff = 0.
      # Backup v for the convergence check
      v_backup = list (self.v)
      # In-place (Gauss-Seidel) sweep: v[k] for k < j is already updated.
      for j in range (len (self.v)):
        action = self.next_action_from_pi (j)
        total = 0.
        for k in range (len (self.s)):
          total += self.p[action][j][k] * (
                     self.r[action][j][k] + self.gamma * self.v[k])
        self.v[j] = total
        cutoff = max (cutoff, abs (v_backup[j] - self.v[j]))
      if cutoff < self.delta:
        return self.update_policy ()

  def run (self):
    # build_policy is iterative now, so the original
    # sys.setrecursionlimit(10000000) hack is no longer needed.
    self.build_policy ()

  def iterate (self, old_state, old_action, reward, state):
    """
    One control step: translate the 4-bit sensor word `state` into an
    internal state, sample the learned policy, and return the action in
    the bot's own encoding.  old_state/old_action/reward are accepted
    for interface compatibility but unused (the policy is precomputed).
    """
    # Handle the otherwise unhandled case of the bot stranded in the
    # middle (no sensor bits set).
    if state == 0:
      return 0

    bit_order = list (range (4))
    # Shuffle so that when several sensor bits are set (bot stuck against
    # a wall) the surviving bit is chosen at random.
    random.shuffle (bit_order)

    # Translate the bit mask into a 0-3 state: the last set bit in the
    # shuffled order wins.
    it_state = 0
    for i in bit_order:
      if 1 & (state >> i):
        it_state = i
    # continue making the translation
    it_state = (it_state + 1) % 4

    it_action = self.next_action_from_pi (it_state)
    # Internal and external action encodings run in opposite order
    # (3<->0, 2<->1, 1<->2, 0<->3), so the translation is a reflection.
    # (The original's if/elif chain encoded exactly this mapping, with an
    # unreachable else branch that would have left `action` unbound.)
    return 3 - it_action


      
    
  

