""" Q-learning agent. """

def update_q(Q, state, action, reward, next_state, gamma=0.9, alpha=0.1, actions=()):
    """ Apply one Q-learning update to a state-action value table.

        The table does not need to be initialized first: missing states
        are filled in on the fly.
          Q          - dict mapping state -> {action: value}
          gamma      - discounting factor
          alpha      - learning rate
          actions    - available actions for next_state, used to seed its
                       entry with 0.0 values when the state is unseen
    """
    if state not in Q:
        Q[state] = {}
    if next_state not in Q:
        # Seed every known action at 0.0 so the max below is well-defined.
        Q[next_state] = dict((a, 0.0) for a in actions)

    Qs = Q[state]
    Qsp = Q[next_state]
    # Value of the greedy action in the successor state (0.0 if unknown).
    maxQ = max(Qsp.values()) if Qsp else 0.0
    old = Qs.get(action, 0.0)
    # Standard TD error: r + gamma * max_a' Q(s', a') - Q(s, a)
    error = (reward + gamma * maxQ) - old
    Qs[action] = old + alpha * error
    
def arg_max(dct):
    """ Return the key of dct with the largest value, or None if empty.

        Ties are broken by iteration order (the first maximal key wins),
        matching the original linear-scan behavior.
    """
    if not dct:
        return None
    # max() with a key function replaces the manual scan; the original
    # compared against a bare name `-inf`, which is a NameError in both
    # Python 2 and 3 unless injected from elsewhere.
    return max(dct, key=dct.get)


class Agent(object):

    # Q-table shared by every agent instance on purpose: all agents on the
    # team learn into, and read from, the same table (see __init__).
    Q = {}

    def __init__(self, id, team, settings=None, field_rects=None, field_grid=None, nav_mesh=None, q_table_file='agent_q_table',
            position_coarseness=1, include_others=True, action_type=0, epsilon=0.1):
        """ Each agent is initialized at the beginning of each game.
            The first agent (id==0) can use this to set up global variables.
            Note that the properties pertaining to the game field might not be
            given for each game.
              q_table_file        - path used to load/persist the shared Q-table
              position_coarseness - tiles per state "block" (coarsens positions)
              include_others      - include teammates' positions in the state
              action_type         - 0: move in one of 9 directions,
                                    1: navigate to a nav-mesh point of interest
              epsilon             - exploration rate for epsilon-greedy selection
        """
        self.id = id
        self.team = team
        self.grid = field_grid
        self.mesh = nav_mesh
        self.settings = settings

        # State representation settings
        self.action_type = action_type
        self.include_others = include_others
        self.position_coarseness = position_coarseness
        self.poi = nav_mesh.keys()
        self.had_ammo = False
        self.target = None
        # Default facing direction: red team faces right, blue faces left.
        self.face_to = 0 if (self.team == 0) else pi

        # Setup for Q-learning
        self.epsilon = epsilon
        self.prev_state = None
        self.prev_action = None
        self.q_table_file = q_table_file

        # Only the first agent (re)loads the shared table; the rest reuse it.
        if self.id == 0:
            if not os.path.exists(self.q_table_file):
                self.__class__.Q = {}
            else:
                # finalize() dumps in binary mode ('wb'), so load in binary
                # mode too; the file is also closed properly now.
                with open(self.q_table_file, 'rb') as f:
                    self.__class__.Q = pickle.load(f)
        self.Q = self.__class__.Q # Store local reference.

        # Debug-rendering toggles, flipped by keypresses in observe().
        self.show_cp_state = False
        self.show_ammo_state = False


    def observe(self, observation):
        """ Each agent is passed an observation using this function,
            before being asked for an action. You can store either
            the observation object or its properties to use them
            to determine your action. Note that the observation object
            is modified in place.
        """
        # Toggle debug-view state depending on keypress
        if ord('c') in observation.keys:
            self.show_cp_state = not self.show_cp_state
        if ord('a') in observation.keys:
            self.show_ammo_state = not self.show_ammo_state
        self.observation = observation
        self.loc = observation.loc

    def get_state(self, observation):
        """ Returns state (estimate) from observation: a hashable tuple
            (block-x, block-y, has-ammo, per-CP ownership, teammates-or-id).
        """
        bs = self.settings.tilesize * self.position_coarseness # Block Size
        x = observation.loc[0] // bs
        y = observation.loc[1] // bs
        have_ammo = observation.ammo > 0
        # Which control points our team owns; cp is (x, y, owner_team).
        own_points = tuple(cp[2] == self.team for cp in observation.cps)
        if self.include_others:
            friends = tuple((fx // bs, fy // bs) for (fx, fy) in observation.friends)
        else:
            # Use own id so different agents still key distinct states.
            friends = self.id
        return (x, y, have_ammo, own_points, friends)

    def get_reward(self, observation):
        """ Returns reward for last state/action:
            (CPs we own - CPs they own) minus a small step cost, plus a
            one-time bonus of +10 for picking up ammo.
        """
        # Component based on CPs
        we_own = len([cp for cp in observation.cps if cp[2] == self.team])
        they_own = len([cp for cp in observation.cps if cp[2] == 1 - self.team])
        reward = float(we_own - they_own) - 0.1
        # Component based on picked up ammo
        if not self.had_ammo and observation.ammo > 0:
            reward += 10.0
        self.had_ammo = observation.ammo > 0
        return reward

    def get_actions(self, state):
        """ Available actions: the 9 movement directions (type 0) or the
            nav-mesh points of interest (type 1).
        """
        if self.action_type == 0:
            return [(-1,-1), (-1,0), (-1,1), (0,-1), (0,0), (0,1), (1,-1), (1,0), (1,1)]
        elif self.action_type == 1:
            return self.poi

    def action(self):
        """ This function is called every step and should
            return a tuple in the form: (turn, speed, shoot)
        """

        ## Q-LEARNING
        # Determine current state
        state = self.get_state(self.observation)
        # Determine reward
        reward = self.get_reward(self.observation)
        # Epsilon greedy action selection
        actions = self.get_actions(state)
        action = None
        if state in self.Q:
            action = arg_max(self.Q[state])
        if action is None or rand() < self.epsilon:
            action = random.choice(actions)
        # Update Q table. On the very first step there is no previous
        # state/action yet, so seed them with the current ones.
        if self.prev_state is None:
            self.prev_state = state
            self.prev_action = action
        update_q(self.Q, self.prev_state, self.prev_action, reward, state, actions=actions)
        # Remember state and action for next step
        self.prev_state = state
        self.prev_action = action


        ## RETURNING A (turn,speed,shoot) ACTION TO THE GAME
        # Shoot the closest visible enemy if we have ammo and it is in range.
        obs = self.observation
        ts = self.settings.tilesize
        shoot = False
        self.target = None
        foes = filter(lambda f: not line_intersects_grid(self.loc, f[0:2], self.grid, ts), obs.foes)
        foes = sorted(foes, key=lambda f: point_dist(f[0:2], self.loc))
        if foes:
            # Use the (x, y) slice consistently; foe tuples carry extra fields.
            if obs.ammo > 0 and point_dist(foes[0][0:2], self.loc) < self.settings.max_range:
                self.target = foes[0][0:2]
                shoot = True
            foe_rel = point_sub(foes[0][0:2], self.loc)
            # Face to last seen enemy
            self.face_to = math.atan2(foe_rel[1], foe_rel[0])

        # Compute path to goal
        if self.target is None:
            if self.action_type == 0:
                # Action is a direction vector; scale it to max speed.
                self.target = point_add(self.loc, point_mul(action, self.settings.max_speed))
            elif self.action_type == 1:
                # Action is a POI; head for the first waypoint toward it.
                self.target = find_path(obs.loc, action, self.mesh, self.grid, ts)[0]

        dx = self.target[0] - obs.loc[0]
        dy = self.target[1] - obs.loc[1]

        if -1 < dx < 1 and -1 < dy < 1:
            # Close enough: stand still and turn to the preferred facing.
            dx = dy = 0
            turn = angle_fix(self.face_to - obs.angle)
            speed = 0
        else:
            turn = angle_fix(math.atan2(dy, dx) - obs.angle)
            speed = (dx**2 + dy**2)**0.5
        # A turn beyond the per-step limit means we would shoot off-target;
        # hold fire and, unless already moving fast, wait for the turn.
        if turn > self.settings.max_turn or turn < -self.settings.max_turn:
            shoot = False
            if speed < self.settings.max_speed:
                speed = 0

        return (turn, speed, shoot)

    def debug(self, surface):
        """ Allows the agents to draw on the game UI,
            Refer to the pygame reference to see how you can
            draw on a pygame.surface. The given surface is
            not cleared automatically. Additionally, this
            function will only be called when the renderer is
            active, and it will only be called for the active team.
        """
        import pygame
        # Plot the Q-table by position
        if self.observation.selected:
            surface.fill((0,0,0,0))
            bs = self.settings.tilesize * self.position_coarseness # Block Size
            for (state, actions) in self.Q.items():
                (x, y, ammo, cps, friends) = state
                if ammo == self.show_ammo_state and cps[0] == self.show_cp_state:
                    x = x * bs
                    y = y * bs
                    if actions:
                        ax, ay = arg_max(actions)
                        val = max(actions.values())
                    else:
                        # No actions learned yet: neutral value, no offset
                        # (ax/ay were previously unbound on this path).
                        ax = ay = 0
                        val = 0.0
                    best = (x + bs // 2 + ax * 8, y + bs // 2 + ay * 8)
                    # Red for negative values, green for positive.
                    r = min(255, max(0, int(255 * -val)))
                    g = min(255, max(0, int(255 * val)))
                    pygame.draw.rect(surface, (r, g, 0, 100), (x, y, bs, bs))
                    if self.action_type == 0:
                        pygame.draw.line(surface, (0,0,0), (x + bs // 2, y + bs // 2), best)
                    elif self.action_type == 1:
                        pygame.draw.line(surface, (0,0,0), (x + bs // 2, y + bs // 2), (ax, ay))
            if self.target is not None:
                pygame.draw.line(surface, (0,0,255), self.loc, self.target)

    def finalize(self, interrupted=False):
        """ This function is called after the game ends, 
            either due to time/score limits, or due to an
            interrupt (CTRL+C) by the user. Use it to
            store any learned variables and write logs/reports.
        """
        # Only one agent persists the shared table; close the file properly.
        if self.id == 0:
            with open(self.q_table_file, 'wb') as f:
                pickle.dump(self.Q, f)
