from OpenNero import *
import time
import random
import math
import action_script
from Queue import Queue
from constants import *
from custom_functions import *

class GridBrain(AgentBrain):
    """
    Scripted behavior for Roomba agent.

    A "lifetime" of an agent brain looks like this:
    1. __init__() is called (new brain)
    2. initialize() is called (passing specs for sensors, actions and rewards)
    3. start() is called
    4. act() is called 0 or more times
    5. end() is called
    6. if new episode, go to 3, otherwise go to 7
    7. destroy() is called

    On every step the agent simply moves toward the reachable, unoccupied
    grid cell containing the most crumbs (greedy scripted policy).
    """
    def __init__(self):
        """
        Constructor - called when the brain object is first created.
        """
        # call the parent constructor
        AgentBrain.__init__(self) # do not remove!
        # Queue of pending scripted actions; not consumed by act() in this
        # file -- presumably used by external scripting. Kept for compatibility.
        self.action_sq = Queue()

    def initialize(self, init_info):
        """
        Store init_info, which describes the observations the agent will see
        and the actions the agent will be able to perform.
        Returns True to signal successful initialization.
        """
        self.init_info = init_info
        return True

    def start(self, time, sensors):
        """
        Take in the initial sensors and return the first action.
        Delegates to act() with a zero reward.
        """
        return self.act(time, sensors, 0)

    def _pick_best_grid(self, sensors, candidate_grids, default_grid, min_crumbs):
        """
        Scan candidate_grids and return the index of the unoccupied grid with
        the most crumbs, or default_grid if no candidate strictly exceeds
        min_crumbs.

        Sensor layout (per the indexing used throughout this file): each grid
        occupies GRID_SENSORS consecutive sensor slots starting at
        START_GRID_SENSORS; offset 0 is the occupancy flag (0 == free) and
        offset 1 is the crumb count.
        """
        best_grid = default_grid
        best_crumbs = min_crumbs
        for grid in candidate_grids:
            base = START_GRID_SENSORS + grid * GRID_SENSORS
            # only consider free grids that strictly beat the current best
            if sensors[base] == 0 and sensors[base + 1] > best_crumbs:
                best_grid = grid
                best_crumbs = sensors[base + 1]
        return best_grid

    def act(self, time, sensors, reward):
        """
        Take in new sensors and the reward from the previous action, and
        return the next action: move toward the best crumb-bearing grid.

        reward is unused -- this is a scripted (non-learning) brain.
        """
        action = self.init_info.actions.get_instance()

        if RELATIVE_GRID:
            # Grid 8 is the agent's own cell. A neighbor (grids 0-7) is only
            # worth moving to if it strictly beats our own crumb count, hence
            # the +1 baseline; otherwise stay on grid 8.
            own_crumbs = sensors[START_GRID_SENSORS + 8 * GRID_SENSORS + 1]
            action[0] = self._pick_best_grid(sensors, range(0, 8), 8,
                                             own_crumbs + 1)
        # ABSOLUTE GRID
        else:
            my_grid = give_grid_number(sensors[0], sensors[1])
            # Fallback when no free neighbor has any crumbs: wander to a
            # random grid. randrange(N_GRID) keeps the index in [0, N_GRID);
            # the original randint(0, N_GRID) could return N_GRID itself,
            # one past the last grid. TODO(review): confirm N_GRID is a
            # count rather than a maximum index.
            fallback = random.randrange(N_GRID)
            candidates = give_neighbor_grids(my_grid) + [my_grid]
            # A grid must have at least one crumb (> 0) to beat the fallback.
            action[0] = self._pick_best_grid(sensors, candidates, fallback, 0)

        return action

    def end(self, time, reward):
        """
        Take in the final reward. Returns True to signal a clean episode end.
        """
        return True

    def destroy(self):
        """
        Called when the agent is destroyed; resets the internal clock.
        """
        self.time = 0
        return True

        