from OpenNero import *
import time
import random
import math
import action_script
from Queue import Queue
from constants import *
from custom_functions import *

class RoombaBrain(AgentBrain):
    """
    Scripted behavior for Roomba agent
    A "lifetime" of an agent brain looks like this:
    1. __init__() is called (new brain)
    2. initialize() is called (passing specs for sensors, actions and rewards)
    3. start() is called
    4. act() is called 0 or more times
    5. end() is called
    6. if new episode, go to 3, otherwise go to 7
    7. destroy() is called
    """
    def __init__(self):
        """
        Constructor - called once when the brain object is first created.
        """
        # call the parent constructor
        AgentBrain.__init__(self) # do not remove!
        self.action_sq = Queue()   # queue of scripted actions (not consumed in this class)
        self.last_loc = (0, 0)     # agent (x, y) on the previous act() call
        self.location = (0, 0)     # agent (x, y) on the current act() call
        self.change = False        # True when the agent did NOT move since the last step

    def initialize(self, init_info):
        """
        init_info contains a description of the observations the agent will see
        and the actions the agent will be able to perform
        """
        self.init_info = init_info
        return True

    def start(self, time, sensors):
        """
        Take in the initial sensors and return the first action.
        The first step is just a regular act() with zero reward.
        """
        return self.act(time, sensors, 0)

    def act(self, time, sensors, reward):
        """
        Take in new sensors and reward from previous action and return the next action.
        Move toward the closest crumb, steering away from other agents that are at
        least as close to it, and switching strategy when stuck.

        Sensor layout (assumed from usage -- TODO confirm against the environment):
          sensors[1], sensors[2]  - this agent's (x, y)
          sensors[3], sensors[4]  - closest crumb's (x, y)
          sensors[5..12]          - other agents' (x, y) pairs
        """
        action = self.init_info.actions.get_instance()

        # Remember where we were on the previous step.
        # NOTE: this replaces `if self.last_loc is not (0,0)`, an identity
        # comparison against a fresh tuple literal that was always true, so the
        # assignment was effectively unconditional already; on the very first
        # call both values are (0,0), so this is equivalent in every case.
        self.last_loc = self.location

        self.location = (sensors[1], sensors[2])
        # Despite the name, self.change == True means the position did NOT
        # change since the last step (i.e. the agent appears stuck).
        self.change = self.last_loc == self.location

        goal = (sensors[3], sensors[4])
        dist = get_distance(self.location, goal)

        # Other agents' (x, y) pairs live at sensors[5:7], [7:9], [9:11], [11:13].
        for i in range(5, 13, 2):
            a = (sensors[i], sensors[i + 1])
            if get_distance(a, goal) <= dist:
                # Another agent is at least as close to the crumb as we are.
                if not self.change:
                    # Still moving: steer away from that competing agent.
                    action[0] = action_script.go_opposite(sensors, a[0], a[1])
                else:
                    # Stuck: head straight for the crumb instead.
                    action[0] = action_script.go_fast(sensors[1], sensors[2], sensors[3], sensors[4])
                return action

        # No competing agent found.
        if not self.change:
            action[0] = action_script.go_fast(sensors[1], sensors[2], sensors[3], sensors[4])
        else:
            # Stuck: move away from the LAST agent examined above -- `a` still
            # holds sensors[11:13] because the loop variable outlives the loop.
            action[0] = action_script.go_opposite(sensors, a[0], a[1])
        return action

    def end(self, time, reward):
        """
        take in final reward
        """
        return True

    def destroy(self):
        """
        called when the agent is destroyed
        """
        # self.time is never read elsewhere in this class; kept for
        # compatibility with any external code that inspects it.
        self.time = 0
        return True
