# load standard libraries #
import random
import time, copy
from math import *

# load C-side code #
from OpenNero import *

# load Python functional scripts #
from common import *
import world_handler
from agent_handler import AgentState, AgentInit

# load agent script
from roomba import RoombaBrain
from RTNEATAgent import RTNEATAgent
from grid_agent import GridBrain
from QBrain import QBrain
import action_script
import os

#load custom code
from custom_functions import *
from constants import *

class SandboxMod:

    def __init__(self):
        """
        Initialize the sandbox server state.
        """
        self.marker_map = {} # a map of cells and markers so that we don't have more than one per cell
        self.environment = None
        self.agent_ids = []
        # crumb bookkeeping: per-grid-cell counts, a global tally,
        # and a (x, y) -> grid cell lookup for every marked crumb
        self.crumbs_on_grid = [0] * N_GRID
        self.total_n_crumbs_left = 0
        self.crumb_grid_numbers = dict()

    def mark(self, x, y, marker):
        """ Mark a position (x, y) with the specified color """
        # remove the previous object, if necessary
        self.unmark(x, y)

        if not WINDOWLESS:
            # add a new marker object to the rendered scene
            marker_id = addObject(marker, Vector3f(x, y, -1), Vector3f(0,0,0), Vector3f(0.5,0.5,0.5), type = OBJECT_TYPE_MARKER)
        else:
            # headless mode: no scene object exists; a timestamp stands in as the id
            marker_id = time.time()
        # remember the ID of the object for this cell
        self.marker_map[(x, y)] = marker_id

        # register the crumb on its grid cell
        current_grid = give_grid_number(x, y)
        self.crumbs_on_grid[current_grid] += 1
        self.total_n_crumbs_left += 1
        self.crumb_grid_numbers[(x,y)] = current_grid

    def mark_blue(self, x, y):
        self.mark(x, y, "data/shapes/cube/BlueCube.xml")

    def mark_green(self, x, y):
        self.mark(x, y, "data/shapes/cube/GreenCube.xml")

    def mark_yellow(self, x, y):
        self.mark(x, y, "data/shapes/cube/YellowCube.xml")

    def mark_white(self, x, y):
        self.mark(x, y, "data/shapes/cube/WhiteCube.xml")

    def unmark(self, x, y):
        """Remove the marker at (x, y), if any. Returns True when one was removed.

        Note: the grid crumb counters are NOT adjusted here; callers that
        consume crumbs update crumbs_on_grid/total_n_crumbs_left themselves.
        """
        if (x, y) in self.marker_map:
            if not WINDOWLESS:
                removeObject(self.marker_map[(x, y)])
            del self.marker_map[(x, y)]
            return True
        else:
            return False

    def setup_sandbox(self):
        """
        setup the sandbox environment
        """
        global XDIM, YDIM, HEIGHT, OFFSET
        self.environment = RoombaEnvironment(XDIM, YDIM)
        set_environment(self.environment)

    def reset_sandbox(self):
        """
        reset the sandbox and refill with stuff to vacuum
        """
        # BUG FIX: was declared as reset_sandbox(self=None), which could never
        # work when called without an instance; made it a normal method.
        for marker_id in self.marker_map.values():
            # consistent with unmark(): in windowless mode the stored ids are
            # timestamps, not scene objects, so there is nothing to remove
            if not WINDOWLESS:
                removeObject(marker_id)  # delete id from Registry, not from dict
        self.marker_map = {}
        for agent_id in self.agent_ids:
            removeObject(agent_id)  # delete id from Registry, not from list
        self.agent_ids = []
        reset_ai()

    def remove_bots(self):
        """ remove all existing bots from the environment """
        disable_ai()
        for agent_id in self.agent_ids:
            removeObject(agent_id)  # delete id from Registry, not from list
        self.agent_ids = []

    def distribute_bots(self, num_bots, bot_type):
        """distribute bots so that they don't overlap"""
        # make a number of tiles to stick bots in
        N_TILES = 10
        tiles = [(r, c) for r in range(N_TILES) for c in range(N_TILES)]
        if FIXED_SEED:
            random.seed(SEED)
        random.shuffle(tiles)
        for i in range(num_bots):
            (r, c) = tiles.pop() # random tile
            x, y = r * XDIM / float(N_TILES), c * YDIM / float(N_TILES) # position within tile
            if SEED is not None:
                # reproducible jitter, decorrelated between x and y
                random.seed(SEED+1000)
                x_rand = random.random()
                random.seed(SEED-1000)
                y_rand = random.random()
            else:
                x_rand = random.random()
                y_rand = random.random()
            x, y = x + x_rand * XDIM * 0.5 / N_TILES, y + y_rand * YDIM * 0.5 / N_TILES # random offset
            agent_id = addObject(bot_type, Vector3f(x, y, 0), Vector3f(0.5, 0.5, 0.5), type = OBJECT_TYPE_ROOMBA, collision = OBJECT_TYPE_ROOMBA)
            self.agent_ids.append(agent_id)

    def add_bots(self, bot_type, num_bots):
        """Spawn num_bots agents of the given type. Returns True on success,
        False (with AI left disabled) for an unrecognized bot type."""
        disable_ai()
        num_bots = int(num_bots)
        if bot_type.lower().find("qlearning2") >= 0:
            self.distribute_bots(num_bots, "data/shapes/roomba/RoombaQ2.xml")
        else:
            return False
        enable_ai()
        return True

def in_bounds(x,y):
    """Return True when (x, y) lies strictly inside the walls, keeping a ROOMBA_RAD margin."""
    return (ROOMBA_RAD < x < XDIM - ROOMBA_RAD) and (ROOMBA_RAD < y < YDIM - ROOMBA_RAD)

#################################################################################        
class RoombaEnvironment(Environment):
    """
    Sample Environment for the Sandbox.

    Holds the crumb field, per-agent bookkeeping (targets, stuck counters)
    and implements the OpenNero Environment callbacks (reset/step/sense/
    is_episode_over/cleanup) for grid-based Q-learning roombas.
    """
    def __init__(self, XDIM, YDIM):
        """
        Create the environment
        """
        self.iii = 1
        Environment.__init__(self) 
        
        self.XDIM = XDIM
        self.YDIM = YDIM
        self.max_steps = STEPS_PER_EPISODE
        self.states = {} # dictionary of agent states
        # generate the crumb layout from the world config file
        self.crumbs = world_handler.pattern_cluster(NO_CRUMBS, "Roomba/world_config.txt")
        # only keep crumbs that are inside the walls
        self.crumbs = [c for c in self.crumbs if in_bounds(c.x,c.y)]

        self.init_list = AgentInit()
        self.init_list.add_type("<class 'Roomba.QBrain.QBrain'>")
        
        # action/sensor/reward bound objects registered for the QBrain type
        q2_abound = self.init_list.get_action("<class 'Roomba.QBrain.QBrain'>")
        q2_sbound = self.init_list.get_sensor("<class 'Roomba.QBrain.QBrain'>")
        q2_rbound = self.init_list.get_reward("<class 'Roomba.QBrain.QBrain'>")
        
        """ Bounds for Q2 (equals those for grid) """
        # actions
        q2_abound.add_discrete(0, N_GRID-1) # Choose any of the grid cells as goal
        # sensors
        q2_sbound.add_discrete(0,N_GRID-1)   # grid no of agent
        q2_sbound.add_discrete(0,NO_CRUMBS)   # n crumbs in grid
        q2_sbound.add_discrete(0,1)   # target reached?
        if SENSE_RADIUS > 0:
            # limited vision: one (cell id, crumb count) sensor pair per visible neighbour
            for i in range(1, VISIBLE_CELLS): # number of cells around you can see at one position
                q2_sbound.add_discrete(0,N_GRID-1)   # grid no of grid
                q2_sbound.add_discrete(0,NO_CRUMBS)   # n crumbs in grid
        else:
            # full vision: one sensor pair for every grid cell
            for i in range(0, VISIBLE_CELLS): # number of cells
                q2_sbound.add_discrete(0,N_GRID-1)   # grid no of grid
                q2_sbound.add_discrete(0,NO_CRUMBS)   # n crumbs in grid
            
        # rewards
        q2_rbound.add_continuous(-1000,100) # range for reward (=no of pellets picked up in last move)
        """ End Bounds for Q2 """

        # set up shop
        # Add Wayne's Roomba room with experimentally-derived vertical offset to match crumbs.
        addObject("data/terrain/RoombaRoom.xml", Vector3f(XDIM/2,YDIM/2, -1), Vector3f(0,0,0), Vector3f(XDIM/245.0, YDIM/245.0, HEIGHT/24.5), type = OBJECT_TYPE_WALLS)

        # getSimContext().addAxes()
        self.add_crumbs() 
        
        self.n_states = 0
        
        self.count = AGENT_AMOUNT -2
        self.reset_counter = 0
        
        # per-agent bookkeeping, keyed by the agent object itself
        self.agent_targets = dict()    # (x, y) each agent is currently driving toward
        self.agent_last_stuck = dict() # steps since the agent was last blocked
        self.agent_num_stuck = dict()  # count of recently-blocked steps
        
        self.claimed_agents = []
        self.prev_n_states = 0
        self.episodeCounter = 0
        
        self.update_count = 0
        self.final_crumb_found = False
        
    def get_state(self, agent):
        """Return the AgentState for agent, lazily creating it (plus the
        per-agent bookkeeping entries) the first time the agent is seen."""
        if agent in self.states:
            return self.states[agent]
        else:
            self.n_states += 1
            pos = agent.state.position
            rot = agent.state.rotation
            self.states[agent] = AgentState(self.n_states, pos, rot)
            agent.sid = self.n_states
            
            # a new agent starts with its own position as target and no stuck history
            self.agent_targets[agent] = (pos.x, pos.y)
            self.agent_last_stuck[agent] = 0
            self.agent_num_stuck[agent] = 0
            
            return self.states[agent]

    def add_crumbs(self):
        """Place a blue marker for every crumb that is not already on the map."""
        for pellet in self.crumbs:
            if not (pellet.x, pellet.y) in getMod().marker_map:
                getMod().mark_blue(pellet.x, pellet.y)
    
    def remove_crumbs(self):
        """Remove all crumb markers and zero out the per-grid bookkeeping."""
        # NOTE(review): mixes getMod() and the module-level gMod directly;
        # equivalent once getMod() has been called, but gMod is None before that.
        for pellet in self.crumbs:
            getMod().unmark(pellet.x, pellet.y)
        for grid in range(len(gMod.crumbs_on_grid)):
            gMod.crumbs_on_grid[grid] = 0
        gMod.crumb_grid_numbers={}
        self.crumbs = []
    
    def reset(self, agent):
        """ reset the environment to its initial state """
        #for a, state in self.states.items():
        state = self.get_state(agent)
        a = agent
        state.reset()
        a.state.position = state.initial_position
        a.state.rotation = state.initial_rotation
        a.state.velocity = state.initial_velocity
        self.reset_counter += 1
        # once every agent has been reset, restock the crumb field
        if self.reset_counter == AGENT_AMOUNT:
            self.reset_counter = 0
            self.count = 0
            if FIXED_SEED:            
                # deterministic runs: re-mark the same crumb layout
                self.add_crumbs()
            else:
                print "Replacing crumbs"
                # randomized runs: drop everything and generate a fresh layout
                self.remove_crumbs()
                self.crumbs = world_handler.pattern_cluster(NO_CRUMBS, "Roomba/world_config.txt")
                # only keep crumbs that are inside the walls
                self.crumbs = [c for c in self.crumbs if in_bounds(c.x,c.y)]
                self.add_crumbs()
        #print self.crumbs
        

        return True

    def get_agent_info(self, agent):
        """ return a blueprint for a new agent """
        # bounds were registered under the stringified agent class in __init__
        return self.init_list.get_info(str(type(agent)))
    
    def step(self, agent, action):
        """
        A step for an agent: choose a target crumb in the grid cell named by
        action[0], steer toward it while avoiding nearby agents, move, and
        return the reward collected this step.
        """

        reward = 0
        
        state = self.get_state(agent) # the agent's status
        pos = agent.state.position
        
        if (state.is_out == True):
            # agent has used up its step allowance: clear any marker under it
            getMod().unmark(pos.x, pos.y)
        else:
            assert(self.get_agent_info(agent).actions.validate(action)) # check if the action is valid
            # Possible actions: drive to nearest crumb in grid number action[0]

            crumbs_in_target_grid = [crumb for crumb in self.crumbs if gMod.crumb_grid_numbers[(crumb.x, crumb.y)] == action[0]]
            my_grid = give_grid_number(pos.x, pos.y)
            
            # No crumbs left: drive to center of grid
            if(len(crumbs_in_target_grid) == 0):
                target_xy = give_grid_center(action[0])
                # Drive to nearetst/center crumb in grid
            else:

                # NOTE(review): if ACTION_TYPE is neither 'closest' nor
                # 'central', target_xy is never assigned and the go_fast call
                # below raises NameError -- confirm ACTION_TYPE's allowed values.
                if ACTION_TYPE == 'closest' or (ACTION_TYPE == 'central' and action[0] == my_grid):
                    # Give closest crumb:
                    target_xy = give_closest_crumb(crumbs_in_target_grid, pos.x, pos.y)
                elif ACTION_TYPE == 'central':
                    # Give central crumb to the grid cell of choice:
                    target_xy = give_central_crumb(crumbs_in_target_grid, action[0])
            
            angle = action_script.go_fast(pos.x, pos.y, target_xy[0], target_xy[1]) # in range of -pi to pi
            degree_angle = degrees(angle)
            delta_angle = degree_angle - agent.state.rotation.z
            delta_dist = MAX_SPEED
            # print "Agent %d, at (%.1f, %.1f)"%(agent.sid, pos.x, pos.y)
            
            # AVOID COLLISIONS!
            # collect every other agent within distance 22 of this one
            closest = {}
            for other in self.states:
                if not other == agent:
                    d = get_distance((other.state.position.x, other.state.position.y), (pos.x, pos.y))
                    if d < 22:
                        closest[other] = d
            if len(closest) == 1:
                # one neighbour: steer away at a 100-degree offset from its bearing
                c_o = closest.keys()[0]
                #closest = closest[c_o]
                closest_angle = degrees(action_script.go_fast(pos.x, pos.y, c_o.state.position.x, c_o.state.position.y))
                delta_angle = closest_angle - agent.state.rotation.z + 100                
            elif len(closest) > 1:
                # several neighbours: head opposite (+180) their average bearing
                av_sin = 0
                av_cos = 0
                for a in closest.keys():
                    #Algorithm to approximate the average of multiple angles:
                    ang_rad = action_script.go_fast(pos.x, pos.y, a.state.position.x, a.state.position.y)
                    av_sin += sin(ang_rad)
                    av_cos += cos(ang_rad)
                closest_angle = degrees(atan2(av_sin, av_cos))
                delta_angle = closest_angle - agent.state.rotation.z + 180
            
            # stuck bookkeeping: blocked this step vs. running free again
            if(len(closest) > 0):
                self.agent_last_stuck[agent] = 0
                self.agent_num_stuck[agent] += 1
            elif(self.agent_num_stuck[agent] != 0):
                self.agent_last_stuck[agent] += 1
                if(self.agent_last_stuck[agent] > LAST_STUCK_COUNT):
                    self.agent_num_stuck[agent] = 0

            self.agent_targets[agent] = target_xy
            state.prev_position = (pos.x, pos.y)
            
            reward += self.update_position(agent, delta_dist, delta_angle)
        state.reward += reward

        return reward

    # delta_angle (degrees) is change in angle
    # delta_dist is change in distance (or velocity, since unit of time unchanged)
    def update_position(self, agent, delta_dist, delta_angle):
        """
        Updates position of the agent and collects pellets.

        Returns MOVE_PENALTY plus the reward of every crumb picked up.
        """
        
        state = self.get_state(agent)
        state.step_count += 1

        position = agent.state.position
        rotation = agent.state.rotation

        # posteriori collision detection
        rotation.z = wrap_degrees(rotation.z, delta_angle)
        position.x += delta_dist*cos(radians(rotation.z))
        position.y += delta_dist*sin(radians(rotation.z))

        # check if one of 4 out-of-bound conditions applies
        # if yes, back-track to correct position
        if (position.x) < 0 or (position.y) < 0 or \
           (position.x) > self.XDIM or (position.y) > self.YDIM:

            # correct position by undoing the move along the offending axis
            if (position.x) < 0:
                position.x -= delta_dist*cos(radians(rotation.z))    
            if (position.y) < 0:
                position.y -= delta_dist*sin(radians(rotation.z))
            if (position.x) > self.XDIM:
                position.x -= delta_dist*cos(radians(rotation.z))
            if (position.y) > self.YDIM:
                position.y -= delta_dist*sin(radians(rotation.z))
            
        # register new position
        state.position = position
        state.rotation = rotation
        agent.state.position = position
        agent.state.rotation = rotation
        
        # every move costs MOVE_PENALTY; collected crumbs add their reward below
        reward = MOVE_PENALTY
        

        # remove all crumbs within ROOMBA_RAD of agent position
        pos = (position.x, position.y)
        for crumb in self.crumbs:
            if (crumb.x, crumb.y) in getMod().marker_map:
                distance = sqrt((crumb[0] - pos[0])**2 + (crumb[1] - pos[1])**2)
                if distance < ROOMBA_RAD:
                    getMod().unmark(crumb.x, crumb.y)
                    # Remove from grid count (-1 marks the crumb as consumed)
                    current_grid_number = gMod.crumb_grid_numbers[(crumb.x, crumb.y)]
                    gMod.crumbs_on_grid[current_grid_number] -= 1
                    gMod.total_n_crumbs_left -= 1
                    gMod.crumb_grid_numbers[(crumb.x, crumb.y)] = -1
                    
                    reward += crumb.reward
                
        # check if agent has expended its step allowance
        if (self.max_steps != 0) and (state.step_count >= self.max_steps):
            state.is_out = True    # if yes, mark it to be removed
        
        return reward
    
    def sense(self, agent, sensors):
        """ figure out what the agent should sense

        Layout: sensors[0] = agent's grid cell, sensors[1] = crumbs in that
        cell, sensors[2] = target-reached flag, then (cell id, crumb count)
        pairs for every visible cell starting at index 3.
        """
        state = self.get_state(agent)
        # get agent's position
        pos = agent.state.position
        target_grid = give_grid_number(self.agent_targets[agent][0], self.agent_targets[agent][1]) # grid where you are going to
        
        sensors[0] = give_grid_number(pos.x, pos.y)
        
        # Check which grids are occupied
        sensors[1] = gMod.crumbs_on_grid[int(sensors[0])]
        #print self.agent_targets
        #print agent
        
        # Only one crumb at a time
        if(not EMPTY_CELL and get_distance(self.agent_targets[agent], (pos.x, pos.y)) < DISTANCE_REACHED):# \
                #or get_distance((pos.x, pos.y), agent.state.prev_pos) <= 0.3:
            sensors[2] = 1
        # Clean entire cell
        elif(EMPTY_CELL and target_grid == sensors[0] and gMod.crumbs_on_grid[target_grid] == 0):
            sensors[2] = 1
        elif(self.agent_num_stuck[agent] > TIME_STUCK and RECONSIDER_STUCK):            
            # stuck too long: report target reached so the brain picks a new one
            self.agent_num_stuck[agent] = 0
            sensors[2] = 1
        else:
            sensors[2] = 0
        
        # neighbour offsets, growing with the configured sense radius
        diffs = []
        
        if SENSE_RADIUS > 1: # see 4 direct cells around you
            diffs += [(1,0), (-1,0), (0,-1), (0, 1)]
        if SENSE_RADIUS > 2: # see 3 x 3 cells
            diffs += [(1,1), (-1,-1), (1,-1), (-1, 1)]
        if SENSE_RADIUS > 3: # see 3 x 3 + 4 cells
            diffs += [(2,0), (-2,0), (0,-2), (0, 2)]
     
   # see 5 x 5 cells? or 5x5 - 4..
        
        cell_indice = 3
        if SENSE_RADIUS > 0:
            for (x, y) in diffs:
                sensors[cell_indice] = give_grid_neighbour(sensors[0], x, y) # left side
                if sensors[cell_indice] != -1:
                    sensors[cell_indice + 1] = gMod.crumbs_on_grid[int(sensors[cell_indice])] 
                cell_indice += 2
        else: # See all grid cells
            for i in range(N_GRID):
                sensors[cell_indice] = i
                sensors[cell_indice+ 1] = gMod.crumbs_on_grid[int(sensors[cell_indice])]
                cell_indice +=2
        return sensors
    
    def is_active(self, agent):
        """ return true when the agent should act """
        return True
    
    def is_episode_over(self, agent):
        """ is the current episode over for the agent? """
        state = self.get_state(agent)

        if self.max_steps != 0 and state.step_count >= self.max_steps:
            return True
        
        # end as soon as the whole field is clean (checked once per agent round)
        if END_STATE and self.count%AGENT_AMOUNT==0 and sum(gMod.crumbs_on_grid) == 0:
            agent.final_crumb = True
            return True

        self.count+=1
        return False
    
    def cleanup(self):
        """
        cleanup the world
        """
        # NOTE(review): sets self.environment on the environment itself;
        # presumably just drops a stale reference -- __init__ never creates
        # such an attribute. Confirm intent.
        self.environment = None
        return True

# module-level singleton SandboxMod instance, created lazily by getMod()
gMod = None

def delMod():
    """Drop the cached SandboxMod singleton; the next getMod() call recreates it."""
    global gMod
    gMod = None

def getMod():
    """Return the module-level SandboxMod singleton, creating it on first use."""
    global gMod
    if gMod is None:
        gMod = SandboxMod()
    return gMod

def ServerMain():
    """Entry point invoked by OpenNero when the server side of the mod starts."""
    print "Starting Sandbox Environment"
