import random
import math
import pickle
from libs.munkres import Munkres

def createQtable():
    """Create an empty Q-table file so later runs can load it.

    Writes an empty dict, pickled, to 'agent4_qtable.txt'.
    """
    # 'wb' because pickle output is binary data (text mode corrupts it on
    # some platforms); 'with' guarantees the file is closed on error too.
    with open('agent4_qtable.txt', 'wb') as qfile:
        pickle.dump({}, qfile)

#tactics
class GlobalLevel():
    """Team-level tactics: assigns one of three guard tasks to each agent.

    There are 3 tasks:
      0 = guard home-base control point
      1 = guard mid-field control point
      2 = guard foe-base control point

    The assignment is solved as a linear-assignment problem (Hungarian
    algorithm via Munkres) over the agents' per-task costs in ``teamdata``.
    """
    m = Munkres()           # assignment solver (stateless, shared)
    matrix = []             # cost matrix, rebuilt on every divideTasks()
    # default tasks of agents (index = task slot)
    task = [0, 0, 1, 1, 2, 2]
    teamdata = None         # shared per-agent data, set in __init__
    start = True            # True until the first divideTasks() call

    # pointer to LowLevelModule.
    LowLevelModule = None

    def __init__(self, lowlevelmodule, teamdata):
        # lowlevelmodule: LowLevel instance (navigation helpers)
        # teamdata: dict {agent_id: {...}} shared by the whole team
        self.LowLevelModule = lowlevelmodule
        self.teamdata = teamdata

    def divideTasks(self):
        """Recompute the optimal agent-to-task assignment.

        Builds a cost matrix (one row per agent, one column per task slot)
        from teamdata and stores each agent's assigned task id in
        teamdata[agent]['task'].
        """
        if self.start:
            # one-time tweak: if agent 0's home-base cost is small, shift
            # one task slot from home-base to mid-field
            if self.teamdata[0][0] < 3:
                self.task = [0, 1, 1, 1, 2, 2]
            self.start = False
        # cost matrix: agents x task slots
        self.matrix = [[self.teamdata[i][nr] for nr in self.task]
                       for i in range(len(self.teamdata))]
        indexes = self.m.compute(self.matrix)
        # Munkres returns (row, column) pairs; map each agent (row) to the
        # task of its assigned column.  This loop replaces six hardcoded
        # assignments that assumed indexes[k][0] == k.
        for row, col in indexes:
            self.teamdata[row]['task'] = self.task[col]

    ## passing task
    def determineTask(self, id):
        """Return the task currently assigned to agent ``id``."""
        return self.teamdata[id]['task']

        
#agent survival
class LocalLevel():
    """Per-agent decision layer: Q(lambda) reinforcement learning.

    Encodes each observation as a discrete 5-feature state tuple, picks an
    action epsilon-greedily from the Q-table, and updates the table with
    Watkins' Q(lambda) using replacing eligibility traces.
    """
    LEARN = True            # enable exploration + Q-updates
    # size of play field (in pixels)
    fieldsize = [0, 0]
    # settings, set by domination-game
    settings = None
    # team color
    team = None
    # pointer to LowLevelModule.
    LowLevelModule = None
    teamdata = None
    # Q-learning settings
    Qtable = {}             # maps (state..., action) tuples to values
    old_state = None        # state s of the previous timestep
    old_action = None       # action a taken in the previous timestep
    old_actions = None      # greedy action set of the previous timestep
    e_trace = {}            # eligibility trace for Q(lambda)
    Gamma = 0.9             # discount factor
    Alpha = 0.1             # learning rate
    Epsilon = 0.1           # exploration probability
    Lambda = 0.8            # eligibility-trace decay

    # states features:
    # AmmoAmount    = 3 values [0:0, 1:1-2, 2:3+]
    # AmmoDistance  = 4 values [0:0, 1:1, 2:2, 3:3+]
    # TaskDistance  = 4 values [0:0, 1:1, 2:2, 3:3+]
    # FoeCanShootMe = 9 values
    # ICanShootFoe  = 9 values

    ## TODO:
    # - implement:         sameTaskFriends  [7]
    # - implement:         diffTaskFriends  [7]

    # Number of actions
    num_of_actions = 4
    aPUT = 0    # stay put
    aTSK = 1    # move to the assigned task's control point
    aAMMO = 2   # move to the closest ammo location
    aFOES = 3   # attack a foe

    # init LocalLevel()
    # -> Load Qtable
    def __init__(self, fieldsize, settings, team, lowlevelmodule, field_grid, Qtable, teamdata):
        self.fieldsize = fieldsize
        self.settings = settings
        self.team = team
        self.LowLevelModule = lowlevelmodule
        self.grid = field_grid
        self.teamdata = teamdata
        # q-table loaded & passed by game-engine
        self.Qtable = Qtable
        # per-instance trace dict: avoids sharing the class-level mutable
        # default between instances
        self.e_trace = {}

###############
## Q-TABLE FUNCTIONS
###############

    def printQtableStats(self):
        # number of state-action pairs seen so far
        # (parentheses keep this valid under both Python 2 and 3)
        print(len(self.Qtable))

    def getValue(self, sap):
        """Return the Q-value of a state-action pair (0 if never seen)."""
        value = 0
        if sap in self.Qtable:
            value = self.Qtable[sap]
        return value

    def setValue(self, sap, v, r):
        """One-step Q-learning update for ``sap``.

        sap = state-action pair, v = value of the successor pair,
        r = observed reward.
        """
        if sap in self.Qtable:
            # BUGFIX: was self.getValue(s, a) -- undefined names that
            # raised a NameError whenever the pair was already known.
            old_value = self.getValue(sap)
            new_value = old_value + self.Alpha*(r + self.Gamma*v - old_value)
            self.Qtable[sap] = new_value
        else:
            self.Qtable[sap] = self.Alpha*(r + self.Gamma*v)

    def getStateActionPair(self, s, a):
        """Flatten a 5-feature state and an action into one hashable key."""
        return (s[0], s[1], s[2], s[3], s[4], a)

###############
## STATE FUNCTIONS
###############

    # Determine AmmoAmount = 3 states
    def getAmmoState(self, ammoAmount):
        """Discretize an ammo count: 0 -> 0, 1-2 -> 1, 3+ -> 2."""
        if (ammoAmount == 0):
            AmmoState = 0
        elif (1 <= ammoAmount <= 2):
            AmmoState = 1
        else: # AmmoAmount >= 3
            AmmoState = 2
        return AmmoState

    # Determine Task State = 3 states
    def getTaskState(self, cps, task):
        """Return the owner color of the task's control point (0 if the
        task id is not a valid control-point index)."""
        if (0 <= task <= 2):
            TaskState = cps[task][2]
        else:
            TaskState = 0
        return TaskState

    # Determine AmmoDistance = 5 states
    def getAmmoDistanceState(self, pos, ammo, id, taskloc):
        """Distance (in gamesteps, incl. respawn wait) to the best ammo
        location, capped at 4 to keep the state space small."""
        AmmoDistanceState = self.LowLevelModule.getClosestAmmo(pos, id, taskloc)[1]
        if AmmoDistanceState > 4:
            AmmoDistanceState = 4
        return AmmoDistanceState

    # Determine TaskDistance = 6 states
    def getTaskDistanceState(self, pos, task, cps):
        """Gamesteps from ``pos`` to the task's control point, capped at 5."""
        if (0 <= task <= 2):
            t_pos = cps[task][0:2]
        else:
            # no valid task: measure against the field origin
            t_pos = [0, 0]

        TaskState = self.LowLevelModule.getNumberOfSteps(pos, t_pos)[0]
        if TaskState > 5:
            TaskState = 5

        return TaskState

    # Determine Foe state = 5 states
    def getFoeState(self, foeData):
        """Discretize the local combat situation into 5 states."""
        sMe = foeData[0][0]     # number of observed foes that can shoot me
        sMeN = foeData[0][1]    # number of observed foes that can not shoot me
        sI = foeData[0][2]      # number of observed foes that i can shoot

        if (sMe + sMeN) == 0:   # no foes observed
            FoeState = 0
        elif sMe > 0:           # there are foes that can shoot me
            if sI > 0:
                FoeState = 1    # and there are foes i can shoot
            else:
                FoeState = 2    # and there are no foes i can shoot
        else:                   # there are no foes that can shoot me
            if sI > 0:
                FoeState = 3    # and there are foes i can shoot
            else:
                FoeState = 4    # and there are no foes i can shoot
        return FoeState

    # determine FriendSameTask = 7 states
    # determine FriendDiffTask = 7 states
    def getFriendState(self, friends, task):
        """Encode observed friends into (sameTaskState, diffTaskState).

        Per group:
        0 -  0 friends observed
        1 -  1 observed, 0-AmmoState
        2 -  1 observed, 1-AmmoState
        3 -  1 observed, 2-AmmoState
        4 - 2+ observed, 0-AmmoState
        5 - 2+ observed, 1-AmmoState
        6 - 2+ observed, 2-AmmoState
        """
        sameFriends = [0, 0]    # [total ammo, friend count]
        diffFriends = [0, 0]

        # match observed positions against teamdata to split friends
        # into same-task / different-task groups
        for pos in friends:
            for id in xrange(0, 6):
                if (self.teamdata[id]['pos'] == pos[0:2]):
                    if (self.teamdata[id]['task'] == task):
                        sameFriends[0] += self.teamdata[id]['ammo']
                        sameFriends[1] += 1
                    else:
                        diffFriends[0] += self.teamdata[id]['ammo']
                        diffFriends[1] += 1

        # calculate sameFriend-state from count + average ammo
        if (sameFriends[1] > 0):
            avg = sameFriends[0] / sameFriends[1]
            AmmoState = self.getAmmoState(avg)
            if (sameFriends[1] == 1):
                sameFriendState = 1 + AmmoState # state: 1,2,3
            else:
                sameFriendState = 4 + AmmoState # state: 4,5,6
        else:
            sameFriendState = 0                 # state: 0

        # calculate diffFriend-state
        if (diffFriends[1] > 0):
            avg = diffFriends[0] / diffFriends[1]
            AmmoState = self.getAmmoState(avg)
            if (diffFriends[1] == 1):
                diffFriendState = 1 + AmmoState # state: 1,2,3
            else:
                diffFriendState = 4 + AmmoState # state: 4,5,6
        else:
            diffFriendState = 0                 # state: 0

        return (sameFriendState, diffFriendState)

    def getState(self, obs, tsk, ammo, id):
        """Encode an observation into the 5-feature discrete state tuple.

        obs  = observation object, tsk = task of agent,
        ammo = global known ammo locations, id = agent id.
        """
        AmmoState = self.getAmmoState(obs.ammo)
        TaskState = self.getTaskState(obs.cps, tsk)
        AmmoDistanceState = self.getAmmoDistanceState(obs.loc, ammo, id, obs.cps[tsk][0:2])
        TaskDistanceState = self.getTaskDistanceState(obs.loc, tsk, obs.cps)
        FoeState = self.getFoeState(self.foeData)

        return (AmmoState, TaskState, AmmoDistanceState, TaskDistanceState, FoeState)


#########
## ACTIONS
############

    def takeAction(self, a, obs, tsk, ammo, id):
        """Map an action id to a (goal position, shoot flag) pair.

        a    = action taken by agent
        obs  = observation, passed by game
        tsk  = task of agent, assigned by GlobalLevel()
        ammo = global known ammo locations
        """
        goal = None
        shoot = False
        if a == self.aPUT: # stay put
            goal = obs.loc

        elif a == self.aTSK: # go to task
            goal = obs.cps[tsk][0:2]

        elif a == self.aAMMO: # Move to closest Ammo-Location
            goal = self.LowLevelModule.getClosestAmmo(obs.loc, id, obs.cps[tsk][0:2])[2]
            # if there is no known ammo location, do nothing
            if goal is None:
                goal = obs.loc

        elif a == self.aFOES: # attack foe
            # choose the first foe i can shoot
            # BUGFIX: the original iterated xrange(0, len(...)-1), which
            # never considered the last observed foe
            for i in xrange(len(self.foeData[1])):
                if self.foeData[1][i][1] == 1:
                    goal = obs.foes[i][0:2]
                    if obs.ammo > 0:
                        shoot = True
                        break
            if goal is None:
                goal = obs.loc
        return (goal, shoot)

###########
## REWARDS
##########

    def getReward(self, obs, tsk):
        """Reward for the transition from the previous observation to ``obs``.

        Rewards capturing/holding the assigned control point, picking up
        ammo and shooting; punishes losing the point and getting killed.
        """
        reward = 0
        myTeam = self.team
        otherTeam = 1 - self.team

        # check CPS & task
        # if you have to guard a CPS and it is taken: negative reward
        # if you have taken the CPS which is assigned to you: positive reward
        # (double when ownership flips straight between the two teams)
        if (tsk == 0 or tsk == 1 or tsk == 2):
            new_cps_color = obs.cps[tsk][2]
            old_cps_color = self.old_obs.cps[tsk][2]
            # NOTE: == / != instead of the original 'is' / 'is not', which
            # compared int identity and only worked via small-int interning
            if (new_cps_color != old_cps_color):
                if new_cps_color == myTeam:
                    reward += 5
                    if old_cps_color == otherTeam:
                        reward += 5
                elif new_cps_color == otherTeam:
                    reward += -5
                    if old_cps_color == myTeam:
                        reward += -5
                else:
                    # point became neutral
                    if old_cps_color == myTeam:
                        reward += -5
                    else:
                        reward += 5

        # if ammo was obtained, give a reward
        old_ammo_count = self.old_obs.ammo
        new_ammo_count = obs.ammo
        if old_ammo_count < new_ammo_count:
            reward += 1

        # reward shooting an enemy
        if self.old_shoot:
            reward += 2

        # negative reward if agent got killed in previous state
        if (obs.respawn_in == (self.settings.spawn_time - 1)):
            reward += -5

        return reward

##############
## LEARNING & SELECTION
#########

    def determineGoal(self, obs, tsk, ammo, id):
        """Select an action for this step and map it to a goal position.

        Observes the state, picks an action epsilon-greedily, performs the
        Q(lambda) update for the previous step, and returns (goal, shoot).
        (An unused shouldCoop() call from the original was removed.)
        """
        # update local foe observation (used by getState/takeAction)
        self.foeData = self.LowLevelModule.foeObservation(obs)

        # create set of possible actions in the current state
        # (list(...) keeps this a mutable list on any Python version)
        actionset = list(range(self.num_of_actions))

        # filter action set:
        # if there is no foe i can shoot, attacking and waiting are useless
        if (self.foeData[0][2] == 0):
            actionset.remove(self.aFOES)
            actionset.remove(self.aPUT)
        else:
            actionset = [self.aFOES]

        # observe state (s')
        state = self.getState(obs, tsk, ammo, id)

        # determine the greedy action set
        best_val = -1000000
        best_actions = []
        for a in actionset:
            val = self.getValue(self.getStateActionPair(state, a))
            if val >= best_val:
                best_val = val
                best_actions.append(a)

        # pick one optimal action (a*) at random to break ties
        best_action = random.choice(best_actions)

        # Q-learning
        if self.LEARN:
            # epsilon-greedy exploration: occasionally take a random action
            if random.random() < self.Epsilon:
                best_action = random.choice(actionset)

            # Q update
            if self.old_state:
                # reward observed for (old_state, old_action)
                reward = self.getReward(obs, tsk)

                # state-action-pair keys
                sap = self.getStateActionPair(state, best_action)                  # s'
                sap_old = self.getStateActionPair(self.old_state, self.old_action) # s

                # TD error
                delta = reward + self.Gamma*self.getValue(sap) - self.getValue(sap_old)

                # replacing traces: reset the trace of (s,a) to 1
                self.e_trace[sap_old] = 1

                # apply the TD error to every traced pair
                # (this loop was duplicated verbatim in both branches of
                # the original; it is shared here)
                for sa in self.e_trace:
                    self.Qtable[sa] = self.getValue(sa) + self.Alpha * delta * self.e_trace[sa]

                if best_action in best_actions:
                    # greedy action taken: decay all traces
                    for sa in self.e_trace:
                        self.e_trace[sa] = self.Gamma * self.Lambda * self.e_trace[sa]
                else:
                    # exploratory action: Watkins' Q(lambda) cuts the traces
                    self.e_trace = {}

            self.old_state = state
            self.old_action = best_action
            self.old_actions = best_actions
            self.old_obs = obs

        goal, shoot = self.takeAction(best_action, obs, tsk, ammo, id)
        self.old_shoot = shoot
        # return goal (location to move to) and whether to shoot
        return goal, shoot
    
    
# hardcoded level
# turn/shoot/ all kind of low-level math functions
class LowLevel():
    """Low-level navigation/geometry helpers shared by the team.

    Relies on helpers supplied by the game framework elsewhere in scope
    (find_path, point_dist, line_intersects_circ, line_intersects_grid,
    angle_fix) -- TODO confirm they are star-imported by the engine.
    """
    mesh = None             # navigation mesh, set in __init__
    grid = None             # wall grid, set in __init__
    settings = None         # game settings, set in __init__
    stepDictionary = {}     # cache: (start tile, goal tile) -> [steps, first waypoint]
    fieldsize = None        # field size in pixels
    timestep = 0            # current game step (updated externally)
    ammodata = {}           # ammo location -> [respawn info, ...]
    teamdata = {}           # shared per-agent data

    def __init__(self, nav_mesh, field_grid, settings, fieldsize, teamdata):
        self.mesh = nav_mesh
        self.grid = field_grid
        self.settings = settings
        self.fieldsize = fieldsize
        self.teamdata = teamdata

    def getNumberOfSteps(self, start_pos, goal_pos):
        """Estimate gamesteps (movement plus extra turn steps) from
        start_pos to goal_pos (both in pixels).

        Results are cached per (start tile, goal tile) pair.
        Returns [steps, first_waypoint].
        """
        tilesize = self.settings.tilesize
        max_speed = self.settings.max_speed
        max_turn = self.settings.max_turn

        # identify the path by the tiles of its endpoints
        startTileX = math.ceil(start_pos[0] / tilesize)
        startTileY = math.ceil(start_pos[1] / tilesize)
        goalTileX = math.ceil(goal_pos[0] / tilesize)
        goalTileY = math.ceil(goal_pos[1] / tilesize)

        # cache key (renamed from 'tuple', which shadowed the builtin)
        cache_key = (startTileX, startTileY, goalTileX, goalTileY)

        # if this path is not cached yet: calculate!
        if cache_key not in self.stepDictionary:
            steps = 0
            first_waypoint = goal_pos   # fallback when no path is found
            path = find_path(start_pos, goal_pos, self.mesh, self.grid, tilesize)
            if path:
                first_waypoint = path[0]
                dx = path[0][0] - start_pos[0]
                dy = path[0][1] - start_pos[1]
                dstA = (dx**2 + dy**2)**0.5
                steps += math.ceil(dstA / max_speed)
                start = start_pos

                for i in range(1, len(path)):
                    # distance of the current leg
                    dx = path[i][0] - path[i-1][0]
                    dy = path[i][1] - path[i-1][1]
                    dstB = (dx**2 + dy**2)**0.5
                    steps += math.ceil(dstB / max_speed)

                    # angle between consecutive legs (law of cosines);
                    # sharp corners cost one or two extra turn steps
                    dx = path[i][0] - start[0]
                    dy = path[i][1] - start[1]
                    dstC = (dx**2 + dy**2)**0.5
                    hoek = math.acos((dstA**2 + dstB**2 - dstC**2) / (2*dstA*dstB))
                    if (math.pi - hoek) > (max_turn):
                        steps += 1
                        if (math.pi - hoek) > (2*max_turn):
                            steps += 1

                    dstA = dstB
                    start = path[i-1]
            # BUGFIX: the original stored path[0] unconditionally here,
            # raising an IndexError whenever find_path returned no path
            self.stepDictionary[cache_key] = [steps, first_waypoint]

        # return cached [steps, first waypoint]
        return self.stepDictionary[cache_key]

    def shouldCoop(self, obs, tsk, id, close):
        """True if another agent with the same task is within ``close``
        pixels of this agent."""
        joint = False
        # BUGFIX: was xrange(5), which never considered agent 5 of the 6
        for i in xrange(6):
            if i != id:
                if self.teamdata[i]['task'] == tsk:
                    locfriend = self.teamdata[i]['pos']
                    # BUGFIX: the second term of the original distance used
                    # the x-coordinates again, so the y-offset was ignored
                    dx = obs.loc[0] - locfriend[0]
                    dy = obs.loc[1] - locfriend[1]
                    if math.sqrt(dx*dx + dy*dy) < close:
                        joint = True

        return joint

    def foeObservation(self, obs):
        """Classify each visible foe by mutual shootability.

        Returns [(sMe, #foes - sMe, sI, #foes - sI), observedFoeData]:
          sMe : number of observed foes who can shoot me
          sI  : number of observed foes i can shoot
          observedFoeData: one (HeCanShootMe, ICanShootHim) tuple per foe
          in obs.foes order (1 = true / 0 = false).
        """
        sMe = 0
        sI = 0
        observedFoeData = []

        for foe in obs.foes:
            # check if the foe is within shooting range
            if point_dist(obs.loc, foe[0:2]) < self.settings.max_range:

                obstructed = False
                # check if foe is obstructed by friends, also prevents
                # shooting friends later
                for friend in obs.friends:
                    if line_intersects_circ(obs.loc, foe[0:2], friend[0:2], 6):
                        obstructed = True
                        break

                # check if foe is obstructed by other foes; this matters
                # because then he can't shoot me either
                if obstructed is False:
                    for foe2 in obs.foes:
                        if foe != foe2:
                            if line_intersects_circ(obs.loc, foe[0:2], foe2[0:2], 6):
                                obstructed = True
                                break

                # check if there is a wall in between
                if obstructed is False:
                    obstructed = line_intersects_grid(obs.loc, foe[0:2], self.grid, self.settings.tilesize)

                # shot is clear!
                if obstructed is False:
                    # determine relative angles between the agents
                    dx_me = foe[0] - obs.loc[0]
                    dy_me = foe[1] - obs.loc[1]
                    angle_me = angle_fix(math.atan2(dy_me, dx_me) - obs.angle)

                    dx_he = obs.loc[0] - foe[0]
                    dy_he = obs.loc[1] - foe[1]
                    angle_he = angle_fix(math.atan2(dy_he, dx_he) - foe[2])

                    HeShootMe = 0
                    IShootHim = 0

                    # he can shoot me if i am within his turning arc
                    if abs(angle_he) <= self.settings.max_turn:
                        HeShootMe = 1
                        sMe += 1

                    # i can shoot him if he is within mine
                    if abs(angle_me) <= self.settings.max_turn:
                        IShootHim = 1
                        sI += 1

                    observedFoeData.append((HeShootMe, IShootHim))
                else:
                    observedFoeData.append((0, 0))
            else:
                observedFoeData.append((0, 0))

        foeData = [(sMe, len(obs.foes) - sMe, sI, len(obs.foes) - sI), observedFoeData]

        return foeData

    ## TODO --> improved pathplanning?
    def pathPlanner(self):
        # BUGFIX: 'self' was missing, so calling this stub on an instance
        # raised a TypeError
        path = []
        return path

    def getClosestAmmo(self, pos, id, taskloc):
        """Pick the ammo location minimizing total steps pos -> ammo -> task.

        Accounts for the time spent waiting if the ammo has not respawned
        when the agent arrives.  Returns (totaldist, ammodist, ammoloc);
        ammoloc is None when no ammo location is known yet.
        """
        totaldist = 10000   # sentinel: any real path is shorter
        ammodist = 0
        ammoloc = None

        for ammo in self.ammodata:
            # timings
            rt = self.ammodata[ammo][0]     # respawn time (True if present now)
            t = self.timestep               # current time

            # gamesteps to get to Ammo from Pos
            s_posToAmmo = self.getNumberOfSteps(pos, ammo)[0]

            # time to wait at the ammo location for the respawn
            s_ammoWait = rt - (t + s_posToAmmo)
            if (s_ammoWait < 0):
                # Ammo respawns before arrival
                s_ammoWait = 0

            # gamesteps to get from Ammo to Task
            s_ammoToTask = self.getNumberOfSteps(ammo, taskloc)[0]

            # total steps to get at task via ammo
            s_total = s_posToAmmo + s_ammoWait + s_ammoToTask

            # keep the cheapest detour
            if s_total < totaldist:
                totaldist = s_total
                ammodist = s_posToAmmo + s_ammoWait
                ammoloc = ammo

        return (totaldist, ammodist, ammoloc)

    def updateAmmoData(self, pos, ammopacks):
        """Track ammo respawn state from a partial observation.

        - observable & ammo present                 -> respawn = True
        - observable & ammo just disappeared        -> respawn = now + ammo_rate
        - observable & still missing                -> unchanged
        - not observable & respawn time reached     -> respawn = True
        - not observable otherwise                  -> unchanged
        """
        obs_dist = self.settings.max_see
        for ammo in self.ammodata:
            dx = ammo[0] - pos[0]
            dy = ammo[1] - pos[1]

            # is the ammo location observable from pos?
            if (-obs_dist < dx < obs_dist) and (-obs_dist < dy < obs_dist):
                # --> position is observable; is there ammo?
                if ammo in ammopacks:
                    # --> there is ammo
                    self.ammodata[ammo][0] = True
                else:
                    # --> no ammo; was it there in the previous step?
                    if self.ammodata[ammo][0] == True:
                        # --> yes, so it was just taken: schedule respawn
                        self.ammodata[ammo][0] = self.timestep + self.settings.ammo_rate
            else:
                # --> position is not observable; does it respawn now?
                if self.ammodata[ammo][0] == self.timestep:
                    # respawn!
                    self.ammodata[ammo][0] = True

        # register newly observed ammo locations (the field holds 6);
        # the field is horizontally symmetric, so also add the mirror spot
        if (len(self.ammodata) != 6):
            for ammo in ammopacks:
                if ammo not in self.ammodata:
                    self.ammodata[ammo] = [True, None]
                    self.ammodata[(self.fieldsize[0] - ammo[0], ammo[1])] = [True, None]

    def determineAction(self, obs, goal, shoot):
        """Translate a goal position into a (turn, speed, shoot) command."""
        # Compute path, angle and drive
        path = find_path(obs.loc, goal, self.mesh, self.grid, self.settings.tilesize)
        if path:
            dx = path[0][0] - obs.loc[0]
            dy = path[0][1] - obs.loc[1]
            turn = angle_fix(math.atan2(dy, dx) - obs.angle)
            speed = (dx**2 + dy**2)**0.5
            if turn > self.settings.max_turn or turn < -self.settings.max_turn:
                # must turn first: hold fire and don't move
                shoot = False
                speed = 0

            # check if the segment to the next waypoint is obstructed
            # BUGFIX: the original tested the segment ((loc[1], loc[1]) ->
            # (dx, dy)) -- a bogus start point and a relative offset as
            # endpoint; use the actual agent and waypoint positions
            obstructed = False
            for foe4 in obs.foes:
                if line_intersects_circ(obs.loc, (path[0][0], path[0][1]), (foe4[0], foe4[1]), 6):
                    obstructed = True
                    break
            if not obstructed:
                for friend4 in obs.friends:
                    if line_intersects_circ(obs.loc, (path[0][0], path[0][1]), (friend4[0], friend4[1]), 6):
                        obstructed = True
                        break
            if obstructed:
                # sidestep by picking a small random turn
                turn = random.uniform(-math.pi/6, math.pi/6)
        else:
            turn = 0
            speed = 0

        return (turn, speed, shoot)
        



class Agent(object):
    # Team coordination is layered: GlobalLevel assigns one of three guard
    # tasks to each agent, LocalLevel turns a task into a concrete goal
    # position, and LowLevel turns the goal into a (turn, speed, shoot)
    # action. All three layers are shared across the six agents via class
    # attributes, initialized once by agent 0.

    # Agent name reported to the game engine / UI.
    NAME = "DikkeBillen_jwt"
    
    def __init__(self, id, team, blob=None, settings=None, field_rects=None, field_grid=None, nav_mesh=None):
        """ Each agent is initialized at the beginning of each game.
            The first agent (id==0) can use this to set up global variables.
            Note that the properties pertaining to the game field might not be
            given for each game.
        """
        self.id = id
        self.team = team
        self.mesh = nav_mesh
        self.grid = field_grid
        self.settings = settings
        # Current navigation goal; set every step in action(), drawn in debug().
        self.goal = None
        
        # Recommended way to share variables between agents.
        # Only agent 0 initializes the team-shared (class-level) state, so
        # this happens exactly once per game.
        if id == 0:
            # set parameters
            self.__class__.global_ammo_loc = []   # known ammo spawn positions (incl. mirrored spots)
            self.__class__.ammodata = {}          # ammo availability/respawn bookkeeping (used by LowLevel)
            self.__class__.global_foes_loc = []
            self.__class__.teamdata = {0:{}, 1:{}, 2:{}, 3:{}, 4:{}, 5:{}}   # per-agent shared info ('pos', 'ammo', CP distances, 'task')
            self.__class__.timesteps = 0
            # field size in pixels
            self.__class__.FIELDSIZE = (settings.tilesize*len(self.grid[0]), settings.tilesize*len(self.grid))
            
            # init different levels
            self.__class__.LowLevel = LowLevel(nav_mesh, field_grid, settings, self.__class__.FIELDSIZE, self.__class__.teamdata)
            self.__class__.LocalLevel = LocalLevel(self.__class__.FIELDSIZE, settings, team, self.__class__.LowLevel, field_grid, blob, self.__class__.teamdata)
            self.__class__.GlobalLevel = GlobalLevel(self.__class__.LowLevel, self.__class__.teamdata)
          

    def observe(self, observation):
        """ Each agent is passed an observation using this function,
            before being asked for an action. You can store either
            the observation object or its properties to use them
            to determine your action. Note that the observation object
            is modified in place.
        """
        self.observation = observation
        
        #next timestep!
        # Only agent 0 advances the shared clock, so it ticks once per game step.
        if (self.id == 0):
            self.__class__.LowLevel.timestep += 1
            
        # sort control point list:
        # Home-base CPS first
        # Mid-field CPS second
        # Foe-base CPS third
        # (team 1's base is on the opposite side, so its ordering is reversed)
        self.observation.cps.sort()
        if self.team == 1:
            self.observation.cps.reverse()

        # is agent selected in interface?
        self.selected = observation.selected
        
        # Calculate the position of the Ammo-Locations
        ammopacks = filter(lambda x: x[2] == "Ammo", observation.objects)
        for pack in ammopacks:
            if pack[0:2] not in self.__class__.global_ammo_loc:
                self.__class__.global_ammo_loc.append(pack[0:2])
                # calculate position of mirrored version
                # (the map is assumed symmetric around its vertical centre line)
                self.__class__.global_ammo_loc.append((self.__class__.FIELDSIZE[0] - pack[0], pack[1]))                
        
        
        # New ammo-datastructure (with lock/unlock and respawn mechanism):
        # strip the "Ammo" tag, keeping only the (x, y) positions.
        ammopacks = filter(lambda x: x[2] == "Ammo", observation.objects)
        ammopacks2 = []
        for i in ammopacks:
            ammopacks2.append(i[0:2])
        ammopacks = ammopacks2
        self.__class__.LowLevel.updateAmmoData(observation.loc, ammopacks)
        
        #if self.id == 5:
        #    print str(self.__class__.LowLevel.ammodata)
        
        
        # setup team data
        self.__class__.teamdata[self.id]['pos'] = self.observation.loc
        self.__class__.teamdata[self.id]['ammo'] = self.observation.ammo
        # the distance to the CPS (in path steps); these feed the cost matrix
        # used by GlobalLevel.divideTasks() for task assignment
        self.__class__.teamdata[self.id][0] = self.__class__.LowLevel.getNumberOfSteps(self.observation.loc, self.observation.cps[0][0:2])[0]
        self.__class__.teamdata[self.id][1] = self.__class__.LowLevel.getNumberOfSteps(self.observation.loc, self.observation.cps[1][0:2])[0]
        self.__class__.teamdata[self.id][2] = self.__class__.LowLevel.getNumberOfSteps(self.observation.loc, self.observation.cps[2][0:2])[0]
        
        # if all agents have updated their position et cetera, divide tasks among them
        # (this only has to be called once, therefore agent5 will do this)
        # NOTE(review): assumes observe() is called in agent-id order so that
        # agent 5 runs last — confirm against the game framework
        if (self.id == 5):
            #print "call"
            self.__class__.GlobalLevel.divideTasks()
        
                
    def action(self):
        """ This function is called every step and should
            return a tuple in the form: (turn, speed, shoot)
        """

        obs = self.observation
        #determine task of agent (assigned earlier by GlobalLevel.divideTasks)
        agent_task = self.__class__.GlobalLevel.determineTask(self.id)
        #print "TASK: " + str(self.id) + " - " + str(agent_task)
        
        #determine goal, based on task
        [goal,shoot] = self.__class__.LocalLevel.determineGoal(obs, agent_task, self.__class__.global_ammo_loc, self.id)
        #print "GOAL: " + str(self.id) + " - " + str(goal)
        self.goal = goal
        
        # determine action of agent, based on goal
        action = self.__class__.LowLevel.determineAction(obs, goal, shoot)
        
        #return length of q_table (measure for number of states visited)
        #if (self.id == 0) and ((obs.step+1)%10==0):
        #    self.__class__.LocalLevel.printQtableStats()

        return action
        
    def debug(self, surface):
        """ Allows the agents to draw on the game UI,
            Refer to the pygame reference to see how you can
            draw on a pygame.surface. The given surface is
            not cleared automatically. Additionally, this
            function will only be called when the renderer is
            active, and it will only be called for the active team.
        """
        import pygame
        # First agent clears the screen
        if self.id == 0:
            surface.fill((0,0,0,0))
        # Selected agents draw their info
        if self.selected:
            if self.goal is not None:
                # goal of agent
                pygame.draw.line(surface,(0,0,0), self.observation.loc, self.goal)
                '''
                # draw shoot-range circle
                pygame.draw.circle(surface, (0,0,0), self.observation.loc, self.settings.max_range, 1)
                dx_r = int(math.sin(self.observation.angle + self.settings.max_turn) * self.settings.max_range)
                dy_r = int(math.cos(self.observation.angle + self.settings.max_turn) * self.settings.max_range)
                dx_l = int(math.sin(self.observation.angle - self.settings.max_turn) * self.settings.max_range)
                dy_l = int(math.cos(self.observation.angle - self.settings.max_turn) * self.settings.max_range)
                endpoint_r = (self.observation.loc[0] + dy_r, self.observation.loc[1] + dx_r)
                endpoint_l = (self.observation.loc[0] + dy_l, self.observation.loc[1] + dx_l)
                pygame.draw.line(surface, (0,0,0), self.observation.loc, endpoint_r)
                pygame.draw.line(surface, (0,0,0), self.observation.loc, endpoint_l)
                '''
         
        
    def finalize(self, interrupted=False):
        """ This function is called after the game ends, 
            either due to time/score limits, or due to an
            interrupt (CTRL+C) by the user. Use it to
            store any learned variables and write logs/reports.
        """
        pass

        
'''
ADDITIONAL FUNCTIONS

    def hideLocation(self, obs): # Finds a hide spot reachable within one round; returns a goal location or two (depends on turning). If these were also pruned there would be too few possibilities.
        hidespots=[]
        for i in xrange(1,6):
            hidespots.append([])
            for j in xrange(1,6):
                if obs.walls[i][j]==1:#you're inside a wall
                    hidespots[-1].append(0)
                else:
                    hidespots[-1].append(obs.walls[i][j+1]+obs.walls[i][j-1]+obs.walls[i-1][j-1]+obs.walls[i-1][j]+obs.walls[i-1][j+1]+obs.walls[i+1][j-1]+obs.walls[i+1][j]+obs.walls[i+1][j+1])
        #for i in obs.walls: 
        #    print i
        #for i in hidespots:
        #    print i
        #print "holla"
        #determine best location
        bestX=0
        bestY=0
        for x in xrange(5):
            for y in xrange(5):
                if hidespots[x][y]>hidespots[bestX][bestY]:
                    bestX=x
                    bestY=y


        hidegoal=[obs.loc[0]+16*(bestX-3),obs.loc[1]-16*(bestY-3)]
        
        #print hidegoal
        return  hidegoal

    def guardHeading(self,obs): #faceclosest controlpoint
        #find closest
        distance=99999
        closestcp=[0,0]
        for cp in obs.cps:
            if math.sqrt(math.pow(obs.loc[0]-cp[0],2)+math.pow(obs.loc[1]-cp[1],2))<distance:
                closestcp=[cp[0],cp[1]]

        angle=obs.angle-(math.atan2(obs.loc[0]-cp[0],obs.loc[1]-cp[1])+math.pi)
        if angle>math.pi/6:
            angle=math.pi/6

        if angle<-math.pi/6:
            angle=-math.pi/6

        return angle
'''
