import random
import math
import pickle

def createQtable():
    """Create an empty Q-table and persist it to 'Qtable_agent3_dict.txt'.

    BUGFIX: the previous version instantiated LocalLevel with the wrong
    number of arguments and a None blob (whose __init__ immediately tries
    to pickle.load that blob) just to obtain an empty dict.  Build the
    empty table directly instead.
    """
    qtable = {}
    # pickle is a binary protocol: the file must be opened in binary mode
    # ('wb', not 'w'); the context manager guarantees the file is closed.
    with open('Qtable_agent3_dict.txt', 'wb') as blobfile:
        pickle.dump(qtable, blobfile)

#tactics
class GlobalLevel():
    """Team-level tactics: hands every agent its standing task.

    Task codes:
        0 -- guard the home-base control point
        1 -- guard the mid-field control point
        2 -- guard the foe-base control point
    """

    # default task per agent id (agents 0..5)
    task = [0, 0, 1, 1, 2, 2]

    # reference to the shared LowLevel module
    LowLevelModule = None

    def __init__(self, lowlevelmodule):
        self.LowLevelModule = lowlevelmodule

    ## TODO: passing tasks
    ## ---> determining which agent does what
    def determineTask(self, id):
        """Return the task code assigned to agent *id*."""
        return self.task[id]

        
#agent survival
class LocalLevel():
    """Per-agent survival layer: Q-learning over a small hand-crafted state space.

    State features (see getState):
        AmmoAmount    = 3 values [0:0, 1:1-2, 2:3+]
        AmmoDistance  = 4 values [0:0, 1:1, 2:2, 3:3+]
        TaskDistance  = 4 values [0:0, 1:1, 2:2, 3:3+]
        FoeCanShootMe = 6 values
        ICanShootFoe  = 6 values
    """

    # when True the Q-table is updated while playing
    LEARN = True
    # size of play field (in pixels)
    fieldsize = [0, 0]
    # settings, set by domination-game
    settings = None
    # team color
    team = None
    # pointer to LowLevelModule
    LowLevelModule = None

    # Q-learning settings
    Qtable = {}
    old_state = None
    Gamma = 0.9
    Alpha = 0.1
    Epsilon = 0.1
    # NOTE(review): unused; name is a typo of "Lambda" -- kept for compatibility
    Lamdba = 0.8

    ## TODO:
    # - implement:         sameTaskFriends  [7]
    # - implement:         diffTaskFriends  [7]

    # number of actions and their codes
    num_of_actions = 4
    aPUT = 0   # stay put
    aTSK = 1   # move towards the assigned control point
    aAMMO = 2  # move to the closest known ammo location
    aFOES = 3  # attack a foe

    def __init__(self, fieldsize, settings, team, lowlevelmodule, field_grid, blob):
        """Store game references and load the persisted Q-table from *blob*.

        blob -- an open binary file object containing a pickled Q-table dict.
        """
        self.fieldsize = fieldsize
        self.settings = settings
        self.team = team
        self.LowLevelModule = lowlevelmodule
        self.grid = field_grid
        self.blobfile = blob
        # SECURITY NOTE: pickle.load executes arbitrary code on malicious
        # input -- only load Q-table blobs from trusted sources.
        self.Qtable = pickle.load(blob)

## Q-table functions
    def initQtable(self):
        """Return a fresh, empty Q-table."""
        return {}

    def saveQtable(self):
        """Persist the current Q-table to the blob file given at construction."""
        pickle.dump(self.Qtable, self.blobfile)

    def printQtableStats(self):
        # number of visited state/action pairs
        # (print() works in both Python 2 and 3; the bare statement did not)
        print(len(self.Qtable))

    def getValue(self, s, a):
        """Return Q(s, a); unseen state/action pairs default to 0.

        s -- 5-tuple state (see getState)
        a -- action code
        """
        stateaction = (s[0], s[1], s[2], s[3], s[4], a)
        return self.Qtable.get(stateaction, 0)

    def setValue(self, s, a, v, r):
        """Q-learning update for state *s* and action *a*.

        v -- value of the best action in the successor state
        r -- reward received for the transition
        """
        stateaction = (s[0], s[1], s[2], s[3], s[4], a)
        if stateaction in self.Qtable:
            old_value = self.getValue(s, a)
            self.Qtable[stateaction] = old_value + self.Alpha * (r + self.Gamma * v - old_value)
        else:
            self.Qtable[stateaction] = self.Alpha * (r + self.Gamma * v)
## Q-table functions /

    ## TODO:
    # - implement:         sameTaskFriends  [7]
    # - implement:         diffTaskFriends  [7]
    def getState(self, obs, tsk, ammo):
        """Map an observation to the discrete 5-feature state tuple.

        obs  -- observation object passed by the game
        tsk  -- task of this agent (0-2)
        ammo -- globally known ammo locations
        """
        # AmmoAmount: 3 states
        if obs.ammo == 0:
            AmmoState = 0
        elif 1 <= obs.ammo <= 2:
            AmmoState = 1
        else:  # 3 or more
            AmmoState = 2

        # AmmoDistance: 4 states -- steps to the closest known ammo location.
        # BUGFIX: the old code stored the *location* tuple instead of the
        # distance, then compared that tuple against the int clamp 3.
        best_steps = None
        for loc in ammo:
            steps = self.LowLevelModule.getNumberOfSteps(obs.loc, loc)
            if best_steps is None or steps < best_steps:
                best_steps = steps
        # no known ammo location counts as maximum distance; clamp at 3
        AmmoDistanceState = 3 if best_steps is None else min(best_steps, 3)

        # TaskDistance: 4 states -- steps to the assigned control point
        if 0 <= tsk <= 2:
            t_pos = obs.cps[tsk][0:2]
        else:
            t_pos = [0, 0]
        TaskState = min(self.LowLevelModule.getNumberOfSteps(obs.loc, t_pos), 3)

        # FoeCanShootMe / ICanShootFoe: 6 states each
        foeData = self.LowLevelModule.foeObservation(obs)
        observedFoes = foeData[0][0] + foeData[0][1]

        sMe = min(foeData[0][0], 2)   # foes that can shoot me (clamped)
        sMeN = min(foeData[0][1], 2)  # foes that can NOT shoot me (clamped)
        sI = min(foeData[0][2], 2)    # foes I can shoot (clamped)
        sIN = min(foeData[0][3], 2)   # foes I can NOT shoot (clamped)

        if observedFoes >= 2:
            FoeCanShootMeState = 3 + sMeN
            ICanShootFoeState = 3 + sIN
            if sMeN == 1 and sMe == 0:
                FoeCanShootMeState += 1
            if sIN == 1 and sI == 0:
                ICanShootFoeState += 1
        elif observedFoes == 1:
            FoeCanShootMeState = 1 + sMeN
            ICanShootFoeState = 1 + sIN
        else:  # no observed foes
            FoeCanShootMeState = 0
            ICanShootFoeState = 0

        return (AmmoState, AmmoDistanceState, TaskState, FoeCanShootMeState, ICanShootFoeState)

    def takeAction(self, a, obs, tsk, ammo):
        """Map action code *a* to a (goal, shoot) tuple.

        obs  -- agent observation, passed by the game
        tsk  -- task assigned by GlobalLevel
        ammo -- globally known ammo locations
        """
        goal = None
        shoot = False

        if a == self.aPUT:  # stay put
            goal = obs.loc

        elif a == self.aTSK:  # go to the assigned control point
            goal = obs.cps[tsk][0:2]

        elif a == self.aAMMO:  # move to the closest ammo location
            dist = 10000
            for loc in ammo:
                steps = self.LowLevelModule.getNumberOfSteps(obs.loc, loc)
                if steps < dist:
                    dist = steps
                    goal = loc
            # no known ammo location: stay where we are
            if goal is None:
                goal = obs.loc

        elif a == self.aFOES:  # attack a foe
            # TODO(review): foeObservation was already computed for getState
            # this step; calling it twice per step is wasteful.
            foeData = self.LowLevelModule.foeObservation(obs)
            # pick the first foe we can actually shoot
            for i in range(len(foeData[1])):
                if foeData[1][i][1] == 1:
                    goal = obs.foes[i][0:2]
                    break
            # fall back to a random visible foe if none is shootable
            # TODO(review): ideally this action is never selected when
            # there is no shootable target.
            if goal is None:
                goal = random.choice(obs.foes)[0:2]

            # only shoot when we actually have ammo
            shoot = obs.ammo > 0

        return (goal, shoot)

    def getReward(self, obs, tsk):
        """Compute the reward for the transition from the previous step to *obs*.

        Uses self.old_obs / self.old_shoot, which determineGoal stores each step.
        """
        reward = 0
        myTeam = self.team
        otherTeam = 1 - self.team

        # control-point ownership changes around the assigned task:
        # losing your point is punished, (re)taking it is rewarded.
        # BUGFIX/cleanup: team colors are compared with ==, not identity
        # (the old `is` checks only worked by the CPython small-int cache).
        if tsk in (0, 1, 2):
            new_cps_color = obs.cps[tsk][2]
            old_cps_color = self.old_obs.cps[tsk][2]
            if new_cps_color != old_cps_color:
                if new_cps_color == myTeam:
                    reward += 5
                    # extra bonus for taking it away from the foe
                    if old_cps_color == otherTeam:
                        reward += 5
                elif new_cps_color == otherTeam:
                    reward += -5
                    # extra penalty for losing our own point
                    if old_cps_color == myTeam:
                        reward += -5
                else:
                    # point became neutral
                    if old_cps_color == myTeam:
                        reward += -5
                    else:
                        reward += 5
            # unchanged ownership gives no reward (a small hold bonus was
            # experimented with and disabled)

        # reward for picking up ammo
        if self.old_obs.ammo < obs.ammo:
            reward += 1

        # reward for having shot at an enemy last step
        if self.old_shoot:
            reward += 2

        # penalty if the agent got killed in the previous state
        if obs.respawn_in == (self.settings.spawn_time - 1):
            reward += -5

        return reward

    def determineGoal(self, obs, tsk, ammo):
        """Pick the agent's next goal position.

        Determines the current state, chooses the best (or an exploratory)
        action, performs the Q-update for the previous step, and maps the
        chosen action to a goal position.  Returns (goal, shoot).
        """
        # get state
        state = self.getState(obs, tsk, ammo)

        # build the set of actions allowed in this state
        # (list(range(...)) so .remove works on Python 3 as well)
        actionset = list(range(self.num_of_actions))
        # no foes visible: attacking is pointless
        if not obs.foes:
            actionset.remove(self.aFOES)
        # never stand still: increases learning & score!
        actionset.remove(self.aPUT)

        # greedy action selection with random tie-breaking.
        # BUGFIX: the candidate list is reset on a strictly better value;
        # the old `>=` accumulation kept strictly inferior actions in the
        # list, so a worse action could be chosen over the best one.
        best_val = None
        best_actions = []
        for a in actionset:
            val = self.getValue(state, a)
            if best_val is None or val > best_val:
                best_val = val
                best_actions = [a]
            elif val == best_val:
                best_actions.append(a)
        best_action = random.choice(best_actions)

        # Q-learning
        if self.LEARN:
            # epsilon-greedy exploration
            if random.random() < self.Epsilon:
                best_action = random.choice(actionset)

            # update the previous state/action pair
            if self.old_state:
                reward = self.getReward(obs, tsk)
                self.setValue(self.old_state, self.old_action, best_val, reward)

            self.old_state = state
            self.old_action = best_action
            self.old_obs = obs

        goal, shoot = self.takeAction(best_action, obs, tsk, ammo)
        self.old_shoot = shoot
        # return goal (location to move to) and whether to shoot
        return goal, shoot
    
    
# hardcoded level
# turn/shoot/ all kind of low-level math functions
class LowLevel():
    """Hard-coded low level: path lengths, foe visibility and steering math."""

    mesh = None
    grid = None
    settings = None
    stepDictionary = {}

    def __init__(self, nav_mesh, field_grid, settings, stepDictionary):
        self.mesh = nav_mesh
        self.grid = field_grid
        self.settings = settings
        # shared cache: (startTileX, startTileY, goalTileX, goalTileY) -> steps
        self.stepDictionary = stepDictionary

    def getNumberOfSteps(self, start_pos, goal_pos):
        """Number of steps (without steering) from start_pos to goal_pos.

        start_pos -- start position in pixels
        goal_pos  -- goal position in pixels
        Results are cached per tile pair in stepDictionary.
        """
        # tile coordinates of start & goal
        startTileX = math.ceil(start_pos[0] / self.settings.tilesize)
        startTileY = math.ceil(start_pos[1] / self.settings.tilesize)
        goalTileX = math.ceil(goal_pos[0] / self.settings.tilesize)
        goalTileY = math.ceil(goal_pos[1] / self.settings.tilesize)

        # cache key identifying the path between start & goal
        # (renamed from `tuple`, which shadowed the builtin)
        key = (startTileX, startTileY, goalTileX, goalTileY)

        # not cached yet: compute and store
        if key not in self.stepDictionary:
            steps = 0
            path = find_path(start_pos, goal_pos, self.mesh, self.grid, self.settings.tilesize)
            if path:
                # sum the euclidean lengths of all path segments
                dx = path[0][0] - start_pos[0]
                dy = path[0][1] - start_pos[1]
                dst = math.sqrt(dx * dx + dy * dy)
                for i in range(1, len(path)):
                    dx = path[i][0] - path[i - 1][0]
                    dy = path[i][1] - path[i - 1][1]
                    dst += math.sqrt(dx * dx + dy * dy)
                steps = int(dst / self.settings.max_speed)
            self.stepDictionary[key] = steps

        return self.stepDictionary[key]

    def foeObservation(self, obs):
        """Determine, per observed foe, who can shoot whom.

        Returns foeData = [(sMe, notSMe, sI, notSI), observedFoeData] where
          sMe    : number of observed foes who can shoot me
          notSMe : number of observed foes who can not shoot me
          sI     : number of observed foes I can shoot
          notSI  : number of observed foes I can not shoot
          observedFoeData : per foe a tuple (HeCanShootMe, ICanShootHim),
                            1 = true / 0 = false
        """
        sMe = 0
        sI = 0
        observedFoeData = []

        for foe in obs.foes:
            # only consider foes within shooting range.
            # BUGFIX: the old distance was sqrt(dx^2 - dx^2), i.e. always 0,
            # so every foe passed the range check.
            dx = obs.loc[0] - foe[0]
            dy = obs.loc[1] - foe[1]
            if math.sqrt(dx * dx + dy * dy) < 40:
                unobstructed = True
                # friends block the line of fire (also prevents shooting friends).
                # BUGFIX: the segment start was (loc[1], loc[1]) instead of (x, y).
                for friend2 in obs.friends:
                    if line_intersects_circ((obs.loc[0], obs.loc[1]), (foe[0], foe[1]), (friend2[0], friend2[1]), 6):
                        unobstructed = False
                        break
                # other foes block as well: an obstructed foe can't shoot me
                if unobstructed:
                    for foe2 in obs.foes:
                        if foe != foe2:
                            if line_intersects_circ((obs.loc[0], obs.loc[1]), (foe[0], foe[1]), (foe2[0], foe2[1]), 6):
                                unobstructed = False
                                break

                if unobstructed:
                    # angle of the line between the two agents
                    angle = math.atan2(obs.loc[1] - foe[1], obs.loc[0] - foe[0])
                    He = 0
                    Me = 0
                    # BUGFIX: the counters were swapped w.r.t. the documented
                    # return value (he-can-shoot-me incremented sI and v.v.)
                    # can he shoot me?
                    if math.fabs(angle + angle_fix(foe[2])) < (math.pi / 6):
                        He = 1
                        sMe += 1
                    # can I shoot him?
                    if math.fabs(angle - angle_fix(obs.angle)) < (math.pi / 6):
                        Me = 1
                        sI += 1
                    observedFoeData.append((He, Me))
                else:
                    observedFoeData.append((0, 0))
            else:
                observedFoeData.append((0, 0))

        foeData = [(sMe, len(obs.foes) - sMe, sI, len(obs.foes) - sI), observedFoeData]
        return foeData

    ## TODO --> improved pathplanning?
    def pathPlanner(self):
        """Placeholder for improved path planning (BUGFIX: was missing *self*)."""
        return []

    def determineAction(self, obs, goal, shoot):
        """Compute the (turn, speed, shoot) command to move towards *goal*."""
        path = find_path(obs.loc, goal, self.mesh, self.grid, self.settings.tilesize)
        if path:
            dx = path[0][0] - obs.loc[0]
            dy = path[0][1] - obs.loc[1]
            turn = angle_fix(math.atan2(dy, dx) - obs.angle)
            speed = (dx ** 2 + dy ** 2) ** 0.5
            # can't face the waypoint within one tick: stop and hold fire
            if turn > self.settings.max_turn or turn < -self.settings.max_turn:
                shoot = False
                speed = 0

            # if the direct line to the next waypoint is blocked by another
            # agent, make a small random turn instead of driving into them.
            # BUGFIX: the old check used (loc[1], loc[1]) as start and the
            # *deltas* (dx, dy) as end point, and raised NameError when no
            # path existed because dx/dy were undefined.
            waypoint = (path[0][0], path[0][1])
            obstructed = False
            for other in obs.foes + obs.friends:
                if line_intersects_circ((obs.loc[0], obs.loc[1]), waypoint, (other[0], other[1]), 6):
                    obstructed = True
                    break
            if obstructed:
                turn = random.uniform(-math.pi / 6, math.pi / 6)
        else:
            turn = 0
            speed = 0

        return (turn, speed, shoot)
        



class Agent(object):

    NAME = "Agent_007"

    def __init__(self, id, team, settings=None, field_rects=None, field_grid=None, nav_mesh=None):
        """ Each agent is initialized at the beginning of each game.
            The first agent (id==0) sets up the variables shared by the team.
            Note that the properties pertaining to the game field might not
            be given for each game.
        """
        self.id = id
        self.team = team
        self.mesh = nav_mesh
        self.grid = field_grid
        self.settings = settings
        self.goal = None
        self.old_state = []
        self.old_action = []
        self.old_obs = []
        self.old_shoot = False

        # Recommended way to share variables between agents:
        # the first agent initializes the class-level state.
        if id == 0:
            cls = self.__class__
            cls.global_ammo_loc = []
            cls.global_foes_loc = []
            cls.stepDictionary = {}
            # field size in pixels
            cls.FIELDSIZE = (settings.tilesize * len(self.grid[0]), settings.tilesize * len(self.grid))

            # wire up the decision levels.
            # NOTE(review): `blob` is not defined anywhere in this file --
            # presumably injected by the game framework; verify before
            # running standalone.
            cls.LowLevel = LowLevel(nav_mesh, field_grid, settings, cls.stepDictionary)
            cls.LocalLevel = LocalLevel(cls.FIELDSIZE, settings, team, cls.LowLevel, field_grid, blob)
            cls.GlobalLevel = GlobalLevel(cls.LowLevel)

    def observe(self, observation):
        """ Each agent is passed an observation using this function,
            before being asked for an action. Note that the observation
            object is modified in place.
        """
        self.observation = observation

        # sort the control point list:
        # home-base CPS first, mid-field second, foe-base third
        self.observation.cps.sort()
        if self.team == 1:
            self.observation.cps.reverse()

        self.selected = observation.selected

        # remember every ammo location we have ever seen
        for obj in observation.objects:
            if obj[2] == "Ammo" and obj[0:2] not in self.__class__.global_ammo_loc:
                self.__class__.global_ammo_loc.append(obj[0:2])
                # the field is symmetric: also record the mirrored location
                self.__class__.global_ammo_loc.append((self.__class__.FIELDSIZE[0] - obj[0], obj[1]))

        # rebuild the list of currently observed foes each step
        if self.id == 0:
            self.__class__.global_foes_loc = []
        for foe in observation.foes:
            if foe not in self.__class__.global_foes_loc:
                self.__class__.global_foes_loc.append(foe)

    def action(self):
        """ This function is called every step and should
            return a tuple in the form: (turn, speed, shoot)
        """
        obs = self.observation

        # which control point is this agent responsible for?
        agent_task = self.__class__.GlobalLevel.determineTask(self.id)

        # translate the task into a concrete goal position
        [goal, shoot] = self.__class__.LocalLevel.determineGoal(obs, agent_task, self.__class__.global_ammo_loc)
        self.goal = goal

        # turn the goal into a low-level movement command
        return self.__class__.LowLevel.determineAction(obs, goal, shoot)

    def debug(self, surface):
        """ Allows the agents to draw on the game UI.
            Refer to the pygame reference to see how you can draw on a
            pygame.surface. The given surface is not cleared automatically,
            and this function is only called when the renderer is active,
            and only for the active team.
        """
        import pygame
        # the first agent wipes the surface
        if self.id == 0:
            surface.fill((0, 0, 0, 0))
        # selected agents draw a line to their current goal
        if self.selected and self.goal is not None:
            pygame.draw.line(surface, (0, 0, 0), self.observation.loc, self.goal)

    def finalize(self, interrupted=False):
        """ Called after the game ends, either due to time/score limits or
            due to a user interrupt (CTRL+C); persists the learned Q-table.
        """
        self.__class__.LocalLevel.saveQtable()
        
