'''
Created on 21 sep 2010

@author: Xakt
'''

import math
import pickle
import random
import traceback, sys
import heapq
import copy

def compare(a, b):
    """cmp-style comparator ordering A* nodes by priority f = cost + hcost.

    Returns -1 when a has the lower f-value, 0 when equal, 1 otherwise.
    (Python 2 comparator; used via ``pq.sort(compare)``.)
    """
    fa = a['cost'] + a['hcost']
    fb = b['cost'] + b['hcost']
    # (x > y) - (x < y) is the canonical sign/cmp idiom: yields -1, 0 or 1.
    return (fa > fb) - (fa < fb)

class AgentBrain():
    """Q-learning agent brain for a 2D control-point capture game.

    Keeps a table mapping an 8-character binary state string (built by
    toState) to Q-values for four high-level actions; each tick it updates
    the value of the previously executed action, then selects and executes
    the next one, returning a concrete {'turn', 'speed', 'shoot'} command.

    NOTE(review): this is Python 2 code (print statements, dict.iteritems,
    list.sort with a cmp function).
    """
    
    def __init__(self):
        # Previous state string / action name; '' means "no history yet".
        self.previousState = ''
        self.previousAction = ''
        # Initial Q-values for a newly encountered state.
        # NOTE(review): action() stores this dict *by reference* for every
        # new state, so all such states share (and mutate) one value table;
        # a per-state copy is presumably intended -- confirm before fixing.
        self.DEFAULTVALUES = {'goToControlpoint':1, 'shootEnemy':0, 'getAmmo':0, 'flee':0}
        # One-shot penalty folded into the next reward (set to -100 when a
        # chosen action turned out to be impossible, e.g. flee with no enemy).
        self.extraReward = 0
        # state string -> {action name: Q-value}
        self.stateActionDict = {}
        
    def action(self, observation):
        """Main entry point: learn from the last step, then pick an action.

        Returns a concrete command dict {'turn', 'speed', 'shoot'}.
        """
        state = self.toState(observation)
#        stateActionDict = self.openDict()
        stateActionDict = self.stateActionDict
        if not state in stateActionDict:
            # NOTE(review): stores the shared DEFAULTVALUES object itself,
            # not a copy -- see the __init__ note.
            stateActionDict[state] = self.DEFAULTVALUES
        
        stateActionDict = self.update(stateActionDict, observation, state)
#        self.saveDict(stateActionDict)
        action = self.selectAction(stateActionDict[state], observation)
        self.previousState = state
        return action
    
    def flee(self,observation):
        """Move away from the summed relative position of enemy agents.

        If no enemy is in the observation the flee was pointless: stand
        still and schedule a -100 reward penalty via extraReward.
        """
        agents = observation['agents']
        sumAgentLoc = (0,0)
        location = observation['location']
        for agent in agents:
            if not agent['team'] == observation['team']:
#                print 'AKJOF:FLKHKJLH KJHFKJSHD',sumAgentLoc
                # Accumulate enemy positions relative to our own location.
                relLoc = (agent['location'][0]-location[0],agent['location'][1]-location[1])
                sumAgentLoc = (sumAgentLoc[0]+relLoc[0],sumAgentLoc[1]+relLoc[1])
        # Mirror the enemy centroid through our position: the point to run toward.
        sumAgentLoc = (-sumAgentLoc[0]+location[0],-sumAgentLoc[1]+location[1])
        # not completely correct, but: if no enemy agent is found, no need to flee!
        # NOTE(review): after the line above, "no enemies" makes sumAgentLoc
        # equal to `location`, not (0,0) -- this check only fires when the
        # agent happens to stand at the origin; confirm intended behavior.
        if sumAgentLoc == (0,0):
            self.extraReward = -100
            return {
                    'turn': 0,
                    'speed' :0,
                    'shoot': False
                    }
#        print 'agents',agents
#        print 'team',observation['team']
#        print 'loc',location
#        print 'dir',observation['direction']
#        print 'sumloc',sumAgentLoc
        
        action = {
                   'turn': math.radians(self.angleWithAgent(sumAgentLoc, observation['location'], observation['direction'])),
                   'speed': 50,
                   'shoot': False
                  }
        
#        print 'action',action
        
        return action
    
    def selectAction(self, actionValues, observation):
        """Choose a high-level action (epsilon-greedy) and execute it.

        NOTE(review): EXPLORATIONFACTOR is 0, so the explore branch never
        runs.  The greedy branch appears buggy: the fallback string
        'goToControloint' is misspelled, and the loop always assigns the
        constant 'goToControlpoint' instead of the best-valued `action`,
        so the learned Q-values never influence the choice -- confirm and
        fix (`toDo = action`) when behavior changes are acceptable.
        """
        EXPLORATIONFACTOR = 0
        toDo = 'goToControlpoint'
        if random.random() < EXPLORATIONFACTOR:
            # explore: pick a uniformly random action from the table
            dictSize = len(actionValues)
            actions = actionValues.keys()
            toDo = actions[int(math.floor(dictSize*random.random()))]
        else:
            # greedy
            toDo = 'goToControloint'
            maxValue = -9999999
            for action, value in actionValues.iteritems():
                if value > maxValue:
                    maxValue = value
                    toDo = 'goToControlpoint'
        
        self.previousAction = toDo
        # Dispatch the chosen high-level action to its concrete behavior.
        if toDo == 'goToControlpoint':
            action = self.goToControlpoint(observation)
        elif toDo == 'shootEnemy':
            action = self.fireAtEnemy(observation)
        elif toDo == 'getAmmo':
            action = self.getAmmo(observation)
        elif toDo == 'flee':
            action = self.flee(observation)
        
        return action
    
    def getAmmo(self,observation):
        """Drive toward the closest visible ammopack within 200 units.

        With no ammopacks visible, stand still and schedule a -100 penalty.
        NOTE(review): if every pack is farther than 200, toPickup stays []
        and angleWithAgent will raise an IndexError -- confirm the range
        cap is intentional.
        """
        ammopacks = observation['ammopacks']
        if ammopacks == []:
            self.extraReward = -100
            return {
                    'turn': 0,
                    'speed': 0,
                    'shoot': False                  
                    }
        closest_distance = 200
        toPickup = []
        for ammopack in ammopacks:
            distance = self.getDistance(observation['location'], ammopack['location'])
            if distance < closest_distance:
                closest_distance = distance
                toPickup = ammopack['location']
        angle = self.angleWithAgent(toPickup, observation['location'], observation['direction'])
        # Only move forward when the target is within the 120-degree front cone.
        if angle >= -60 and angle <= 60:
            speed = closest_distance
        else:
            speed = 0
        action = {
                   'turn': math.radians(angle),
                   'speed': speed,
                   'shoot': False
                  }
        return action
    
    def getDistance(self, fromLoc, toLoc):
        """Euclidean distance between two (x, y) points."""
        distance = math.sqrt(math.pow((toLoc[0] - fromLoc[0]),2) + math.pow((toLoc[1] - fromLoc[1]),2))
        return distance
            
    def fireAtEnemy(self,observation):
        """Turn toward and shoot the closest enemy in the front cone.

        With no enemy in the +/-60 degree cone, stand still and schedule a
        -100 penalty.  NOTE(review): as in getAmmo, enemies all farther
        than 200 units leave toAttack == [] and the action build below
        raises -- confirm the range cap.
        """
        agents = observation['agents']
        enemiesInSight = []
        for agent in agents:
            if agent['team'] != observation['team']:
                angle = self.angleWithAgent(agent['location'], observation['location'], observation['direction'])
                if angle >= -60 and angle <= 60:
                    enemiesInSight.append(agent)
        if enemiesInSight == []:
            self.extraReward = -100
            return {
                    'turn': 0,
                    'speed' :0,
                    'shoot': False
                    }
        closest_distance = 200
        toAttack = []
        for enemy in enemiesInSight:
            distance = self.getDistance(observation['location'],enemy['location'])
            if distance < closest_distance:
                closest_distance = distance
                toAttack = enemy
        action = {
                   'turn': math.radians(self.angleWithAgent(toAttack['location'], observation['location'], observation['direction'])),
                   'speed': 0,
                   'shoot': True
                  }
        return action
            
    def goToControlpoint(self,observation):
        """Head for the closest control point not held by our team.

        When a wall blocks the straight line, plan around it with A* over
        corner waypoints.  NOTE(review): if every point is already
        controlled, `closest` stays [] and the angle computation below
        raises -- presumably the caller guarantees at least one
        uncontrolled point; verify.
        """
        controlpoints = observation['controlpoints']
        uncontrolled = []
        for controlpoint in controlpoints:
            if not self.isControlled(controlpoint, observation['team']):
                uncontrolled.append(controlpoint)
        closest_distance = 9999999999
        closest = []        
        for controlpoint in uncontrolled:
            distance = self.getDistance(observation['location'], controlpoint['location'])
            if distance < closest_distance:
                closest_distance = distance
                closest = controlpoint
        angle = self.angleWithAgent(closest['location'], observation['location'], observation['direction'])
        obstacle = self.obstacleCheck(observation['location'], observation['walls'], closest['location']); 
        if not obstacle == []:
            print 'Going for: ', closest
            #print 'WALL IN THE WAY!'
            # Plan around the blocking wall: build corner waypoints and A*
            # from the start node (id 1) to the goal node (id 2).
            nodes = self.createNodes(obstacle, observation['location'], closest['location'])
            path = self.aStar(self.getParent(1, nodes), self.getParent(2, nodes), nodes)
            if path == None:
                print 'NO PATH FOUND'
                angle = 0
            else:
                # Steer toward the first waypoint on the reconstructed path.
                (id, x, y, cost) = path[0]
                angle = self.angleWithAgent([x,y], observation['location'], observation['direction'])
                closest_distance = cost 
        if angle >= -60 and angle <= 60:
            speed = closest_distance
        else:
            speed = 0
        action = {
                   'turn': math.radians(angle),
                   'speed': speed,
                   'shoot': False
                  }
        return action                
    
    def reconstructPath(self, path, nodes):
        """Walk parent ids from the goal node back to the start (id 1).

        Returns the path as a list of (id, x, y, cost) tuples ordered
        start -> goal (start node itself excluded).
        """
        reconstruct = []
        while not path['id'] == 1:
            parent = self.getParent(path['parent'], nodes)
            tuple = (path['id'], path['x'], path['y'], path['cost'])
            reconstruct.append(tuple)
            path = parent
        reconstruct.reverse()
        return reconstruct
    
    def obstacleCheck(self, loc, walls, controlpoint):
        """Return the first wall whose diagonal intersects the loc->target segment.

        Each wall is treated as the line segment from (left, top) to
        (right, bottom).  Returns [] when the path is clear.
        NOTE(review): the "Vertical line" branch below is unreachable --
        u1 is already nudged by 0.0001 whenever u1 == u2, so the later
        equality test can never be true; confirm before relying on it.
        """
        for wall in walls:
            x1 = float(wall['left'])
            y1 = float(wall['top'])
            x2 = float(wall['right'])
            y2 = float(wall['bottom'])
            # Orient the sight segment so (u1, v1) is the leftmost endpoint.
            if controlpoint[0] < loc[0]:
                u1 = float(controlpoint[0])
                v1 = float(controlpoint[1])
                u2 = float(loc[0])
                v2 = float(loc[1])
            else:
                u2 = float(controlpoint[0])
                v2 = float(controlpoint[1])
                u1 = float(loc[0])
                v1 = float(loc[1])
            # Nudge to avoid division by zero for vertical segments.
            if x1 - x2 == 0:
                x1 = x1+0.0001
            if u1 - u2 == 0:
                u1 = u1+0.0001
            # Line through the wall diagonal: y = a1 + b1*x.
            b1 = (y1 - y2) / (x1 - x2)
            a1 = y1 - (b1*x1)
            if u1 - u2 == 0:
                # Vertical line
                y = a1 + (b1 * u1)
                if y < y1 or y > y2:
                    return []
                else:
                    return wall
            # Line through the sight segment: y = a2 + b2*x.
            b2 = (v1 - v2) / (u1 - u2)
            a2 = v1 - (b2*u1)
            # Parallel lines never intersect.
            if b1-b2 == 0:
                return []
            # Intersection point of the two infinite lines.
            xi = -1* ((a1 - a2) / (b1-b2))
            yi = a1 + (b1*xi) 
            # Intersection counts only if it lies within both segments' bounding ranges.
            if (x1-xi)*(xi-x2)>=0 and (u1-xi)*(xi-u2)>=0 and (y1-yi)*(yi-y2)>=0 and (v1-yi)*(yi-v2)>=0:
                return wall
        return []
            
    def createNodes(self, obstacle, start, goal):
        # Create nodes near all corners of the obstacle
        # and start and goal nodes.
        # Start node is id 1 (heuristic = straight-line distance to goal),
        # goal node is id 2 (zero costs).
        startNode = {'id': 1, 'x': start[0], 'y': start[1], 'parent': [], 'children': [], 'cost': 0, 'hcost': self.getDistance([start[0],start[1]], [goal[0],goal[1]])}
        goalNode = {'id': 2, 'x': goal[0], 'y': goal[1], 'parent': [], 'children': [], 'cost':0, 'hcost':0}
        nodes = self.makeWaypoints(obstacle, start, goal, startNode, goalNode)
        return nodes
        
    def makeWaypoints(self, wall, start, goal, sNode, gNode):
        """Build waypoint nodes offset +/-15 around the wall's corners.

        borderCheck returns 0 for coordinates that would leave the map,
        and such nodes are dropped below.
        NOTE(review): nodes 3-6 offset around (left, top) only and 7-10
        around (right, bottom) only -- the other two corners get no
        waypoints; confirm that is intended.  Also, removing items from
        `nodes` while iterating over it can skip elements.
        """
        node1 = {'id': 3, 'x': self.borderCheck(wall['left'],-15), 'y': self.borderCheck(wall['top'],-15), 'parent': [], 'children': []}
        node2 = {'id': 4, 'x': self.borderCheck(wall['left'],-15), 'y': self.borderCheck(wall['top'],+15), 'parent': [], 'children': []}
        node3 = {'id': 5, 'x': self.borderCheck(wall['left'],+15), 'y': self.borderCheck(wall['top'],-15), 'parent': [], 'children': []}
        node4 = {'id': 6, 'x': self.borderCheck(wall['left'],+15), 'y': self.borderCheck(wall['top'],+15), 'parent': [], 'children': []}
        node5 = {'id': 7, 'x': self.borderCheck(wall['right'],-15), 'y': self.borderCheck(wall['bottom'],-15), 'parent': [], 'children': []}
        node6 = {'id': 8, 'x': self.borderCheck(wall['right'],-15), 'y': self.borderCheck(wall['bottom'],+15), 'parent': [], 'children': []}
        node7 = {'id': 9, 'x': self.borderCheck(wall['right'],+15), 'y': self.borderCheck(wall['bottom'],-15), 'parent': [], 'children': []}
        node8 = {'id': 10, 'x': self.borderCheck(wall['right'],+15), 'y': self.borderCheck(wall['bottom'],+15), 'parent': [], 'children': []}
        nodes = [node1, node2, node3, node4, node5, node6, node7, node8, gNode, sNode]
        for node in nodes:
            if node['x'] == 0 or node ['y'] == 0:
                nodes.remove(node)
        nodes2 = self.setChildren(nodes, [gNode, sNode], wall)
        return nodes2
    
    def borderCheck(self, position, margin):
        """Apply `margin` to `position`; return 0 if it leaves the 0..1000 map.

        0 acts as an "invalid coordinate" sentinel consumed by makeWaypoints.
        """
        if margin < 0:
            if (position - margin) < 0:
                return 0#position
            else:
                return position + margin
        else:
            if (position + margin) > 1000:
                return 0#position
            else:
                return position + margin
    
    def aStar(self, start, goal, nodes):
        """A* search over the waypoint graph; returns a path or None.

        Nodes are plain dicts pushed straight onto a heapq, and the queue
        is re-sorted with the module-level `compare` cmp function (Python 2
        only).  NOTE(review): `visited.append(node)` sits inside the child
        loop, appending the same node once per child, and pushing dicts on
        a heap relies on Python 2's ability to order dicts -- both work
        but are fragile; confirm before porting.
        """
        pq = []
        heapq.heapify(pq)
        visited = []
        heapq.heappush(pq, start)
        
        while (len(pq) > 0):
            node = heapq.heappop(pq)
            if node not in visited:
                if node['x'] == goal['x'] and node['y'] == goal['y']:
                    return self.reconstructPath(node, nodes)
                else:
                    children = self.getChildren(node, nodes)
                    for child in children:
                        child['parent'] = node['id']
#                        parent = self.getParent(node['id'], nodes)
                        # g-cost: step distance from this node; h-cost:
                        # straight-line distance to the goal.
                        childCost = self.getDistance([child['x'], child['y']], [node['x'], node['y']])
                        child['cost'] = childCost
                        child['hcost'] = self.getDistance([child['x'], child['y']], [goal['x'], goal['y']])
                        heapq.heappush(pq, child)
                        visited.append(node)
                        pq.sort(compare)
        return None
    
    def getParent(self, id, nodes):
        """Look up a node dict by its 'id'; None when absent."""
        for n in nodes:
            if id == n['id']:
                return n
        return None
        
    def getChildren(self, node, nodes):
        """Resolve a node's child id list into the actual node dicts."""
        nb = node['children']
        children = []
        for number in nb:
            for n in nodes:
                if n['id'] == number:
                    children.append(n)
        return children
    
    def setChildren(self, list, goals, wall):
        """Wire up the directed visibility graph between waypoints.

        An edge node -> node2 is added when the wall does not block the
        segment between them and node2 lies in the travel direction
        (start toward goal), picked along the wall's long axis.
        NOTE(review): the parameter shadows the builtin `list`, and edges
        are recorded on the nodes of the original `list` while visibility
        is tested against the deep copy `list2`.
        """
        list2 = copy.deepcopy(list)
        gNode = goals[0]
        sNode = goals[1]
        for node in list:
            # Never add outgoing edges from the goal (id 2) ...
            if not node['id'] == 2:
                for node2 in list2:
                    # ... nor incoming edges into the start (id 1).
                    if not node['id'] == node2['id'] and not node2['id'] == 1:
                        if self.obstacleCheck([node['x'], node['y']], [wall], [node2['x'], node2['y']]) == []:
                            tmpNode = self.getParent(node2['id'], list)
                            if node['id'] not in tmpNode['children']:
                                if math.fabs(wall['top'] - wall['bottom']) > math.fabs(wall['left'] - wall['right']):
                                    # Vertical wall
                                    if sNode['x'] < gNode['x']:
                                        if node['x'] <= node2['x']: 
                                            node['children'].append(node2['id'])
                                    else:
                                        if node['x'] >= node2['x']:
                                            node['children'].append(node2['id'])
                                else:
                                    # Horizontal wall
                                    if sNode['y'] < gNode['y']:
                                        if node['y'] <= node2['y']: 
                                            node['children'].append(node2['id'])
                                    else:
                                        if node['y'] >= node2['y']:
                                            node['children'].append(node2['id'])
        return list
                        
        
    def angleWithAgent(self, agentlocation, location, direction):
        """Relative bearing (degrees) from our heading to a target point.

        Rotates the target's relative position into the agent's frame
        (direction is negated for the rotation) and returns atan2 of the
        result in degrees, in (-180, 180].
        """
        direction = -direction
        rel_x = agentlocation[0]-location[0]
        rel_y = agentlocation[1]-location[1]
        rot_x = rel_x * math.cos(direction) - rel_y * math.sin(direction)
        rot_y = rel_x * math.sin(direction) + rel_y * math.cos(direction)
        angle = math.atan2(rot_y, rot_x)
        
#        print 'dir',math.degrees(direction)
#        print 'loc',location
#        print 'rel_loc',rel_x,rel_y
#        print 'rot_x',rot_x,rot_y
#        print 'angle',math.degrees(angle)
        
        return math.degrees(angle)
    
    def isEnemy(self, agent, team):
        """True when the agent is not on `team`."""
        if agent['team'] == team:
            return False
        return True
    
    def isControlled(self, controlpoint, team):
        """True when the control point is held by `team`."""
        if controlpoint['team'] == team:
            return True
        return False
    
    def saveDict(self,stateValuesDict):
        """Pickle the Q-table to stateValues.pickle (currently unused --
        the call in action() is commented out)."""
        stateValuesFile = open('stateValues.pickle', 'wb')          
        pickle.dump(stateValuesDict,stateValuesFile,-1)
        # close file
        stateValuesFile.close()
    
    def openDict(self):
        """Unpickle the Q-table from stateValues.pickle; {} on failure
        (currently unused -- the call in action() is commented out)."""
        stateActionFile = open('stateValues.pickle', 'ab+')
        try:
            # try to open existing dictionary                               
            stateActionDict = pickle.load(stateActionFile)
        except:    
             # if it fails make a new empty one
             print "couldnt unpickle dict"
             stateActionDict = {}
        stateActionFile.close()       
        return stateActionDict
    
    def update(self, stateActionDict, observation, state):
        """One Q-learning step on the previous (state, action) pair.

        Q(s,a) += alpha * (reward + gamma * max_a' Q(s',a') - Q(s,a)),
        with alpha = 0.1 and gamma = 0.9.  Skipped on the very first tick
        (previousState == '').  Returns the (mutated) table.
        """
        if self.previousState != '':
            alpha = 0.1
            gamma = 0.9
        
            if self.previousState in stateActionDict:
                actionValues = stateActionDict[self.previousState];
            else:
                actionValues = self.DEFAULTVALUES
        
            prevValue = actionValues[self.previousAction]
            reward = self.getReward(observation)
            maxValue = self.getMaxValue(stateActionDict[state])
            actionValues[self.previousAction] = prevValue + alpha * (reward + gamma * maxValue - prevValue)
            stateActionDict[self.previousState] = actionValues
        return stateActionDict
#        stateValuesDict[previousState] = stateValuesDict[previousState] 
#            + self.alpha * (self.getReward(observation) + 
#                            self.gamma * MAX(stateValuesDict[state_string]) - 
#                            stateValuesDict[previousState])
    
    def getMaxValue(self, actionValues):
        """Largest Q-value in an action-value table."""
        toReturn = -999999
        for _action, value in actionValues.iteritems():
            if value > toReturn:
                toReturn = value
        return toReturn
    
    def getReward(self,observation):
        """Reward: +1 per own control point, -1 per enemy-held (non-Neutral)
        point, plus the one-shot extraReward penalty (then cleared)."""
        reward = 0      
        for controlpoint in observation['controlpoints']:
            if controlpoint['team'] == observation['team']:
                reward += 1
            if controlpoint['team'] != observation['team'] and controlpoint['team'] != 'Neutral':
                reward -= 1
        reward += self.extraReward
        self.extraReward = 0
        return reward
        
    def toState(self, observation):
        """Encode the observation as an 8-character binary state string.

        Layout: frontSlice [hasEnemy, hasFriendly, hasAmmo] for the
        +/-60 degree front cone, backSlice (same three bits) for
        everything else, then hasAmmo (own ammo > 0) and a flag for any
        uncontrolled control point.
        """
        # slices are ordered as: [ hasEnemy , hasFriendly, hasAmmo ]
        frontSlice = [0,0,0]
        backSlice = [0,0,0]
        
        location = observation['location']
        direction = observation['direction']
        agents = observation['agents']
        ammopacks = observation['ammopacks']
        controlpoints = observation['controlpoints']

        # check agents
        if agents != []:
            for agent in agents:
                angle = self.angleWithAgent(agent['location'],location,direction)
                if angle <= 60 and angle >= -60:
                    if self.isEnemy(agent, observation['team']):
                        frontSlice[0] = 1
                    else:
                        frontSlice[1] = 1                    
                else:
                    if self.isEnemy(agent, observation['team']):
                        backSlice[0] = 1
                    else:
                        backSlice[1] = 1
        
        # check ammopacks
        if ammopacks != []:
            for ammopack in ammopacks:
                angle = self.angleWithAgent(ammopack['location'],location,direction)
                if angle <= 60 and angle >= -60:
                    frontSlice[2] = 1
                else:
                    backSlice[2] = 1
                    
        # check ammo
        hasAmmo = 0
        if(observation['ammo'] > 0):
            hasAmmo = 1
        
        # check controlpoints
        uncontrolled_controlpoints = 0
        for controlpoint in controlpoints:
            if not self.isControlled(controlpoint, observation['team']):
                uncontrolled_controlpoints = 1
        
        # construct string
        stateList = frontSlice
        stateList.extend(backSlice)
        stateList.append(hasAmmo)
        stateList.append(uncontrolled_controlpoints)
        state = ''.join(str(n) for n in stateList)
        return state