class Shared:
    """Shared blackboard object in which the agents can add information
    about the world and the things they observe.

    Also owns the joint Q-learning machinery: the state/action value
    table, reward bookkeeping and epsilon-greedy action selection.
    """

    # Initial Q-value assigned to every action of a freshly seen state.
    DEFAULT_VALUE_FOR_ACTIONS = 10

    # Q-learning hyperparameters.
    LEARNING_RATE = 0.8
    DISCOUNT_FACTOR = 0.8

    def __init__(self, team, settings, state_dict=None):
        """Create the shared object for one team.

        team       -- team identifier (passed through to State)
        settings   -- game settings object (max_turn, max_range, ...)
        state_dict -- optional previously learned mapping of
                      {state_hash: {action: value}} (e.g. read from a
                      pickle) used to seed the Q-table
        """
        # Use a private, deterministically seeded generator: we never know
        # what the opponent bot does to the global `random` state, and a
        # fixed seed keeps runs reproducible.
        self.random = random.Random("yes")

        # Agents register themselves via add_agent(), so that agent.id
        # matches the index in this list.
        self.agents = []
        self.settings = settings

        print("settings %s: max_turn %d, max_range: %d" % (
            settings, settings.max_turn, settings.max_range))

        # Kill/death counters; reset every time a reward is computed.
        self.kills = 0
        self.deaths = 0

        # Known ammo spawn locations (map coordinates, red team's view).
        self.ammo_locations = [(184, 168), (312, 104)]

        # Known control point locations.
        self.cps = [(232, 56), (264, 216)]

        # An "action" for a single agent is a target location; the joint
        # action is a 3-tuple of locations, one per agent.
        possible_actions = []
        possible_actions.extend(self.cps)
        possible_actions.extend(self.ammo_locations)

        # FIXME: instantiate self.actions to a sensible standard first
        # action; for now just take the first three candidate locations.
        self.actions = tuple(possible_actions[:3])

        # All joint actions: every ordered assignment of 3 distinct
        # candidate locations to the 3 agents.
        self.action_set = list(itertools.permutations(possible_actions, 3))

        # State instance that is incrementally updated from observations.
        self.state = State(team, settings.max_turn)
        # Hash of the previous state; empty string until the first
        # learning step has run (updated at the end of q_learn()).
        self.old_state_hash = ''

        # Q-table: state hash -> {joint action: value}. Unknown states get
        # a fresh table of default-valued actions on first access.
        self.state_dict = collections.defaultdict(self.get_default_actions)
        # Seed the table with previously learned values, if supplied.
        # Certain validity checks should probably still be made...
        if isinstance(state_dict, dict):
            self.state_dict.update(state_dict)

        # Epsilon for epsilon-greedy action selection: the probability of
        # taking a random (exploratory) action instead of the greedy one.
        # TODO: decay this over learning time, so that we explore first
        # and exploit later; for now it is fixed to 0.4 (i.e. a 0.6
        # chance of picking the optimal action from the Q-values).
        self.epsilon = 0.4

        # The immediate reward is based on the score difference; the game
        # score starts at 50.
        self.previous_score = 50

    def get_default_actions(self):
        """Return a fresh {action: DEFAULT_VALUE_FOR_ACTIONS} table
        covering every possible joint action.

        Used as the defaultdict factory for self.state_dict."""
        return dict.fromkeys(self.action_set, self.DEFAULT_VALUE_FOR_ACTIONS)

    def add_agent(self, agent):
        """Register an agent.

        Agents should call this from their constructor so that agent.id
        matches the agent's index in self.agents."""
        self.agents.append(agent)

    def preprocess_observation(self, agent, observation):
        """Mirror an observation so the blue team looks like the red team,
        then process it normally.

        This is called instead of process_observation when we play blue."""
        # Scores and control points are stored red-first; swap them.
        observation.score = list(reversed(observation.score))
        observation.cps = list(reversed(observation.cps))

        # Rotate the heading by 180 degrees, normalized to [0, 2*pi).
        observation.angle = (observation.angle + math.pi) % (2 * math.pi)

        # Mirror our location through the field center.
        observation.loc = self.state.convert_location(observation.loc[0],
            observation.loc[1])

        # (-1, -1) marks "no previous action"; only mirror real locations.
        if (observation.previous_action[0] > -1 and
                observation.previous_action[1] > -1):
            observation.previous_action = self.state.convert_location(
                observation.previous_action[0], observation.previous_action[1])

        return self.process_observation(agent, observation)

    def process_observation(self, agent, observation):
        """Fold one agent's observation into the shared state.

        Called once per time step for each agent. After the last agent's
        observation, the state hash is computed and one Q-learning step
        is performed."""
        if agent.id == 0:
            # The first observation is special: more info is parsed.
            self.state.first_update(agent, observation)
        else:
            # Second and third observations are standard updates.
            self.state.update(agent, observation)
            if agent.id != 1:
                # All agents have reported: freeze the state into a hash,
                # compute the immediate reward, and learn from it.
                self.state_hash = self.state.get_hash()
                reward = self.calculate_reward(observation)
                self.q_learn(reward)

    def request_goal(self, agent):
        """Return the goal location for `agent` for this time step.

        Agent 0 triggers the epsilon-greedy selection of the joint action
        for this step; the other agents simply read their component."""
        if agent.id == 0:
            action_value_pairs = self.state_dict[self.state_hash]
            # Epsilon-greedy: with probability epsilon pick a random joint
            # action, otherwise the one with the highest Q-value.
            # Presumably we want to decay epsilon over time.
            if self.random.random() < self.epsilon:
                self.actions = self.random.choice(list(action_value_pairs))
            else:
                self.actions = max(action_value_pairs,
                    key=action_value_pairs.get)
            # When playing blue the observations were mirrored (see
            # preprocess_observation), so reverse the agent order and
            # mirror each action location back through the field center.
            if agent.team == TEAM_BLUE:
                self.actions = tuple(
                    (self.state.field_width - x, self.state.field_height - y)
                    for x, y in reversed(self.actions))

            print("team %d, state %s, action %s" % (agent.team,
                self.state_hash, str(self.actions)))

        return self.actions[agent.id]

    def q_learn(self, reward):
        """Perform one Q-learning update of state_dict.

        Moves Q(old_state, actions) toward `reward` plus the discounted
        best value of the new state, then shifts old_state_hash forward.

        NOTE: this relies on self.actions still being the action taken in
        the OLD state, so it must be called BEFORE request_goal() selects
        new actions.

        Based on the java code made for the Autonomous Agents class:
        https://code.google.com/p/caaaas/source/browse/project/src/caaaas/agent/Learner.java
        """
        # Q-value of the action we took in the previous state.
        old_action_value = self.state_dict[self.old_state_hash][self.actions]
        # Best attainable value from the state that action led to.
        new_action_value_max = max(self.state_dict[self.state_hash].values())
        # Standard Q-learning update rule.
        q_value = old_action_value + self.LEARNING_RATE * (reward +
            self.DISCOUNT_FACTOR * new_action_value_max - old_action_value)
        self.state_dict[self.old_state_hash][self.actions] = q_value
        # The new state becomes the old state for the next update.
        self.old_state_hash = self.state_hash
        # Not needed here, but shouldn't be forgotten for the policy:
        # max_action = max(new_action_values, key=new_action_values.get)

    def calculate_reward(self, observation):
        """Compute the immediate reward from a preprocessed observation.

        The reward is the weighted score gain since the previous call plus
        kill/death bonuses accumulated since then; the counters are reset
        afterwards. Later we might want to do Monte Carlo learning,
        omitting immediate rewards and only rewarding visited states at
        the end of a game, but this is easier to implement."""
        MULTIPLIER_SCORE = 10
        REWARD_KILL = 20
        REWARD_DEATH = -10
        # TODO: add difference in #friends alive, #foes alive, etc.
        reward = (MULTIPLIER_SCORE * (observation.score[0]
                - self.previous_score)
            + self.kills * REWARD_KILL
            + self.deaths * REWARD_DEATH)
        # Remember the score and reset the counters for the next interval.
        self.previous_score = observation.score[0]
        self.kills = 0
        self.deaths = 0
        return reward

