from random import random
from sets import Set



"""
Explanation for Brilliant reviewers:

This bot is named "echo previous move". What it does is simply repeat to each
opponent what he told us the round before.

The bot tries to follow each player based on the reputation. We can calculate
that, at round n, the reputation cannot move more or less than :
        delta_rep_max = q / (2. * (self._sum_q + q))
where
        q = p-1 where p is the number of players at this round.
and 
        sum_q = sum(q_n) for rounds n = 1 to current_round - 1
If the reputations of two players intersect at some point in time, the bot can
lose track of a player. In this case, it will respond 'h' with proba = 0.6.
(Why 0.6? Because we found during experiments that random bots would perform best
when they hunt with proba 0.6)

The functionality of tracking is encapsulated in the OpponentTracer class.



Why this bot can give good results:
  - against a bot who prefers slacking, we will slack as well, so we don't
    lose too much.
  - against a bot who prefers hunting, we will hunt as well: that gives us :
    1) a good reputation
    2) a good reputation *to this bot*, so he knows we are a good fellow and it
       is good to hunt with us!  But if sometimes he tries to slack on us, we will
       respond accordingly on the next round.
  - due to the fact that we slack a lot as well (if other bots slack), our
    reputation should be "high but not too much" so that we don't get abused by
    others that would find us too nice.

Why it can give bad results:
  - no advanced detection of behaviour of the opponent (what we have is just a simple rule)
  - if opponents detect how we behave, they can probably take advantage of this.
  - no control of the food/reputation: we don't implement a "safety net" that
    would change our behaviour in case of food running low. Likewise, we don't
    implement anything that keeps our reputation under control. We just naturally
    follow the trends given by other players.


"""


class Player:
    """Bot strategy: echo each opponent's previous move back at them.

    Each opponent is tracked across rounds by OpponentTracer. When we have a
    recorded history for an opponent, we replay their last observed choice;
    otherwise we hunt with probability DEFAULT_RANDOM_P.
    """

    # Probability of hunting against an opponent we have no history for.
    DEFAULT_RANDOM_P = .6

    def __init__(self):
        #self.name = "echo previous move"
        self.tracer = OpponentTracer()
        self.hunter_detector = HunterDetector()

    def hunt_choices(self, round_number, current_food, current_reputation, m,
                     player_reputations):
        """Return a list of 'h'/'s' decisions, one per opponent."""
        self.tracer.update_opponents(player_reputations)
        hunt_decisions = []
        for idx, _ in enumerate(player_reputations):
            tracked = self.tracer.opponent_at_index(idx)
            if tracked.history:
                # Echo whatever this opponent played against us last round.
                hunt_decisions.append(tracked.history[-1])
            else:
                # No history yet (new or lost opponent): biased coin flip.
                coin = 'h' if random() < self.DEFAULT_RANDOM_P else 's'
                hunt_decisions.append(coin)

        self.hunter_detector.inform_choices(hunt_decisions)
        return hunt_decisions

    def hunt_outcomes(self, food_earnings):
        """Translate our earnings into opponent choices and log them per opponent."""
        opponent_choices = self.hunter_detector.detect_opponent_choices(food_earnings)
        for idx, choice in enumerate(opponent_choices):
            self.tracer.opponent_at_index(idx).history.append(choice)

    def round_end(self, award, m, number_hunters):
        """No per-round bookkeeping is needed for this strategy."""
        pass








class HunterDetector(object):
    """Convert a round's food earnings back into the opponents' choices.

    Once our own choices are recorded via inform_choices, each earning value
    uniquely determines what that opponent played against us:
        we hunted:   0 -> opponent hunted, -3 -> opponent slacked
        we slacked: +1 -> opponent hunted, -2 -> opponent slacked

    Example:
        hunt_choices = ['h', 's']
        inform_choices(hunt_choices)
        food_earnings = [-3, 1]
        detect_opponent_choices(food_earnings) => ['s', 'h']
    """

    def __init__(self):
        # Our own choices for the current round; set by inform_choices.
        self._hunt_choices = None

    def inform_choices(self, hunt_choices):
        """Record our own choices for this round (call from hunt_choices).

        Returns None.
        """
        # Defensive copy: the caller may mutate its list afterwards.
        self._hunt_choices = list(hunt_choices)

    def detect_opponent_choices(self, food_earnings):
        """Return the opponents' choices for this round (call from hunt_outcomes).

        Returns a list of 'h' and 's', same size as food_earnings, containing
        the choice each opponent made against us this turn.
        """
        detected = []
        for idx, earning in enumerate(food_earnings):
            we_hunted = self._hunt_choices[idx] == 'h'
            # The earning value that signals "opponent hunted" depends on our
            # own move: 0 when we hunted, +1 when we slacked.
            hunt_signal = 0 if we_hunted else 1
            detected.append('h' if earning == hunt_signal else 's')
        return detected



class Opponent():
    """Per-opponent tracking record kept across rounds by OpponentTracer."""

    def __init__(self, id, rep):
        # Index of this opponent in the most recent player_reputations list.
        self.id = id
        # Last reputation value observed for this opponent.
        self.rep = rep
        # Choices ('h'/'s') this opponent played against us, oldest first.
        self.history = []



class OpponentTracer():
    """Tracks opponents across rounds by following their reputation values.

    The game gives no stable player ids, so each round the reputations in
    player_reputations are matched against the previously recorded ones: a
    reputation can move by at most delta_rep_max = q / (2 * (sum_q + q)) in
    one round (q = p - 1, sum_q = sum of q over past rounds), which bounds the
    matching interval around each old reputation. Opponents that cannot be
    matched unambiguously are dropped and later re-created with an empty
    history.
    """

    def __init__(self):
        # notation : q = p-1 where p is the number of players at this round.
        self._sum_q = 0
        # opponents maps a number (the array index) to an Opponent object.
        self._opponents = []
        # True until the first call to update_opponents; written but never
        # read anywhere in this class (informational only).
        self._first_use = True


    def update_opponents(self, player_reputations):
        """Re-match tracked opponents against this round's reputations.

        Call once per round before opponent_at_index. After this call,
        self._opponents[i] is the Opponent corresponding to
        player_reputations[i]; indices that could not be matched get fresh
        Opponent objects with empty histories.
        """
        # returns: None

        # blank out some part of the opponents so that people who have the same rep get discarded
        q = len(player_reputations) # q = p-1
        new_opponents = [None for _ in range(q)]
        for i in range(len(self._opponents)):
            for j in range(i):
                #assert i != j
                opp_i = self._opponents[i]
                opp_j = self._opponents[j]
                if opp_i is not None and opp_j is not None and opp_i.rep == opp_j.rep:
                    # they have the same rep ? forget about them, we can no longer trace them. drop the references
                    self._opponents[i] = None
                    self._opponents[j] = None

        # "center" means the old rep. It is really the center of the interval in which the newer rep can fall this
        # round.
        map_center_to_id = {}
        map_center_to_bucket = {}
        for i in range(len(self._opponents)):
            if self._opponents[i] is not None:
                center = self._opponents[i].rep
                map_center_to_id[center] = i
                map_center_to_bucket[center] = []

        # this was hand-calculated. the rep cannot move from more than this value. calc it!
        delta_rep_max = q / (2. * (self._sum_q + q))
        # print "delta_rep_max = %f" % delta_rep_max
        # Put each new reputation into the bucket of every old reputation
        # ("center") it could plausibly have moved from.
        for i in range(q):
            new_rep = player_reputations[i]
            for center in map_center_to_bucket.keys():
                if center - delta_rep_max <= new_rep <= center + delta_rep_max:
                    map_center_to_bucket[center].append(i)


        # Fixpoint resolution: each singleton bucket fixes one old->new
        # mapping; removing that player index from all other buckets may turn
        # more buckets into singletons, so loop until nothing changes.
        changed = True
        while changed:
            changed = False
            for center in map_center_to_bucket.keys():
                bucket = map_center_to_bucket[center]
                if len(bucket) == 1:
                    # found a mapping! record the mapping, and remove the index from every bucket. iterate.
                    id_player = bucket[0]
                    new_rep = player_reputations[id_player]
                    id_opponent = map_center_to_id.get(center)
                    if id_opponent is None:
                        opponent_object = Opponent(id_player, new_rep)
                    else:
                        opponent_object = self._opponents[id_opponent]
                    opponent_object.rep = new_rep
                    new_opponents[id_player] = opponent_object

                    for b in map_center_to_bucket.values():
                        if b.count(id_player) > 0:
                            #assert(b.count(id_player) == 1)
                            b.remove(id_player)

                    changed = True

        # now we have processed every found mapping
        # every remaining entry in the bucket means someone who was lost... too bad

        # the last part is to find out who was in player_reputations and who is not recorded in new_opponents.
        # for those people we need to create new Opponent objects (with virgin history) so they may get tracked
        # next round. Anyhow, the "new_opponents" array must be complete (mapping every index to a valid Opponent
        # object).

        for i in range(q):
            opponent_object = new_opponents[i]
            if opponent_object is None:
                rep = player_reputations[i]
                opponent_object = Opponent(i, rep)
                new_opponents[i] = opponent_object

        #for i in range(q):
            #assert(new_opponents[i] is not None)

        self._opponents = new_opponents

        self._sum_q += q
        self._first_use = False

    def opponent_at_index(self, i):
        """
         Returns the opponent at index i, where the indexation order is the same than the one in player_reputations
         in the previous call to update_opponents. This is the same indexation order than in food_earnings, in the next
         call to hunt_outcomes.
        """
        #assert len(self._opponents) > i  # if this is not the case: did you call update_opponents() first ?
        opponent = self._opponents[i]
        #assert opponent is not None
        #assert isinstance(opponent, Opponent)
        return opponent




