# Anthony James (AJ) Perez
# 2010 Oregon State University Alumni
# BS Computer Science
#
# This bot is attempting to be the last bot surviving.
#
import random


class BasePlayer(object):
    '''
    Minimal player skeleton for Chad Miller's Hunger Games engine.

    Subclasses must override hunt_choices(); the other engine callbacks
    are optional no-ops by default.
    '''

    def __str__(self):
        # Prefer a subclass-supplied display name (EAFP); otherwise fall
        # back on the stock object repr.
        try:
            return self.name
        except AttributeError:
            return super(BasePlayer, self).__repr__()

    def hunt_choices(*args, **kwargs):
        # Required engine hook: a concrete strategy must decide 'h'/'s'
        # per opponent here.
        raise NotImplementedError("You must define a strategy!")

    def hunt_outcomes(*args, **kwargs):
        # Optional engine hook: per-round food-earnings feedback.
        pass

    def round_end(*args, **kwargs):
        # Optional engine hook: end-of-round summary from the engine.
        pass


class Player(BasePlayer):
    '''
    Adaptive-threshold bot that tries to be the last one standing.

    The bot keeps a hunting probability (``threshold``) that is nudged by
    ``modifier`` after every round.  hunt_outcomes() scores each round
    against a simple payoff table; if the score dropped relative to the
    previous round, the sign of the modifier is flipped so the threshold
    starts drifting the other way.  This lets the bot slowly reverse course
    when conditions change (slackers starve out, opponents adapt, etc.).
    Hunts are aimed at the highest-reputation opponents on the assumption
    that they are the most likely to hunt back.
    '''

    def __init__(self, threshold=0.9, modifier=0.05):
        # threshold - probability of hunting against any one opponent;
        #             adjusted by +/- modifier at the end of every round.
        # modifier  - per-round threshold adjustment; its sign flips when a
        #             round scores worse than the previous one.
        self.name = "Player [" + str(threshold) + "," + str(modifier) + "]"
        self.threshold = threshold
        self.hunt_decisions = []   # this round's 'h'/'s' choices, parallel to player_reputations
        self.modifier = modifier
        self.lastOutcomes = [0]    # history of per-round outcome scores; [-1] is the latest

    def hunt_choices(self, round_number, current_food, current_reputation, m, player_reputations):
        '''
        Return a list of 'h'/'s' decisions parallel to player_reputations.

        The number of hunts is drawn stochastically: one Bernoulli trial per
        opponent with success probability self.threshold, so the count is
        binomial and averages threshold * len(player_reputations).  The hunts
        themselves are then assigned to the opponents with the highest
        reputations, slacking against everyone else — hunting with likely
        hunters maximizes longevity while keeping a reasonable reputation.
        (round_number, current_food, current_reputation and m are engine
        arguments this strategy does not use.)
        '''
        if len(player_reputations) == 1:
            # Against a single remaining opponent the optimal endgame play
            # is to always slack.
            self.hunt_decisions = ['s'] * len(player_reputations)
            return self.hunt_decisions

        # One random draw per opponent keeps the expected hunt count at
        # threshold * n while letting the actual count vary round to round.
        num_hunts = sum(1 for _ in player_reputations
                        if random.random() <= self.threshold)

        if num_hunts == 0:
            # Edge case: not hunting at all this round — slack everywhere.
            self.hunt_decisions = ['s'] * len(player_reputations)
            return self.hunt_decisions

        # BUG FIX: this was sorted(player_reputations, None, None, True),
        # which relies on Python 2's positional (cmp, key, reverse)
        # parameters and raises TypeError on Python 3.  The keyword form
        # is portable and identical in effect: highest reputation first.
        ranked = sorted(player_reputations, reverse=True)

        # Reputation of the num_hunts-th best opponent (minus one because
        # the list is zero-indexed): hunt with everyone at or above it.
        rep_threshold = ranked[num_hunts - 1]
        self.hunt_decisions = ['h' if rep >= rep_threshold else 's'
                               for rep in player_reputations]
        return self.hunt_decisions

    def hunt_outcomes(self, food_earnings):
        '''
        Score this round's pairings and adapt the modifier direction.

        food_earnings is parallel to hunt_decisions, so each food award
        uniquely identifies what the opponent did:

            my move  award  opponent  score   rationale
            h         0     hunted    +1.00   the pairing we want most
            s        +1     hunted    +0.75   free food, but costs reputation
            h        -3     slacked   -1.00   the pairing we must avoid
            s        -2     slacked    0.00   least-bad response to a slacker

        If the total dropped relative to last round, the modifier's sign is
        flipped so round_end() starts pushing the threshold the other way.
        '''
        # Payoff table keyed by (my decision, my food award for that pair).
        score_table = {('h', 0): 1.0, ('s', 1): 0.75,
                       ('h', -3): -1.0, ('s', -2): 0.0}

        outcomes = 0.0
        for decision, award in zip(self.hunt_decisions, food_earnings):
            outcomes += score_table.get((decision, award), 0.0)

        # Doing worse than last round: reverse the adjustment direction so
        # the strategy can adapt when the field changes.
        if outcomes - self.lastOutcomes[-1] < 0:
            self.modifier *= -1

        # Record this round's score; only the latest entry is ever read.
        self.lastOutcomes.append(outcomes)

    def round_end(self, award, m, number_hunters):
        '''
        Apply the per-round threshold adjustment, clamped to [0.0, 1.0].

        (award, m and number_hunters are engine arguments this strategy
        does not use.)
        '''
        self.threshold = min(1.0, max(0.0, self.threshold + self.modifier))
        