# This file is intended to be a final submission. python tester.py Player.py
# should work at all times. If it does not, there is a bug.
# If you're just trying to test a solution, scroll down to the Player
# class.

# This file is intended to be in the same format as a valid solution, so
# that users can edit their solution into Player and then submit just this
# file to the contest. If you see any reason this would not work, please submit
# an Issue to https://github.com/ChadAMiller/hungergames/issues or email me.

# You can see more sample player classes in bots.py

import math
import random

class BasePlayer(object):
    '''
    Base class holding the bookkeeping shared by every player class.
    Do not edit unless you're working on the simulation.
    '''

    def __str__(self):
        # Both str() and repr() display the same label.
        return self._label()

    def __repr__(self):
        return self._label()

    def _label(self):
        '''Return the player's declared name, or Python's default repr
        when no name attribute exists.'''
        try:
            return self.name
        except AttributeError:
            # Fall back on Python default
            return super(BasePlayer, self).__repr__()

    def hunt_choices(*args, **kwargs):
        # Subclasses must override this with an actual strategy.
        raise NotImplementedError("You must define a strategy!")

    def hunt_outcomes(*args, **kwargs):
        # Optional hook; default is a no-op.
        pass

    def round_end(*args, **kwargs):
        # Optional hook; default is a no-op.
        pass



class Identifier(object):
    '''
    Probabilistically identify players across rounds by mapping the
    reputation values observed in the current round onto the reputations
    the same players could have had in earlier rounds.
    '''


    def __init__(self):
        self.predecessors = [] #predecessors[round][player][predecessor] -> weight
        self.reputations = [] #reputations[round][player] -> reputation
        self.hunted = [] #hunted[round][player] -> hunted with me
        self.myreps = [] #myrep[round] -> my reputation
        self.h_plus_s = 0 #total number of choices so far
        self.predecessor_distributions = [] #per current player: weights over earlier-round identities

    def add_round(self, reputations, myrep):
        '''Record one round of observed opponent reputations (plus my own
        reputation `myrep`) and rebuild the predecessor distributions.'''
        self.predecessor_distributions = []
        self.reputations.append(reputations)
        self.myreps.append(myrep)
        if len(self.reputations) <= 1:
            # First round: there is no previous round to link back to, but
            # this round's choices still count towards the running total.
            # BUG FIX: previously h_plus_s was never updated for round 1,
            # so round 2 derived hunts from a zero total and got a negative
            # h_plus_s_prev, skewing every later estimate.
            self.h_plus_s += len(reputations)
            return
        #update predecessors
        preds = []
        for rep in reputations:
            hunts = rep * self.h_plus_s #estimated total hunts by this player so far
            players_in_prev_round = len(self.reputations[-2])
            h_plus_s_prev = self.h_plus_s - players_in_prev_round
            prev_min_hunts = max(0, hunts - players_in_prev_round) # lower bound for hunts by this player in the previous round
            if h_plus_s_prev == 0:
                prev_max_rep = 0
                prev_min_rep = 0
            else:
                prev_max_rep = min(1.0, hunts / h_plus_s_prev)
                prev_min_rep = prev_min_hunts / h_plus_s_prev
            preds.append(self.weighted_preds(self.reputations[-2], prev_max_rep, prev_min_rep, rep))
        self.predecessors.append(preds)
        #update total number of choices so far
        self.h_plus_s += len(reputations)

        for i in range(len(reputations)):
            self.predecessor_distributions.append(self.predecessor_distribution(i, len(self.reputations)))

    def add_hunted(self, hunted):
        '''Record, for the round just played, which opponents hunted with me.'''
        self.hunted.append(hunted)

    #computes a distribution on possible predecessors. The density is that of a uniform distribution plus that of a triangle distribution centered at exp_rep
    def weighted_preds(self, reps, max_rep, min_rep, exp_rep, uniform=0.25):
        '''Return a weight per reputation in `reps`: a mixture of a uniform
        density over [min_rep, max_rep] (mass `uniform`) and a triangle
        density peaked at exp_rep (mass 1 - uniform).'''
        uniform_weights = self.normalize([1.0 if min_rep <= rep <= max_rep else 0.0 for rep in reps], total=uniform)
        triangle_weights = [self.skewed_triangle(rep, min_rep, max_rep, exp_rep) for rep in reps]
        triangle_weights = self.normalize(triangle_weights, total = 1 - uniform)
        if sum(triangle_weights) == 0:
            # degenerate triangle: fall back on a pure uniform distribution
            uniform_weights = self.normalize(uniform_weights)

        return [x + y for x,y in zip(uniform_weights, triangle_weights)]

    #estimate the probability that a player will hunt with me, given my hypothetical reputation
    def will_hunt_with_me(self, player, myrep, distr_range = 0.1, lookback=20, time_decay=0.95):
        '''Estimate P(player hunts with me) if my reputation were `myrep`,
        looking back up to `lookback` rounds and down-weighting older
        rounds by `time_decay` per round. The estimate is smoothed with
        the player's current reputation.'''
        round = len(self.reputations)
        starting_round = round
        #freeloaders never hunt, pushovers always do
        if self.reputations[round-1][player] == 1:
            return 1.0
        elif self.reputations[round-1][player] == 0:
            return 0.0
        if round <= 2:
            #not enough history yet
            return 0.5
        hunt_score = 0.0
        slack_score = 0.0
        distr = self.predecessor_distributions[player]
        time_weight = 1
        round -= 1
        while round >= max(2, starting_round - lookback):
            round -= 1
            for weight, index in zip(distr[round], range(len(distr[round]))):
                if weight > 0:
                    hunted = self.hunted[round][index]
                    # NOTE(review): the triangle is evaluated at its own peak,
                    # so it is always 1.0 and `weight`/`distr_range` act only
                    # as a gate; possibly self.myreps[round] was intended as
                    # the first argument -- confirm before changing.
                    score = time_weight * self.skewed_triangle(myrep, myrep - distr_range, myrep  + distr_range, myrep)
                    if hunted:
                        hunt_score += score
                    else:
                        slack_score += score
            time_weight *= time_decay
        #use reputation to smooth the estimate
        hunt_score += 0.1 * self.reputations[starting_round - 1][player]
        slack_score += 0.1 * (1 - self.reputations[starting_round - 1][player])
        return hunt_score / (hunt_score + slack_score)

    #computes a complete predecessor distribution for a single player/reputation
    def predecessor_distribution(self, player, round, lookback=25):
        '''Return a list with one entry per earlier round: a weight vector
        over that round's players describing who this player might have
        been. Rounds beyond `lookback` get empty placeholder entries.'''
        starting_round = round
        if round <= 1:
            return []
        distr = [self.predecessors[round-2][player]]
        round -= 2
        while round >= 1:
            round -= 1
            if round < starting_round - lookback:
                # Beyond the lookback horizon: keep a placeholder so distr
                # still has exactly one entry per round.
                # BUG FIX: previously BOTH the placeholder and a zero vector
                # were appended here, shifting every earlier round's index
                # (and thus misaligning distr[round] lookups).
                distr.append([])
                continue
            round_dist = [0 for dummy in self.predecessors[round]]
            for player_weight, predecessors in zip(distr[-1], self.predecessors[round]):
                if player_weight > 0:
                    player_dist = []
                    for predecessor in predecessors:
                        player_dist.append(player_weight * predecessor)
                    round_dist = [rd + pd for rd, pd in zip(round_dist, player_dist)]
            distr.append(round_dist)

        distr.reverse()
        return distr


    def skewed_triangle(self, x, lower, upper, peak):
        '''Density (peak normalized to 1) of a triangle distribution on
        (lower, upper) with mode `peak`; zero outside the interval.'''
        if x <= lower or x >= upper:
            return 0.0
        elif x <= peak:
            return (x - lower) / (peak - lower)
        elif x > peak:
            return (upper - x) / (upper - peak)


    def normalize(self, vector, total = 1.0):
        '''Scale `vector` so its absolute values sum to `total`; an
        all-zero vector is returned unchanged.'''
        normalizer = sum([abs(v) for v in vector]) / total
        if normalizer > 0:
            return [x/normalizer for x in vector]
        else:
            return vector


    def partner_score(self, player, n_samples=20):
        '''Score a player as a hunting partner: the Kullback-Leibler
        divergence of their will-hunt-with-me response curve from the
        uniform distribution (higher = more reactive, less random).'''
        #avoid hunting with freeloaders and pushovers -- assign them the lowest score
        round = len(self.reputations)
        if self.reputations[round-1][player] == 1 or self.reputations[round-1][player] == 0:
            return 0.0
        #compute Kullback-Leibler distance from the will-hunt-with-me distribution to the uniform distribution on my hypothetical reputations
        # sample from the will-hunt-with-me distribution
        sample_points = [x / (n_samples - 1.0) for x in range(n_samples)]
        sample_values = self.normalize([self.will_hunt_with_me(player, x) for x in sample_points])
        uniform_weight = 1.0 / n_samples
        kld = sum([math.log(sv / uniform_weight) * sv for sv in sample_values if sv > 0])
        #prefer hunting with non-random players -- they may be clever!
        return kld
        

class Player(BasePlayer):
    '''Contest entry. Models opponents with an Identifier instance and
    picks how many (and which) players to hunt with by simulating the food
    outcome of candidate strategies.'''

    name = "Custom"

    def __str__(self):
        # Same name-or-default-repr fallback as BasePlayer.__str__.
        try:
            return self.name
        except AttributeError:
            # Fall back on Python default
            return super(BasePlayer, self).__repr__()

    def __init__(self):
        self.identifier = Identifier()  # probabilistic opponent model
        self.rounds_estimate = 0        # estimated fixed game length, set in round 1
        self.my_hunts = 0.0             # running count of my 'h' choices
        self.my_slacks = 0.0            # running count of my 's' choices

    '''
    Your strategy starts here.
    '''

    def hunt_choices(
                    self,
                    round_number,
                    current_food,
                    current_reputation,
                    m,
                    player_reputations,
                    ):
        '''Required function defined in the rules.

        Returns one 'h'/'s' decision per opponent, in the same order as
        player_reputations.
        '''
        self.identifier.add_round(player_reputations, current_reputation)
        #in the first round, estimate the constant number of rounds set by the organizers
        if round_number == 1:
            #the number of rounds should be at least high enough to starve freeloaders, even when, say, only 75% of players are clever enough to slack with freeloaders. Also consider the mutual bonus. Even in a relatively lazy population with average rep 0.25, the mutual bonus should be reached every 4th round.
            freeloader_loss_per_round = 2 * 0.75 * len(player_reputations) - 0.25 * len(player_reputations) - 0.25 * 2 * (len(player_reputations) - 1)
            freeloader_survival = round(current_food / freeloader_loss_per_round)
            #the organizers certainly don't want just freeloaders to die off before ending the game. Would be too many shirts. So double to get the estimate.
            self.rounds_estimate = freeloader_survival * 2
            #protection against negative, too low, and too high values (should not be necessary, is just for safety):
            self.rounds_estimate = max(100, self.rounds_estimate)
            self.rounds_estimate = min(10000, self.rounds_estimate)
            # after the fixed number of rounds ends, the game will end with a small probability each round. We estimate this probability so the expected number of additional rounds is the same as the number of fixed rounds.
            self.ending_probability = 1.0 / self.rounds_estimate

        #in the first two rounds, hunt with nearly everyone to explore opponents behavior when we have high rep.
        if round_number <= 2:
            choices = ['h' for pr in player_reputations]
            #pick one opponent who we won't hunt with, so we don't appear to be pushovers
            choices[random.randint(0, len(choices) - 1)] = 's'
            return self.handle_choices(choices)

        #in rounds 3 and 4, we slack to see how others behave when we have low rep
        if round_number <= 4:
            choices = ['s' for pr in player_reputations]
            #pick one opponent who we won't slack with, so we don't appear to be freeloaders
            choices[random.randint(0, len(choices) - 1)] = 'h'
            return self.handle_choices(choices)

        #special cases: we have the highest rep amongst non-pushovers, everyone else is a pushover, or there is only one other player left -> SLACK
        # BUG FIX: previously max() was taken over a possibly-empty list and
        # crashed when every remaining opponent had reputation 1.
        non_pushover_reps = [rep for rep in player_reputations if rep < 1]
        if (not non_pushover_reps
                or current_reputation >= max(non_pushover_reps)
                or len(player_reputations) == 1):
            choices = ['s' for pr in player_reputations]
            return self.handle_choices(choices)

        #in all rounds after the first two, and except in special cases:
        #we want to determine how many hunts to do in this round
        #for this, we simulate future rounds using our simple opponent modelling.
        #since a complete search of possible actions would take very long, we simplify: for n_rounds rounds, we hunt with n_hunts players. After that we only slack.
        #The optimization goal is food at the end of the game.

        #determine the range of n_hunts. To speed up computation, we try only about 10 values.
        if len(player_reputations) <= 10:
            n_hunts_values = range(0, len(player_reputations) + 1)
        else:
            # BUG FIX: was range(20), which produced n_hunts values up to
            # twice the number of players and inflated sim_hunts (and hence
            # the simulated reputation) with hunts that cannot happen.
            n_hunts_values = [round(len(player_reputations) * x / 10.0) for x in range(11)]
        #set the range of n_rounds. Either start slacking in any of the first 5 rounds, or in a multiple of self.rounds_estimate / 4 round later.
        # (list() around range() keeps this working on both Python 2 and 3)
        n_rounds_values = list(range(1, 4)) + list(range(5, 3 * int(self.rounds_estimate), int(self.rounds_estimate / 4)))
        #search the best value for n_hunts
        partner_scores = [self.identifier.partner_score(x) for x in range(len(player_reputations))]
        # BUG FIX: sorted(range(...), key=...) yields player indices ordered
        # by rank (an argsort), but both simulate() and the final choice list
        # consume partner_priority[i] as "rank of player i". Invert the
        # permutation so the hunts really go to the highest-scored partners.
        priority_order = sorted(range(len(partner_scores)), key=lambda k: partner_scores[k], reverse=True)
        partner_priority = [0] * len(priority_order)
        for rank, player_index in enumerate(priority_order):
            partner_priority[player_index] = rank
        best_n_hunts = 0
        best_food = -100000000
        for n_hunts in n_hunts_values:
            for n_rounds in n_rounds_values:
                if n_hunts == 0 and n_rounds > 1:
                    #with zero hunts the horizon is irrelevant; try it once
                    break
                food = self.simulate(n_hunts, n_rounds, round_number, current_reputation, partner_priority, current_food)
                # debug prints removed -- this file is a final submission
                if food > best_food:
                    best_food = food
                    best_n_hunts = n_hunts

        #hunt with the best_n_hunts highest-priority players
        return self.handle_choices(['h' if pp < best_n_hunts else 's' for pp in partner_priority])

    def handle_choices(self, choices):
        '''Track my own hunt/slack counts, then pass the choices through.'''
        self.my_hunts += choices.count('h')
        self.my_slacks += choices.count('s')
        return choices

    def hunt_outcomes(self, food_earnings):
        '''Required function defined in the rules.

        Earnings of 0 (both hunted) or 1 (I slacked, they hunted) mean the
        opponent hunted with me.
        '''
        self.identifier.add_hunted([(fe == 0 or fe == 1) for fe in food_earnings])

    def round_end(self, award, m, number_hunters):
        '''Required function defined in the rules'''
        pass

    def simulate(self, n_hunts, n_rounds, round_number, reputation, partner_priority, food, min_round_weight = 0.5):
        '''Estimate my food at game end if I hunt with the n_hunts
        top-priority players for n_rounds rounds and slack afterwards.

        partner_priority[i] is the rank of player i (0 = best partner).
        NOTE(review): round_weight only terminates the loop; the accumulated
        food is not weighted by the survival probability -- confirm intent.
        '''
        n_players = len(partner_priority)
        simulation_round = 0
        round_weight = 1.0
        sim_hunts = self.my_hunts
        sim_slacks = self.my_slacks
        sim_food = food
        sim_reputation = reputation
        slacker_weight = 1.0
        while round_weight > 0.1:
            if simulation_round < n_rounds:
                for i in range(n_players):
                    hprob = self.identifier.will_hunt_with_me(i, sim_reputation)
                    # scale down when my simulated rep falls below my current rep
                    # (assumes reputation > 0, which holds after the forced hunts
                    # of rounds 1-2 -- this path is only reached from round 5 on)
                    hprob = 0.5 * hprob +  min(1.0, sim_reputation/float(reputation)) * hprob
                    if partner_priority[i] < n_hunts: #I hunt
                        sim_food -=  3 * (1 - hprob)
                    else: #I don't hunt
                        sim_food += hprob - 2 * (1 - hprob)
                sim_hunts += n_hunts
                sim_slacks += n_players - n_hunts
            else: #slack time
                for i in range(n_players):
                    hprob = self.identifier.will_hunt_with_me(i, sim_reputation)
                    hprob = 0.5 * hprob +  min(1.0, sim_reputation/float(reputation)) * hprob
                    sim_food += hprob - 2 * (1 - hprob)
                sim_slacks += n_players
            sim_reputation = sim_hunts / (sim_hunts + sim_slacks)
            # past the estimated fixed length, the game may end each round
            if round_number + simulation_round > self.rounds_estimate:
                round_weight *= (1 - self.ending_probability)
            simulation_round += 1
        return sim_food

def payout(s1, s2):
    '''Return my food change when I choose s1 against an opponent choosing s2.

    'h' means hunt; anything else is treated as slacking.
    '''
    me_hunting = s1 == 'h'
    them_hunting = s2 == 'h'
    if me_hunting:
        # both hunt: break even; hunting alone costs 3
        return 0 if them_hunting else -3
    # slacking gains 1 off a hunter, mutual slack costs 2
    return 1 if them_hunting else -2