class BasePlayer(object):
    """Abstract base class for tournament players.

    Subclasses must override ``hunt_choices``; ``hunt_outcomes`` and
    ``round_end`` are optional per-round hooks that default to no-ops.
    """

    def __str__(self):
        """Use the player's name when one has been set."""
        try:
            return self.name
        except AttributeError:
            # Fall back on Python default representation
            return super(BasePlayer, self).__repr__()

    def hunt_choices(self, *args, **kwargs):
        """Return this round's hunt/slack decisions.  Must be overridden."""
        raise NotImplementedError("You must define a strategy!")

    def hunt_outcomes(self, *args, **kwargs):
        """Hook: called with the round's food earnings.  No-op by default."""
        pass

    def round_end(self, *args, **kwargs):
        """Hook: called at the end of each round.  No-op by default."""
        pass
class Player(BasePlayer):
    '''
    Survival-oriented strategy ("out_of_time_Player").

    This is a simplified fallback for a larger player-tracking algorithm
    that could not be finished before the contest deadline.  The goal is
    to survive, not to win: the bot prolongs its own life regardless of
    how much it also prolongs everyone else's.

    Per-round behaviour of hunt_choices:
      * Round 1: if the threshold m is reachable single-handedly, hunt
        with the top reputations; otherwise slack everywhere.
      * Rounds 2-40: estimate how many hunts the rest of the field will
        produce this round (recalibrated by a running accuracy record)
        and hunt just enough to close the gap to m.  If no hunting is
        needed (or m is unreachable), hunt only with players whose
        reputation is at least 0.5.
      * Rounds 41+: only work toward m when the round number is a
        multiple of some number in 45-50 (values chosen after some
        experimental testing against other bots); otherwise slack
        everywhere to remain hidden from adaptive opponents.

    round_end records how far off each round's hunt prediction was; that
    record feeds back into the next round's estimate as a corrective term.
    '''

    def __init__(self):
        self.name = "Tanishq Aggarwal 2"

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    @staticmethod
    def _indices_of(value, seq):
        """Return every index at which value occurs in seq.

        (Replaces a buggy helper that appended seq.index(value) -- always
        the first occurrence -- once per duplicate, and that shadowed the
        builtin name ``list``.)
        """
        return [i for i, item in enumerate(seq) if item == value]

    @staticmethod
    def _expected_hunts(player_reputations):
        """Raw estimate of how many hunts the field produces this round."""
        n = len(player_reputations)
        return sum(rep * n for rep in player_reputations)

    def _corrected_expected_hunts(self, player_reputations):
        """Estimate of the field's hunts, recalibrated by past accuracy.

        Relies on ``probable_hunting_accuracy_record`` being non-empty,
        which round_end guarantees from round 2 onward.
        """
        record = self.probable_hunting_accuracy_record
        correction = (sum(record) * len(player_reputations)) / len(record)
        return self._expected_hunts(player_reputations) + correction

    @staticmethod
    def _is_m_round(round_number):
        """True when the bot should actively work toward m in rounds 41+."""
        return any(round_number % d == 0 for d in range(45, 51))

    @staticmethod
    def _hunt_with_likely(player_reputations):
        """Hunt with everyone whose reputation is at least 0.5."""
        return ['h' if 0.5 <= rep <= 1 else 's' for rep in player_reputations]

    def _hunt_with_top(self, hunts_needed, player_reputations):
        """Hunt with the highest-reputation players, slack with the rest.

        NOTE(review): the slice keeps everything from sorted index
        int(hunts_needed) - 1 upward, i.e. len - int(hunts_needed) + 1
        players -- one more than the stated intent of "top hunts_needed".
        Preserved from the original strategy; confirm before changing.
        """
        ranked = sorted(player_reputations)  # sorted copy; never mutate the input
        chosen = ranked[int(hunts_needed) - 1:]
        decisions = ['s'] * len(self.current_player_reputation)
        for rep in chosen:
            # Mark every player carrying this reputation, in original order.
            for idx in self._indices_of(rep, self.current_player_reputation):
                decisions[idx] = 'h'
        return decisions

    # ------------------------------------------------------------------
    # Required game hooks
    # ------------------------------------------------------------------

    def hunt_choices(self, round_number, current_food, current_reputation,
                     m, player_reputations):
        """Return a list of 'h'/'s' decisions, one per other player.

        Side effects: stores the round number, a snapshot of the
        reputation list, this round's hunt estimate
        (``probable_expected_hunting_value``) and a 0/1 flag recording
        whether m was projected to be reached (``m_projected_success``).
        """
        self.round_number = round_number
        # Snapshot in the original player order.  Taking a copy fixes two
        # bugs in the earlier version: the engine's list was sorted in
        # place, and the "unchanged reference" aliased that sorted list,
        # so decision indices no longer matched the real player order.
        self.current_player_reputation = list(player_reputations)
        n = len(player_reputations)

        if round_number == 1:
            # No accuracy record exists yet: use the raw estimate.
            self.probable_expected_hunting_value = self._expected_hunts(player_reputations)
            if m <= n:
                # m is reachable single-handedly: hunt with the top reputations.
                self.m_projected_success = [1]
                return self._hunt_with_top(m, player_reputations)
            # m is out of reach on our own: slack everywhere.
            self.m_projected_success = [0]
            return ['s'] * n

        if 1 < round_number <= 40:
            self.probable_expected_hunting_value = self._corrected_expected_hunts(player_reputations)
            hunts_needed = m - self.probable_expected_hunting_value
            if hunts_needed <= 0:
                # The field should reach m by itself: hunt with likely hunters.
                self.m_projected_success += [1]
                return self._hunt_with_likely(player_reputations)
            if hunts_needed <= n:
                # Close the gap ourselves with the minimum number of hunts.
                self.m_projected_success += [1]
                return self._hunt_with_top(hunts_needed, player_reputations)
            # m is unreachable even with our help.
            self.m_projected_success += [0]
            return self._hunt_with_likely(player_reputations)

        # Rounds 41+.  Keep refining the prediction either way, so the
        # corrective index stays useful when we do work toward m.
        self.probable_expected_hunting_value = self._corrected_expected_hunts(player_reputations)
        if self._is_m_round(round_number):
            hunts_needed = m - self.probable_expected_hunting_value
            if hunts_needed <= 0:
                self.m_projected_success += [1]
                return self._hunt_with_likely(player_reputations)
            if hunts_needed <= n:
                self.m_projected_success += [1]
                return self._hunt_with_top(hunts_needed, player_reputations)
            self.m_projected_success += [0]
            return self._hunt_with_likely(player_reputations)
        # Off-rounds: slack everywhere to stay hidden and unpredictable.
        return ['s'] * n

    def round_end(self, award, m, number_hunters):
        """Score this round's hunt prediction and extend the record.

        The score is the signed relative error of the prediction (near 0
        is good, near +/-1 is bad); a round whose reach-m projection was
        simply wrong is scored as 1 ("maximally wrong").
        """
        prediction_error = ((number_hunters - self.probable_expected_hunting_value)
                            / len(self.current_player_reputation))
        if self.round_number > 40 and not self._is_m_round(self.round_number):
            # Off-rounds made no m-projection; just track the raw error.
            self.probable_hunting_accuracy_record.append(prediction_error)
            return
        projected = self.m_projected_success[-1]
        actual = 1 if award > 0 else 0
        score = prediction_error if projected == actual else 1
        if self.round_number == 1:
            # The first round creates the record.
            self.probable_hunting_accuracy_record = [score]
        else:
            self.probable_hunting_accuracy_record.append(score)

    def hunt_outcomes(self, food_earnings):
        """Hook kept for interface compatibility; intentionally a no-op.

        (It was extensively used in the author's fuller player-tracking
        algorithm, which this bot is a cut-down version of.)
        """
        pass