#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
#
#  hunger_games.py
#  Algorithm for Hunger Games competition by Brilliant.com
#  
#  Author: Anssi "Miffyli" Kanervisto

"""
Algorithm briefly:
    - Basic strategy is to gain high rep at start, maintain it and exploit
      later on. It's possible others may do the same, but it's impossible to
      tell since this script handles the whole tribe as a single object

    - Script handles tribe as a one big object and tries to learn from it
        - Learning "table" holds all 101*101*101 different learned values, each
          of them meant for different my_rep,other_rep and m_hunt ratios in two
          decimal accuracy
        - Value in table represents possibility of other player choosing hunt
        - m_hunt ratio is max_hunts / m. It is used to see when players opt for
          slacking (can be directly proportional to m or inversely proportional)
    
    - Script doesn't completely take final action from learned table. It weights
      result from table with something else based on how far we're in the game
        - If there's still more than NICE_GUY_PLAYER_MIN players left, script
          tends to aim for higher rep and hunts more at expense of food.
        - When player amount drops below NICE_GUY_PLAYER_MIN, script starts to
          exploit its high rep and starts opting for slacking
    
    - Award is somewhat ignored, as it's same to everybody and 'just' makes game
      last longer. It could be used to see if this script is being starved to 
      death by slacking, but I can't see too much benefit from this.

I tested this script with force with self-made hunger games simulator and
several other algorithms. However the problem is I can't 'invent' dozens of
unique bots and I only had few dumbish ones so results weren't too reliable
compared to how this will actually perform in real competition.
<boast> 
This simulator ran 10 50-player games per ~7sec, so at least I know it
shouldn't flop and embarrassingly fail. I didn't publish this simulator since
there were already several out there plus it needs lots of inside tweaking
</boast>

- This algorithm uses more learning/observing than game theory, however it's 
  aware slacking is best option in any case _in short term_. That's why it first
  aims for rep which it can exploit afterwards when rep shouldn't be affected 
  too radically per hunt round.
"""

#Numpy to make learning table a bit more efficient
import numpy as np

# --- Tunable constants ------------------------------------------------------
# Before this round, ignore extreme reputations (e.g. 0.01, 0.92) so we don't
# start slacking right at the beginning of the game.
# After this round, the threshold values below take effect.
THRESHOLD_ROUND = 10
# If the other's (learned) hunt chance is above this, always slack:
# they will likely hunt anyway, so we pocket the food.
HIGH_HUNT_CHANCE = 0.9
# If it is below this, always slack: hunting with a slacker just loses food.
LOW_HUNT_CHANCE = 0.2
# While current_players / players_at_start stays above this ratio we play
# "nice guy" and hunt to build reputation; once enough players have dropped
# out we switch to exploiting that reputation by slacking.
NICE_GUY_PLAYER_MIN = 0.4
# How much we should opt for slacking after the NICE_GUY_PLAYER_MIN trigger.
# NOTE(review): currently unused by the decision logic; kept for tuning.
SLACKER_WEIGHT = 0.5

class Player(object):
    """Hunger Games competitor.

    Strategy: build a high reputation early by hunting, learn how the tribe
    (treated as one aggregate opponent) behaves, then exploit the earned
    reputation by slacking once enough players have been eliminated.
    """

    def __init__(self):
        #--- Learning variables ->

        #Learned probability that an opponent hunts, indexed as
        #[other_rep, my_rep, m_ratio] with each axis quantised to two
        #decimals (0..100). 1.0 = opponents always hunted under those
        #conditions, 0.0 = always slacked. Initialised to an uninformed 0.5.
        #Whole array is allocated here to avoid resizing later.
        #(np.float was removed in NumPy 1.24 -- use the explicit float64.)
        self.table = np.full((101, 101, 101), 0.5, dtype=np.float64)
        #Learning rate; fewer players = learn more aggressively, because
        #average tribe behaviour may shift as players drop out.
        self.learn_aggression = 0.0
        #Copy of the decisions returned from the last hunt_choices call
        self.last_decisions = []

        #--- Game theory / Holders ->

        #Opponent reputations seen in the last hunt_choices call
        self.last_reps = []
        #My reputation in the last hunt_choices call
        self.my_last_rep = 0.0
        #Last m / max_hunts ratio; used to learn when the tribe opts to hunt
        self.last_m_hunt_ratio = 0.0

        #--- Other global variables / Macros ->

        #Current number of players (including me)
        self.num_of_players = 0
        #Number of players when game started (including me)
        self.num_of_players_start = 0
        #Max amount of hunts next round in total
        self.max_hunts = 0
        #Food we started the game with (set on round 1)
        self.starting_food = -1
        #Result of current_food / starting_food, refreshed every round
        self.current_food_ratio = None
        #Quantise a 0..1 float to a 0..100 table index, e.g. 0.2345 -> 23.
        #Rounding x*100 (instead of round(x, 2)*100) avoids float artefacts
        #such as int(round(0.29, 2) * 100) == 28; also clamped to the
        #valid index range for safety.
        self.two_int = lambda x: min(100, max(0, int(round(x * 100))))
        #Decision letter -> numeric value (0.0 = slack, 1.0 = hunt)...
        self.lookup = {"h": 1.0, "s": 0.0}
        #...and the reverse mapping
        self.reverse_lookup = ["s", "h"]

    def _learn(self, old_value, decision):
        """Return old_value nudged towards the observed decision.

        old_value -- previous learned hunt probability (0.0..1.0)
        decision -- "h" or "s", the decision the other player made
        Returns the updated value, kept within [0.0, 1.0]

        """
        decision = self.lookup[decision]
        #Surprise factor: a decision far from the learned value has a much
        #stronger effect (a person who never lies loses credibility the
        #first time they do). Scaled by 4 and squared for a steeper curve.
        dist = abs(decision - old_value) * 4
        #Clamp to 1.0 <= dist so a tiny surprise still teaches a little
        if dist < 1.0: dist = 1.0
        dist = dist ** 2
        #Clamp the effective rate to 1.0: with few players learn_aggression
        #can exceed 1/16, and an unclamped rate would push the stored value
        #outside the [0, 1] probability range.
        aggr = min(1.0, self.learn_aggression * dist)
        #aggr is the weight of the new observation
        return ((1 - aggr) * old_value + (aggr * decision))

    def _teach(self, other_rep, my_rep, m_hunt, decision):
        """Teach/update one table cell from an observed decision.

        other_rep -- rep of the other player (integer index 0..100)
        my_rep -- my rep (integer index 0..100)
        m_hunt -- m / max_hunts ratio (integer index 0..100)
        decision -- which one the other player chose ("h" or "s")

        """
        #Get old value from the table, then store the updated one
        old_value = self.table[other_rep, my_rep, m_hunt]
        self.table[other_rep, my_rep, m_hunt] = self._learn(old_value, decision)

    def hunt_choices(self, round_number, current_food, current_reputation, m,
            player_reputations):
        """Return a list of "h"/"s" decisions, one per opponent."""
        #Save starting food amount etc on the first round
        if round_number == 1:
            self.starting_food = current_food
            self.num_of_players_start = len(player_reputations) + 1

        #Pre-calculate values we need per round
        self.last_reps         = player_reputations[:]
        self.my_last_rep       = current_reputation
        self.num_of_players    = len(player_reputations) + 1
        self.max_hunts         = self.num_of_players * (self.num_of_players - 1)
        self.learn_aggression  = 1.0 / self.num_of_players
        self.last_m_hunt_ratio = float(m) / self.max_hunts
        self.current_food_ratio= float(current_food) / self.starting_food
        #Fraction of the original tribe still alive; falls from 1.0 towards
        #0.0 as players are eliminated. (The original code computed the
        #inverse start/current ratio, which never drops below 1.0, so the
        #"nice guy" hunting branch below was unreachable.)
        player_ratio           = (float(self.num_of_players) /
            self.num_of_players_start)
        #Quantise my rep and the m/max_hunts ratio into indexes once
        #ie. 0.58352 -> 58
        my_rep_index = self.two_int(current_reputation)
        m_hunt_index = self.two_int(self.last_m_hunt_ratio)

        #Loop through reps and make decisions
        #First clean off last decisions
        self.last_decisions = []
        for rep in player_reputations:
            #Check other's reputation for thresholds
            #We want to exploit hunt-'em-alls and avoid helping lazies
            #However only do this after THRESHOLD_ROUND
            if ((rep >= HIGH_HUNT_CHANCE or rep <= LOW_HUNT_CHANCE) and
                    round_number > THRESHOLD_ROUND):
                self.last_decisions.append("s")
                continue

            #Get learned chance of the other hunting from the table
            h_value = self.table[self.two_int(rep), my_rep_index, m_hunt_index]
            #Run value from table through the same thresholds
            if ((h_value >= HIGH_HUNT_CHANCE or h_value <= LOW_HUNT_CHANCE) and
                    round_number > THRESHOLD_ROUND):
                self.last_decisions.append("s")
            else:
                #Else judge what we should do based on current rep, food etc
                #In general we want to choose what the opponent may choose:
                #slack while they slack = best for food at expense of rep,
                #hunt while they hunt = best for rep with no food loss.
                #However, first we want to gain rep at expense of food.
                if player_ratio > NICE_GUY_PLAYER_MIN:
                    #Early/mid game: opt for hunting based on food situation.
                    #More food = more food to lose, thus more hunting to
                    #build rep; the food ratio keeps us from starving.
                    decision = ((0.5 * h_value) +
                        (self.current_food_ratio * 0.5))
                else:
                    #Endgame: exploit the reputation. Higher rep = more
                    #slacking to gain food at expense of rep. Using rep
                    #directly as a weight keeps the algorithm adaptable.
                    decision = (((1 - self.my_last_rep) * h_value) +
                        (self.my_last_rep * 0.0))
                    #0.0 at the end to make clear we're opting for slacking

                #Select the action from the final sum of weighted values
                decision = int(round(decision))
                #Avoid rounding to higher ints than 1
                if decision > 1: decision = 1
                self.last_decisions.append(
                    self.reverse_lookup[decision])
        #Return a copy of the decision array
        return self.last_decisions[:]

    def hunt_outcomes(self, food_earnings):
        """Infer each opponent's decision from earnings and update the table.

        food_earnings -- per-opponent food deltas, aligned with the
                         decisions returned by the last hunt_choices call
        """
        my_rep_index    = self.two_int(self.my_last_rep)
        m_hunt_index    = self.two_int(self.last_m_hunt_ratio)
        #Loop through player reps, my decisions and earnings to teach myself
        for i in range(len(food_earnings)):
            my_decision = self.last_decisions[i]
            earning = food_earnings[i]
            other_rep_index = self.two_int(self.last_reps[i])
            #'Solve' the other's decision from the payoff matrix:
            #h/h -> 0, h/s -> -3, s/h -> +1, s/s -> -2, so my decision
            #plus my earning uniquely identify theirs.
            if my_decision == "h":
                #Zero food while I hunted means the other hunted too;
                #otherwise the value is -3 and the other slacked
                others_decision = "h" if earning == 0 else "s"
            else:
                #We gained food (+1) means the other hunted;
                #otherwise (-2) we both slacked
                others_decision = "h" if earning == 1 else "s"
            #Update the table with the inferred decision
            self._teach(other_rep_index, my_rep_index, m_hunt_index,
                others_decision)

    def round_end(self, award, m, number_hunters):
        """
        This algorithm doesn't really care about public goods, since its goal
        is to be a winner mainly, so do nothing with the award etc.
        """
        pass
