import math
import random

__author__ = 'mirage007'


# Reverse lookup: (our action toward a player, food payoff we got from that
# pairing) -> the action that player must have taken.  The payoffs encoded
# here match the hunt game's matrix: both hunt -> 0, hunt vs slack -> -3 for
# the hunter / +1 for the slacker, both slack -> -2.
foodToPlayerActionMap = {
    ('h', 0): 'h',   # we hunted, broke even: they hunted too
    ('h', -3): 's',  # we hunted, lost 3: they slacked
    ('s', 1): 'h',   # we slacked, gained 1: they hunted
    ('s', -2): 's',  # we slacked, lost 2: they slacked
}

class RankReputationHistory(object):
    """Exponentially-decayed history of hunt frequency, indexed by rank slot.

    Slot i tracks how often the i-th best-reputation player hunted, with
    newer rounds weighted by `sens` and older history by (1 - sens).
    """

    def __init__(self, sens=.25):
        # hist[i] is the decayed average of 1/0 "did rank-i player hunt"
        # observations; None until the first update arrives.
        self.hist = None
        # Weight of the newest observation; (1 - sens) retains the past.
        self.sens = sens

    def updateRep(self, rankedRep):
        """Fold one round of rank-ordered actions into the history.

        `rankedRep` is e.g. ['h', 's', 's', ...] ordered best player first.
        Entries past len(rankedRep) are deliberately left untouched, since
        the player count only ever shrinks.
        """
        observed = [1 if action == 'h' else 0 for action in rankedRep]
        if self.hist is None:
            self.hist = observed
            return
        keep = 1 - self.sens
        for slot, obs in enumerate(observed):
            self.hist[slot] = self.sens * obs + keep * self.hist[slot]

class RoundOutcome(object):
    """Snapshot of one game round: inputs, reputation ranking, and results.

    Opponents are re-indexed by reputation (best first); helpers convert
    between the original ordering the game API uses and that ranked ordering.
    """

    def __init__(self, round_number, current_food, current_reputation, m, player_reputations):
        self.round_number = round_number
        self.current_food = current_food
        self.current_reputation = current_reputation
        self.m = m  # hunt threshold for the round's group bonus
        self.player_reputations = list(player_reputations)  # defensive copy
        self.num_players = len(self.player_reputations)
        # Original indices of the opponents, sorted best reputation first.
        self.player_rank = sorted(range(self.num_players),
                                  key=lambda i: self.player_reputations[i],
                                  reverse=True)
        self.ranked_player_reputations = [self.player_reputations[i] for i in self.player_rank]
        # Our own rank: number of opponents with strictly better reputation.
        self.rank = sum(1 for rep in self.ranked_player_reputations if rep > self.current_reputation)
        # Maps a player's original index -> his position in the ranked order.
        self.original_order = {orig_idx: rank_pos
                               for rank_pos, orig_idx in enumerate(self.player_rank)}
        self._actions = None          # actions we submitted, original order
        self.player_actions = None    # opponents' inferred actions (setHuntOutcome)
        self.food_earnings = None     # per-opponent payoffs (setHuntOutcome)
        self.outcome_hunters = None   # total hunters this round (setRoundOutcome)
        self.outcome_award = None     # bonus food awarded (setRoundOutcome)

    @property
    def actions(self):
        """Our submitted actions in original player order (returns a copy)."""
        return list(self._actions)

    @actions.setter
    def actions(self, values):
        self._actions = list(values)

    @property
    def ranked_player_actions(self):
        """Opponents' inferred actions re-ordered best reputation first."""
        return [self.player_actions[i] for i in self.player_rank]

    def setHuntOutcome(self, food_earnings):
        """Store per-opponent payoffs and reverse-engineer their actions.

        Given our own action and the payoff from a pairing,
        foodToPlayerActionMap uniquely determines the opponent's action.
        """
        self.food_earnings = list(food_earnings)
        self.player_actions = [foodToPlayerActionMap[(self._actions[i], f)]
                               for i, f in enumerate(self.food_earnings)]

    def setRoundOutcome(self, award, m, num_hunters):
        """Record end-of-round totals.

        `m` is accepted for interface compatibility but not stored again
        (self.m already holds the round's threshold).
        """
        self.award = award
        self.num_hunters = num_hunters
        # Fix: also populate the attributes declared in __init__, which the
        # original code left permanently None.
        self.outcome_award = award
        self.outcome_hunters = num_hunters

    def convertRankedToOriginal(self, ranked_actions):
        """Map a rank-ordered vector back to the original player ordering."""
        # Fix: was `xrange`, which does not exist in Python 3; `range` is
        # behaviorally identical here.
        return [ranked_actions[self.original_order[i]] for i in range(len(ranked_actions))]

class Agent(object):
    """Hunt-game agent that mirrors each ranked opponent's observed hunt rate."""

    def __init__(self):
        self.roundHistory = None  # RoundOutcome for the round in progress
        self.rankHistory = RankReputationHistory(.1)  # low sens: long memory
        self.currentRound = 0

    def setup(self, round_number, current_food, current_reputation, m, player_reputations):
        """Snapshot this round's game state into a fresh RoundOutcome."""
        self.roundHistory = RoundOutcome(round_number, current_food,
                                         current_reputation, m, player_reputations)
        self.currentRound = round_number

    def round_end(self, award, m, number_hunters):
        """Record end-of-round totals on the current round's outcome.

        Fix: this was a no-op even though RoundOutcome.setRoundOutcome exists
        precisely to store these values.
        """
        if self.roundHistory is not None:
            self.roundHistory.setRoundOutcome(award, m, number_hunters)

    def hunt_outcomes(self, food_earnings):
        """Infer opponents' actions from payoffs and fold into rank history."""
        r = self.roundHistory
        r.setHuntOutcome(food_earnings)
        self.rankHistory.updateRep(r.ranked_player_actions)

    def hunt_choices(self, round_number, current_food, current_reputation, m, player_reputations):
        """Choose 'h'/'s' toward each opponent (returned in original order)."""
        ridx = round_number - 1  # zero-based round index (setup stores it)
        self.setup(ridx, current_food, current_reputation, m, player_reputations)
        r = self.roundHistory
        if ridx == 0:
            # First round: hunt our fair share of the m-hunt bonus threshold,
            # assuming all num_players + 1 participants do the same.
            # Fix: the original `math.ceil(m+1/(r.num_players + 1))` divided
            # before adding — under Python 2 integer division that collapses
            # to exactly m; the written form indicates (m+1)/(players+1).
            num = int(math.ceil((m + 1.0) / (r.num_players + 1)))
            r.actions = ['h' if i < num else 's' for i in range(r.num_players)]
        else:
            # Mirror strategy: hunt against each ranked opponent with the
            # same decayed frequency that opponent has shown, then map the
            # ranked decisions back to the game's original ordering.
            ranked = ['h' if random.random() <= v else 's'
                      for v in self.rankHistory.hist[:r.num_players]]
            r.actions = r.convertRankedToOriginal(ranked)
        return r.actions

class TTAgent(Agent):
    # Placeholder subclass: currently identical to Agent (no overrides).
    # Presumably intended as a hook for a tit-for-tat variant — TODO confirm.
    pass