from __future__ import division
import numpy as np
import math
import random
from random import shuffle
#from scipy import stats

'''
Brian Garofalo, 08/11/2003
'''

class Player:
    """Hunt-or-slack tournament player.

    Terminology used throughout:
      A -- the fraction of rounds this player hunts (converges to this
           player's steady-state reputation value).
      B -- the expected fraction of a player's pairings in which the
           partner hunts.

    Per-round flow (see hunt_choices):
      * Round 1: hunt with a fixed fraction (initial_A) of opponents,
        chosen at random, since no reputations exist yet.
      * Later rounds: estimate every player's B value (estimate_B), pick a
        goal reputation that maximizes expected next-round payoff
        (calc_goal_reputation), translate that goal into a hunt count for
        this round (calc_num_hunts_to_goal_reputation), and distribute the
        hunts among opponents (assign_hunts).
      * Endgame (fewer than 2 opponents remain): always slack.
    """

    def __init__(self):

        # Fixed-seed private RNG so the strategy is reproducible run to run.
        self.rng = random.Random()
        self.rng.seed(347890)

        # Probability threshold used in assign_hunts to choose between
        # random hunt placement and payoff-ranked placement.
        self.ah_randomize_cutoff = 0.5

        # Lifetime totals of this player's own hunt/slack decisions; used to
        # recompute our reputation locally (see determine_hunt_choices).
        self.num_hunts = 0
        self.num_slacks = 0

        self.initial_A = 0.9 #round-1 hunt fraction (hand-tuned guess)
        self.use_endgame_strategy = False  # latches True in hunt_choices; never reset


        # Bookkeeping for potential use in hunt_outcomes and/or round_end.
        # Currently written each round but never read -- see the TODOs there.
        self.expected_total_num_hunts_this_round = 0
        self.expected_hunts_received_this_round = 0

        self.num_players = 0
        self.num_hunts_received = 0

        # Adaptive-adjustment state; only goal_modifier is read today (in
        # calc_goal_reputation, where it is still always 0) -- the rest are
        # placeholders for the round_end TODOs.
        self.last_rep =0
        self.goal_modifier=0
        self.goal_incrementer=.0001
        self.last_diff = 0
        self.history = []

    # ------------------------------------------------------------------
    # Tournament callback interface (signatures fixed by the game engine)
    # ------------------------------------------------------------------

    def hunt_choices(self, round_number, current_food, current_reputation, m,
            player_reputations):
        """Return this round's decisions: a list of 'h' (hunt) / 's' (slack),
        one entry per opponent, in the same order as player_reputations.

        current_food and m are accepted for interface compatibility but are
        not used by this strategy.
        """
        # With fewer than 2 opponents there is no reputation game left to
        # play; switch permanently to always-slack.
        if len(player_reputations) < 2:
            self.use_endgame_strategy = True

        if self.use_endgame_strategy:
            hunt_decisions = ['s' for x in player_reputations] # always slack
            self.increment_hunt_totals(0,len(player_reputations))
        elif round_number == 1:  #Handle first round specially since reputations are not set yet
            # Hunt with ~initial_A of the opponents, placed at random.
            cutoff = int(math.floor(self.initial_A * float(len(player_reputations))))
            hunt_decisions = ['s' for x in player_reputations]
            for i in range(cutoff):
                hunt_decisions[i] = 'h'
            self.rng.shuffle(hunt_decisions)
            self.increment_hunt_totals(cutoff,len(player_reputations)-cutoff)

        else:
            hunt_decisions = self.determine_hunt_choices(player_reputations,round_number)

        self.last_rep = current_reputation
        self.num_players = len(player_reputations)
        return hunt_decisions

    def hunt_outcomes(self, food_earnings):
        """Per-pairing food results for the round; currently a no-op."""
        # TODO: Calculate modifier based on discrepancy between food_earnings and expected_payoff
        pass

    def round_end(self, award, m, number_hunters):
        """End-of-round notification; currently a no-op."""
        pass

        #TODO: FIGURE OUT SOME AWESOME ALGORITHM TO ADJUST. REMEMBER THAT ONE OF THE REASONS YOU NEED TO ADJUST IS A SIGNIFICANT NUMBER OF PLAYERS ARE NOT CONFORMING TO ASSUMPTIONS.

        # TODO Possibly detect something that forces the end game strategy:
        #### Give up hunting if goal is higher than current reputation but rank does not improve after x rounds???
        #### Most players slack for y rounds???
        # TODO: Calculate modifier based on discrepancy between reputation and number_hunters, possibly considering m

    def determine_hunt_choices(self, player_reputations,round_number):
        """Core strategy for every round after the first.

        Estimates every player's B value, chooses a goal reputation, and
        returns the 'h'/'s' decision list for this round.
        """
        # Recompute our reputation from our own counters rather than using
        # the server-supplied value (the two should agree).
        if (self.num_hunts+self.num_slacks) > 0:
            current_reputation = float(self.num_hunts)/float(self.num_hunts+self.num_slacks)
        else:
            current_reputation = 0 #Should not occur if the round-1 special case is implemented

        # Work with a list that includes ourselves, appended last, so our
        # index is len-1 in everything derived below.
        all_reputations = list(player_reputations)
        all_reputations.append(current_reputation)
        Bs = self.estimate_B(all_reputations)
        goal_reputation,expected_payoffs = self.calc_goal_reputation(all_reputations, Bs, len(Bs)-1)
        num_hunts_this_round = self.calc_num_hunts_to_goal_reputation(goal_reputation, len(player_reputations))

        # expected_payoffs[:-1] drops our own entry: hunts are only assigned
        # to opponents.
        hunts_this_round = self.assign_hunts(num_hunts_this_round,expected_payoffs[:-1],round_number)

        self.increment_hunt_totals(num_hunts_this_round, len(player_reputations)-num_hunts_this_round)
        # Bs[-1] is our own estimated B (per-pairing chance of being hunted
        # with); scale to an expected count of hunts directed at us.
        self.expected_hunts_received_this_round = Bs[-1]*float(len(player_reputations))
        self.expected_total_num_hunts_this_round = sum(Bs[:-1])*float(len(player_reputations)) + num_hunts_this_round

        return hunts_this_round

    def assign_hunts(self,num_hunts,player_expected_payoffs,round_number):
        """Distribute num_hunts 'h' decisions among the opponents.

        With probability (1 - ah_randomize_cutoff) the hunts are placed
        uniformly at random; otherwise they are targeted using the
        opponents' expected payoffs.  Returns a list of 'h'/'s' in the
        original opponent order.  round_number is accepted but unused.
        """
        # Row indices of the 3xP working matrix used in the targeted branch.
        ind_row=0
        ep_row=1
        hd_row=2

        P = len(player_expected_payoffs)
        indices = range(P)
        hunt_decisions = [False]*P

        # Clamp the requested hunt count to what is actually possible.
        if num_hunts < 0:
            num_hunts = 0
        elif num_hunts > P:
            num_hunts = P


        if num_hunts > 0:
            if self.rng.random() >= self.ah_randomize_cutoff:
                # Random placement: mark the first num_hunts slots, then shuffle.
                hunt_decisions = [1 if x < num_hunts else 0 for x,value in enumerate(hunt_decisions)]
                self.rng.shuffle(hunt_decisions)
            else:
                # Targeted placement via an index/payoff/decision matrix.
                vals = np.vstack((indices,player_expected_payoffs,hunt_decisions))

                #Sort by expected payoff value (ascending)
                vals = vals[:,vals[ep_row,:].argsort()]

                # NOTE(review): this hunts with the num_hunts opponents that
                # have the *lowest* expected payoff; confirm that ordering is
                # intended (the highest-payoff opponents would be the slice
                # vals[hd_row,-num_hunts:]).
                vals[hd_row,0:num_hunts] = True

                #resort back into original order
                vals = vals[:,vals[ind_row,:].argsort()]
                hunt_decisions = vals[hd_row,:]

        # hunt_decisions is either a Python list of 0/1/False or a float
        # numpy row at this point; the x==1 comparison handles both.
        hunt_decisions = ['h' if x==1 else 's' for x in hunt_decisions]

        return hunt_decisions


    def calc_num_hunts_to_goal_reputation(self,goal_reputation, num_other_players):
        """Number of hunts needed this round to move our lifetime reputation
        to goal_reputation, clamped to what is achievable this round.

        Solves goal = (num_hunts + h) / (num_hunts + num_slacks + N) for h,
        where N = num_other_players decisions are made this round.
        """
        num_hunts_this_round = goal_reputation * float(self.num_hunts+self.num_slacks+num_other_players) - self.num_hunts
        num_hunts_this_round = int(math.ceil(num_hunts_this_round))

        # Clamp the output to [0, num_other_players] (the goal reputation may
        # not be achievable this round)
        num_hunts_this_round = max(0,num_hunts_this_round)
        num_hunts_this_round = min(num_other_players,num_hunts_this_round)

        return num_hunts_this_round


    def increment_hunt_totals(self,num_hunts_this_round,num_slacks_this_round):
        """Accumulate this round's decisions into the lifetime totals."""
        self.num_hunts += num_hunts_this_round
        self.num_slacks += num_slacks_this_round


    def calc_expected_payoff(self,A,B):
        """Linear model of expected per-pairing food earnings.

        A is the fraction of times I will hunt (converges to the steady-state
        reputation value); B is the expected fraction of times the opponent
        will hunt.  NOTE(review): the constants presumably derive from the
        game's hunt/slack payoff matrix -- confirm against the rules.
        """
        return -2.0 - float(A) + (3.0*float(B))

    def calc_goal_reputation(self,all_reputations,Bs, my_index):
        """Choose the reputation to aim for next round.

        For every reputation rank, evaluates the expected payoff of holding a
        reputation negligibly above that rank's player, and returns the best
        such reputation (plus goal_modifier and a small epsilon) together
        with each player's expected payoff at their current reputation, in
        the original all_reputations order.
        """
        # Row indices of the 4xP working matrix.
        ind_row = 0
        rep_row = 1
        B_row = 2
        ep_row = 3

        P = len(all_reputations)
        indices = range(P)
        expected_payoffs = [0]*P

        vals = np.vstack((indices,all_reputations,Bs,expected_payoffs))

        #sort by reputation value (ascending)
        vals = vals[:,vals[rep_row,:].argsort()]

        # Our position in the reputation-sorted order.
        my_rank = np.where(vals[ind_row,:]==my_index)[0][0]

        best_A = 0
        best_next_round_payoff = self.calc_expected_payoff(0,vals[B_row,0]) #initialize assuming all slacks (A=0) and B of current slackiest player
        # NOTE(review): rank 0's recorded payoff uses A=0 rather than that
        # player's actual reputation, unlike every other rank -- confirm
        # this asymmetry is intended.
        vals[ep_row,0] = best_next_round_payoff

        #Assume you can achieve player i's B value if your reputation is negligibly higher than player i's.
        #Adjust the A_next_round lookup based on whether the player is above or below your current rank
        #(moving past a player shifts everyone below your old slot down one position).
        for i in range(1,P):
            if i <= my_rank:
                A_next_round=vals[rep_row,i-1]
            else:
                A_next_round=vals[rep_row,i]
            A_current = vals[rep_row,i]
            B=vals[B_row,i]
            next_round_payoff = self.calc_expected_payoff(A_next_round, B)
            # Record each player's payoff at their *current* reputation for
            # the caller; the best-A search uses next_round_payoff.
            vals[ep_row,i] = self.calc_expected_payoff(A_current, B)
            if next_round_payoff > best_next_round_payoff:
                best_next_round_payoff = next_round_payoff
                best_A = A_next_round

        #resort back into original order
        vals = vals[:,vals[ind_row,:].argsort()]
        expected_payoffs = vals[ep_row,:]

        # The epsilon keeps the goal strictly above the targeted reputation;
        # goal_modifier is an adaptive-adjustment hook that is still always 0
        # (see the round_end TODOs).
        goal_reputation = best_A + self.goal_modifier + .00001

        return goal_reputation, expected_payoffs

    #Estimates the fraction of players that will hunt with each ranked position in the reputation list
    def estimate_B(self,all_reputations):
        """Return, aligned with all_reputations, each player's estimated B:
        the expected fraction of that player's pairings in which the partner
        hunts.

        Model: every player makes reputation*(P-1) hunts per round and aims
        them at the highest-reputation *other* players (never at themselves);
        the fractional remainder of a hunt goes to the next player down the
        ranking.  NOTE(review): assumes every reputation is <= 1, which keeps
        the floor_num_hunt+1 index below in bounds -- confirm.
        """
        #If all_reputations includes the hero algorithm, its hunts are distributed just like everyone else's

        # Row indices of the 3xP working matrix.
        ind_row = 0
        rep_row = 1
        B_row = 2

        P = len(all_reputations)
        indices = range(P)
        Bs = [0] * P

        vals = np.vstack((indices,all_reputations,Bs))

        #Reverse sort by reputation value (best reputation first)
        vals = vals[:,vals[rep_row,:].argsort()]
        vals = vals[:,::-1]

        #for each player, distribute hunts to the other players with the best reputations
        for i in range(P):

            #expected number of hunts from player i is reputation * (number of players - 1)
            num_hunt = vals[rep_row,i] * float(P-1)
            floor_num_hunt = int(math.floor(num_hunt))
            remainder_num_hunt = num_hunt - floor_num_hunt

            if floor_num_hunt < i:
                # All whole hunts land above i's own slot, so there is no
                # need to skip the self index.
                vals[B_row,0:floor_num_hunt] += 1
                vals[B_row,floor_num_hunt] += remainder_num_hunt
            else:
                # Whole hunts would cover i's own slot: skip index i and
                # extend the covered range by one to compensate.
                vals[B_row,0:i] += 1
                vals[B_row,i+1:floor_num_hunt+1] += 1
                if remainder_num_hunt > 0:
                    vals[B_row,floor_num_hunt+1] += remainder_num_hunt


        #resort back into original order
        vals = vals[:,vals[ind_row,:].argsort()]

        # Convert hunt counts received into per-pairing fractions.
        if P > 1:
            Bs = vals[B_row,:] / float(P-1)
        else:
            Bs = vals[B_row,:] #single-player edge case; should never happen

        return Bs
                
            