import sys
import random
import numpy
import math

# Reputation bounds used by Player.adjust_reputation_bias; recomputed every
# round (after the first) by Player.set_reputation_bounds from the mean and
# variance of the opponents' reputations.
MIN_REP = 0.0
MAX_REP = 0.0

class Player:
    """Hunger-games player.

    Treats each round as a Bayesian game and plays a mixed strategy: the
    probability of hunting with an opponent is that opponent's reputation
    scaled by four multiplicative biases (bonus, reputation, regret and
    food), each updated from the observed game state.
    """

    def __init__(self):
        self.reputation = 0.0             # Fraction of 'h' decisions we have made so far
        self.hunt_decision_occurence = 0  # Count of 'h' decisions made so far
        self.all_decision_occurence = 0   # Count of all decisions made so far
        self.former_reputation = 0        # Reputation at the end of the previous round
        self.food = 0                     # The current amount of food
        self.initial_food = 0             # Food held when round 1 started
        self.reputation_bias = 1.0        # Adjust mixed strategies depending on the reputation thresholds
        self.bonus_bias = 1.0             # Adjust mixed strategies depending on the possibility of bonus awards
        self.regret_bias = 1.0            # Adjust mixed strategies based on regrets for previous actions
        self.food_bias = 1.0              # Adjust mixed strategies depending on the food amount
        self.bonus_prob = []              # Probability to get bonus, per slacker count (0, 1, 2)
        self.hunt_decisions = []          # Decisions made in the latest round
        self.hunt_decision_prob = []      # Hunt probability used for each decision in the latest round

    def hunt_choices(self, round_number, current_food, current_reputation, m, player_reputations):
        """Return one 'h'/'s' decision per opponent for this round.

        round_number -- 1-based round index
        current_food -- food currently held
        current_reputation -- our reputation as tracked by the game
        m -- hunt threshold for the bonus award
        player_reputations -- reputations of the other players
        """
        # Sync our bookkeeping with the reputation reported by the game.
        self.reputation = current_reputation
        self.hunt_decision_occurence = self.reputation * self.all_decision_occurence
        self.food = current_food

        hunt_decisions = []
        self.hunt_decision_prob = []

        # First round: no information about opponents yet, so hunt with a
        # fixed probability of 0.75, targeting an opening reputation of 0.75.
        if round_number == 1:
            # Remember the starting food; used as the denominator for the
            # food ratio in later rounds.
            self.initial_food = current_food

            reputations = [0.75 for _ in player_reputations]
            for _ in player_reputations:
                decision = self.make_decision(0.75)

                # Track our own decisions so the reputation estimate stays current.
                if decision == 'h':
                    self.hunt_decision_occurence += 1

                self.all_decision_occurence += 1
                hunt_decisions.append(decision)
                # Recompute our reputation after every decision.
                self.reputation = self.hunt_decision_occurence / float(self.all_decision_occurence)

            self.hunt_decision_prob = [0.75 for _ in player_reputations]
            self.hunt_decisions = hunt_decisions

            # Bonus probability for slacker counts 0, 1 and 2, i.e. the
            # pairings (h, h), (h, s)/(s, h) and (s, s).
            self.bonus_prob = [self.calculate_bonus_probability(self.calculate_bonus_bias(reputations, m, i)) for i in range(0, 3)]
            return hunt_decisions

        # Calculate food bias based on the current food amount.
        self.calculate_food_bias(self.food / float(self.initial_food))

        # Adjust the reputation bounds according to the current reputations of players.
        self.set_reputation_bounds(player_reputations)

        # Determine the bonus bias (slacker count 0).
        self.bonus_bias = self.calculate_bonus_bias(player_reputations, m, 0)

        # Bonus probability for slacker counts 0, 1 and 2, i.e. the
        # pairings (h, h), (h, s)/(s, h) and (s, s).
        self.bonus_prob = [self.calculate_bonus_probability(self.calculate_bonus_bias(player_reputations, m, i)) for i in range(0, 3)]

        for reputation in player_reputations:
            self.adjust_reputation_bias()
            # Treating the game as a Bayesian game, the dominant strategy is
            # a mixed strategy keyed to the current opponent's reputation and
            # scaled by all four biases.
            decision_probability = reputation * self.bonus_bias * self.reputation_bias * self.regret_bias * self.food_bias
            decision = self.make_decision(decision_probability)

            # Track our own decisions so the reputation estimate stays current.
            if decision == 'h':
                self.hunt_decision_occurence += 1

            self.all_decision_occurence += 1
            hunt_decisions.append(decision)
            # Remember the probability used for this decision; needed for the
            # mixed-strategy payoff in regret matching (hunt_outcomes).
            self.hunt_decision_prob.append(decision_probability)
            # Recompute our reputation after every decision.
            self.reputation = self.hunt_decision_occurence / float(self.all_decision_occurence)

        self.former_reputation = self.reputation
        self.hunt_decisions = hunt_decisions

        return hunt_decisions

    def hunt_outcomes(self, food_earnings):
        """Update regret_bias from last round's per-opponent earnings using
        regret matching over the mixed strategies actually played."""
        # regrets[0] = regrets for "h", regrets[1] = regrets for "s"
        regrets = [0.0, 0.0]

        for i in range(0, len(food_earnings)):
            earning = food_earnings[i]
            decision = self.hunt_decisions[i]
            probability = self.hunt_decision_prob[i]

            # The four outcome cases map to earnings:
            # (h, h) -> 0, (h, s) -> -3, (s, h) -> 1, (s, s) -> -2.
            # NOTE(review): all_regrets can in principle be 0, which would
            # raise ZeroDivisionError; behavior left unchanged — confirm
            # whether a guard is wanted.
            if decision == "h" and earning == 0:
                expected_payoff = 2 * self.bonus_prob[0] * probability + (1 + 2 * self.bonus_prob[1]) * (1 - probability)
                all_regrets = abs(expected_payoff - 2 * self.bonus_prob[0]) + abs(expected_payoff - (1 + 2 * self.bonus_prob[1]))
                regrets[0] += (expected_payoff - earning) / all_regrets
            elif decision == "h" and earning == -3:
                expected_payoff = (0 - 3 + 2 * self.bonus_prob[1]) * probability + (0 - 2 + 2 * self.bonus_prob[2]) * (1 - probability)
                all_regrets = abs(expected_payoff - (0 - 0 - 3 + 2 * self.bonus_prob[1]) * probability) + abs(expected_payoff - (0 - 2 + 2 * self.bonus_prob[2]) * (1 - probability))
                regrets[0] += (expected_payoff - earning) / all_regrets
            elif decision == "s" and earning == 1:
                expected_payoff = 2 * self.bonus_prob[0] * probability + (1 + 2 * self.bonus_prob[1]) * (1 - probability)
                all_regrets = abs(expected_payoff - 2 * self.bonus_prob[0]) + abs(expected_payoff - (1 + 2 * self.bonus_prob[1]))
                regrets[1] += (expected_payoff - earning) / all_regrets
            else:
                expected_payoff = (0 - 3 + 2 * self.bonus_prob[1]) * probability + (0 - 2 + 2 * self.bonus_prob[2]) * (1 - probability)
                all_regrets = abs(expected_payoff - (0 - 0 - 3 + 2 * self.bonus_prob[1]) * probability) + abs(expected_payoff - (0 - 2 + 2 * self.bonus_prob[2]) * (1 - probability))
                regrets[1] += (expected_payoff - earning) / all_regrets

        # Net regret of playing "h" over "s".
        regret = regrets[0] - regrets[1]

        # Map net regret into a multiplicative bias: favor "h" (bias > 1)
        # when slacking was regretted, favor "s" (bias < 1) otherwise.
        if regret <= 0:
            regret = abs(regret)
            self.regret_bias = 1 + (1 - 1 / float(regret + 1.0))
        else:
            self.regret_bias = 1 - (1 - 1 / float(regret + 1.0))

    def round_end(self, award, m, number_hunters):
        """Round-end hook required by the game interface; unused."""
        pass # do nothing

    # Calculate bonus bias based on player reputations and the threshold.
    # slacker_number is the assumed number of slackers in a pairing and
    # selects between the (h, h), (h, s)/(s, h) and (s, s) situations.
    def calculate_bonus_bias(self, reputation_list, threshold, slacker_number):
        """Return a bias derived from the expected number of hunts relative
        to the bonus threshold, damped by the spread of reputations."""
        expected_value = (self.reputation + sum(reputation_list)) * len(reputation_list) - slacker_number ** 2

        temp_list = list(reputation_list)

        # Standard deviation of everyone's reputation, ours included.
        temp_list.append(self.reputation)
        temp_list = numpy.array(temp_list)
        standard_deviation = numpy.std(temp_list)

        # The expected value should never be less than 0.
        if expected_value < 0:
            expected_value = 0

        expected_percentage = expected_value / threshold

        # BUGFIX: use float division for the food ratio. Under Python 2 the
        # original integer division truncated to 0 whenever food dropped
        # below the initial amount, so the first branch was taken wrongly.
        if self.food / float(self.initial_food) >= 0.333:
            # Plenty of food left: scale by the squared spread of reputations.
            return expected_percentage * ((standard_deviation - 1.02) ** 2)
        else:
            # Food is running low: damp the bias exponentially with the spread.
            return expected_percentage * (2 ** ((0 - 15) * standard_deviation))

    # Calculate the probability to get a bonus based on the bonus bias passed in.
    def calculate_bonus_probability(self, bonus_bias):
        """Map a bonus bias to a probability, capped above at 1.0.

        NOTE(review): a bias below 1.0 yields a negative "probability"; this
        is not clipped at 0 and the payoff formulas in hunt_outcomes consume
        the value as-is. Preserved — confirm this is intended.
        """
        probability = bonus_bias - 1.0
        if probability > 1:
            probability = 1.0

        return probability

    # Calculate the food bias based on the current amount of food.
    def calculate_food_bias(self, ratio):
        """Set food_bias from the food ratio via a tanh ramp in (0.15, 1.15)."""
        self.food_bias = math.tanh(4.0 * ratio - 2.0) / 2.0 + 0.65

    def make_decision(self, probability):
        """Return 'h' with the given probability, else 's'."""
        decision = self.generate_random()

        if decision < probability:
            return 'h'
        else:
            return 's'

    def generate_random(self):
        """Return a uniform random number in [0.0, 1.0)."""
        return random.random()

    # Set the reputation range from the distribution of opponent reputations.
    def set_reputation_bounds(self, reputations):
        """Set MIN_REP/MAX_REP to the mean of the opponents' reputations
        minus/plus their variance."""
        global MAX_REP
        global MIN_REP

        reputations = numpy.array(reputations)
        mean_reputation = numpy.mean(reputations)
        # numpy.std returns the standard deviation; squaring gives the variance.
        deviation = (numpy.std(reputations)) ** 2
        # BUGFIX: removed a leftover Python 2 debug statement
        # ("print mean_reputation") — a syntax error under Python 3 and
        # stray output under Python 2.

        MAX_REP = mean_reputation + deviation
        MIN_REP = mean_reputation - deviation

    # Adjust the reputation bias based on the reputation range.
    def adjust_reputation_bias(self):
        """Pull reputation_bias toward the [MIN_REP, MAX_REP] band: shrink it
        when our reputation is above the band, grow it when below, and reset
        it to 1.0 when inside the band."""
        if self.reputation > MAX_REP:
            if self.reputation_bias > 1:
                self.reputation_bias = 1.0

            self.reputation_bias *= (MAX_REP / self.reputation)
        elif self.reputation < MIN_REP:
            if self.reputation_bias < 1:
                self.reputation_bias = 1.0

            self.reputation_bias *= (MIN_REP / self.reputation)
        else:
            self.reputation_bias = 1.0