##
# \file blackrltask.py
#

# --  Python imports  --
# -- End Python imports --

# --  Pybrain imports  --
from pybrain.rl.environments import Task
from scipy import array
# -- End Pybrain imports --

# --  Project imports  --
from dealer import Dealer
from actions import Actions
from players import Players
from mapping import Map
from bjmath import BJMath
from namedcards import Cards
from rewards import Reward
from hand import Hand
# -- End Project imports --

##
# \class BlackRLTask
# \details This is the logic for the reinforcement learning agent.
##
# \class BlackRLTask
# \details This is the logic for the reinforcement learning agent. It
#          adapts the blackjack environment to the PyBrain Task
#          interface: performAction() records the round outcome when the
#          agent stands, getReward() turns that outcome into a numeric
#          reward, and getObservation() maps the table state to an array.
class BlackRLTask(Task):

    ## \var _last_result
    # Result from the last hand. Used when the agent action is STAND, then
    # this "last result" is used to compute the correct reward.
    _last_result = None

    ## \var _WIN
    # Enumeration for a winning hand.
    _WIN = 0

    ## \var _LOSE
    # Enumeration for a losing hand.
    _LOSE = 1

    ## \var _PUSH
    # Enumeration for a push.
    _PUSH = 2

    ## \var _Count
    # KO running count captured from the card-counting player when the
    # agent stands; None when no count is pending. Used to scale up the
    # reward for a win by hand value.
    _Count = None

    ##
    # \details Computes the reward for the agent's most recent action.
    #          A bust yields Reward.BUST. Otherwise the outcome recorded
    #          by performAction() selects the reward (DEALER_BUST / WIN /
    #          PUSH / LOSE); if no outcome is recorded the hand is still
    #          in progress and Reward.KEEP_PLAYING is returned. Any
    #          terminal outcome also settles the bet and resets the
    #          environment.
    # \pre performAction() has been called for the current step.
    # \post On a terminal outcome the bet is settled, the environment is
    #       reset, and _last_result is cleared.
    # \return The numeric reward for the current state.
    def getReward(self):
        # Compute and return the current reward.
        player = self.env.perseus[Players.AGENT]
        dealer = self.env.perseus[Players.DEALER]

        player_value = BJMath.calcHandValue(player)
        dealer_value = BJMath.calcHandValue(dealer)

        if player_value > Hand.BLACKJACK:
            # The agent busted: settle the bet as a loss and start a new
            # round.
            self.env.bets.lose()
            self.env.reset()

            # This is the reward for busting.
            reward = Reward.BUST

        else:
            # Not busting is not the same as winning: the outcome (if any)
            # was recorded by performAction() when the agent stood.
            if self._last_result == self._WIN:

                # Round is over so settle the bet and reset the environment.
                self.env.bets.win()
                self.env.reset()

                if dealer_value > Hand.BLACKJACK:
                    # Win by having the dealer bust.
                    reward = Reward.DEALER_BUST
                else:
                    # Win by having a higher valued hand; scale the reward
                    # by the KO count captured at the STAND.
                    reward = Reward.WIN
                    if self._Count is not None:
                        # NOTE(review): with an integer count this is true
                        # division on Python 3 but floor division on
                        # Python 2 — confirm the intended scaling.
                        reward = reward * (1 + self._Count/10)
                        self._Count = None

            elif self._last_result == self._PUSH:
                # Round is over so settle the bet and reset the environment.
                self.env.bets.push()
                self.env.reset()

                reward = Reward.PUSH
            elif self._last_result == self._LOSE:
                # Round is over so settle the bet and reset the environment.
                self.env.bets.lose()
                self.env.reset()
                # Lost by having the lower valued hand.
                reward = Reward.LOSE
            else:
                # No recorded outcome: the hand is still in progress.
                reward = Reward.KEEP_PLAYING

            self._last_result = None

        return reward

    ##
    # \details Executes the agent's chosen action. When the agent stands,
    #          the agent's hand is compared against the dealer's to record
    #          the outcome (_WIN / _PUSH / _LOSE) for getReward(), and the
    #          KO running count is captured from the card-counting player.
    # \pre The environment holds the current hands in env.perseus.
    # \post _last_result and _Count are set when the action is STAND.
    # \param action Per-player action array; only the agent's entry is used.
    # \return none
    def performAction(self, action):
        # Get the agent's desired action.
        agent_action = int(action[Players.AGENT])

        if agent_action == Actions.STAND:
            # The agent has chosen to stand so we need to determine the
            # winner. Capture the KO count to scale a win-by-value reward.
            self._Count = self.env.perseus[Players.CHEATER].KO_Count

            # Once the dealer has finished playing, we can determine the winner.
            player_value = BJMath.calcHandValue(self.env.perseus[Players.AGENT])
            dealer_value = BJMath.calcHandValue(self.env.perseus[Players.DEALER])

            if (player_value > dealer_value) or (dealer_value > Hand.BLACKJACK):
                self._last_result = self._WIN
            elif player_value == dealer_value:
                self._last_result = self._PUSH
            else:
                self._last_result = self._LOSE

        # Pass the action to the super class.
        Task.performAction(self, agent_action)

    ##
    # \details Builds the agent's observation from the current table state.
    # \pre The environment holds the current hands in env.perseus.
    # \post none
    # \return A 1-element scipy array containing the mapped state.
    def getObservation(self):
        # Create an "observation" out of the current state.
        obs = array([Map.createState(self.env.perseus)])
        return obs
