##
# \file blackrlenv.py
#

# --  Python imports  --
from random import random, choice
# -- End Python imports --

# --  Pybrain imports  --
from scipy import zeros
from pybrain.utilities import Named
from pybrain.rl.environments.environment import Environment
# -- End Pybrain imports --

# --  Project imports  --
from actions import Actions
from shoe import Shoe
from players import Players
from betting import Betting
from counting import CardCounter
from dealer import Dealer
# -- End Project imports --

##
# \class BlackRLEnv
# \details This is the logic for the reinforcement learning environment.
# It deals blackjack hands from a shoe, lets the fixed-policy dealer play
# out its hand up front, and exposes (player, dealer, count) as the state.
class BlackRLEnv(Environment, Named):

    ## \var indim
    # The number of actions the environment accepts
    indim  = 0

    ## \var outdim
    # The number of states the environment produces
    outdim = 0

    ## \var discreteStates
    # Discrete State Space
    discreteStates  = True

    ## \var discreteActions
    # Discrete Action Space
    discreteActions = True

    ## \var numActions
    # Number of possible actions within the discrete action space
    numActions = 0

    ## \var perseus
    # Current State
    perseus = None

    ## \var action_set
    # Possible Actions the agent can perform.
    action_set = [Actions.HIT, Actions.STAND]

    ##
    # \details Class constructor that sets a known framework for the learning.
    # \param topology Optional topology description (stored as-is); defaults
    #        to a fresh empty list.
    # \param goal Goal value (stored as-is).
    # \param args Extra keyword arguments forwarded to the superclass setArgs.
    def __init__(self, topology=None, goal=0, **args):
        # Betting Agent
        self.bets = Betting()
        # Part of superclass
        self.setArgs(**args)
        # BUG FIX: the original used a mutable default argument
        # (topology=[]), which is shared across every instance that relies
        # on the default; use a None sentinel and build a fresh list.
        self.topology = [] if topology is None else topology
        # Part of superclass
        self.goal = goal
        # Generate a shoe for this environment
        self.shoe = Shoe()
        # Class to count cards
        self.count = CardCounter()
        # Reset the environment to its default/initial state
        self.reset()

    ##
    # \details This reset takes all cards from the dealer and player and gives them new cards.
    # \pre none
    # \post perseus is updated with the dealer and player hands.
    # \param none
    # \return none
    def reset(self):
        # Return to initial state
        player = []
        dealer = []

        # Deal the cards in the same order as a real Casino game would:
        # player, dealer, player, dealer. Every dealt card is fed to the
        # card counter so the running count stays accurate.
        for hand in (player, dealer, player, dealer):
            card = self.shoe.deal()
            self.count.update(card)
            hand.append(card)

        self.bets.placeBet()

        # Let the dealer play. This is done at the beginning for two reasons:
        # 1. If this was done in the performAction of the task or environment
        # there is no state transition and the new dealer state would be missed.
        # 2. It is possible to play first because the dealer's actions are fixed,
        # so doing this first has no adverse effect on the learning, because during
        # the agent's learning it still will only look at the dealer's up card.
        dealer_action = Dealer.action(dealer)

        while dealer_action == Actions.HIT:
            card = self.shoe.deal()
            self.count.update(card)
            dealer.append(card)
            dealer_action = Dealer.action(dealer)

        self.perseus = (player, dealer, self.count)

    ##
    # \details This returns the items that are used to create the state of the system.
    # \pre none
    # \post none
    # \param none
    # \return The perseus, representing the player and dealer hands (and the counter).
    def getSensors(self):
        # Return the current state of the environment
        return self.perseus

    ##
    # \details Performs the desired actions of the agent.
    # \pre none
    # \post Updated state based on the desired action.
    # \param action Index into action_set identifying the desired action.
    # \return none
    def performAction(self, action):
        # Look up the concrete game action. Renamed from 'choice' to avoid
        # shadowing random.choice imported at the top of the file.
        game_action = self.action_set[action]

        # Now that we have the choice, we would normally look
        # at self.perseus to determine how to transition to the next state,
        # but in the case of Blackjack the current state does not give
        # any indication about the next state.
        if game_action == Actions.HIT:
            # The action was to hit, so deal the player a single card
            card = self.shoe.deal()
            self.count.update(card)
            self.perseus[Players.AGENT].append(card)
        elif game_action == Actions.SPLIT:
            # BUG FIX: the original did 'newHand[0] = ...' on an empty
            # list, which raises IndexError; build the hand with append.
            newHand = []
            newHand.append(self.perseus[Players.AGENT].pop())

            # Deal one replacement card to each of the two split hands.
            # BUG FIX: these deals bypassed the card counter, unlike every
            # other deal in this environment; keep the count consistent.
            card = self.shoe.deal()
            self.count.update(card)
            self.perseus[Players.AGENT].append(card)

            card = self.shoe.deal()
            self.count.update(card)
            newHand.append(card)
            # NOTE(review): the split hand is built but never stored or
            # played, exactly as in the original — confirm this is intended.

            # BUG FIX: placeBet/getBet were called on the Betting class with
            # no instance; reset() calls self.bets.placeBet(), so they are
            # instance methods and the class-level call would raise TypeError.
            # NOTE(review): AgentMoney is kept as class-level state — confirm.
            self.bets.placeBet()
            Betting.AgentMoney -= self.bets.getBet()

        elif game_action == Actions.DOUBLE:
            # Action is still needed to skip to STAND; the extra wager is
            # placed and deducted from the bankroll.
            self.bets.placeBet()
            Betting.AgentMoney -= self.bets.getBet()

        elif game_action == Actions.BET:
            self.bets.placeBet()
            Betting.AgentMoney -= self.bets.getBet()