from random import random, choice
from scipy import zeros

from pybrain.utilities import Named
from pybrain.rl.environments.environment import Environment

from actions import Actions

class BlackRLEnv(Environment, Named):
    """Simplified blackjack environment for reinforcement learning.

    The state (``perseus``) is a tuple ``(player_cards, dealer_cards)``,
    each a list of card values. The agent may HIT or STAND; in this toy
    example HIT simply appends a 10 to the player's hand.
    """

    # The number of actions the environment accepts
    indim  = 0

    # The number of states the environment produces
    outdim = 0

    # Discrete State Space
    discreteStates  = True

    # Discrete Action Space
    discreteActions = True

    # Number of possible actions within the discrete action space
    numActions = 0

    # Current state: (player_cards, dealer_cards) tuple, set by reset()
    perseus = None

    # Possible actions; performAction() receives an index into this list
    action_set = [Actions.HIT, Actions.STAND]

    def __init__(self, topology=None, goal=0, **args):
        """Create and reset the environment.

        topology -- optional topology description; defaults to a fresh
                    empty list. (Previously a mutable default argument
                    ``[]``, which would have been shared across every
                    instance -- fixed by defaulting to None.)
        goal     -- target total for the game (e.g. 21).
        args     -- forwarded to ``setArgs`` on the base class.
        """
        self.setArgs(**args)
        # Give each instance its own list rather than sharing one
        # module-level default object.
        self.topology = [] if topology is None else topology
        self.goal = goal
        self.reset()

    def reset(self):
        """Return the environment to its initial state."""
        # Fixed example hands: player holds 11 and 5, dealer shows a 6.
        player = [11, 5]
        dealer = [6]

        self.perseus = (player, dealer)

    def getSensors(self):
        """Return the current state of the environment.

        NOTE(review): this returns the internal state object itself, so
        callers share (and can mutate) it -- kept for backward
        compatibility. In this example the state layout never changes.
        """
        return self.perseus

    def performAction(self, action):
        """Perform the given action within the current environment.

        action -- integer index into ``action_set``.
        """
        # Renamed from ``choice``, which shadowed ``random.choice``
        # imported at module level.
        selected = self.action_set[action]
        # Normally we would inspect self.perseus to decide the state
        # transition; this toy example only implements HIT.
        if selected == Actions.HIT:
            # The action was to hit, so give the player a 10.
            self.perseus[0].append(10)


