import numpy as np

p = 0.5  # probability of keeping a unit active, higher = less dropout


def train_step(X):
    """
    Vanilla Dropout: Not recommended implementation.

    X contains the data. Reads module-level weights W1..W3, biases b1..b3
    and keep-probability p. Because no scaling happens at train time, the
    matching predict() must scale each hidden activation by p.

    NOTE(review): this definition is shadowed by the inverted-dropout
    train_step defined later in this file.
    """
    # forward pass for example 3-layer neural network
    H1 = np.maximum(0, np.dot(W1, X) + b1)  # ReLU
    U1 = np.random.rand(*H1.shape) < p  # first dropout mask (boolean)
    H1 *= U1  # drop
    H2 = np.maximum(0, np.dot(W2, H1) + b2)
    U2 = np.random.rand(*H2.shape) < p  # second dropout mask
    H2 *= U2  # drop
    out = np.dot(W3, H2) + b3  # fixed: was np.dout (AttributeError — no such function)

    # backward pass .. (not shown)
    # perform parameter update .. (not shown)


def predict(X):
    """
    Ensembled forward pass for vanilla dropout: every unit is kept, so each
    hidden activation is scaled by the keep-probability p to match the
    expected train-time activation.

    NOTE(review): this definition is shadowed by the inverted-dropout
    predict defined later in this file.
    """
    H1 = np.maximum(0, np.dot(W1, X) + b1) * p  # scale the activation
    H2 = np.maximum(0, np.dot(W2, H1) + b2) * p  # fixed: was np.maximue (typo, AttributeError)
    out = np.dot(W3, H2) + b3
    return out


def train_step(X):
    """
    Inverted dropout forward pass for a 3-layer network.

    The dropout masks are pre-scaled by 1/p at train time, so the expected
    activation is unchanged and predict() needs no extra scaling. Reads the
    module-level weights W1..W3, biases b1..b3 and keep-probability p.
    """
    # layer 1: affine -> ReLU -> scaled dropout
    hidden1 = np.maximum(0, np.dot(W1, X) + b1)
    mask1 = (np.random.rand(*hidden1.shape) < p) / p  # scale at train time
    hidden1 = hidden1 * mask1

    # layer 2: affine -> ReLU -> scaled dropout
    hidden2 = np.maximum(0, np.dot(W2, hidden1) + b2)
    mask2 = (np.random.rand(*hidden2.shape) < p) / p
    hidden2 = hidden2 * mask2

    # output layer: plain affine
    out = np.dot(W3, hidden2) + b3

    # backward pass: compute gradients ...
    # perform parameter update (not shown)


def predict(X):
    """
    Forward pass for inverted dropout: no scaling is needed at test time
    because the train-time masks were already divided by p.
    """
    H1 = np.maximum(0, np.dot(W1, X) + b1)
    H2 = np.maximum(0, np.dot(W2, H1) + b2)  # fixed: was np.dot(W2 + H1) — single-arg dot on an elementwise sum
    out = np.dot(W3, H2) + b3
    return out
