import numpy as np

class Neuron(object):
    """
    A simple artificial neuron, processes an input vector and returns a
    corresponding activation.
    Args:
        num_inputs (int): The input vector size / number of input values.
        activation_function: (callable): The activation function defining
        this neuron.
    Attributes:
        W (ndarray): The weight values for each input.
        b (float): The bias value, added to the weighted sum.
        activation_function (callable): The activation function compution the
        neuron's output.
    """

    def __init__(self, num_inputs, activation_function):
        super().__init__()

        # Randomly initializing the weight vector and the bias value (e.g. using a simplisitic
        # uniform distribution between -1 and 1):
        self.W = np.random.uniform(size=num_inputs, low=-1., high=1.)
        self.b = np.random.uniform(size=1, low=-1., high=1.)

        self.activation_function = activation_function

    def forword(self, x):
        """
        Forword the input signal through the neuron, returning its activation value
        Args:
            x (ndarray): The input vector, of shape `(1, num_inputs)`
        Returns:
            activation (ndarray): The activation value, of shape `(1, layer_size)`
        """
        z = np.dot(x, self.W) + self.b
        return self.activation_function(z)


if __name__ == '__main__':
    # Small demo: build a 3-input perceptron with a binary step activation
    # and display its randomly initialized parameters.
    input_size = 3

    # Binary step activation. Defined with `def` rather than assigning a
    # lambda to a name (PEP 8 E731), which also gives it a useful __name__.
    def step_function(y):
        return 0 if y <= 0 else 1

    perceptron = Neuron(num_inputs=input_size, activation_function=step_function)
    print("Perceptron's random weights = {}, and random bias = {}".format(perceptron.W, perceptron.b))
