# A neural network class for recognizing
# hand written numbers
class NeuralNetwork
  include Numo

  # @param layers [Array<Integer>] number of neurons per layer; the first
  #   entry is the input layer, the last entry the output layer
  # @raise [ArgumentError] if fewer than two layer sizes are given
  def initialize(*layers)
    raise ArgumentError, 'at least two layers (input and output) are required' if layers.size < 2

    @layers = layers.size - 1  # number of layers excluding the input layer
    # One bias column vector per non-input layer, drawn from N(0, 1).
    @biases = layers[1..-1].map{|neurons| DFloat.new(neurons, 1).rand_norm}
    # One weight matrix per adjacent layer pair; entry [j, k] connects
    # neuron k of the previous layer to neuron j of the next layer.
    @weights = layers.each_cons(2).map{|neurons1, neurons2| DFloat.new(neurons2, neurons1).rand_norm}
  end

  # Feeds data forward through the network and classifies it.
  # @param data [DFloat] n-dimensional column vector representing an image of a hand-written number
  # @return [Integer] index of the output neuron with the highest activation,
  #   i.e. the recognized digit
  def evaluate(data)
    output = @layers.times.reduce(data) do |activation, layer|
      sigmoid(@weights[layer].dot(activation) + @biases[layer])
    end
    output.max_index
  end

  # Trains the network using mini-batch stochastic gradient descent.
  # @param training_data [Array<Array(DFloat, DFloat)>] pairs of input and desired-output column vectors
  # @param epochs [Integer] number of full passes over the training data
  # @param mini_batch_size [Integer] number of samples per gradient-descent step
  # @param learning_rate [Numeric] gradient-descent step size
  # @param test_data [Array<Array(DFloat, DFloat)>, nil] optional data for
  #   reporting classification accuracy after each epoch
  def train(training_data, epochs, mini_batch_size, learning_rate, test_data = nil)
    epochs.times do |epoch|
      training_data.shuffle!.each_slice(mini_batch_size) do |mini_batch|
        update(mini_batch, learning_rate)
      end

      if test_data
        hits = test_data.count do |x, y|
          evaluate(x) == y.max_index
        end
        puts "Epoch #{epoch}: #{hits} / #{test_data.size}"
      else
        puts "Epoch #{epoch} complete"
      end
    end
  end

  private

  # The sigmoid function that accepts and returns column vectors
  # @param z [DFloat] a column vector
  # @return [DFloat] a column vector
  def sigmoid(z)
    1.0 / (1.0 + NMath.exp(-z))
  end

  # The derivative of the sigmoid function, sigmoid(z) * (1 - sigmoid(z))
  # @param z [DFloat] a column vector
  # @return [DFloat] a column vector
  def sigmoid_derivative(z)
    s = sigmoid(z)
    s * (1.0 - s)
  end

  # Applies one gradient-descent step: averages the gradients over the
  # mini batch and moves weights and biases against the gradient.
  # @param mini_batch [Array<Array(DFloat, DFloat)>] input/desired-output pairs
  # @param learning_rate [Numeric] gradient-descent step size
  def update(mini_batch, learning_rate)
    nabla_b = @biases.map{|b| DFloat.zeros(b.shape)}
    nabla_w = @weights.map{|w| DFloat.zeros(w.shape)}

    mini_batch.each do |input, desired_output|
      delta_nabla_w, delta_nabla_b = backpropagation(input, desired_output)
      nabla_w = nabla_w.zip(delta_nabla_w).map! {|nw, dnw| nw + dnw}
      nabla_b = nabla_b.zip(delta_nabla_b).map! {|nb, dnb| nb + dnb}
    end

    # fdiv avoids integer division (and a silently zero step) when an
    # Integer learning rate is passed in.
    step = learning_rate.fdiv(mini_batch.size)
    @weights = @weights.zip(nabla_w).map!{|w, nw| w - step * nw}
    @biases = @biases.zip(nabla_b).map!{|b, nb| b - step * nb}
  end

  # Computes the gradient of the cost function for a single training sample
  # via backpropagation.
  # @param input [DFloat] pixels (in the form of an n-dimensional column vector) that represents one hand-written number
  # @param desired_output [DFloat] desired output from the network
  # @return [Array(Array<DFloat>, Array<DFloat>)] per-layer weight gradients
  #   and per-layer bias gradients
  def backpropagation(input, desired_output)
    nabla_b = @biases.map {|b| DFloat.zeros(b.shape)}
    nabla_w = @weights.map {|w| DFloat.zeros(w.shape)}

    # feedforward, remembering each layer's weighted input and activation
    activations = [input]  # the activation value of the input layer is the input value itself
    weighted_inputs = []
    @layers.times do |layer|
      weighted_inputs << @weights[layer].dot(activations[-1]) + @biases[layer]
      activations << sigmoid(weighted_inputs[-1])
    end

    # Calculate the gradients of the last layer
    error = cost_derivative(activations[-1], desired_output) * sigmoid_derivative(weighted_inputs[-1])
    nabla_b[-1] = error
    nabla_w[-1] = error.dot(activations[-2].transpose)

    # backpropagate the error layer by layer, from last to first
    2.upto(@layers) do |layer|
      sd = sigmoid_derivative(weighted_inputs[-layer])
      error = @weights[-layer + 1].transpose.dot(error) * sd
      nabla_b[-layer] = error
      nabla_w[-layer] = error.dot(activations[-layer - 1].transpose)
    end

    [nabla_w, nabla_b]
  end

  # The derivative of the quadratic cost function `C = (1/2) * ||output - desired_output||^2`
  # @param output [DFloat] a column vector denoting the activation of the output layer
  # @param desired_output [DFloat] a column vector denoting the desired activation of the output layer
  # @return [DFloat] a column vector denoting the error
  def cost_derivative(output, desired_output)
    output - desired_output
  end
end
