from neural_net import *
from neural_net_impl_sol import *
import unittest
import math

class NeuralNetTest(unittest.TestCase):
  """Unit tests for the neural network implementation.

  Covers network construction, feed-forward evaluation, back propagation
  through a hidden layer, and convergence of training on linear functions.
  """

  def testHiddenLayerBackprop(self):
    """Tests back propagation in a 1-input, 1-hidden, 1-output network."""
    network = NeuralNetwork()
    input_node = Node()
    network.AddNode(input_node, NeuralNetwork.INPUT)
    hidden = Node()
    hidden.AddInput(input_node, None, network)
    network.AddNode(hidden, NeuralNetwork.HIDDEN)
    output = Node()
    output.AddInput(hidden, None, network)
    network.AddNode(output, NeuralNetwork.OUTPUT)
    inp = Input()
    inp.values.append(1.0)
    target = Target()
    target.values.append(1.0)
    # Initialize all weights to 0 so every raw value is exactly 0.
    for weight in network.weights:
      weight.value = 0

    learning_rate = 0.005
    FeedForward(network, inp)
    # With zero weights the raw values are exactly 0 and Sigmoid(0) == 0.5,
    # so exact equality is safe here.
    self.assertEqual(0.0, output.raw_value)
    self.assertEqual(0.5, output.transformed_value)
    self.assertEqual(0.0, hidden.raw_value)
    self.assertEqual(0.5, hidden.transformed_value)
    Backprop(network, inp, target, learning_rate)
    # All inputs were 0, so the delta at the output is 1.0 - 0.5 = 0.5.
    # Both weights feeding the output should be adjusted by
    # 0.5 * learning_rate * SigmoidPrime(0).
    delta1 = 0.5 * NeuralNetwork.SigmoidPrime(0)
    self.assertAlmostEqual(delta1 * learning_rate, output.fixed_weight.value)
    self.assertAlmostEqual(delta1 * learning_rate * 0.5,
                           output.weights[0].value)

    # The delta at the hidden layer is the sum of forward weights multiplied
    # by the deltas of the forward neighbors.  The forward weight was 0, so
    # the hidden delta is 0 and the hidden weights are unchanged.
    self.assertAlmostEqual(0.0, hidden.fixed_weight.value)
    self.assertAlmostEqual(0.0, hidden.weights[0].value)

    # Now run one more iteration.
    FeedForward(network, inp)
    self.assertEqual(0.0, hidden.raw_value)
    self.assertEqual(0.5, hidden.transformed_value)
    # These values come from float arithmetic, so compare approximately
    # rather than exactly.
    self.assertAlmostEqual(delta1 * learning_rate * 1.25,
                           network.outputs[0].raw_value)
    self.assertAlmostEqual(NeuralNetwork.Sigmoid(delta1 * learning_rate * 1.25),
                           output.transformed_value)
    output_weight = output.weights[0].value
    output_fixed_weight = output.fixed_weight.value
    Backprop(network, inp, target, learning_rate)
    # Now the error is 1 - network.outputs[0].transformed_value.
    error = 1 - output.transformed_value
    delta2 = error * NeuralNetwork.SigmoidPrime(output.raw_value)
    self.assertAlmostEqual(output_fixed_weight + delta2 * learning_rate,
                           output.fixed_weight.value)
    self.assertAlmostEqual(output_weight + delta2 * learning_rate * 0.5,
                           output.weights[0].value)
    hidden_delta_2 = NeuralNetwork.SigmoidPrime(0.0) * output_weight * delta2
    self.assertAlmostEqual(hidden_delta_2 * 1.0 * learning_rate,
                           hidden.weights[0].value)
    self.assertAlmostEqual(hidden_delta_2 * 1.0 * learning_rate,
                           hidden.fixed_weight.value)

  def runLinear(self, slope, intercept, tolerance, network):
    """Trains a 1-input/1-output network on y = slope * x + intercept.

    Generates examples using the formula, trains the network, and checks
    that the learned weights are within |tolerance| of the actual slope
    and intercept.
    """
    print("TestLinear, slope: %f, intercept: %f" % (slope, intercept))
    # Create some fake inputs over the range [-10, 10).
    inputs = []
    targets = []
    for i in range(-10, 10):
      inp = Input()
      inp.values.append(i)
      inputs.append(inp)
      target = Target()
      target.values.append(NeuralNetwork.Sigmoid(slope * i + intercept))
      targets.append(target)
    # Set all weights to 0 so each call starts from the same state.
    for weight in network.weights:
      weight.value = 0
    Train(network, inputs, targets, 1.0, 1000)
    est_slope = network.outputs[0].weights[0].value
    est_intercept = network.outputs[0].fixed_weight.value
    print("TestLinearResult, slope: %f, intercept: %f" %
          (est_slope, est_intercept))
    self.assertTrue(math.fabs(slope - est_slope) < tolerance)
    self.assertTrue(math.fabs(intercept - est_intercept) < tolerance)

  def testSimpleTrain(self):
    """Trains a single-input, single-output network on linear functions.

    Checks that after a fixed number of training runs the weights converge
    to the actual slope/intercept of each function.
    """
    network = NeuralNetwork()
    input_node = Node()
    network.AddNode(input_node, NeuralNetwork.INPUT)
    output = Node()
    output.AddInput(input_node, None, network)
    network.AddNode(output, NeuralNetwork.OUTPUT)
    self.runLinear(2, 1, 0.05, network)
    self.runLinear(2, 3, 0.05, network)
    self.runLinear(2, 5, 0.05, network)
    # Larger intercepts converge more slowly, hence the looser tolerance.
    self.runLinear(2, 7, 0.15, network)

  def testFeedForward(self):
    """Tests feed-forward through a simple single-layer network."""
    network = NeuralNetwork()
    input1 = Node()
    input2 = Node()
    output1 = Node()
    network.AddNode(input1, NeuralNetwork.INPUT)
    network.AddNode(input2, NeuralNetwork.INPUT)
    output1.AddInput(input1, None, network)
    output1.AddInput(input2, None, network)
    output1.weights[0].value = 0.5
    output1.weights[1].value = 1.5
    output1.fixed_weight.value = -3.0
    inp = Input()
    inp.values.append(1.0)
    inp.values.append(2.0)
    network.AddNode(output1, NeuralNetwork.OUTPUT)
    FeedForward(network, inp)
    # raw_value is the weighted sum of inputs plus the fixed (bias) weight.
    expected_total = 0.5 * 1.0 + 1.5 * 2.0 - 3.0
    self.assertAlmostEqual(expected_total, output1.raw_value)
    self.assertAlmostEqual(NeuralNetwork.Sigmoid(expected_total),
                           output1.transformed_value)

  def testBasicAdd(self):
    """Tests that AddInput/AddNode wire up nodes and weights correctly."""
    network = NeuralNetwork()
    node = Node()
    network.AddNode(node, NeuralNetwork.INPUT)
    node2 = Node()
    node2.AddInput(node, None, network)
    self.assertEqual(1, len(node2.weights))
    self.assertEqual(1, len(node2.inputs))
    self.assertEqual(1, len(node.forward_neighbors))
    self.assertEqual(1, len(node.forward_weights))
    # Only non-input nodes carry a fixed (bias) weight.
    self.assertTrue(node2.fixed_weight)
    self.assertFalse(node.fixed_weight)
    # The forward weight on the source is the same object as the input
    # weight on the destination.
    self.assertEqual(node.forward_weights[0], node2.weights[0])
    network.AddNode(node2, NeuralNetwork.OUTPUT)
    self.assertEqual(1, len(network.inputs))
    self.assertEqual(0, len(network.hidden_nodes))
    self.assertEqual(1, len(network.outputs))
    self.assertEqual(2, len(network.node_set))

# Allow running this module directly as a test script.
if __name__ == "__main__":
  unittest.main()
