# -*- coding: utf-8 -*-

"""
	Implementing Bp NN with python 2.7
"""
import random
import numpy as np

def rand(a, b):
	"""
		Draw a uniform random float between a and b.
	"""
	# scale the unit sample from random.random() into the [a, b) range
	span = b - a
	return span * random.random() + a

def make_matrix(m, n, fill = 0.0):
	"""
		Generate a Matrix with specific shape.

		Args:
			m: the number of rows
			n: the number of columns
			fill: the initial value placed in every cell (default 0.0)

		Returns:
			mat: the generated mat (m * n), a list of m independent row lists
	"""
	# each iteration builds a fresh row list, so mutating one row
	# never leaks into another (unlike [[fill] * n] * m)
	return [[fill] * n for _ in range(m)]

def sigmoid(x):
	"""
		Logistic activation: squashes any real x into the open interval (0, 1).
	"""
	return 1.0 / (1.0 + np.exp(-x))

def sigmoid_derivative(x):
	"""
		Derivative of the sigmoid, expressed in terms of the sigmoid
		OUTPUT value x (i.e. x is already sigmoid(t) for some t).
	"""
	complement = 1 - x
	return x * complement

class BpNeuralNetwork:
	"""
		A three-layer (input / hidden / output) feed-forward network
		trained with error back-propagation plus a momentum term.

		Uses the module-level helpers rand/make_matrix/sigmoid/
		sigmoid_derivative; call setup() before forward() or train().
	"""

	def __init__(self):
		# layer sizes (input_n includes one extra bias neuron after setup)
		self.input_n = 0
		self.hidden_n = 0
		self.output_n = 0
		# activation values per layer
		self.input_neuron = []
		self.hidden_neuron = []
		self.output_neuron = []
		# weight matrices: input->hidden and hidden->output
		self.input_weights = []
		self.output_weights = []
		# previous-step weight deltas, kept for the momentum term
		self.input_correction = []
		self.output_correction = []

	def setup(self, ni, nh, no):
		"""
			Initialize the network topology and randomize the weights.

			Args:
				ni: number of input features (a bias neuron is added internally)
				nh: number of hidden neurons
				no: number of output neurons
		"""
		self.input_n = ni + 1  # +1 for the always-on bias neuron
		self.hidden_n = nh
		self.output_n = no

		# init neuron activations (bias neuron stays at 1.0)
		self.input_neuron = [1.0] * self.input_n
		self.hidden_neuron = [1.0] * self.hidden_n
		self.output_neuron = [1.0] * self.output_n

		# init weights
		self.input_weights = make_matrix(self.input_n, self.hidden_n)
		self.output_weights = make_matrix(self.hidden_n, self.output_n)

		# random activate: small weights into the hidden layer,
		# larger range into the output layer
		for i in range(self.input_n):
			for h in range(self.hidden_n):
				self.input_weights[i][h] = rand(-0.2, 0.2)

		for h in range(self.hidden_n):
			for o in range(self.output_n):
				self.output_weights[h][o] = rand(-2.0, 2.0)

		# init correction (momentum) matrices to zero
		self.input_correction = make_matrix(self.input_n, self.hidden_n)
		self.output_correction = make_matrix(self.hidden_n, self.output_n)

	def forward(self, inputs):
		"""
			Forward propagation: compute the network output for one sample.

			Args:
				inputs: sequence of at least (input_n - 1) feature values

			Returns:
				A copy of the output-layer activations.
		"""
		# activate input layer (last neuron is the bias, left at its value)
		for i in range(self.input_n - 1):
			self.input_neuron[i] = inputs[i]
		# activate hidden layer
		for j in range(self.hidden_n):
			total = 0.0
			for i in range(self.input_n):
				total += self.input_neuron[i] * self.input_weights[i][j]
			self.hidden_neuron[j] = sigmoid(total)
		# activate output layer
		for k in range(self.output_n):
			total = 0.0
			for j in range(self.hidden_n):
				total += self.hidden_neuron[j] * self.output_weights[j][k]
			self.output_neuron[k] = sigmoid(total)
		return self.output_neuron[:]

	def back_propagation(self, case, label, learn, correct):
		"""
			Run one forward pass and one gradient-descent weight update.

			Args:
				case: input sample
				label: expected output values for the sample
				learn: learning rate
				correct: momentum factor applied to the previous deltas

			Returns:
				The squared error of this sample after the forward pass
				(computed before the weight update takes effect on it).
		"""
		# feed forward
		self.forward(case)
		# get output layer error (delta = f'(out) * (target - out))
		output_deltas = [0.0] * self.output_n
		for o in range(self.output_n):
			error = label[o] - self.output_neuron[o]
			output_deltas[o] = sigmoid_derivative(self.output_neuron[o]) * error
		# get hidden layer error, back-propagated through output weights
		hidden_deltas = [0.0] * self.hidden_n
		for h in range(self.hidden_n):
			error = 0.0
			for o in range(self.output_n):
				error += output_deltas[o] * self.output_weights[h][o]
			hidden_deltas[h] = sigmoid_derivative(self.hidden_neuron[h]) * error
		# update output weights (gradient step + momentum on previous delta)
		for h in range(self.hidden_n):
			for o in range(self.output_n):
				change = output_deltas[o] * self.hidden_neuron[h]
				self.output_weights[h][o] += learn * change + correct * self.output_correction[h][o]
				self.output_correction[h][o] = change
		# update input weights
		for i in range(self.input_n):
			for h in range(self.hidden_n):
				change = hidden_deltas[h] * self.input_neuron[i]
				self.input_weights[i][h] += change * learn + correct * self.input_correction[i][h]
				self.input_correction[i][h] = change
		# get global error (0.5 * sum of squared residuals)
		error = 0.0
		for o in range(len(label)):
			error += 0.5 * (label[o] - self.output_neuron[o]) ** 2
		return error

	def train(self, cases, labels, limit = 10000, learn = 0.05, correct = 0.1):
		"""
			Train on the whole dataset for `limit` epochs.

			Args:
				cases: list of input samples
				labels: list of expected outputs, parallel to cases
				limit: number of training epochs
				learn: learning rate
				correct: momentum factor

			Returns:
				The accumulated squared error of the final epoch.
		"""
		error = 0.0
		for j in range(limit):
			if (j + 1) % 100 == 0:
				print(str(j + 1) + '/' + str(limit))
			error = 0.0
			for case, label in zip(cases, labels):
				error += self.back_propagation(case, label, learn, correct)
		# fix: previously this per-epoch error was computed and discarded;
		# return the last epoch's total so callers can monitor convergence
		return error

	def test(self):
		"""
			Smoke test: learn the XOR function and print the predictions.
		"""
		cases = [
					[0, 0],
					[0, 1],
					[1, 0],
					[1, 1]
		]
		labels = [[0], [1], [1], [0]]
		self.setup(2, 5, 1)
		self.train(cases, labels, 10000, 0.1, 0.1)
		for case in cases:
			print(self.forward(case))

# script entry point: run the XOR smoke test
if __name__ == '__main__':
	BpNeuralNetwork().test()






