# -*- coding: utf-8 -*-

"""
	Implementing BpNN with tensorflow
"""

import tensorflow as tf 
import numpy as np 

def make_layer(inputs, in_size, out_size, n_layer, activate = None):
	"""
		Create a single fully-connected layer.

		Args:
			inputs: the input tensor, shape (batch, in_size)
			in_size: the size of input
			out_size: the size of output
			n_layer: index used to build this layer's name scope ('layer<n>')
			activate: the activation function, default is None (linear output)

		Returns:
			outputs: the layer output after the optional activation
	"""
	layer_name = 'layer%s' % n_layer
	with tf.name_scope(layer_name):
		with tf.name_scope('weights'):
			weights = tf.Variable(tf.random_normal([in_size, out_size]), name = 'W')
			tf.summary.histogram(layer_name + '/weights', weights)
		with tf.name_scope('biases'):
			# small positive init keeps sigmoid/relu units away from a dead start
			biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name = 'b')
			tf.summary.histogram(layer_name + '/biases', biases)
		with tf.name_scope('Wx_plus_b'):
			result = tf.matmul(inputs, weights) + biases
		outputs = result if activate is None else activate(result)
		# bug fix: log the post-activation outputs, not the pre-activation result
		tf.summary.histogram(layer_name + '/outputs', outputs)
		return outputs

class BpNeuralNetwork():
	"""
		A BpNeuralNetwork (back-propagation neural network) class.

		@__init__
		@__del__
		@train
		@predict
		@test
	"""
	def __init__(self):
		"""
			Initialize the tf session and the (empty) network structure.
		"""
		self.session = tf.Session()
		self.input_layer = None  # placeholder for input samples
		self.label_layer = None  # placeholder for true labels
		self.loss = None
		self.optimizer = None
		self.layers = []         # layer output tensors, input side first

	def __del__(self):
		# Guard: __init__ may have raised before self.session existed,
		# in which case the original code crashed with AttributeError here.
		session = getattr(self, 'session', None)
		if session is not None:
			session.close()

	def train(self, cases, in_size, hidden_size, labels, out_size, limit = 100, learn_rate = 0.05):
		"""
			Build the network (input -> sigmoid hidden -> linear output) and
			train it with plain gradient descent on the mean squared error.

			Args:
				cases: the train samples, array like, shape (n_samples, in_size)
				in_size: the size of input
				hidden_size: the number of units in the hidden layer
				labels: the true labels of the train samples, shape (n_samples, out_size)
				out_size: the size of output
				limit: the number of training epochs, default is 100
				learn_rate: the learning rate for gradient descent, default is 0.05

			Returns:
				the network output for `cases` after the last epoch
		"""
		with tf.name_scope('inputs'):
			self.input_layer = tf.placeholder(tf.float32, [None, in_size])
			self.label_layer = tf.placeholder(tf.float32, [None, out_size])
		self.layers.append(make_layer(self.input_layer, in_size, hidden_size, 1, activate = tf.nn.sigmoid))
		self.layers.append(make_layer(self.layers[0], hidden_size, out_size, 2, activate = None))
		with tf.name_scope('loss'):
			# mean (over the batch) of the per-sample squared error
			self.loss = tf.reduce_mean(tf.reduce_sum(tf.square((self.label_layer - self.layers[1])), reduction_indices = [1]))
			tf.summary.scalar('loss', self.loss)
		with tf.name_scope('train'):
			self.optimizer = tf.train.GradientDescentOptimizer(learn_rate).minimize(self.loss)
		merged = tf.summary.merge_all()
		writer = tf.summary.FileWriter("logs", self.session.graph)

		self.session.run(tf.global_variables_initializer())
		feed = {self.input_layer: cases, self.label_layer: labels}
		try:
			for i in range(limit):
				if (i + 1) % 50 == 0:
					# record TensorBoard summaries every 50 epochs
					writer.add_summary(self.session.run(merged, feed_dict = feed), (i + 1))
				self.session.run(self.optimizer, feed_dict = feed)
		finally:
			# bug fix: the FileWriter was never closed, so pending events
			# could be lost; flush and release it even on error
			writer.close()
		# bug fix: return after the loop instead of inside it, so limit == 0
		# no longer silently returns None
		return self.session.run(self.layers[-1], feed_dict = {self.input_layer: cases})

	def predict(self, case):
		"""
			Prediction

			Args:
				case: the sample(s) to predict, shape (n_samples, in_size)

			Returns:
				the network output for the sample(s); train() must be called first
		"""
		return self.session.run(self.layers[-1], feed_dict = {self.input_layer: case})

	def test(self):
		"""
			Smoke test: fit a tiny hand-made dataset and predict one sample.
		"""
		x_data = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
		y_data = np.array([[0, 3, 20, 1]]).transpose()
		print(y_data)
		test_data = np.array([[1, 0, 1]])
		self.train(x_data, 3, 10, y_data, 1, limit = 1000)
		print(self.predict(test_data))

if __name__ == '__main__':
	# Build a network and run its built-in smoke test.
	network = BpNeuralNetwork()
	network.test()
	


		



