'''
BP (backpropagation) neural network: a supervised two-layer network
trained with mini-batch gradient descent on a synthetic binary dataset.
'''

import tensorflow as tf
from numpy.random import RandomState

batch_size = 8

# Layer weights: 2 inputs -> 3 hidden units -> 1 output.
# Fixed seeds keep the random initialization reproducible across runs.
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=10))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=7))

# Placeholders for training data; shape[0] = None allows a variable batch size.
x = tf.placeholder(tf.float32, shape=[None, 2], name='input_x')
y_trail = tf.placeholder(tf.float32, shape=[None, 1], name='input_y')

# Forward pass. A sigmoid squashes the output into (0, 1) so it can be read
# as P(label == 1); the original fed the raw (unbounded, possibly negative)
# linear output straight into log(), which is mathematically invalid and was
# only masked by the clip below.
a1 = tf.matmul(x, w1)
y = tf.sigmoid(tf.matmul(a1, w2))

# Learning rate for plain gradient descent.
learning_rate = 0.001

# Full binary cross-entropy. clip_by_value guards against log(0); the
# original omitted the (1 - y_trail) * log(1 - y) term, so negative
# examples contributed nothing to the loss.
loss_fun = -tf.reduce_mean(
    y_trail * tf.log(tf.clip_by_value(y, 1e-10, 1.0))
    + (1 - y_trail) * tf.log(tf.clip_by_value(1 - y, 1e-10, 1.0)))
trail_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss_fun)

# Synthetic training set: 128 random 2-D points in [0, 1)^2; the label is 1
# when x1 + x2 < 1, giving a linearly separable binary classification task.
rdm = RandomState(1)
X = rdm.rand(128, 2)
Y = [[int(x1 + x2 < 1)] for (x1, x2) in X]
print(X)
print(Y)

# 放入神经网络训练
# Train the network: run mini-batch gradient descent over the synthetic
# data, reporting the cross-entropy on the full dataset every 2000 steps.
with tf.Session() as sess:
    # All tf.Variable objects must be initialized before the first run().
    sess.run(tf.global_variables_initializer())
    print("w1 :", sess.run(w1))
    print("w2 :", sess.run(w2))

    STEPS = 10000
    for step in range(STEPS):
        # Slide a batch_size-wide window over the 128 samples, wrapping
        # around (128 is a multiple of batch_size, so windows never overrun).
        start = (step * batch_size) % 128
        batch_x = X[start:start + batch_size]
        batch_y = Y[start:start + batch_size]
        sess.run(trail_step, feed_dict={x: batch_x, y_trail: batch_y})

        # Periodically evaluate the loss on the whole training set.
        if step % 2000 == 0:
            total_cross_entropy = sess.run(loss_fun, feed_dict={x: X, y_trail: Y})
            print("After %d training step(s), cross entropy on all data is %g" % (step, total_cross_entropy))

    # Final parameter values after training.
    print("w1 :", sess.run(w1))
    print("w2 :", sess.run(w2))
