# Build a layer function that simulates a neural network with matrix ops;
# layer_debug additionally returns the corresponding W and b. (16.3)

import tensorflow as tf
import numpy as np


# output_dim: number of output neurons; input_dim: number of input neurons;
# inputs: placeholder for the 2-D input array; activation: activation
# function to apply, defaults to None (linear layer).
def layer_debug(output_dim, input_dim, inputs, activation=None):
    """Build one fully connected layer and expose its variables.

    Args:
        output_dim: number of neurons produced by this layer.
        input_dim: number of neurons feeding into this layer.
        inputs: 2-D tensor/placeholder of shape (batch, input_dim).
        activation: optional activation applied to X*W + b; None means
            the layer stays linear.

    Returns:
        Tuple (outputs, W, b): the layer's output tensor together with
        its weight and bias variables so callers can inspect them after
        a session run.
    """
    # Weights drawn from a standard normal, shape (input_dim, output_dim).
    W = tf.Variable(tf.random_normal([input_dim, output_dim]))
    # One bias per output neuron; kept 2-D so it broadcasts over the batch.
    b = tf.Variable(tf.random_normal([1, output_dim]))
    XWb = tf.matmul(inputs, W) + b
    # Apply the activation only when one was supplied.
    outputs = XWb if activation is None else activation(XWb)
    return outputs, W, b


# Build a two-layer network: 4 input neurons, 3 hidden neurons, 2 outputs.

X = tf.placeholder("float", [None, 4])
# Hidden layer: 4 -> 3 with ReLU.
h, W1, b1 = layer_debug(3, 4, X, tf.nn.relu)
# Output layer: 3 -> 2, linear.
y, W2, b2 = layer_debug(2, 3, h)

with tf.Session() as sess:
    # Variables (W1/b1/W2/b2) must be initialized before they are read.
    sess.run(tf.global_variables_initializer())

    # One example with 4 features, matching the placeholder's second dim.
    X_array = np.array([[0.4, 0.2, 0.4, 0.5]])
    # Fetch the input, both layer outputs, and every variable in one run.
    # NOTE: this rebinds W1/b1/W2/b2 to numpy arrays from here on.
    (layer_x, layer_h, layer_y, W1, b1, W2, b2) = sess.run((X, h, y, W1, b1, W2, b2), feed_dict={X: X_array})

    print('input Layer X:')
    print(layer_x)
    print('W1:')
    print(W1)
    print('b1:')
    print(b1)
    print('hidden layer h:')
    print(layer_h)
    print('W2:')
    print(W2)
    print('b2:')
    print(b2)
    print('output:')
    print(layer_y)

    # (Removed a discarded tf.summary.merge_all() call: no summaries are
    # defined in this graph, so it returned None and had no effect.)
    # Write the graph for TensorBoard; inspect it with:
    #   tensorboard --logdir /log/area
    train_writer = tf.summary.FileWriter('/log/area', sess.graph)
    # Close the writer so the event file is flushed to disk
    # (previously it was leaked and the graph could go unwritten).
    train_writer.close()
