"""
测试使用TensorBoard

加入events 和 histogram
"""

import tensorflow.compat.v1 as tf
import numpy as np
import matplotlib.pyplot as plt

# TensorFlow 2.x enables eager execution by default; this graph-mode script
# (placeholders, sessions) requires it to be disabled first.
# `tf` is already the compat.v1 module, so call disable_eager_execution directly
# instead of the redundant tf.compat.v1.disable_eager_execution().
tf.disable_eager_execution()


def add_layer(inputs, in_size, out_size, n_layer, activation_func=None):
    """Add a fully-connected layer to the graph and return its output tensor.

    Args:
        inputs: input tensor, assumed shape [batch, in_size].
        in_size: number of input features.
        out_size: number of neurons (output features) in this layer.
        n_layer: integer index used to label this layer's scope and summaries.
        activation_func: optional activation (e.g. tf.nn.relu); None = linear.

    Returns:
        Output tensor of shape [batch, out_size], with histogram summaries
        attached for weights, biases and outputs.
    """
    layer_name = 'layer%s' % n_layer
    # Use the per-layer name as the scope so each layer shows up as its own
    # node in the TensorBoard graph instead of all collapsing under "layer"
    # (which also keeps scope names consistent with the histogram tags below).
    with tf.name_scope(layer_name):
        with tf.name_scope("weight"):
            # Weight matrix [in_size, out_size], standard-normal initialized.
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
            tf.summary.histogram(layer_name + '/weights', Weights)
        with tf.name_scope("biases"):
            # Biases start at 0.1; a small positive bias helps ReLU units fire.
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
            tf.summary.histogram(layer_name + '/biases', biases)

        with tf.name_scope("Wx_plus_b"):
            # Linear transform: inputs @ Weights + biases.
            Wx_plus_b = tf.matmul(inputs, Weights) + biases

        # Linear output when no activation is given, otherwise apply it.
        if activation_func is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_func(Wx_plus_b)
        tf.summary.histogram(layer_name + '/outputs', outputs)
        return outputs


# 300 evenly spaced points in [-1, 1], reshaped into a column vector (300, 1).
x_data = np.linspace(-1, 1, 300).reshape(-1, 1)
# Gaussian noise with mean 0 and standard deviation 0.05, same shape as x_data.
noise = np.random.normal(loc=0.0, scale=0.05, size=x_data.shape)
# Target values: a noisy quadratic, y = x^2 - 0.5 + noise.
y_data = np.square(x_data) - 0.5 + noise

with tf.name_scope("inputs"):  # 定义图层，名字是inputs
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')  # None表示第一个维度有多少个元素都可以
    ys = tf.placeholder(tf.float32, [None, 1], name='y_name')  # name 定义参数名字
# 输入xs , 1 ？ ，设置10个神经元，激励函数为relu函数
l1 = add_layer(xs, 1, 10, 1, activation_func=tf.nn.relu)  # 隐藏层
# 预测值
prediction = add_layer(l1, 10, 1, 2, activation_func=None)  # 输出层

# Loss: mean over the batch of the per-sample sum of squared errors
# (square -> sum over axis 1 -> mean over the batch).
with tf.name_scope("loss"):
    # `axis` replaces the deprecated `reduction_indices` argument.
    loss = tf.reduce_mean(
        tf.reduce_sum(tf.square(ys - prediction), axis=[1], name='sum'),
        name='mean')
    # Scalar summary so the loss curve shows up on TensorBoard's events page.
    tf.summary.scalar('loss', loss)

with tf.name_scope("train"):
    # Minimize the loss with plain gradient descent, learning rate 0.1.
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# --- Standard boilerplate: init op, session, merged summaries, graph writer ---
# tf.initialize_all_variables() has been deprecated since TF 0.12;
# global_variables_initializer() is the supported replacement.
init = tf.global_variables_initializer()
sess = tf.Session()

# Merge all summary ops (histograms + scalar loss) into a single fetchable op.
merged = tf.summary.merge_all()

# Export the graph for TensorBoard (tf.train.SummaryWriter -> tf.summary.FileWriter).
# Raw string avoids the invalid '\p' escape in the Windows path; the resulting
# path value is identical to before.
writer = tf.summary.FileWriter(r'E:\personal_workspace\tfLearning\tensorboardGraph',
                               sess.graph)
# To view, run: tensorboard --logdir=E:\personal_workspace\tfLearning\tensorboardGraph

sess.run(init)

# Train for 1000 steps; every 50 steps evaluate the merged summaries and
# write them to the event file (one data point per 50 steps on the loss chart).
feed = {xs: x_data, ys: y_data}
for step in range(1000):
    sess.run(train_step, feed_dict=feed)
    if step % 50 == 0:
        summary = sess.run(merged, feed_dict=feed)
        writer.add_summary(summary, step)