# encoding: utf-8
"""
@Time   : 2018/12/4 16:33
@Author : XJH
"""

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Log directory for TensorBoard summaries.
# Raw string is required: in the original non-raw literal the "\4" sequence
# was parsed as an octal escape (chr(4)), silently corrupting the path.
log_dir = r'H:\TensorFlow\深度学习之TensorFlow入门、原理与进阶实战\4. TensorFlow编程基础\log/mnist_with_summaries'

"""
4.1.2 实例5：编写Hello World 程序演示Session的使用
"""
# hello = tf.constant('Hello, TensorFlow!')
# sess = tf.Session()
# print(sess.run(hello))
# sess.close()

"""
4.1.3 实例6：演示with session的使用
"""
# a = tf.constant(3)
# b = tf.constant(4)
# with tf.Session() as sess:
#     print("相加：%i" % sess.run(a+b))
#     print("相乘：%i" % sess.run(a*b))

"""
4.1.4 实例7：演示注入机制
"""
# a = tf.placeholder(tf.int16)
# b = tf.placeholder(tf.int16)
# add = tf.add(a, b)
# mul = tf.multiply(a, b)
# with tf.Session() as sess:
#     print("相加：%i" % sess.run(add, feed_dict={a: 3, b: 4}))
#     print("相乘：%i" % sess.run(mul, feed_dict={a: 3, b: 4}))

"""
4.1.5 建立session的其他方法
"""
# sess = tf.InteractiveSession()

"""
4.1.6 实例8：使用注入机制获取节点
"""
# a = tf.placeholder(tf.int16)
# b = tf.placeholder(tf.int16)
# add = tf.add(a, b)
# mul = tf.multiply(a, b)
# with tf.Session() as sess:
#     print("相加：%i" % sess.run(add, feed_dict={a: 3, b: 4}))
#     print("相乘：%i" % sess.run(mul, feed_dict={a: 3, b: 4}))
#     print("一起输出：", sess.run([mul, add], feed_dict={a: 3, b: 4}))

"""
4.1.7 指定GPU运算
CPU: "/cpu:0"
第一个GPU："/gpu:0"
第二个GPU："/gpu:1"
...
"""
# with tf.Session() as sess:
#     with tf.device("/cpu:0"):
#         a = tf.placeholder(tf.int16)
#         b = tf.placeholder(tf.int16)
#         add = tf.add(a, b)
#         print("相加：%i" % sess.run(add, feed_dict={a: 3, b: 4}))

"""
4.1.8 设置GPU使用资源
"""
# gpu_options = tf.GPUOptions(allow_growth=True)
# config = tf.ConfigProto(gpu_options=gpu_options)

"""
4.1.9 保存和载入模型的方法介绍
"""
# saver = tf.train.Saver()
# a = tf.Variable(tf.random_normal([20, 10], stddev=0.35))
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     saver.save(sess, "save_path/file_name")

"""
4.1.10 实例9：保存/载入线性回归模型
"""
# train_X = np.linspace(-1, 1, 100)
# train_Y = 2*train_X+np.random.randn(*train_X.shape) * 0.3
# X = tf.placeholder("float")
# Y = tf.placeholder("float")
# W = tf.Variable(tf.random_normal([1]), name="weight")
# b = tf.Variable(tf.zeros([1]), name="bias")
# z = tf.multiply(X, W) + b
#
# cost = tf.reduce_mean(tf.square(Y-z))
# learning_rate = 0.01
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
#
# init = tf.global_variables_initializer()
# training_epochs = 20
# display_step = 2
#
# saver = tf.train.Saver()
# with tf.Session() as sess:
#     sess.run(init)
#     plotdata = {"batchsize": [],"loss": []}
#     for epoch in range(training_epochs):
#         for (x, y) in zip(train_X, train_Y):
#             sess.run(optimizer, feed_dict={X: x, Y: y})
#
#         if epoch % display_step == 0:
#             loss = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
#             print("Epoch:", epoch+1, "cost", loss, "W=", sess.run(W), "b=", sess.run(b))
#             if not (loss == "NA"):
#                 plotdata["batchsize"].append(epoch)
#                 plotdata["loss"].append(loss)
#     saver.save(sess, "save_path/file_name.ckpt")
#     print("cost=", sess.run(cost, feed_dict={X: train_X, Y: train_Y}), "W=", sess.run(W), "b=", sess.run(b))
#
# with tf.Session() as sess2:
#     sess2.run(tf.global_variables_initializer())
#     saver.restore(sess2, "save_path/file_name.ckpt")
#     print("cost=", sess2.run(cost, feed_dict={X: train_X, Y: train_Y}), "W=", sess2.run(W), "b=", sess2.run(b))
#     print("x=0,2m z=", sess2.run(z, feed_dict={X: 0.2}))

"""
4.1.11 实例10：分析模型内容，演示模型的其他保存方式
"""
# 分析保存内容
# from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
# # savedir = "save_path/"
# # print_tensors_in_checkpoint_file(savedir+"file_name.ckpt", None, True, True)
#
# # 其他保存方法
# W = tf.Variable(1.0, name="weight")
# b = tf.Variable(2.0, name="bias")
# # 放到一个字典里，指定名字存放变量
# saver = tf.train.Saver({'weight': b, 'bias': W})
# savedir = "save_path/"
# with tf.Session() as sess:
#     tf.global_variables_initializer().run()
#     saver.save(sess, savedir+"linermodel.ckpt")
# print_tensors_in_checkpoint_file(savedir+"linermodel.ckpt", None, True, True)   # 可以看到输出bias为1

"""
4.1.12 检查点(checkpoint)
在训练之中保存结果
"""

"""
4.1.13 实例11：为模型添加保存检查点
"""
# # 定义生成loss可视化的函数
# plotdata = {"batchsize": [], "loss": []}
# def moving_average(a, w=10):
#     if len(a) < w:
#         return a[:]
#     return [val if idx < w else sum(a[idx-w:idx])/w for idx, val in enumerate(a)]
#
# # 生成模拟数据
# train_X = np.linspace(-1, 1, 100)
# train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.3
#
# # 图像显示
# plt.plot(train_X, train_Y, 'ro', label='Original data')
# plt.legend()
# plt.show()
# # plt.close()
#
# # 创建模型
# # 占位符
# X = tf.placeholder("float")
# Y = tf.placeholder("float")
# # 模型参数
# W = tf.Variable(tf.random_normal([1]), name="weight")
# b = tf.Variable(tf.zeros([1]), name="bias")
# # 前向结构
# z = tf.multiply(X, W) + b
#
# # 反向优化
# cost = tf.reduce_mean(tf.square(Y-z))
# learning_rate = 0.01
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
#
# # 初始化所有变量
# init = tf.global_variables_initializer()
# # 定义学习参数
# training_epochs = 20
# display_step = 2
# saver = tf.train.Saver(max_to_keep=2)
# savedir = "log/"
# # 启动图
# with tf.Session() as sess:
#     sess.run(init)
#     # 向模型中输入数据
#     for epoch in range(training_epochs):
#         for (x, y) in zip(train_X, train_Y):
#             sess.run(optimizer, feed_dict={X: x, Y: y})
#
#         # 显示训练中的详细信息
#         if epoch % display_step == 0:
#             loss = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
#             print("Epoch:", epoch+1, "cost", loss, "W=", sess.run(W), "b=", sess.run(b))
#             if not (loss == "NA"):
#                 plotdata["batchsize"].append(epoch)
#                 plotdata["loss"].append(loss)
#             saver.save(sess, savedir+"linemodel.cpkt".format(epoch), global_step=epoch)
#     print("Finished!")
#     print("cost=", sess.run(cost, feed_dict={X: train_X, Y: train_Y}), "W=", sess.run(W), "b=", sess.run(b))
#
#     # 显示模型
#     plt.plot(train_X, train_Y, 'ro', label="Original data")
#     plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label="Fitted Wline")
#     plt.legend()
#     plt.show()
#
#     plotdata["avgloss"] = moving_average(plotdata["loss"])
#     plt.figure(1)
#     plt.subplot(211)
#     plt.plot(plotdata["batchsize"], plotdata["avgloss"], 'b--')
#     plt.xlabel('Minibatch number')
#     plt.ylabel('Loss')
#     plt.title('Minibatch number')
#     plt.show()

"""
4.1.14 实例12：更简便的保存检查点
"""
# tf.reset_default_graph()
# global_step = tf.train.get_or_create_global_step()
# step = tf.assign_add(global_step, 1)
# with tf.train.MonitoredTrainingSession(checkpoint_dir='log/checkpoints', save_checkpoint_secs=2) as sess:
#     print("global_step", sess.run([global_step]))
#     while not sess.should_stop():
#         i = sess.run(step)
#         print("step:", i)

"""
4.1.15 模型操作常用函数总结
"""
# tf.train.Saver()                                      # 创建存储器
# tf.train.Saver.save()                                 # 保存
# tf.train.Saver.restore()                              # 恢复
# tf.train.Saver.last_checkpoints                       # 列出最近未删除的checkpoint文件
# tf.train.Saver.set_last_checkpoints(last_checkpoints) # 设置checkpoint文件名列表
# tf.train.Saver.set_last_checkpoints_with_time()       # 设置checkpoint文件名列表和时间戳

"""
4.1.16 TensorBoard可视化介绍
"""
# tf.summary.scalar()     # 标量数据汇总，输出protobuf
# tf.summary.histogram()  # 记录变量var的直方图，输出带直方图的汇总的protobuf
# tf.summary.image()      # 图像数据汇总，输出protobuf
# tf.summary.merge()      # 合并所有的汇总日记
# tf.summary.FileWriter   # 创建一个summary Writer

"""
4.1.17 线性回归的TensorBoard可视化
"""
# Containers for the loss-visualisation curve.
plotdata = {"batchsize": [], "loss": []}


def moving_average(a, w=10):
    """Return `a` smoothed with a trailing window of width `w`.

    Entries before index `w` pass through unchanged; each later entry is
    replaced by the mean of the `w` values immediately preceding it.
    A sequence shorter than `w` is returned as a shallow copy.
    """
    if len(a) < w:
        return list(a)
    smoothed = []
    for idx, val in enumerate(a):
        if idx < w:
            smoothed.append(val)
        else:
            window = a[idx - w:idx]
            smoothed.append(sum(window) / w)
    return smoothed

# Generate synthetic training data: y = 2x plus Gaussian noise (std 0.3).
train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.3

# Scatter-plot the raw data. NOTE: plt.show() blocks until the window closes.
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.legend()
plt.show()
# plt.close()

# Build the model graph.
# Placeholders for a single (x, y) sample or a batch (shape left unspecified).
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Model parameters: scalar weight (random init) and bias (zero init).
W = tf.Variable(tf.random_normal([1]), name="weight")
b = tf.Variable(tf.zeros([1]), name="bias")
# Forward pass: predicted z = X * W + b.
z = tf.multiply(X, W) + b
tf.summary.histogram('z', z)  # record predictions as a histogram summary

# Backward pass: mean-squared-error loss minimised by plain gradient descent.
cost = tf.reduce_mean(tf.square(Y-z))
tf.summary.scalar("loss_function", cost)  # record the loss as a scalar summary
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Op that initialises all graph variables.
init = tf.global_variables_initializer()
# Training hyper-parameters.
training_epochs = 20
display_step = 2
# Checkpoint saver/dir are defined here but not used by the session block
# below (this section demonstrates TensorBoard only).
saver = tf.train.Saver(max_to_keep=2)
savedir = "log/"
# 启动图
with tf.Session() as sess:
    sess.run(init)
    merged_summary_op = tf.summary.merge_all()          # 合并所有summary
    summary_writer = tf.summary.FileWriter('test', sess.graph)
    # 向模型中输入数据
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})

            # 生成summary
            summary_str = sess.run(merged_summary_op, feed_dict={X:x, Y:y})
            summary_writer.add_summary(summary_str, epoch)
    summary_writer.close()