#!/usr/bin/env python3

import sys
import os
import tarfile
import requests
import re
from PIL import Image
import tensorflow as tf
import tensorflow.keras.datasets.mnist as input_data
#  from tensorflow.contrib.tensorboard.plugins import projector
import numpy as np
import matplotlib.pyplot as plt

import sdb

mnist_data_path = './mnist_data/'
mnist_sprite_image = mnist_data_path + 'images/mnist_10k_sprite.png'

def graph_create(p):
    """Build a tiny graph: two constant ops and a matmul, run it twice."""
    # Two constant ops: a 1x2 row vector and a 2x1 column vector.
    row = tf.constant([[3, 3]])
    col = tf.constant([[2], [3]])
    # Matrix-multiplication op combining the two constants.
    product = tf.matmul(row, col)

    # `product` is only a Tensor handle, not the computed value.
    print('product:', product)

    # Explicit session: run the op, then close by hand.
    sess = tf.Session()
    r1 = sess.run(product)
    print('get r1:', r1)
    sess.close()

    # Same run using the context-manager form, which closes for us.
    with tf.Session() as sess:
        r2 = sess.run(product)
        print('get r2:', r2)

def variables(p):
    """Demonstrate tf.Variable: arithmetic ops and an assign-based counter."""
    x = tf.Variable([1, 2])
    c = tf.constant([3, 3])
    # A subtraction op and an addition op built on the variable.
    diff = tf.subtract(x, c)
    total = tf.add(x, diff)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        print(sess.run(diff))
        print(sess.run(total))

    # A counter variable initialised to 0.
    state = tf.Variable(0, name = 'counter')
    # Op producing state + 1, and an assign op writing it back.
    new_value = tf.add(state, 1)
    update = tf.assign(state, new_value)
    # Any graph containing Variables must run the initializer first.
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        print(sess.run(state))
        for _ in range(5):
            sess.run(update)
            print(sess.run(state))

def fetch_feed(p):
    """Show fetch (evaluating several ops at once) and feed (placeholders)."""
    # Fetch: a single run() call can evaluate multiple tensors.
    c1 = tf.constant(3.0)
    c2 = tf.constant(2.0)
    c3 = tf.constant(5.0)
    summed = tf.add(c2, c3)
    scaled = tf.multiply(c1, summed)
    with tf.Session() as sess:
        fetched = sess.run([scaled, summed])
        print(fetched)

    # Feed: placeholders receive their values at run() time.
    in_a = tf.placeholder(tf.float32)
    in_b = tf.placeholder(tf.float32)
    out = tf.multiply(in_a, in_b)
    with tf.Session() as sess:
        # Feed data is supplied as a dict keyed by placeholder.
        print(sess.run(out, feed_dict = { in_a: [8.], in_b: [2.] }))

def linear_regression(p):
    """Fit y = k*x + b to samples drawn from y = 0.1*x + 0.2 via SGD."""
    # 100 random sample points and their noise-free linear targets.
    x_data = np.random.rand(100)
    y_data = x_data * 0.1 + 0.2
    # Trainable parameters, deliberately started far from the truth
    # (true k = 0.1, true b = 0.2).
    b = tf.Variable(3.)
    k = tf.Variable(2.)
    y = k * x_data + b
    # Quadratic cost: mean squared error between targets and predictions.
    loss = tf.reduce_mean(tf.square(y_data - y))
    # Gradient descent (learning rate 0.2) minimising the cost; the
    # smaller the loss, the closer (k, b) is to the true model.
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(201):
            sess.run(train_step)
            if step % 20 == 0:
                print(step, sess.run([k, b]))

def nonlinear_regression(p):
    ### 使用神经网络优化非线性回归模型
    ## 创建样本
    # 使用numpy生成200个随机点, 取200个在-0.5到0.5的随机数据
    x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis]
    #  __________________________________^  ^~~~~~~~~~
    # |> 生成的随机点数据存在此处           |> 增加一个维度
    # 以上得到一个200行1列的矩阵.
    # 生成干扰项(随机值), 形状和x_data一样.
    noise = np.random.normal(0, 0.02, x_data.shape)
    # y_data = x_data ^ 2 + noise
    y_data = np.square(x_data) + noise
    # 定义placeholder, 矩阵大小同样本
    x = tf.placeholder(tf.float32, [None, 1])
    y = tf.placeholder(tf.float32, [None, 1])

    ## 构建神经网络
    #       输入层只有一个点(x_data), 即一个神经元.
    #       中间层可以设置, 以下设置为10个神经元.
    #       输出层也是一个神经元.

    ## 构建神经网络的中间层
    # 定义输入层到中间层的权值, 形状是[1, 10].
    #   权值连接输入层(1)和中间层(10). 赋值为随机值.
    Weights_L1 = tf.Variable(tf.random_normal([1, 10]))
    # 定义输入层到中间层的偏置, 同权值. 初始化为0.
    biases_L1 = tf.Variable(tf.zeros([1, 10]))
    # 计算中间层输出.
    # x为输入的样本矩阵, 神经元信号总和为(x * Weights_L1 + biases_L1).
    Wx_plus_b_L1 = tf.matmul(x, Weights_L1) + biases_L1
    # 信号总和需要经过一个激活函数, 使用双曲正切函数作为激活函数
    L1 = tf.nn.tanh(Wx_plus_b_L1)

    ## 构建神经网络的输出层
    # 定义中间层到输出层的权值, 形状为[10, 1]
    Weights_L2 = tf.Variable(tf.random_normal([10, 1]))
    # 定义中间层到输出层的偏置, 形状为[1, 1]
    biases_L2 = tf.Variable(tf.zeros([1, 1]))
    # 输出层信号总和
    Wx_plus_b_L2 = tf.matmul(L1, Weights_L2) + biases_L2
    # 信号总和需要经过一个激活函数, 使用双曲正切函数作为激活函数
    prediction = tf.nn.tanh(Wx_plus_b_L2)

    ## 定义代价函数和训练方法
    # 二次代价函数
    loss = tf.reduce_mean(tf.square(y - prediction))
    # 使用随机梯度下降优化器
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

    with tf.Session() as sess:
        # 初始化变量
        sess.run(tf.global_variables_initializer())
        # 训练神经网络
        for _ in range(2000):
            sess.run(train_step, feed_dict = { x: x_data, y: y_data })

        # 获得预测值
        prediction_value = sess.run(prediction, feed_dict = { x: x_data })
        ## 画图查看预测结果
        plt.figure()
        # 打印原始随机点, y轴上移0.3(随机值幅度为0.2), 避免覆盖显示)
        plt.scatter(x_data, np.square(x_data) + 0.1,
                s = 1, c = 'k', marker = '.')
        # 打印样本点. 蓝色散点是样本数据, 由于加了noise, 所以点分布较为随机.
        plt.scatter(x_data, y_data, s = 5, c = 'b', marker = '.')
        # 打印预测结果. 红线是神经网络训练后得到的预测线.
        plt.plot(x_data, prediction_value, 'r-', linewidth = 5)
        # 显示图形
        plt.show()
        ## 神经网络在优化过程中, 通过自动调整 Weights_L1, biases_L1,
        #   Weights_L2, biases_L2 这4个变量, 让loss不断减小.

def mnist_quadratic(p):
    """Softmax regression on MNIST trained with a quadratic cost."""
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    # Mini-batch size and the number of batches per epoch.
    batch_size = 100
    n_batch = mnist.train.num_examples // batch_size
    # Placeholders: rows = batch size (unbounded); columns = 784 pixels
    # per image and 10 one-hot label classes respectively.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])

    ## A single softmax layer.
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    prediction = tf.nn.softmax(tf.matmul(x, W) + b)
    # Quadratic cost trained with plain gradient descent.
    loss = tf.reduce_mean(tf.square(y - prediction))
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    init = tf.global_variables_initializer()
    # argmax picks the most likely class; equal compares it to the label.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    # cast turns booleans into 0/1, so the mean is the accuracy,
    # e.g. [1,1,0,1] => 0.75.
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(21):
            for _ in range(n_batch):
                # One mini-batch of images and labels.
                xs, ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict = { x: xs, y: ys })
            acc = sess.run(accuracy, feed_dict = { x: mnist.test.images,
                y: mnist.test.labels })
            print(epoch, acc)  # final accuracy is roughly 91%

def mnist_quadratic_improve(p):
    """Deeper MNIST classifier (quadratic cost); target accuracy >= 95%.

    Bug fix: the original built a 100-unit hidden layer and then
    immediately shadowed its variables with a second 60-unit layer,
    leaving dead, unused nodes in the graph. The dead layer is removed;
    the effective network (784 -> 200 -> 160 -> 60 -> 30 -> 10) is
    unchanged.
    """
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    # Mini-batch size and number of batches per epoch.
    batch_size = 40
    n_batch = mnist.train.num_examples // batch_size
    # Placeholders: 784 pixels per image, 10 one-hot label classes.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])

    ## Hidden layers, all with tanh activations.
    nn1 = 200
    w1 = tf.Variable(tf.random_normal([784, nn1]))
    b1 = tf.Variable(tf.zeros([nn1]))
    l1 = tf.nn.tanh(tf.matmul(x, w1) + b1)
    # layer
    nn2 = 160
    w2 = tf.Variable(tf.random_normal([nn1, nn2]))
    b2 = tf.Variable(tf.zeros([nn2]))
    l2 = tf.nn.tanh(tf.matmul(l1, w2) + b2)
    # layer
    nn3 = 60
    w3 = tf.Variable(tf.random_normal([nn2, nn3]))
    b3 = tf.Variable(tf.zeros([nn3]))
    l3 = tf.nn.tanh(tf.matmul(l2, w3) + b3)
    # layer
    nn4 = 30
    w4 = tf.Variable(tf.random_normal([nn3, nn4]))
    b4 = tf.Variable(tf.zeros([nn4]))
    l4 = tf.nn.tanh(tf.matmul(l3, w4) + b4)
    ## Output layer: softmax over the 10 classes.
    w = tf.Variable(tf.random_normal([nn4, 10]))
    b = tf.Variable(tf.zeros([10]))
    prediction = tf.nn.softmax(tf.matmul(l4, w) + b)

    # Quadratic cost, optimised with Adam.
    loss = tf.reduce_mean(tf.square(y - prediction))
    train_step = tf.train.AdamOptimizer(0.001).minimize(loss)

    init = tf.global_variables_initializer()

    # argmax picks the predicted/true class; the mean of the 0/1
    # comparison results is the accuracy, e.g. [1,1,0,1] => 0.75.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(200):
            for batch in range(n_batch):
                # One mini-batch of images and labels.
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict = { x: batch_xs, y: batch_ys })
            # Report test accuracy after each epoch.
            acc = sess.run(accuracy, feed_dict = { x: mnist.test.images,
                y: mnist.test.labels })
            print(epoch, acc)  # final accuracy is roughly 96.2%

def mnist_cross_entropy(p):
    """Softmax regression on MNIST trained with a cross-entropy cost.

    Bug fix: `tf.nn.softmax_cross_entropy_with_logits` expects *unscaled*
    logits and applies softmax internally; the original passed the
    already-softmaxed `prediction` (a double softmax that squashes the
    gradients). The loss now receives the raw logits.
    """
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    # Mini-batch size and number of batches per epoch.
    batch_size = 100
    n_batch = mnist.train.num_examples // batch_size
    # Placeholders: 784 pixels per image, 10 one-hot label classes.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])

    ## A single layer: raw logits for the loss, softmax for accuracy.
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    logits = tf.matmul(x, W) + b
    prediction = tf.nn.softmax(logits)
    # Cross-entropy cost on the raw logits.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        labels = y, logits = logits))
    # Plain gradient-descent trainer.
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    init = tf.global_variables_initializer()
    # argmax picks the predicted/true class; the mean of the 0/1
    # comparison results is the accuracy, e.g. [1,1,0,1] => 0.75.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(21):
            for batch in range(n_batch):
                # One mini-batch of images and labels.
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict = { x: batch_xs, y: batch_ys })
            acc = sess.run(accuracy, feed_dict = { x: mnist.test.images,
                y: mnist.test.labels })
            print(epoch, acc)

def mnist_dropout(p):
    """MNIST classifier with dropout regularisation.

    Bug fix: `tf.nn.softmax_cross_entropy_with_logits` applies softmax
    itself; the original fed it the already-softmaxed `prediction`. The
    loss now receives the raw logits.
    """
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    # Mini-batch size and number of batches per epoch.
    batch_size = 100
    n_batch = mnist.train.num_examples // batch_size
    # Placeholders: 784 pixels per image, 10 one-hot label classes, and
    # the dropout keep-probability.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])
    keep_prob = tf.placeholder(tf.float32)

    ## Network: three tanh hidden layers, each followed by dropout.
    # Truncated-normal weight init (stddev 0.1); biases start at 0.1.
    W1 = tf.Variable(tf.truncated_normal([784, 2000], stddev = 0.1))
    b1 = tf.Variable(tf.zeros([2000]) + 0.1)
    L1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
    # keep_prob sets the fraction of neurons that stay active.
    L1_drop = tf.nn.dropout(L1, keep_prob)

    W2 = tf.Variable(tf.truncated_normal([2000, 2000], stddev = 0.1))
    b2 = tf.Variable(tf.zeros([2000]) + 0.1)
    L2 = tf.nn.tanh(tf.matmul(L1_drop, W2) + b2)
    L2_drop = tf.nn.dropout(L2, keep_prob)

    W3 = tf.Variable(tf.truncated_normal([2000, 1000], stddev = 0.1))
    b3 = tf.Variable(tf.zeros([1000]) + 0.1)
    L3 = tf.nn.tanh(tf.matmul(L2_drop, W3) + b3)
    L3_drop = tf.nn.dropout(L3, keep_prob)

    W4 = tf.Variable(tf.truncated_normal([1000, 10], stddev = 0.1))
    b4 = tf.Variable(tf.zeros([10]) + 0.1)
    # Raw logits for the loss; softmax of them for the accuracy check.
    logits = tf.matmul(L3_drop, W4) + b4
    prediction = tf.nn.softmax(logits)

    # Cross-entropy cost on the raw logits.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        labels = y, logits = logits))
    # Plain gradient-descent trainer.
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    init = tf.global_variables_initializer()
    # argmax picks the predicted/true class; the mean of the 0/1
    # comparison results is the accuracy, e.g. [1,1,0,1] => 0.75.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(31):
            for batch in range(n_batch):
                # One mini-batch of images and labels; train with 70% of
                # neurons active.
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict = { x: batch_xs, y: batch_ys,
                    keep_prob: 0.7 })

            # Evaluate with dropout disabled (keep_prob = 1.0).
            train_acc = sess.run(accuracy, feed_dict = { x: mnist.train.images,
                y: mnist.train.labels, keep_prob: 1.0 })
            test_acc = sess.run(accuracy, feed_dict = { x: mnist.test.images,
                y: mnist.test.labels, keep_prob: 1.0 })
            print(epoch, train_acc, test_acc)

def mnist_optimizer(p):
    """Softmax regression on MNIST trained with the Adam optimizer.

    Bug fix: `tf.nn.softmax_cross_entropy_with_logits` applies softmax
    itself; the original fed it the already-softmaxed `prediction`. The
    loss now receives the raw logits.
    """
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    # Mini-batch size and number of batches per epoch.
    batch_size = 100
    n_batch = mnist.train.num_examples // batch_size
    # Placeholders: 784 pixels per image, 10 one-hot label classes.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])

    ## A single layer: raw logits for the loss, softmax for accuracy.
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    logits = tf.matmul(x, W) + b
    prediction = tf.nn.softmax(logits)
    # Cross-entropy cost on the raw logits, optimised with Adam.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        labels = y, logits = logits))
    train_step = tf.train.AdamOptimizer(1e-2).minimize(loss)

    init = tf.global_variables_initializer()
    # argmax picks the predicted/true class; the mean of the 0/1
    # comparison results is the accuracy, e.g. [1,1,0,1] => 0.75.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(21):
            for batch in range(n_batch):
                # One mini-batch of images and labels.
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict = { x: batch_xs, y: batch_ys })
            acc = sess.run(accuracy, feed_dict = { x: mnist.test.images,
                y: mnist.test.labels })
            print(epoch, acc)

def mnist_further_imporve(p):
    """Deeper dropout MNIST classifier with Adam and a decaying learning
    rate; target accuracy >= 98%.

    Bug fix: `tf.nn.softmax_cross_entropy_with_logits` applies softmax
    itself; the original fed it the already-softmaxed `prediction`. The
    loss now receives the raw logits.
    """
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    # Mini-batch size and number of batches per epoch.
    batch_size = 100
    n_batch = mnist.train.num_examples // batch_size
    # Placeholders: 784 pixels per image, 10 one-hot label classes,
    # dropout keep-probability, and the (decaying) learning rate.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])
    keep_prob = tf.placeholder(tf.float32)
    learning_rate = tf.placeholder(tf.float32)

    ## Network.
    stddev_init = 0.1
    biases_init = 0.7
    # Hidden layer 1: 784 -> 500, tanh + dropout.
    nn1 = 500
    w1 = tf.Variable(tf.truncated_normal([784, nn1], stddev = stddev_init))
    b1 = tf.Variable(tf.zeros([nn1]) + biases_init)
    l1 = tf.nn.tanh(tf.matmul(x, w1) + b1)
    l1d = tf.nn.dropout(l1, keep_prob)
    # Hidden layer 2: 500 -> 300, tanh + dropout.
    nn2 = 300
    w2 = tf.Variable(tf.truncated_normal([nn1, nn2], stddev = stddev_init))
    b2 = tf.Variable(tf.zeros([nn2]) + biases_init)
    l2 = tf.nn.tanh(tf.matmul(l1d, w2) + b2)
    l2d = tf.nn.dropout(l2, keep_prob)
    # Output layer: 300 -> 10 raw class scores.
    w = tf.Variable(tf.truncated_normal([nn2, 10], stddev = stddev_init))
    b = tf.Variable(tf.zeros([10]) + biases_init)
    logits = tf.matmul(l2d, w) + b
    prediction = tf.nn.softmax(logits)

    # Cross-entropy cost on the raw logits, optimised with Adam.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        labels = y, logits = logits))
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)

    init = tf.global_variables_initializer()
    # argmax picks the predicted/true class; the mean of the 0/1
    # comparison results is the accuracy, e.g. [1,1,0,1] => 0.75.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        sess.run(init)
        lr = 1e-3
        for epoch in range(401):
            # Exponentially decay the learning rate each epoch.
            lr *= 0.96
            for batch in range(n_batch):
                # One mini-batch of images and labels.
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict = { x: batch_xs, y: batch_ys,
                    keep_prob: 1.0, learning_rate: lr })
            # Report train/test accuracy with dropout disabled.
            train_acc = sess.run(accuracy, feed_dict = { x: mnist.train.images,
                y: mnist.train.labels, keep_prob: 1.0 })
            acc = sess.run(accuracy, feed_dict = { x: mnist.test.images,
                y: mnist.test.labels, keep_prob: 1.0 })
            print('%03d: test:%0.4f train:%0.4f lr:%0.9f'
                    % (epoch, acc, train_acc, lr))

def tensorboard_structure(p):
    """Build the softmax-regression graph under name scopes and write it
    to a TensorBoard log directory."""
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    # Mini-batch size and the number of batches per epoch.
    batch_size = 100
    n_batch = mnist.train.num_examples // batch_size

    # Name scopes group ops in the TensorBoard graph view.
    with tf.name_scope('input'):
        # Placeholders: 784 pixels per image, 10 one-hot label classes;
        # the row dimension is left open for any batch size.
        x = tf.placeholder(tf.float32, [None, 784], name = 'x_input')
        y = tf.placeholder(tf.float32, [None, 10], name = 'y_input')
    with tf.name_scope('layer'):
        # A single softmax layer.
        with tf.name_scope('wights'):
            W = tf.Variable(tf.zeros([784, 10]), name = 'W')
        with tf.name_scope('biases'):
            b = tf.Variable(tf.zeros([10]), name = 'b')
        with tf.name_scope('wx_plus_b'):
            wx_plus_b = tf.matmul(x, W) + b
        with tf.name_scope('softmax'):
            prediction = tf.nn.softmax(wx_plus_b)

    with tf.name_scope('train'):
        with tf.name_scope('loss'):
            # Quadratic cost.
            loss = tf.reduce_mean(tf.square(y - prediction))
        with tf.name_scope('train_step'):
            # Gradient-descent trainer.
            train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    init = tf.global_variables_initializer()

    with tf.name_scope('test'):
        with tf.name_scope('correct_prediction'):
            # argmax picks the predicted/true class for comparison.
            correct_prediction = tf.equal(tf.argmax(y, 1),
                    tf.argmax(prediction, 1))
        with tf.name_scope('accuracy'):
            # Mean of the 0/1 comparison results, e.g. [1,1,0,1] => 0.75.
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    with tf.Session() as sess:
        sess.run(init)
        # Dumping the graph for TensorBoard is the point of this demo.
        writer = tf.summary.FileWriter('5_2_logs/', sess.graph)
        for epoch in range(1):
            for _ in range(n_batch):
                xs, ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict = { x: xs, y: ys })
            acc = sess.run(accuracy, feed_dict = { x: mnist.test.images,
                y: mnist.test.labels })
            print(epoch, acc)

def tensorboard_running_log(p):
    ### Record scalar/histogram summaries for TensorBoard while training.
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    # Mini-batch size.
    batch_size = 100
    # Number of batches per epoch.
    n_batch = mnist.train.num_examples // batch_size

    # Register summary statistics of a variable, for analysing how its
    # values evolve in TensorBoard.
    def variable_summries(var):
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean) # mean value
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar('stddev', stddev) # standard deviation
            tf.summary.scalar('max', tf.reduce_max(var)) # maximum
            tf.summary.scalar('min', tf.reduce_min(var)) # minimum
            tf.summary.histogram('histogram', var)  # value histogram

    # Name scopes group ops in the TensorBoard graph view.
    with tf.name_scope('input'):
        # Placeholder: any batch size, 784 pixels per image.
        x = tf.placeholder(tf.float32, [None, 784], name = 'x_input')
        # Placeholder: 10 one-hot label classes.
        y = tf.placeholder(tf.float32, [None, 10], name = 'y_input')
    with tf.name_scope('layer'):
        ## A single softmax layer.
        with tf.name_scope('wights'):
            W = tf.Variable(tf.zeros([784, 10]), name = 'W')
            variable_summries(W)
        with tf.name_scope('biases'):
            b = tf.Variable(tf.zeros([10]), name = 'b')
            variable_summries(b)
        with tf.name_scope('wx_plus_b'):
            wx_plus_b = tf.matmul(x, W) + b
        with tf.name_scope('softmax'):
            prediction = tf.nn.softmax(wx_plus_b)

    with tf.name_scope('train'):
        with tf.name_scope('loss'):
            # Quadratic cost.
            loss = tf.reduce_mean(tf.square(y - prediction))
            tf.summary.scalar('loss', loss)
        with tf.name_scope('train_step'):
            # Gradient-descent trainer.
            train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    init = tf.global_variables_initializer()

    with tf.name_scope('test'):
        with tf.name_scope('correct_prediction'):
            # argmax finds the index of the largest value (the class).
            correct_prediction = tf.equal(tf.argmax(y, 1),
                    tf.argmax(prediction, 1))
        with tf.name_scope('accuracy'):
            # cast turns booleans into 0/1, e.g. [1,1,0,1] => 0.75.
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.summary.scalar('accuracy', accuracy)

    # Merge every registered summary into a single op.
    merged = tf.summary.merge_all()

    with tf.Session() as sess:
        sess.run(init)
        writer = tf.summary.FileWriter('5_2_logs/', sess.graph)
        for epoch in range(51):
            for batch in range(n_batch):
                # One mini-batch: images in batch_xs, labels in batch_ys.
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                summary, _train = sess.run([merged, train_step],
                        feed_dict = { x: batch_xs, y: batch_ys })
            # Record the epoch's last summary under the epoch number.
            writer.add_summary(summary, epoch)
            acc = sess.run(accuracy, feed_dict = { x: mnist.test.images,
                y: mnist.test.labels })
            print(epoch, acc)

def tensorboard_visualization(p):
    """TensorBoard embedding-visualisation demo.

    See https://www.tensorflow.org/get_started/embedding_viz

    Bug fix: `tf.io.gfile` exposes lowercase `exists`/`remove`; the
    original called the capitalised `tf.gfile`-style names on it
    (`tf.io.gfile.Exists`/`Remove`), which raises AttributeError.
    """
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    # Number of test images to embed.
    images_num = 2000
    # Mini-batch size and number of batches per epoch.
    batch_size = 100
    n_batch = mnist.train.num_examples // batch_size

    # Plain session, closed explicitly at the end.
    sess = tf.Session()
    # Pack the first `images_num` test images into a non-trainable
    # variable used as the embedding tensor.
    embedding = tf.Variable(tf.stack(mnist.test.images[:images_num]),
            trainable = False, name = 'embedding')

    # Register summary statistics of a variable for TensorBoard.
    def variable_summries(var):
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean) # mean value
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
            tf.summary.scalar('stddev', stddev) # standard deviation
            tf.summary.scalar('max', tf.reduce_max(var)) # maximum
            tf.summary.scalar('min', tf.reduce_min(var)) # minimum
            tf.summary.histogram('histogram', var)  # value histogram

    # Name scopes group ops in the TensorBoard graph view.
    with tf.name_scope('input'):
        # Placeholders: 784 pixels per image, 10 one-hot label classes.
        x = tf.placeholder(tf.float32, [None, 784], name = 'x_input')
        y = tf.placeholder(tf.float32, [None, 10], name = 'y_input')

    # Image summaries of the inputs.
    with tf.name_scope('input_reshape'):
        # Reshape to [batch, 28, 28, 1]: -1 leaves the batch size free;
        # the final 1 is the channel count (grayscale; colour would be 3).
        images_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
        tf.summary.image('input', images_shaped_input, 10)

    with tf.name_scope('layer'):
        ## A single softmax layer.
        with tf.name_scope('wights'):
            W = tf.Variable(tf.zeros([784, 10]), name = 'W')
            variable_summries(W)
        with tf.name_scope('biases'):
            b = tf.Variable(tf.zeros([10]), name = 'b')
            variable_summries(b)
        with tf.name_scope('wx_plus_b'):
            wx_plus_b = tf.matmul(x, W) + b
        with tf.name_scope('softmax'):
            prediction = tf.nn.softmax(wx_plus_b)

    with tf.name_scope('train'):
        with tf.name_scope('loss'):
            # Quadratic cost.
            loss = tf.reduce_mean(tf.square(y - prediction))
            tf.summary.scalar('loss', loss)
        with tf.name_scope('train_step'):
            # Gradient-descent trainer.
            train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)

    sess.run(tf.global_variables_initializer())

    with tf.name_scope('test'):
        with tf.name_scope('correct_prediction'):
            # argmax picks the predicted/true class for comparison.
            correct_prediction = tf.equal(tf.argmax(y, 1),
                    tf.argmax(prediction, 1))
        with tf.name_scope('accuracy'):
            # Mean of the 0/1 comparison results, e.g. [1,1,0,1] => 0.75.
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.summary.scalar('accuracy', accuracy)

    # Merge every registered summary into one op.
    merged = tf.summary.merge_all()

    # Write the metadata (label) file, replacing any existing one.
    file_path = sys.path[0] + '/5_4_projector/'
    file_metadata = 'data/metadata.tsv'
    if tf.io.gfile.exists(file_path + file_metadata):
        tf.io.gfile.remove(file_path + file_metadata)
    with open(file_path + file_metadata, 'w') as f:
        labels = sess.run(tf.argmax(mnist.test.labels[:], 1))
        # One label line per embedded image.
        for i in range(images_num):
            f.write(str(labels[i]) + '\n')

    # Writer for the graph, saver for the model checkpoint.
    writer = tf.summary.FileWriter(file_path + 'data', sess.graph)
    saver = tf.train.Saver()
    # Projector configuration (disabled along with the contrib projector
    # import at the top of the file).
    #  config = projector.ProjectorConfig()
    #  embed = config.embeddings.add()
    #  embed.tensor_name = embedding.name
    #  embed.metadata_path = file_path + file_metadata
    #  embed.sprite.image_path = mnist_sprite_image
    # Slice the sprite sheet into 28x28 tiles.
    #  embed.sprite.single_image_dim.extend([28, 28])
    #  projector.visualize_embeddings(writer, config)

    step_num = 5001
    for i in range(step_num):
        # Train on one 100-image batch, tracing the run so TensorBoard
        # can show per-step compute/memory stats.
        batch_xs, batch_ys = mnist.train.next_batch(100)
        run_metadata = tf.RunMetadata()
        summary, _train = sess.run([merged, train_step],
                feed_dict = { x: batch_xs, y: batch_ys },
                options = tf.RunOptions(trace_level = tf.RunOptions.FULL_TRACE),
                run_metadata = run_metadata)
        # Record the trace and the summaries for this step.
        writer.add_run_metadata(run_metadata, 'step%03d' % i)
        writer.add_summary(summary, i)

        if i % 100 == 0:
            acc = sess.run(accuracy, feed_dict = { x: mnist.test.images,
                y: mnist.test.labels })
            print(i / 100, acc)

    # Persist the trained model and clean up.
    saver.save(sess, file_path + 'data/model.ckpt', global_step = step_num)
    writer.close()
    sess.close()

def mnist_cnn(p):
    """Two-convolutional-layer CNN for MNIST classification.

    Bug fix: `tf.nn.softmax_cross_entropy_with_logits` applies softmax
    itself; the original fed it the already-softmaxed `prediction`,
    squashing the gradients. The loss now receives the raw logits.
    """
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    ## Weight initialiser: truncated normal, stddev 0.1.
    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev = 0.1)
        return tf.Variable(initial)
    ## Bias initialiser: constant 0.1.
    def bias_variable(shape):
        initial = tf.constant(0.1, shape = shape)
        return tf.Variable(initial)
    ## 2-D convolution.
    # x: input tensor [batch, in_height, in_width, in_channels]
    # W: filter/kernel [filter_height, filter_width, in_channels, out_channels]
    def conv2d(x, W):
        # strides[1]/strides[2] are the x/y step sizes.
        # 'SAME' padding keeps the output plane the same size as the
        # input; 'VALID' would shrink it.
        return tf.nn.conv2d(x, W, strides = [1, 1, 1, 1], padding = 'SAME')
    ## 2x2 max pooling; ksize is the window [1, x, y, 1].
    def max_pool_2x2(x):
        return tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1],
                padding = 'SAME')

    # Mini-batch size and number of batches per epoch.
    batch_size = 100
    n_batch = mnist.train.num_examples // batch_size
    # Input placeholders: 784 pixels per image, 10 one-hot label classes.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])
    # Reshape x to a 4-D tensor; -1 leaves the batch size free, the
    # trailing 1 is the channel count (grayscale).
    x_image = tf.reshape(x, [-1, 28, 28, 1])

    ## Conv layer 1: 5x5 window, 1 input channel, 32 feature maps.
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])   # one bias per kernel
    # SAME padding keeps the 32 maps at 28x28; relu activation.
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    # Max pooling halves each dimension: 32 maps of 14x14.
    h_pool1 = max_pool_2x2(h_conv1)
    ## Conv layer 2: 5x5 window, 64 kernels over the 32 input maps.
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])   # one bias per kernel
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2) # 64 maps of 7x7 after pooling
    ## Fully connected layer 1: 64*7*7 inputs -> 1024 neurons.
    W_fc1 = weight_variable([64 * 7 * 7, 1024])
    b_fc1 = bias_variable([1024])
    # Flatten the pooled maps to one row per example.
    h_pool2_flat = tf.reshape(h_pool2, [-1, 64 * 7 * 7])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    # Dropout keep-probability placeholder and dropout layer.
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    ## Fully connected layer 2: 1024 -> 10 raw class scores.
    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    # Raw logits for the loss; softmax of them for the accuracy check.
    logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    prediction = tf.nn.softmax(logits)

    # Cross-entropy cost on the raw logits, optimised with Adam.
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        labels = y, logits = logits))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    # argmax picks the predicted/true class; the mean of the 0/1
    # comparison results is the accuracy, e.g. [1,1,0,1] => 0.75.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # grow GPU memory on demand
    with tf.Session(config = config) as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(21):
            for batch in range(n_batch):
                # Train with 70% of the fc-layer neurons active.
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict = { x: batch_xs, y: batch_ys,
                    keep_prob: 0.7 })

            # Evaluate accuracy in mini-batches over both sets, with
            # dropout disabled (keep_prob = 1.0).
            train_acc = 0
            acc = 0
            for i in range(n_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                train_acc += sess.run(accuracy, feed_dict = { x: batch_xs,
                    y: batch_ys, keep_prob: 1.0 })
                batch_xs, batch_ys = mnist.test.next_batch(batch_size)
                acc += sess.run(accuracy, feed_dict = { x: batch_xs,
                    y: batch_ys, keep_prob: 1.0 })
            # Average the per-batch accuracies and scale to percent.
            acc *= 100
            acc /= n_batch
            train_acc *= 100
            train_acc /= n_batch
            print('%03d: test:%0.4f, train:%0.4f' % (epoch, acc, train_acc))

def mnist_cnn_tensorboard_test(p):
    ### MNIST classification with a Convolutional Neural Network, logging
    ### per-variable summaries, run metadata and an embedding for TensorBoard.
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    ## Weight initialisation helper.
    def weight_variable(shape):
        # Truncated normal keeps the initial weights small.
        initial = tf.truncated_normal(shape, stddev = 0.1)
        return tf.Variable(initial)
    ## Bias initialisation helper.
    def bias_variable(shape):
        initial = tf.constant(0.1, shape = shape)
        return tf.Variable(initial)
    ## 2-D convolution.
    # x:    input tensor of shape [batch, in_height, in_width, in_channels]
    # W:    filter / kernel tensor of shape
    #       [filter_height, filter_width, in_channels, out_channels]
    def conv2d(x, W):
        # strides:  strides[1] is the x step, strides[2] the y step.
        # padding:  'SAME' keeps the output plane the same size as the input;
        #           'VALID' produces a smaller plane.
        return tf.nn.conv2d(x, W, strides = [1, 1, 1, 1], padding = 'SAME')
    ## 2x2 max pooling.
    def max_pool_2x2(x):
        # ksize: pooling window, [1, x, y, 1]
        return tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1],
                padding = 'SAME')
    ## Record summary statistics of a tensor for TensorBoard analysis.
    #  (renamed from the original misspelt `variable_summries`)
    def variable_summaries(var):
        with tf.name_scope('summaries'):
            mean = tf.reduce_mean(var)
            tf.summary.scalar('mean', mean)
            with tf.name_scope('stddev'):
                stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
                tf.summary.scalar('stddev', stddev)
            tf.summary.scalar('max', tf.reduce_max(var))
            tf.summary.scalar('min', tf.reduce_min(var))
            tf.summary.histogram('histogram', var)

    with tf.name_scope('input'):
        # Input placeholders.
        x = tf.placeholder(tf.float32, [None, 784])
        y = tf.placeholder(tf.float32, [None, 10])
        with tf.name_scope('reshape'):
            # Reshape x into a 4-D tensor; -1 leaves the batch size free.
            x_image = tf.reshape(x, [-1, 28, 28, 1])
            tf.summary.image('input', x_image, 10)

    with tf.name_scope('layer'):
        with tf.name_scope('conv1'):
            ## Convolution layer 1.
            # 5x5 window; 1 input channel (grayscale); 32 output feature maps.
            W_conv1 = weight_variable([5, 5, 1, 32])
            b_conv1 = bias_variable([32])   # one bias per kernel
            # Convolve, add bias, apply ReLU. With SAME padding the 32
            # feature maps stay 28x28.
            h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
            # Max pooling halves the maps to 14x14.
            h_pool1 = max_pool_2x2(h_conv1)
            variable_summaries(W_conv1)
            variable_summaries(b_conv1)
            variable_summaries(h_conv1)
            variable_summaries(h_pool1)
        with tf.name_scope('conv2'):
            ## Convolution layer 2: 5x5 window, 64 kernels over 32 maps.
            W_conv2 = weight_variable([5, 5, 32, 64])
            b_conv2 = bias_variable([64])   # one bias per kernel
            h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
            h_pool2 = max_pool_2x2(h_conv2) # 64 feature maps of 7x7
            variable_summaries(W_conv2)
            variable_summaries(b_conv2)
            variable_summaries(h_conv2)
            variable_summaries(h_pool2)
        with tf.name_scope('fc1'):
            ## Fully-connected layer 1: 64*7*7 inputs -> 1024 neurons.
            W_fc1 = weight_variable([64 * 7 * 7, 1024])
            b_fc1 = bias_variable([1024])
            # Flatten the pooled feature maps.
            h_pool2_flat = tf.reshape(h_pool2, [-1, 64 * 7 * 7])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
            # Dropout keep probability (fed at run time).
            keep_prob = tf.placeholder(tf.float32)
            h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
            variable_summaries(W_fc1)
            variable_summaries(b_fc1)
        with tf.name_scope('fc2'):
            ## Fully-connected layer 2: 1024 -> 10 classes.
            W_fc2 = weight_variable([1024, 10])
            b_fc2 = bias_variable([10])
            variable_summaries(W_fc2)
            variable_summaries(b_fc2)
            # BUGFIX: keep the raw logits separate; the original fed the
            # softmax output back into softmax_cross_entropy_with_logits_v2,
            # which applies softmax twice and degrades training.
            logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
            prediction = tf.nn.softmax(logits)

    with tf.name_scope('train'):
        # Cross-entropy loss on the raw logits.
        cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(labels = y,
                    logits = logits))
        # Adam optimizer.
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
        tf.summary.scalar('cross_entropy', cross_entropy)
    with tf.name_scope('test'):
        # argmax finds the predicted / true class index.
        correct_prediction = tf.equal(tf.argmax(y, 1),
                tf.argmax(prediction, 1))
        # cast turns the booleans into an accuracy, e.g. [1,1,0,1] => 0.75
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True  # grow GPU memory on demand
    with tf.Session(config = sess_config) as sess:
        # Number of test images used for the embedding projector.
        images_num = 2000
        # Stack the first images_num test images into the embedding variable.
        embedding = tf.Variable(tf.stack(mnist.test.images[:images_num]),
                trainable = False, name = 'embedding')
        # Merge every summary op.
        summary = tf.summary.merge_all()
        # (Re-)create the metadata file holding one label per line.
        file_path = sys.path[0] + '/5_4_projector/'
        file_metadata = 'data1/metadata.tsv'
        # BUGFIX: tf.io.gfile uses lower-case method names; the upper-case
        # Exists/Remove variants live on the deprecated tf.gfile module.
        if tf.io.gfile.exists(file_path + file_metadata):
            tf.io.gfile.remove(file_path + file_metadata)
        with open(file_path + file_metadata, 'w') as f:
            # Only the first images_num labels are written.
            labels = sess.run(tf.argmax(mnist.test.labels[:images_num], 1))
            for i in range(images_num):
                f.write(str(labels[i]) + '\n')
        # Writer persisting the graph structure.
        writer = tf.summary.FileWriter(file_path + 'data1', sess.graph)
        # Saver for the trained model.
        saver = tf.train.Saver()
        # Projector configuration (needs tensorflow.contrib, disabled):
        #  config = projector.ProjectorConfig()
        #  embed = config.embeddings.add()
        #  embed.tensor_name = embedding.name
        #  embed.metadata_path = file_path + file_metadata
        #  embed.sprite.image_path = mnist_sprite_image
        #  embed.sprite.single_image_dim.extend([28, 28])
        #  projector.visualize_embeddings(writer, config)

        # Batch size / number of steps / print interval / eval batches.
        batch_size = 100
        step_num = 12001    # 60000 x 20 / 100 + 1
        disp_interval = 100
        test_num = 100
        sess.run(tf.global_variables_initializer())
        run_metadata = tf.RunMetadata()
        for i in range(step_num):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            _t, _s = sess.run([train_step, summary],
                    feed_dict = { x: batch_xs, y: batch_ys, keep_prob: 0.7 },
                    options = tf.RunOptions(trace_level =
                        tf.RunOptions.FULL_TRACE), run_metadata = run_metadata)
            # Record per-step run metadata and summaries.
            writer.add_run_metadata(run_metadata, 'step%03d' % i)
            writer.add_summary(_s, i)

            print('\r(%d)\r' % (i), end = '')
            if i % disp_interval == 0:
                train_acc = 0
                acc = 0
                for _i in range(test_num):
                    batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                    train_acc += sess.run(accuracy, feed_dict = { x: batch_xs,
                        y: batch_ys, keep_prob: 1.0 })
                    batch_xs, batch_ys = mnist.test.next_batch(batch_size)
                    acc += sess.run(accuracy, feed_dict = { x: batch_xs,
                        y: batch_ys, keep_prob: 1.0 })
                acc = acc * 100 / test_num
                train_acc = train_acc * 100 / test_num
                print('%03d: test:%0.4f, train:%0.4f' %
                        (i / disp_interval, acc, train_acc))
                # Final accuracy reaches roughly 99.2%.

        # Persist the trained model.
        saver.save(sess, file_path + 'data1/model.ckpt', global_step = step_num)
        writer.close()

def mnist_cnn_tensorboard(p):
    ### MNIST classification with a Convolutional Neural Network, logging
    ### separate train/test summaries to TensorBoard.
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    ## Weight initialisation helper.
    def weight_variable(shape):
        # Truncated normal keeps the initial weights small.
        initial = tf.truncated_normal(shape, stddev = 0.1)
        return tf.Variable(initial)
    ## Bias initialisation helper.
    def bias_variable(shape):
        initial = tf.constant(0.1, shape = shape)
        return tf.Variable(initial)
    ## 2-D convolution.
    # x:    input tensor of shape [batch, in_height, in_width, in_channels]
    # W:    filter / kernel tensor of shape
    #       [filter_height, filter_width, in_channels, out_channels]
    def conv2d(x, W):
        # strides:  strides[1] is the x step, strides[2] the y step.
        # padding:  'SAME' keeps the output plane the same size as the input;
        #           'VALID' produces a smaller plane.
        return tf.nn.conv2d(x, W, strides = [1, 1, 1, 1], padding = 'SAME')
    ## 2x2 max pooling.
    def max_pool_2x2(x):
        # ksize: pooling window, [1, x, y, 1]
        return tf.nn.max_pool(x, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1],
                padding = 'SAME')

    with tf.name_scope('input'):
        # Input placeholders.
        x = tf.placeholder(tf.float32, [None, 784])
        y = tf.placeholder(tf.float32, [None, 10])
        with tf.name_scope('reshape'):
            # Reshape x into a 4-D tensor; -1 leaves the batch size free.
            x_image = tf.reshape(x, [-1, 28, 28, 1])
            tf.summary.image('input', x_image, 10)

    with tf.name_scope('layer'):
        with tf.name_scope('conv1'):
            ## Convolution layer 1.
            # 5x5 window; 1 input channel (grayscale); 32 output feature maps.
            W_conv1 = weight_variable([5, 5, 1, 32])
            b_conv1 = bias_variable([32])   # one bias per kernel
            # Convolve, add bias, apply ReLU. With SAME padding the 32
            # feature maps stay 28x28.
            h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
            # Max pooling halves the maps to 14x14.
            h_pool1 = max_pool_2x2(h_conv1)
        with tf.name_scope('conv2'):
            ## Convolution layer 2: 5x5 window, 64 kernels over 32 maps.
            W_conv2 = weight_variable([5, 5, 32, 64])
            b_conv2 = bias_variable([64])   # one bias per kernel
            h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
            h_pool2 = max_pool_2x2(h_conv2) # 64 feature maps of 7x7
        with tf.name_scope('fc1'):
            ## Fully-connected layer 1: 64*7*7 inputs -> 1024 neurons.
            W_fc1 = weight_variable([64 * 7 * 7, 1024])
            b_fc1 = bias_variable([1024])
            # Flatten the pooled feature maps.
            h_pool2_flat = tf.reshape(h_pool2, [-1, 64 * 7 * 7])
            h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
            # Dropout keep probability (fed at run time).
            keep_prob = tf.placeholder(tf.float32)
            h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
        with tf.name_scope('fc2'):
            ## Fully-connected layer 2: 1024 -> 10 classes.
            W_fc2 = weight_variable([1024, 10])
            b_fc2 = bias_variable([10])
            # BUGFIX: keep the raw logits separate; the original fed the
            # softmax output into softmax_cross_entropy_with_logits_v2,
            # applying softmax twice and degrading training.
            logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
            prediction = tf.nn.softmax(logits)

    with tf.name_scope('train'):
        # Cross-entropy loss on the raw logits.
        cross_entropy = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(labels = y,
                    logits = logits))
        # Adam optimizer.
        train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
        tf.summary.scalar('cross_entropy', cross_entropy)
    with tf.name_scope('test'):
        # argmax finds the predicted / true class index.
        correct_prediction = tf.equal(tf.argmax(y, 1),
                tf.argmax(prediction, 1))
        # cast turns the booleans into an accuracy, e.g. [1,1,0,1] => 0.75
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', accuracy)

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True  # grow GPU memory on demand
    with tf.Session(config = sess_config) as sess:
        # Merge every summary op.
        summary = tf.summary.merge_all()
        file_path = sys.path[0] + '/5_4_projector/'
        # Two writers so train and test curves can be compared in TensorBoard.
        writer_train = tf.summary.FileWriter(file_path + 'data2/train',
                sess.graph)
        writer_test = tf.summary.FileWriter(file_path + 'data2/test',
                sess.graph)

        # Batch size and number of training steps.
        batch_size = 100
        step_num = 12001    # 60000 x 20 / 100 + 1
        sess.run(tf.global_variables_initializer())
        for i in range(step_num):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            _, _s = sess.run([train_step, summary],
                    feed_dict = { x: batch_xs, y: batch_ys, keep_prob: 0.7 })
            # Record training-side summaries.
            writer_train.add_summary(_s, i)

            _train, _s = sess.run([accuracy, summary], feed_dict = {
                x: batch_xs, y: batch_ys, keep_prob: 1.0 })
            batch_xs, batch_ys = mnist.test.next_batch(1000)
            _test, _s = sess.run([accuracy, summary], feed_dict = {
                x: batch_xs, y: batch_ys, keep_prob: 1.0 })
            writer_test.add_summary(_s, i)
            if i % 10 == 0:
                print('\r%03d: test:%0.4f, train:%0.4f\r' %
                        (i / 10, _test, _train), end = '')
                # Final accuracy reaches roughly 99.2%.
            if i % 100 == 0:
                print()

def mnist_rnn(p):
    ### MNIST classification with a Recurrent Neural Network (LSTM).
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    n_inputs = 28       # one image row of 28 pixels per time step
    max_time = 28       # 28 rows -> 28 time steps
    lstm_size = 100     # number of hidden LSTM units
    n_classes = 10      # digits 0~9
    batch_size = 50     # samples per batch
    n_batch = mnist.train.num_examples // batch_size    # batches per epoch
    # Input placeholders.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])

    weights = tf.Variable(tf.truncated_normal([lstm_size, n_classes],
        stddev = 0.1))
    biases = tf.Variable(tf.constant(0.1, shape = [n_classes]))

    ## Build the RNN graph; returns [logits, softmax output, outputs, state].
    # BUGFIX: the original declared a `weight` parameter but silently used
    # the enclosing `weights` variable; the parameter is now used as passed.
    def RNN(X, weights, biases):
        # [batch_size, 784] => [batch_size, 28, 28]
        inputs = tf.reshape(X, [-1, max_time, n_inputs])
        # Basic LSTM cell with lstm_size hidden units.
        lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
        # inputs:   [batch_size, max_time, n_inputs]
        # outputs:  per-step cell output,
        #           [batch_size, max_time, cell.output_size] (time_major=False)
        #           or [max_time, batch_size, cell.output_size] (time_major=True)
        # state:    final state; state[0] is the cell state, state[1] the
        #           hidden (output) state, each [batch_size, cell.state_size]
        outputs, state = tf.nn.dynamic_rnn(lstm_cell, inputs,
                dtype = tf.float32)
        # BUGFIX: keep the raw logits separate from the softmax output so
        # the loss below is not computed on an already-softmaxed tensor.
        logits = tf.matmul(state[1], weights) + biases
        results = tf.nn.softmax(logits)
        return [logits, results, outputs, state]

    [logits, prediction, rnn_outputs, rnn_state] = RNN(x, weights, biases)
    ## Training: cross-entropy on the raw logits, Adam optimizer.
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
        logits = logits, labels = y))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
    ## Evaluation: accuracy over the test set.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True  # grow GPU memory on demand
    with tf.Session(config = sess_config) as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(21):
            for batch in range(n_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                _ts, outputs, state = sess.run(
                        [train_step, rnn_outputs, rnn_state],
                        feed_dict = { x: batch_xs, y: batch_ys })

            #  print('outputs:', outputs)
            #  print('state:', state)
            acc = sess.run(accuracy, feed_dict = {
                x: mnist.test.images, y: mnist.test.labels })
            print(epoch, acc)

def saver_params(p):
    ### Save & restore model parameters with tf.train.Saver.
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    # Batch size and number of batches per epoch.
    batch_size = 100
    n_batch = mnist.train.num_examples // batch_size
    # Placeholders: rows = batch size (unconstrained),
    # columns = pixels per image / label width.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])

    ## Single-layer softmax network.
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))
    # BUGFIX: keep the raw logits separate; the original passed the softmax
    # output into softmax_cross_entropy_with_logits_v2 (softmax applied
    # twice), which degrades training.
    logits = tf.matmul(x, W) + b
    prediction = tf.nn.softmax(logits)
    # Cross-entropy loss on the raw logits.
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
        logits = logits, labels = y))
    # Stochastic gradient-descent optimizer.
    train_step = tf.train.GradientDescentOptimizer(0.2).minimize(cross_entropy)

    init = tf.global_variables_initializer()
    # argmax finds the predicted / true class index.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    # cast turns the booleans into an accuracy, e.g. [1,1,0,1] => 0.75
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    print('training model')
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(11):
            for batch in range(n_batch):
                # One batch: images in batch_xs, labels in batch_ys.
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict = { x: batch_xs, y: batch_ys })
            acc = sess.run(accuracy, feed_dict = { x: mnist.test.images,
                y: mnist.test.labels })
            print(epoch, acc) # final accuracy around 91%
        # Save the trained parameters.
        print('save model params')
        saver = tf.train.Saver()
        saver.save(sess, '8_2_net/my_net.ckpt')

    with tf.Session() as sess:
        sess.run(init)
        # Accuracy right after re-initialisation (expected to be poor).
        acc = sess.run(accuracy, feed_dict = { x: mnist.test.images,
            y: mnist.test.labels })
        print('accuracy after initial:', acc)
        # Restore the saved parameters and re-evaluate.
        print('load model params')
        saver = tf.train.Saver()
        saver.restore(sess, '8_2_net/my_net.ckpt')
        acc = sess.run(accuracy, feed_dict = { x: mnist.test.images,
            y: mnist.test.labels })
        print('accuracy after restore:', acc)

def saver_model(p):
    ### Save the model graph & parameters (frozen GraphDef + checkpoint).
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    # Batch size and number of batches per epoch.
    batch_size = 100
    n_batch = mnist.train.num_examples // batch_size
    # Named placeholders so the nodes can be addressed after re-import.
    x = tf.placeholder(tf.float32, [None, 784], name = 'x_input')
    y = tf.placeholder(tf.float32, [None, 10], name = 'y_input')

    ## Single-layer softmax network.
    W = tf.Variable(tf.zeros([784, 10]))
    #  W = tf.Variable(tf.truncated_normal([784, 10], stddev = 0.1))
    b = tf.Variable(tf.zeros([10]))
    # BUGFIX: keep the raw logits separate; the original passed the softmax
    # output ('output') into softmax_cross_entropy_with_logits_v2, applying
    # softmax twice. The exported node name 'output' is unchanged.
    logits = tf.matmul(x, W) + b
    prediction = tf.nn.softmax(logits, name = 'output')
    # Cross-entropy loss on the raw logits, Adam optimizer.
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
        logits = logits, labels = y))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    init = tf.global_variables_initializer()
    # argmax finds the predicted / true class index.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    # Named so the accuracy node can be fetched from the imported graph.
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
            name = 'test_output')

    print('training model')
    file_model = '8_3_models/tfmodel.pb'
    with tf.Session() as sess:
        sess.run(init)
        # NOTE(review): range(0) skips training entirely — presumably left
        # at 0 on purpose for a save/restore smoke test; raise it to train.
        for epoch in range(0):
            for batch in range(n_batch):
                # One batch: images in batch_xs, labels in batch_ys.
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict = { x: batch_xs, y: batch_ys })
            acc = sess.run(accuracy, feed_dict = { x: mnist.test.images,
                y: mnist.test.labels })
            print(epoch, acc)
        # Freeze: convert variables to constants, keeping the named nodes.
        output_graph_def = tf.graph_util.convert_variables_to_constants(sess,
                sess.graph_def, output_node_names = ['output',
                    'x_input', 'y_input', 'test_output'])
        # Write the frozen graph to disk.
        print('save graph to', file_model)
        with tf.io.gfile.FastGFile(file_model, mode = 'wb') as f:
            f.write(output_graph_def.SerializeToString())
        # Also save a checkpoint (same path prefix) for later fine-tuning.
        print('save model params')
        saver = tf.train.Saver()
        saver.save(sess, file_model)

def saver_model_test(p):
    ### Load a previously frozen model from disk and evaluate its accuracy.
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    file_model = '8_3_models/tfmodel.pb'
    # Read the serialized GraphDef and merge it into the default graph.
    print('load graph from', file_model)
    with tf.io.gfile.FastGFile(file_model, 'rb') as model_file:
        loaded_graph_def = tf.compat.v1.GraphDef()
        loaded_graph_def.ParseFromString(model_file.read())
        tf.import_graph_def(loaded_graph_def, name = '')

    print('test model')
    with tf.Session() as sess:
        # The graph was imported without a name prefix, so its tensors are
        # addressed directly by the node names saved by saver_model.
        feed = {
            'x_input:0': mnist.test.images,
            'y_input:0': mnist.test.labels,
        }
        acc = sess.run('test_output:0', feed_dict = feed)
        print('accuracy after initial:', acc)

def saver_model_following_train(p):
    ### Restore a saved checkpoint, continue training, then re-save.
    mnist = input_data.read_data_sets(mnist_data_path, one_hot = True)

    # Batch size and number of batches per epoch.
    batch_size = 100
    n_batch = mnist.train.num_examples // batch_size
    # Named placeholders (must match the graph saved by saver_model).
    x = tf.placeholder(tf.float32, [None, 784], name = 'x_input')
    y = tf.placeholder(tf.float32, [None, 10], name = 'y_input')

    ## Single-layer softmax network.
    W = tf.Variable(tf.zeros([784, 10]))
    #  W = tf.Variable(tf.truncated_normal([784, 10], stddev = 0.1))
    b = tf.Variable(tf.zeros([10]))
    # BUGFIX: keep the raw logits separate; the original passed the softmax
    # output into softmax_cross_entropy_with_logits_v2 (double softmax).
    # The exported node name 'output' is unchanged.
    logits = tf.matmul(x, W) + b
    prediction = tf.nn.softmax(logits, name = 'output')
    # Cross-entropy loss on the raw logits, Adam optimizer.
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
        logits = logits, labels = y))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    init = tf.global_variables_initializer()
    # argmax finds the predicted / true class index.
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))
    # Named so the accuracy node can be fetched from the imported graph.
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32),
            name = 'test_output')

    print('following training model')
    file_model = '8_3_models/tfmodel.pb'
    with tf.Session() as sess:
        # Restore the previously saved parameters instead of initialising.
        saver = tf.train.Saver()
        saver.restore(sess, file_model)
        for epoch in range(5):
            for batch in range(n_batch):
                # One batch: images in batch_xs, labels in batch_ys.
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                sess.run(train_step, feed_dict = { x: batch_xs, y: batch_ys })
            acc = sess.run(accuracy, feed_dict = { x: mnist.test.images,
                y: mnist.test.labels })
            print(epoch, acc)
        # Freeze: convert variables to constants, keeping the named nodes.
        output_graph_def = tf.graph_util.convert_variables_to_constants(sess,
                sess.graph_def, output_node_names = ['output',
                    'x_input', 'y_input', 'test_output'])
        # Save the updated checkpoint.
        print('save model params')
        saver = tf.train.Saver()
        saver.save(sess, file_model)
        # Write the frozen graph to disk.
        print('save graph to', file_model)
        with tf.io.gfile.FastGFile(file_model, mode = 'wb') as f:
            f.write(output_graph_def.SerializeToString())

def inception_v3_download(p):
    ### Download and unpack the pretrained Inception-v3 model, then write
    ### its graph structure to a TensorBoard log directory.
    inception_pretrain_model_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'

    # Local storage directory.
    file_path = '8_4_inception_model/'
    if not os.path.exists(file_path):
        os.makedirs(file_path)

    # Derive the archive path from the URL's file name.
    tar_file = os.path.join(file_path,
            inception_pretrain_model_url.split('/')[-1])
    print('download:', tar_file)
    # Download the archive (skipped when already present).
    if not os.path.exists(tar_file):
        r = requests.get(inception_pretrain_model_url, stream = True)
        # BUGFIX: fail loudly on HTTP errors instead of silently saving an
        # error page as the archive.
        r.raise_for_status()
        with open(tar_file, 'wb') as f:
            for chunk in r.iter_content(chunk_size = 1024):
                if chunk:
                    f.write(chunk)
        print('finish download.')
    else:
        print('file exist!')
    # Unpack the archive.
    print('untar...')
    untar_path = file_path + inception_pretrain_model_url.split('/')[-1
            ].split('.')[0]
    if not os.path.exists(untar_path):
        os.makedirs(untar_path)
    # NOTE(review): extractall() trusts the archive's member paths; that is
    # acceptable for this known Google archive, unsafe for untrusted tarballs.
    tarfile.open(tar_file, 'r:gz').extractall(untar_path)

    # TensorBoard log directory for the graph structure.
    log_path = file_path + 'log'
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    print('save model to', log_path)
    # classify_image_graph_def.pb is Google's pretrained GraphDef.
    graph_file = os.path.join(untar_path, 'classify_image_graph_def.pb')
    with tf.Session() as sess:
        # Import the pretrained model into the default graph.
        with tf.io.gfile.FastGFile(graph_file, 'rb') as f:
            graph_def = tf.compat.v1.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name = '')
        # Save the graph structure so TensorBoard can display it.
        writer = tf.summary.FileWriter(log_path, sess.graph)
        writer.close()

def inception_v3_image_recognition(p):
    ### Classify local JPEG images with the pretrained Inception-v3 model.
    # Architecture notes: http://blog.csdn.net/u010402786/article/details/52433324
    file_path = '8_4_inception_model/inception-2015-12-05/'
    class NodeLookup(object):
        """Maps Inception class ids (1~1000) to human-readable names."""
        def __init__(self):
            label_lookup = file_path + 'imagenet_2012_challenge_label_map_proto.pbtxt'
            uid_lookup = file_path + 'imagenet_synset_to_human_label_map.txt'
            self.node_lookup = self.load(label_lookup, uid_lookup)

        def load(self, label_lookup, uid_lookup):
            # Map uid strings (n********) to human-readable names.
            uid_to_human = { }
            with tf.io.gfile.GFile(uid_lookup) as f:
                for line in f:
                    line = line.strip('\n')
                    # Each line: "<uid>\t<name>".
                    parsed_items = line.split('\t')
                    uid_to_human[parsed_items[0]] = parsed_items[1]
            # Map class ids 1~1000 to uid strings.
            node_id_to_uid = { }
            # BUGFIX: initialise so a malformed file (a string line before
            # its class-id line) cannot raise NameError.
            target_class = None
            with tf.io.gfile.GFile(label_lookup) as f:
                for line in f:
                    if line.startswith('  target_class:'):
                        # Class id in 1~1000.
                        target_class = int(line.split(': ')[1])
                    if line.startswith('  target_class_string:'):
                        # Uid string; [1:-2] strips the quotes and newline.
                        target_class_string = line.split(': ')[1]
                        if target_class is not None:
                            node_id_to_uid[target_class] = target_class_string[1:-2]
            # Compose the two maps: class id -> human-readable name.
            node_id_to_name = { }
            for key, val in node_id_to_uid.items():
                node_id_to_name[key] = uid_to_human[val]
            return node_id_to_name

        # Return the class name for a class id ('' when unknown).
        def id_to_string(self, node_id):
            if node_id not in self.node_lookup:
                return ''
            return self.node_lookup[node_id]

    # Import the pretrained model into the default graph.
    with tf.io.gfile.GFile(file_path +
            'classify_image_graph_def.pb', 'rb') as f:
        graph_def = tf.compat.v1.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name = '')

    # Directory holding the JPEG test images.
    test_image_path = './8_5_images/'
    with tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
        # PERF: build the lookup table once; the original re-parsed both
        # mapping files for every image inside the loop.
        node_lookup = NodeLookup()
        # Walk the image directory.
        for root, dirs, files in os.walk(test_image_path):
            for file in files:
                image_path = os.path.join(root, file)
                # Load the raw JPEG bytes (handle closed promptly).
                with tf.io.gfile.GFile(image_path, 'rb') as img_f:
                    image_data = img_f.read()
                # Run the network; squeeze to a 1-D probability vector.
                prediction = np.squeeze(sess.run(softmax_tensor,
                    { 'DecodeJpeg/contents:0': image_data }))

                # Print the image path and name.
                print(image_path)
                #  # Show the image
                #  img = Image.open(image_path)
                #  plt.imshow(img)
                #  plt.axis('off')
                #  plt.show()

                # argsort is ascending: take the last five ([-5:]) and
                # reverse ([::-1]) to get the top-5 classes.
                top_k = prediction.argsort()[-5:][::-1]
                for node_id in top_k:
                    # Class name and confidence for each candidate.
                    human_string = node_lookup.id_to_string(node_id)
                    score = prediction[node_id]
                    print('%%%02f: %s(%d)' %
                            (score * 100, human_string, node_id))
                print()

def inception_v3_retrain(p):
    ### Prepare local directories for retraining Inception-v3 (stub).
    # TensorFlow source tree: https://github.com/tensorflow/tensorflow.git
    tf_source_dir = '../../official/tensorflow/'
    # Training data set from the VGG site: http://www.robots.ox.ac.uk/~vgg/data/
    retrain_dir = '9_2_retrain/'
    if not os.path.exists(retrain_dir):
        os.makedirs(retrain_dir)
    # Directory where cached bottleneck values will be stored.
    bottleneck_dir = retrain_dir + 'bottleneck/'

    





def entity(p=None):
    """Print the TensorFlow version and show the interactive lesson menu.

    Args:
        p: optional payload, accepted for signature compatibility; unused.
    """
    print('TensorFlow version: %s (%s)' % (tf.__version__, tf.__path__))
    # Silence TF INFO/WARNING log noise before any graph work starts.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    # (label, handler) pairs; a None handler marks a section header.
    entries = [
        ('\n---- basic', None),
        ('[2_1] graph create & run', graph_create),
        ('[2_2] variables', variables),
        ('[2_3] fetch & feed', fetch_feed),
        ('[2_4] linear regression', linear_regression),
        ('[3_1] nonlinear regression', nonlinear_regression),
        ('[3_3] mnist - quadratic', mnist_quadratic),
        ('[w_3] mnist - quadratic(improve)', mnist_quadratic_improve),
        ('[4_1] mnist - cross entropy', mnist_cross_entropy),
        ('[4_2] mnist - dropout', mnist_dropout),
        ('[4_4] mnist - optimizer', mnist_optimizer),
        ('[w_4] mnist - (further imporve)', mnist_further_imporve),
        ('[5_2] tensorboard - structure', tensorboard_structure),
        ('[5_3] tensorboard - running log', tensorboard_running_log),
        ('[5_4] tensorboard - visualization', tensorboard_visualization),
        ('[6_2] mnist - CNN', mnist_cnn),
        ('[w_6] mnist - CNN with tensorboard(test)', mnist_cnn_tensorboard_test),
        ('[7_1] mnist - CNN with tensorboard', mnist_cnn_tensorboard),
        ('[7_4] mnist - RNN', mnist_rnn),
        ('\n---- advanced', None),
        ('[8_2] saver - params', saver_params),
        ('[8_3] saver - model (graph & params)', saver_model),
        ('[8_3] saver - load model for test', saver_model_test),
        ('[8_3] saver - load model for following train', saver_model_following_train),
        ('[8_4] inception-v3 - download', inception_v3_download),
        ('[8_5] inception-v3 - image recognition', inception_v3_image_recognition),
        ('[9_2] inception-v3 - retrain', inception_v3_retrain),
    ]
    # Headers become {'name': ...}; runnable items also carry 'func'.
    menu = [
        {'name': label} if func is None else {'name': label, 'func': func}
        for label, func in entries
    ]
    sdb.menu(menu, 'TensorFlow lesson')

# Run the lesson menu only when executed as a script, not on import.
# (The original dead `else: pass` branch was removed.)
if __name__ == '__main__':
    entity()

