import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

"""
    RNN 递归神经网络
"""


def rnn(train_data, hidden_size, labels_classes):
    """Build an LSTM classifier over flattened 28x28 images.

    :param train_data: float32 tensor of shape [batch_size, 784]
    :param hidden_size: number of units in the LSTM cell
    :param labels_classes: number of output classes
    :return: unnormalized class scores (logits), shape [batch_size, labels_classes]
    """
    # Treat each image as a sequence: 28 time steps of 28 features each.
    train_data_inputs = tf.reshape(train_data, [-1, 28, 28])

    # Single LSTM cell with `hidden_size` units.
    rnn_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size)

    # state is an LSTMStateTuple:
    #   state[0] is the cell state (c) at the final time step
    #   state[1] is the hidden state (h) at the final time step
    outputs, state = tf.nn.dynamic_rnn(rnn_cell, train_data_inputs, dtype=tf.float32)

    # Fully-connected projection of the final hidden state to class scores.
    weights = tf.Variable(tf.truncated_normal([hidden_size, labels_classes], stddev=0.1))
    biases = tf.Variable(tf.constant(0.1, shape=[labels_classes]))

    # BUG FIX: the original applied tf.nn.softmax here, but the caller feeds
    # this result into softmax_cross_entropy_with_logits_v2, which applies
    # softmax internally — a double softmax that distorts the loss/gradients.
    # Return raw logits instead; argmax-based accuracy is unaffected because
    # softmax is monotonic.
    prediction = tf.matmul(state[1], weights) + biases
    return prediction


def train_model():
    """Train the LSTM MNIST classifier for 20 epochs and log summaries.

    Loads MNIST, restores a previous checkpoint, trains with Adam, prints
    the test-set accuracy once per epoch, writes TensorBoard summaries to
    logs/, and saves the model at the end.

    NOTE(review): read_model() calls Saver.restore unconditionally, which
    raises if net/my_net.ckpt does not exist yet — confirm a checkpoint is
    present before the first run.
    """
    # Number of LSTM units.
    hidden_size = 100
    # Number of label classes (digits 0-9).
    labels_classes = 10
    # Mini-batch size per training step.
    batch_size = 100

    # Placeholders for flattened images and one-hot labels.
    train_data = tf.placeholder(tf.float32, [None, 28 * 28])
    train_labels = tf.placeholder(tf.float32, [None, 10])

    prediction = rnn(train_data, hidden_size, labels_classes)

    # Cross-entropy loss.
    # NOTE(review): rnn() applies tf.nn.softmax to its output while
    # softmax_cross_entropy_with_logits_v2 applies softmax again internally;
    # prefer having rnn() return raw logits.
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=train_labels, logits=prediction))
    # Fix: loss is already a scalar — drop the redundant tf.reduce_mean
    # around it in the summary call.
    tf.summary.scalar("loss", loss)

    # Adam optimizer with a small learning rate.
    train = tf.train.AdamOptimizer(1e-4).minimize(loss)

    # Fraction of samples whose predicted class matches the label.
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(prediction, 1), tf.argmax(train_labels, 1)), dtype=tf.float32))
    # Fix: accuracy is already a scalar — drop the redundant tf.reduce_mean.
    tf.summary.scalar("accuracy", accuracy)

    # Merge all scalar summaries into a single op.
    merged = tf.summary.merge_all()

    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        file_write = tf.summary.FileWriter(logdir="logs/", graph=session.graph)

        # Restore previously saved weights (see docstring note).
        saver = read_model(session)

        # 20 full passes over the training set.
        for epoch in range(20):
            for _ in range(mnist.train.num_examples // batch_size):
                # Fix: use the configured batch_size instead of the
                # hard-coded literal 100.
                images, labels = mnist.train.next_batch(batch_size)
                session.run(train, feed_dict={train_data: images, train_labels: labels})

            # Report test-set accuracy after each epoch.
            print("accuracy=",
                  session.run(accuracy, feed_dict={train_data: mnist.test.images, train_labels: mnist.test.labels}))

            # Write this epoch's merged summaries to the log directory.
            file_write.add_summary(
                session.run(merged, feed_dict={train_data: mnist.test.images, train_labels: mnist.test.labels}), epoch)

        # Persist the trained model.
        save_model(session, saver)


def save_model(session, saver=None):
    """Persist the session's variables to net/my_net.ckpt.

    :param session: active tf.Session whose variables are saved
    :param saver: optional tf.train.Saver to reuse; a new one is
        created when omitted
    :return: the Saver that performed the save
    """
    if saver is None:
        saver = tf.train.Saver()
    saver.save(session, 'net/my_net.ckpt')
    return saver


def read_model(session, saver=None):
    """Restore variables from net/my_net.ckpt into the session.

    :param session: active tf.Session to restore into
    :param saver: optional tf.train.Saver to reuse; a new one is
        created when omitted
    :return: the Saver that performed the restore
    """
    if saver is None:
        saver = tf.train.Saver()
    saver.restore(session, 'net/my_net.ckpt')
    return saver


# Script entry point: run training when executed directly.
if __name__ == '__main__':
    train_model()
