import tensorflow.compat.v1 as tf
import tensorflow as tsf
from tensorflow.contrib.layers import fully_connected
from tensorflow.examples.tutorials.mnist import input_data

# Fix the TF graph-level random seed so runs are reproducible.
tf.set_random_seed(777)

import sys
import os

# Locate the MNIST data directory; abort early if it is missing.
path = r'../../../../large_data/DL1/mnist'
if not os.path.exists(path):
    print('[[[ DIR WRONG! ]]]', file=sys.stderr)
    # BUG FIX: exit with a non-zero status so shells/CI can detect the
    # failure — the original called sys.exit(0), which signals success.
    sys.exit(1)
mnist = input_data.read_data_sets(path)

# Treat each 28*28 MNIST image as a sequence of 28 rows, where each row is a
# vector of length 28 fed to the RNN at one time step.
# The recurrent hidden layer has 128 units; the output layer has 10 units
# (one per digit class — the output size must equal the number of classes).

# Hyper-parameters
batch_size = 100 # mini-batch size
n_steps = 28 # number of time steps (sequence length = image rows)
n_inputs = 28 # input length per step (pixels per row)

n_neurons = 128 # hidden-state size (units per RNN layer)
n_layers = 3 # number of stacked RNN layers
n_outputs = 10 # 10-way classification (digits 0-9)
learning_rate = 0.001 # Adam learning rate
n_epochs = 1 # number of training epochs
n_print = 20 # How many times in a epoch to print information
# Input / label placeholders
X = tf.placeholder(tf.float32, [None, n_steps, n_inputs]) # shape (?, 28, 28)
Y = tf.placeholder(tf.int32, [None]) # 1-D integer labels, values 0-9
# ---- Model -------------------------------------------------------------
# Build a stack of n_layers LSTM cells and wrap them into one multi-layer cell.
cells = []
for _ in range(n_layers):
    cells.append(tsf.contrib.rnn.LSTMCell(num_units=n_neurons))
stacked_cell = tsf.contrib.rnn.MultiRNNCell(cells)

# Unroll the stacked RNN across the n_steps row-sequence; rnn_outputs has
# shape (?, 28, 128) — one top-layer output per time step.
rnn_outputs, final_states = tf.nn.dynamic_rnn(stacked_cell, X, dtype=tf.float32)

# Project the output of the LAST time step onto the n_outputs class scores.
last_step_output = rnn_outputs[:, -1]
logits = fully_connected(last_step_output, n_outputs, activation_fn=None)

# ---- Loss and optimizer ------------------------------------------------
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=Y)
cost = tf.reduce_mean(xentropy)
train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# ---- Accuracy: fraction of samples whose top-1 prediction equals Y ------
is_correct = tf.nn.in_top_k(logits, Y, 1)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
# ---- Training loop -----------------------------------------------------
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(n_epochs):
        n_batch = mnist.train.num_examples // batch_size
        # Print progress roughly n_print times per epoch. BUG FIX: clamp to
        # at least 1 — the original `n_batch // n_print` is 0 whenever
        # n_batch < n_print, making `iteration % group` raise ZeroDivisionError.
        group = max(1, n_batch // n_print)
        for iteration in range(n_batch):
            X_batch, Y_batch = mnist.train.next_batch(batch_size)
            # Reshape flat 784-pixel images into (batch, 28 steps, 28 inputs).
            X_batch = X_batch.reshape((-1, n_steps, n_inputs))
            _, loss_v, acc_v = sess.run([train, cost, accuracy],
                                        feed_dict={X: X_batch, Y: Y_batch})
            if iteration % group == 0:
                print(f'#{epoch + 1}:{iteration + 1}: cost = {loss_v}, acc = {acc_v}')
        # Report the epoch's final mini-batch if the loop above did not.
        # BUG FIX: guard on n_batch > 0 — with zero batches, `iteration` and
        # `loss_v` were never assigned and the original raised NameError here.
        if n_batch > 0 and iteration % group != 0:
            print(f'#{epoch + 1}:{iteration + 1}: cost = {loss_v}, acc = {acc_v}')
        # Accuracy on the last training batch and on the full test set.
        acc_train = accuracy.eval(feed_dict={X: X_batch, Y: Y_batch})
        acc_test = accuracy.eval(feed_dict={X: mnist.test.images.reshape((-1, n_steps, n_inputs)),
                                            Y: mnist.test.labels})
        print(epoch + 1, "Train accuracy:", acc_train, "Test accuracy:", acc_test)
