import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from Autoencoder import AutoEncoder
import matplotlib.pyplot as plt

# Load the MNIST dataset (labels are one-hot encoded, though only the
# images are used — an autoencoder reconstructs its own input).
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)

# Training hyper-parameters.
batch_size = 50                                        # samples per mini-batch
epochs = 100                                           # full passes over the training set
n_batches = mnist.train.num_examples // batch_size     # mini-batches per epoch

# Build the autoencoder graph with a 64-dimensional latent code.
auto_encoder = AutoEncoder(encoding_dim=64)

with tf.Session() as sess:
    # Initialize all graph variables before training.
    sess.run(tf.global_variables_initializer())
    print('Initialized')

    for epoch in range(1, epochs + 1):
        for _ in range(n_batches):
            images, _labels = mnist.train.next_batch(batch_size)
            # For an autoencoder the target equals the input batch.
            loss_value, _ = sess.run(
                [auto_encoder.loss, auto_encoder.train_op],
                feed_dict={auto_encoder.inputs: images,
                           auto_encoder.targets: images})
        # Report the loss of the last processed batch once per epoch.
        print('Epoch:{}/{}'.format(epoch, epochs),
              'Training loss: {:.4f}'.format(loss_value))

    # Pick a single test image, shaped (1, 784) for the feed dict,
    # and display the original digit.
    sample = mnist.test.images[1].reshape((1, 784))
    plt.imshow(sample.reshape([28, 28]), cmap='Greys_r')
    plt.title('original')
    plt.show()

    # Run the sample through the network to obtain both the
    # reconstruction and the compressed (latent) vector.
    decoded_img, latent = sess.run(
        [auto_encoder.decode, auto_encoder.encoded],
        feed_dict={auto_encoder.inputs: sample})

    plt.imshow(decoded_img.reshape([28, 28]), cmap='Greys_r')
    plt.title('reconstructed')
    plt.show()

    plt.imshow(latent.reshape([64, 1]), cmap='Greys_r')
    plt.title('compressed')
    plt.show()
    







