# -*- coding: utf-8 -*-
"""
Created on Tue Oct 17 16:31:21 2017

@author: xuanlei
"""

import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt

# define xavier initialization function, mean distribution or gauss distribution
# mean = 0, var = 2/(n_in+n_out) 
def xavier_init(fan_in, fan_out, constant=1):
    """Return a (fan_in, fan_out) float32 tensor drawn from the
    Xavier/Glorot uniform distribution U(-limit, limit), where
    limit = constant * sqrt(6 / (fan_in + fan_out)).

    fan_in: input node number
    fan_out: output node number
    constant: optional scale factor applied to the limit
    """
    limit = constant * np.sqrt(6 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=-limit, maxval=limit,
                             dtype=tf.float32)

# define standard scale function
def standard_scale(X_train, X_test):
    """Standardize both sets with statistics fitted on X_train only.

    Fitting the scaler exclusively on the training data avoids leaking
    test-set statistics into preprocessing.
    """
    scaler = prep.StandardScaler().fit(X_train)
    return scaler.transform(X_train), scaler.transform(X_test)

# define get random block function
def get_random_block_from_data(data, batch_size):
    """Return a random contiguous block of `batch_size` rows from `data`.

    BUG FIX: np.random.randint's upper bound is exclusive, so the original
    `randint(0, len(data) - batch_size)` could never select the last valid
    start index, and raised ValueError when len(data) == batch_size.
    """
    start_index = np.random.randint(0, len(data) - batch_size + 1)
    return data[start_index:start_index + batch_size]

class AdditiveGaussianNoiseAutoencoder(object):   
    # define construct function
    def __init__(self, input_size, encoder_1_size,encoder_2_size,encoder_3_size,decoder_1_size,decoder_2_size,reg=0.01, beta=0.01, rou = 0.05,LR = 0.01):
        self.input_size = input_size
        self.encoder_1_size = encoder_1_size
        self.encoder_2_size = encoder_2_size
        self.encoder_3_size = encoder_3_size
        self.decoder_1_size = decoder_1_size
        self.decoder_2_size = decoder_2_size

        self.LR = LR
        self.reg = reg
        self.beta = beta
        self.rou = rou
       # define auto-encoder net structure
        with tf.name_scope('input_layer'):
            self.input_layer()

        with tf.name_scope('encoder_layer_1'):
            self.encoder_layer_1()
        
        with tf.name_scope('encoder_layer_2'):
            self.encoder_layer_2()
        
        with tf.name_scope('encoder_layer_3'):
            self.encoder_layer_3()
        
        with tf.name_scope('decoder_layer_1'):
            self.decoder_layer_1()
        
        with tf.name_scope('decoder_layer_2'):
            self.decoder_layer_2()

        with tf.name_scope('output_layer'):
            self.output_layer()

        with tf.name_scope('cost'):
            self.compute_cost()

        with tf.name_scope('train'):
            self.train_op = tf.train.AdamOptimizer(learning_rate=self.LR).minimize(self.cost)


    def input_layer(self):
         with tf.name_scope('input_layer'):
             self.x = tf.placeholder(tf.float32, [None, self.input_size])
             l_in_x = tf.reshape(self.x,[-1,self.input_size], name='x_input')
             self.l_in_y = l_in_x
#             image1 = tf.image.decode_png(self.x, channels=1)
#             image1 = tf.expand_dims(image1, 0)
#             tf.summary.image('x_input',image1)
                        
             
    def encoder_layer_1(self):
        with tf.name_scope('encoder_layer_1'):
            encoder_1_x = tf.reshape(self.l_in_y, [-1,self.input_size])
            self.W_en_1 = tf.Variable(tf.truncated_normal([self.input_size, self.encoder_1_size]),name='W_en_1')
            b_en_1 = tf.Variable(tf.zeros([self.encoder_1_size,]),dtype=tf.float32,name = 'b_en_1')
            temp_en_1 = tf.nn.tanh(tf.add(tf.matmul(encoder_1_x,self.W_en_1),b_en_1))
            '''
            batch_norm_layer(temp_en_1)
            
            '''
            self.encoder_1_y = temp_en_1
    
    def encoder_layer_2(self):
        with tf.name_scope('encoder_layer_2'):
            encoder_2_x = tf.reshape(self.encoder_1_y, [-1,self.encoder_1_size])
            self.W_en_2 = tf.Variable(tf.truncated_normal([self.encoder_1_size, self.encoder_2_size]),name='W_en_2')
            b_en_2 = tf.Variable(tf.zeros([self.encoder_2_size,]),dtype=tf.float32,name = 'b_en_2')
            temp_en_2 = tf.nn.tanh(tf.add(tf.matmul(encoder_2_x,self.W_en_2),b_en_2))
            '''
            batch_norm_layer(temp_en_2)
            
            '''
            self.encoder_2_y = temp_en_2
            
    def encoder_layer_3(self):
        with tf.name_scope('encoder_layer_3'):
            encoder_3_x = tf.reshape(self.encoder_2_y, [-1,self.encoder_2_size])
            self.W_en_3 = tf.Variable(tf.truncated_normal([self.encoder_2_size, self.encoder_3_size]),name='W_en_3')
#            tf.Variable("W_en_3", shape=[self.encoder_2_size, self.encoder_3_size],initializer=tf.contrib.layers.xavier_initializer())
            b_en_3 = tf.Variable(tf.zeros([self.encoder_3_size,]),dtype=tf.float32,name = 'b_en_3')
            temp_en_3 = tf.nn.tanh(tf.add(tf.matmul(encoder_3_x,self.W_en_3),b_en_3))
            '''
            batch_norm_layer(temp_en_3)
            
            '''
            self.encoder_3_y = temp_en_3
    
    def decoder_layer_1(self):
        with tf.name_scope('decoder_layer_1'):
            decoder_1_x = tf.reshape(self.encoder_3_y, [-1,self.encoder_3_size])
            self.W_de_1 = tf.Variable(tf.truncated_normal([self.encoder_3_size, self.decoder_1_size]),name='W_de_1')
#            tf.Variable("W_de_1", shape=[self.encoder_3_size, self.decoder_1_size],initializer=tf.contrib.layers.xavier_initializer())
            b_de_1 = tf.Variable(tf.zeros([self.decoder_1_size,]),dtype=tf.float32,name = 'b_de_1')
            temp_de_1 = tf.nn.tanh(tf.add(tf.matmul(decoder_1_x,self.W_de_1),b_de_1))
            '''
            batch_norm_layer(temp_de_1)
            
            '''
            self.decoder_1_y = temp_de_1
    
    def decoder_layer_2(self):
        with tf.name_scope('decoder_layer_2'):
            decoder_2_x = tf.reshape(self.decoder_1_y, [-1,self.decoder_1_size])
            self.W_de_2 =  tf.Variable(tf.truncated_normal([self.decoder_1_size, self.decoder_2_size]),name='W_de_2')
#            tf.Variable("W_de_2", shape=[self.decoder_1_size, self.decoder_2_size],initializer=tf.contrib.layers.xavier_initializer())
            b_de_2 = tf.Variable(tf.zeros([self.decoder_2_size,]),dtype=tf.float32,name = 'b_de_2')
            temp_de_2 = tf.nn.tanh(tf.add(tf.matmul(decoder_2_x,self.W_de_2),b_de_2))
            '''
            batch_norm_layer(temp_de_2)
            
            '''
            self.decoder_2_y = temp_de_2
    
    def output_layer(self):
        with tf.name_scope('output_layer'):
            l_out_x = tf.reshape(self.decoder_2_y,[-1,self.decoder_2_size],name = 'decoder_2_input')
            self.Ws_out =tf.Variable(tf.truncated_normal([self.decoder_2_size, self.input_size]),name='W_out')
#            tf.Variable("W_de_out", shape=[self.decoder_2_size, self.input_size],initializer=tf.contrib.layers.xavier_initializer())
            bs_out = tf.Variable(tf.zeros([self.input_size,]),dtype=tf.float32,name = 'b_out')
            self.pred = tf.nn.tanh(tf.matmul(l_out_x,self.Ws_out)+bs_out)
            tf.summary.histogram('w', self.Ws_out)
            tf.summary.histogram('b', bs_out)
            tf.summary.histogram('out', self.pred)
            
    def compute_cost(self):
#        self.cost = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.pred, self.x))))
#        self.cost = 0.5*tf.reduce_mean(tf.pow(tf.subtract(self.pred, self.x),2.0))

        self.cost = tf.reduce_mean(tf.square(tf.subtract(self.pred, self.x)))+self.reg*0.5*(tf.reduce_sum(tf.square(self.W_en_1))
                    +tf.reduce_sum(tf.square(self.W_en_2))+tf.reduce_sum(tf.square(self.W_en_3))
                    +tf.reduce_sum(tf.square(self.W_de_1))+tf.reduce_sum(tf.square(self.W_de_2))
                    +tf.reduce_sum(tf.square(self.Ws_out)))
        tf.summary.scalar('result_cost', self.cost)

 

            


if __name__  == '__main__':
    mnist = input_data.read_data_sets('MNIST_DATA', one_hot=True)

    tf.reset_default_graph()
    # 784 -> 512 -> 256 -> 121 (code) -> 256 -> 512 -> 784
    model = AdditiveGaussianNoiseAutoencoder(784, 512, 256, 121, 256, 512, LR=0.02)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        merged = tf.summary.merge_all()
        logdir = 'E:/auto_encoder_logdir'
        writer = tf.summary.FileWriter(logdir, sess.graph)

        # Standardize using training-set statistics only.
        X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
        n_samples = int(mnist.train.num_examples)
        training_epochs = 150
        batch_size = 5000
        display_step = 20

        # training process
        total_batch = int(n_samples / batch_size)  # hoisted: loop-invariant
        for epoch in range(training_epochs):
            avg_cost = 0.0
            for i in range(total_batch):
                batch_xs = get_random_block_from_data(X_train, batch_size)
                feed_dict_train = {model.x: batch_xs}
                _, cost = sess.run([model.train_op, model.cost], feed_dict=feed_dict_train)
                # BUG FIX: the original passed the per-epoch batch index `i`
                # as the summary step, so every epoch overwrote the same
                # TensorBoard points; use a monotonically increasing step.
                global_step = epoch * total_batch + i
                rs = sess.run(merged, feed_dict=feed_dict_train)
                writer.add_summary(rs, global_step)
                avg_cost += cost / total_batch
                if i % display_step == 0:
                    print('Epoch:', '%d' % (epoch + 1), 'cost=', '{:.9f}'.format(cost))
            # BUG FIX: avg_cost was initialized but never accumulated or reported.
            print('Epoch:', '%d' % (epoch + 1), 'avg cost=', '{:.9f}'.format(avg_cost))
        writer.close()

        # compute cost on the test set (BUG FIX: message said "coat")
        print('Total test cost:', str(sess.run(model.cost, feed_dict={model.x: X_test})))
        # Visualize: original digit, 11x11 code (121 units), reconstruction.
        encoder_test = sess.run(model.encoder_3_y, feed_dict={model.x: X_test[:10]})
        decoder_test = sess.run(model.pred, feed_dict={model.x: X_test[:10]})
        f, a = plt.subplots(3, 10, figsize=(10, 3))
        for i in range(10):
            a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
            a[1][i].imshow(np.reshape(encoder_test[i], (11, 11)))
            a[2][i].imshow(np.reshape(decoder_test[i], (28, 28)))
        # BUG FIX: without show() no figure window appears when run as a script.
        plt.show()