# -*- coding: utf-8 -*-
"""
Created on Tue Nov  7 13:43:49 2017

@author: xuanlei
"""

import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
# define xavier initialization function, mean distribution or gauss distribution
# mean = 0, var = 2/(n_in+n_out) 
def xavier_init(fan_in, fan_out, constant=1):
    """Return a (fan_in, fan_out) tensor of Xavier/Glorot-uniform values.

    Samples U(-limit, limit) with limit = constant * sqrt(6 / (fan_in + fan_out)),
    which keeps activation variance roughly constant across layers
    (mean 0, variance ~ 2 / (n_in + n_out)).

    fan_in: number of input nodes of the layer
    fan_out: number of output nodes of the layer
    constant: optional scale factor applied to the Xavier limit
    """
    # 6.0 forces true division — under Python 2, 6/(fan_in+fan_out) is
    # integer division and collapses to 0 for any realistic layer size.
    limit = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=-limit, maxval=limit, dtype=tf.float32)

# define standard scale fucntion
def standard_scale(X_train, X_test):
    """Standardize both splits to zero mean / unit variance.

    The scaler is fitted on X_train only and then applied to both arrays,
    so the test split never leaks into the normalization statistics.
    Returns the transformed (X_train, X_test) pair.
    """
    scaler = prep.StandardScaler().fit(X_train)
    return scaler.transform(X_train), scaler.transform(X_test)

# define get random block function
def get_random_block_from_data(data, batch_size):
    """Return a random contiguous block of ``batch_size`` samples from ``data``.

    data: indexable sequence/array of samples
    batch_size: number of consecutive samples to take; must be <= len(data)

    The upper bound is ``len(data) - batch_size + 1`` so the last possible
    start index is reachable and ``batch_size == len(data)`` is valid
    (the original exclusive bound raised ValueError in that case and could
    never return the final block).
    """
    start_index = np.random.randint(0, len(data) - batch_size + 1)
    return data[start_index:(start_index + batch_size)]

class AdditiveGaussianNoiseAutoencoder(object):
    """Denoising autoencoder with additive Gaussian input corruption.

    Graph (built entirely in __init__):
        x -> (+noise) -> encoder h1 -> encoder h2 -> decoder h3 -> linear output
    Trained with Adam to minimise 0.5 * mean((pred - x)^2), i.e. to
    reconstruct the *clean* input from its noise-corrupted version.

    NOTE(review): each layer method opens a tf.name_scope with the same name
    as the scope already opened around its call in __init__, so ops end up
    under doubled scopes like 'input_layer/input_layer/...'.
    """
    # define construct function
    def __init__(self, input_size, h1_size,h2_size,h3_size,noise_level=0.2,transfer_function=tf.nn.softplus,LR = 0.001):
        """Build the whole computation graph at construction time.

        input_size: dimensionality of one flattened input sample
        h1_size, h2_size, h3_size: widths of the three hidden layers
        noise_level: scale factor of the additive Gaussian corruption
        transfer_function: activation for all hidden layers (default softplus)
        LR: Adam learning rate
        """
        self.input_size = input_size
        self.h1_size = h1_size
        self.h2_size = h2_size
        self.h3_size = h3_size
        self.transfer = transfer_function
        self.LR = LR
        self.noise_level = noise_level
        # define auto-encoder net structure, layer by layer
        with tf.name_scope('input_layer'):
            self.input_layer()

        with tf.name_scope('encoder_layer_1'):
            self.encoder_layer_1()
        
        with tf.name_scope('encoder_layer_2'):
            self.encoder_layer_2()
            
        with tf.name_scope('decoder_layer_1'):
            self.decoder_layer_1()        

        with tf.name_scope('output_layer'):
            self.output_layer()

        with tf.name_scope('cost'):
            self.compute_cost()

        with tf.name_scope('train'):
            # single training op: Adam on the reconstruction cost
            self.train_op = tf.train.AdamOptimizer(learning_rate=self.LR).minimize(self.cost)


    def input_layer(self):
         """Create the input placeholder and a noisy copy for visualisation.

         Defines:
             self.x      -- (None, input_size) float32 placeholder
             self.l_in_y -- clean reshaped input passed on to the encoder
             self.noo    -- noise-corrupted input; not consumed by the
                            network itself, only fetched later for plotting
         """
         with tf.name_scope('input_layer'):
             self.x = tf.placeholder(tf.float32, [None, self.input_size])
             l_in_x = tf.reshape(self.x,[-1,self.input_size], name='x_input')
             # the clean input is forwarded; the corruption actually used
             # for training is re-applied inside encoder_layer_1 (self.no)
             self.l_in_y = l_in_x
             self.noo = l_in_x+self.noise_level*tf.random_normal((self.input_size,))
#             image1 = tf.image.decode_png(self.x, channels=1)
#             image1 = tf.expand_dims(image1, 0)
#             tf.summary.image('x_input',image1)
                        
             
    def encoder_layer_1(self):
        """First encoder layer: h1 = transfer(W1 @ (x + noise) + b1)."""
        with tf.name_scope('encoder_layer_1'):
            h1_x = tf.reshape(self.l_in_y, [-1,self.input_size])
            # Xavier-initialised weights, zero-initialised biases
            Ws_h1 = tf.Variable(xavier_init(self.input_size, self.h1_size),name = "en_W1")
            bs_h1 = tf.Variable(tf.zeros([self.h1_size,]),dtype=tf.float32,name = 'en_bis1')
            # additive Gaussian corruption — this is the noise the network
            # is actually trained to remove
            self.no = h1_x+self.noise_level*tf.random_normal((self.input_size,))
            non_bn_h1 = self.transfer(tf.matmul(self.no,Ws_h1)+bs_h1)
            '''
            batch_norm_layer
            '''
            self.h1_y = non_bn_h1
            
    def encoder_layer_2(self):
        """Second encoder layer: h2 = transfer(W2 @ h1 + b2) — the code layer."""
        with tf.name_scope('encoder_layer_2'):
            h2_x = tf.reshape(self.h1_y, [-1,self.h1_size])
            Ws_h2 = tf.Variable(xavier_init(self.h1_size, self.h2_size),name = "en_W2")
            bs_h2 = tf.Variable(tf.zeros([self.h2_size,]),dtype=tf.float32,name = 'en_bis2')
            non_bn_h2 = self.transfer(tf.matmul(h2_x,Ws_h2)+bs_h2)
            '''
            batch_norm_layer
            '''
            self.h2_y = non_bn_h2
    
    def decoder_layer_1(self):
        """First decoder layer: h3 = transfer(W @ h2 + b), widening back out."""
        with tf.name_scope('decoder_layer_1'):
            h1_x_de = tf.reshape(self.h2_y, [-1,self.h2_size])
            Ws_h1_de = tf.Variable(xavier_init(self.h2_size, self.h3_size),name = "de_W1")
            bs_h1_de = tf.Variable(tf.zeros([self.h3_size,]),dtype=tf.float32,name = 'de_bis1')
            non_bn_h1_de = self.transfer(tf.matmul(h1_x_de,Ws_h1_de)+bs_h1_de)
            '''
            batch_norm_layer
            '''
            self.h3_y = non_bn_h1_de
            
    def output_layer(self):
        """Linear reconstruction layer: pred = W_out @ h3 + b_out (no activation).

        Histogram summaries of the output weights, biases and predictions
        are registered for TensorBoard.
        """
        with tf.name_scope('output_layer'):
            l_out_x = tf.reshape(self.h3_y,[-1,self.h3_size],name = 'y_input')
            Ws_out = tf.Variable(xavier_init(self.h3_size, self.input_size),name = "out_w")
            bs_out = tf.Variable(tf.zeros([self.input_size,]),dtype=tf.float32,name = 'out_b')
#            self.pred = tf.nn.tanh(tf.matmul(l_out_x,Ws_out)+bs_out)
            # linear output — pairs with the squared-error cost below
            self.pred = tf.matmul(l_out_x,Ws_out)+bs_out
            tf.summary.histogram('w', Ws_out)
            tf.summary.histogram('b', bs_out)
            tf.summary.histogram('out', self.pred)
            
    def compute_cost(self):
        """Reconstruction cost: 0.5 * mean((pred - x)^2), logged as a scalar."""
        with tf.name_scope('cost'):
#            self.cost = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(self.pred, self.x))))
            self.cost = 0.5*tf.reduce_mean(tf.pow(tf.subtract(self.pred, self.x),2.0))
            tf.summary.scalar('result_cost', self.cost)
            

 

            


if __name__ == '__main__':
    # Build a fresh graph: 770-d input, 320-128-320 hidden widths.
    tf.reset_default_graph()
    model = AdditiveGaussianNoiseAutoencoder(770, 320, 128, 320, LR=0.001)
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        merged = tf.summary.merge_all()
        logdir = 'E:/c_coder_logdir'
        writer = tf.summary.FileWriter(logdir, sess.graph)

        # NOTE(review): `xr` is not defined anywhere in this file — the raw
        # training matrix must be loaded before this line runs (the MNIST
        # loader import at the top suggests it was loaded elsewhere).
        # Train and "test" splits are the same array here, so the test cost
        # below is really training cost.
        X_train, X_test = standard_scale(xr, xr)
        n_samples = int(X_train.shape[0])
        training_epochs = 50
        batch_size = 10000
        display_step = 80

        # ----- training loop -----
        total_batch = int(n_samples / batch_size)  # loop-invariant, hoisted
        global_step = 0                            # monotone step for summaries
        for epoch in range(training_epochs):
            for i in range(total_batch):
                batch_xs = get_random_block_from_data(X_train, batch_size)
                feed_dict_train = {model.x: batch_xs}
                _, cost = sess.run([model.train_op, model.cost],
                                   feed_dict=feed_dict_train)
                rs = sess.run(merged, feed_dict=feed_dict_train)
                # use a global step: writing with the per-epoch index `i`
                # made every epoch overwrite the previous TensorBoard curve
                writer.add_summary(rs, global_step)
                global_step += 1
                if i % display_step == 0:
                    print('Epoch:', '%d' % (epoch + 1),
                          'cost=', '{:.9f}'.format(cost))

        saver.save(sess, 'adencoder/para_log')
        writer.close()

        # compute the cost on the test set ("coat" typo fixed)
        print('Total test cost:',
              str(sess.run(model.cost, feed_dict={model.x: X_test})))

        # ----- visualisation: clean input, noisy input, code, reconstruction -----
        noise_test = sess.run(model.noo, feed_dict={model.x: X_test[:10]})
        encoder_test = sess.run(model.h2_y, feed_dict={model.x: X_test[:10]})
        decoder_test = sess.run(model.pred, feed_dict={model.x: X_test[:10]})
        f, a = plt.subplots(4, 10, figsize=(10, 4))
        for i in range(10):
            a[0][i].imshow(np.reshape(X_test[i], (10, 77)))
            a[1][i].imshow(np.reshape(noise_test[i], (10, 77)))
            # h2 has 128 units: (12, 12) == 144 elements raised ValueError;
            # 16 * 8 == 128 is a valid factorisation
            a[2][i].imshow(np.reshape(encoder_test[i], (16, 8)))
            a[3][i].imshow(np.reshape(decoder_test[i], (10, 77)))
        result_train2 = sess.run(model.h2_y, feed_dict={model.x: X_train})