﻿import matplotlib.pyplot as plt
import tensorflow as tf
import scipy.misc
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import argparse
from switch import switch
import os

'''
def statistic_zeros(b):

    l = sorted([(np.sum(b == i),i)for i in set(b.flat)])
    print('max times of element in b is {1} with {0} times'.format(*l[-1]))
'''

# Load MNIST with labels as one-hot vectors (downloaded/cached under this dir).
mnist = input_data.read_data_sets("/home/xyj/lyn/experiment", one_hot=True)

# Graph inputs: flattened 28x28 images and one-hot digit labels.
X = tf.placeholder(tf.float32, shape=[None, 784], name='x_input')
Y = tf.placeholder(tf.float32, shape=[None, 10], name='y_output')

# Scalar hyperparameter feeds.  NOTE(review): batch_size and train_epoch are
# fed every step but never consumed by any graph op — kept for compatibility.
learning_rate = tf.placeholder(tf.float32, shape=[], name='lr')
batch_size = tf.placeholder(tf.float32, shape=[], name='batch_size')
keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_prob')
train_epoch = tf.placeholder(tf.float32, shape=[], name='train_epoch')

# Command-line configuration for the experiment.
# Numeric flags now use type=float/int directly instead of string defaults
# converted with float() afterwards; the later float() calls become no-ops,
# so behavior is unchanged.
parses = argparse.ArgumentParser()
parses.add_argument('--gpu', default='0',
                    help='value for CUDA_VISIBLE_DEVICES')
parses.add_argument('--lr', type=float, default=0.1,
                    help='learning rate')
parses.add_argument('--batch_size', type=int, default=10000)
parses.add_argument('--keep_prob', type=float, default=1.0,
                    help='dropout keep probability used during training')
parses.add_argument('--epoch_num', type=int, default=10000)
parses.add_argument('--hide_capacity', type=int, default=100,
                    help='number of hidden-layer units')
parses.add_argument('--regularization_mode', choices=['l1', 'l2'], default=None)
parses.add_argument('--af', choices=['relu', 'tanh', 'sigmoid', 'leaky_relu'],
                    default='relu')
parses.add_argument('--op',
                    choices=['Adam', 'GradientDescent', 'Momentum', 'RMSProp'],
                    default='GradientDescent')

args = parses.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

learning_rate_value = float(args.lr)   # float() kept for backward compatibility
batch_size_value = args.batch_size
dropout_prob = float(args.keep_prob)   # float() kept for backward compatibility
op = args.op
# NOTE(review): 'eopch' is a typo, kept because later code references this
# name.  +1 so range() also runs the epoch numbered epoch_num.
eopch_num_value = args.epoch_num + 1
af = args.af
hide_capacity = args.hide_capacity
regularization_mode = args.regularization_mode


#W1 = tf.Variable(tf.zeros([784,100]),name = 'layer1_W')
#b1 = tf.Variable(tf.zeros([100]),name = 'layer1_b')

# Hidden-layer parameters (784 inputs -> hide_capacity units), initialized
# from a truncated normal.
# NOTE(review): stddev=1 is unusually large for a 784-input layer (common
# practice is ~1/sqrt(fan_in)) — changing it would alter results, so it is
# only flagged here, not changed.
W1 = tf.Variable(tf.truncated_normal(shape = [784,hide_capacity],stddev = 1),name = 'layer1_W')
b1 = tf.Variable(tf.truncated_normal([hide_capacity],stddev = 1),name = 'layer1_b')

# Pre-activation output of the hidden layer.
Y_out1 = tf.matmul(X,W1) + b1

# Apply the activation chosen by --af, then dropout.
# A plain dict dispatch replaces the third-party `switch` helper; the
# argparse `choices` list guarantees `af` is one of these keys, so the
# lookup cannot raise for valid invocations.
_ACTIVATIONS = {
    'relu': tf.nn.relu,
    'tanh': tf.nn.tanh,
    'sigmoid': tf.nn.sigmoid,
    'leaky_relu': tf.nn.leaky_relu,
}
Y_out1_drop = tf.nn.dropout(_ACTIVATIONS[af](Y_out1), keep_prob=keep_prob)

# Output-layer parameters (hide_capacity units -> 10 digit classes).
W2 = tf.Variable(tf.truncated_normal(shape = [hide_capacity,10],stddev = 1),name = 'layer2_W')
b2 = tf.Variable(tf.truncated_normal(shape = [10],stddev = 1),name = 'layer2_b')

# Class logits; softmax is applied inside the cross-entropy loss op below.
Y_out2 = tf.matmul(Y_out1_drop,W2) + b2

# Softmax cross-entropy loss, optionally with L1/L2 weight decay on both
# weight matrices (scale 0.001).
# BUG FIX: the original unconditionally re-assigned `loss` after the
# regularized branches, so --regularization_mode never had any effect.
# The unregularized loss now lives in the else branch.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits = Y_out2,labels = Y)
if regularization_mode == 'l2':
    regularizer = tf.contrib.layers.l2_regularizer(0.001)
    loss = tf.reduce_mean(cross_entropy + regularizer(W1) + regularizer(W2))
elif regularization_mode == 'l1':
    regularizer = tf.contrib.layers.l1_regularizer(0.001)
    loss = tf.reduce_mean(cross_entropy + regularizer(W1) + regularizer(W2))
else:
    loss = tf.reduce_mean(cross_entropy)

# Build the optimizer selected by --op (dict dispatch instead of the
# third-party `switch`); hyperparameters are identical to the originals:
# Adam(beta1=0.9, beta2=0.99, eps=1e-08), RMSProp(decay=0.9, momentum=0.0005),
# Momentum(momentum=0.9).  argparse `choices` guarantees `op` is a valid key.
_OPTIMIZERS = {
    'GradientDescent': lambda lr: tf.train.GradientDescentOptimizer(lr),
    'Adam': lambda lr: tf.train.AdamOptimizer(lr, 0.9, 0.99, 1e-08),
    'RMSProp': lambda lr: tf.train.RMSPropOptimizer(lr, 0.9, 0.0005),
    'Momentum': lambda lr: tf.train.MomentumOptimizer(lr, 0.9),
}
train_step = _OPTIMIZERS[op](learning_rate).minimize(loss)


with tf.Session() as sess:

    print(mnist.validation.num_examples)

    sess.run(tf.global_variables_initializer())

    # Evaluation ops, built ONCE before training.  The original recreated
    # them inside the epoch loop, growing the graph on every epoch.
    correct_prediction = tf.equal(tf.argmax(Y_out2, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Number of full batches per epoch (integer division instead of
    # float division + int() casts scattered through the loop).
    batch_count = mnist.train.num_examples // batch_size_value

    for epoch in range(eopch_num_value):

        for num in range(batch_count):
            X_batch, Y_batch = mnist.train.next_batch(batch_size_value)
            feed = {X: X_batch, Y: Y_batch, keep_prob: dropout_prob,
                    learning_rate: learning_rate_value,
                    batch_size: batch_size_value,
                    train_epoch: eopch_num_value}
            sess.run(train_step, feed_dict=feed)
            print('epoch : %d  batch_num: %d loss : %g'
                  % (epoch, num + 1, sess.run(loss, feed_dict=feed)))

        # Report the loss on the leftover examples when the dataset size is
        # not a multiple of the batch size.  The original (ab)used
        # assert/AssertionError as control flow for this; a plain check is
        # equivalent and survives `python -O`.  As before, this remainder
        # batch is only evaluated, not trained on.
        remain_examples = mnist.train.num_examples - batch_count * batch_size_value
        if remain_examples > 0:
            X_batch, Y_batch = mnist.train.next_batch(remain_examples)
            feed = {X: X_batch, Y: Y_batch, keep_prob: dropout_prob,
                    learning_rate: learning_rate_value,
                    batch_size: batch_size_value,
                    train_epoch: eopch_num_value}
            print('epoch : %d  batch_num: %d loss : %g'
                  % (epoch, batch_count + 1, sess.run(loss, feed_dict=feed)))

        # BUG FIX: evaluate with keep_prob=1.0 — dropout must be disabled at
        # evaluation time.  The original fed the training keep_prob here,
        # which randomly zeroes activations and biases the reported accuracy.
        print('The train accuracy is' + str(sess.run(
            accuracy,
            feed_dict={X: mnist.train.images, Y: mnist.train.labels,
                       keep_prob: 1.0,
                       learning_rate: learning_rate_value,
                       batch_size: batch_size_value,
                       train_epoch: eopch_num_value})))

    print('The test accuracy is ' + str(sess.run(
        accuracy,
        feed_dict={X: mnist.test.images, Y: mnist.test.labels,
                   keep_prob: 1.0,
                   learning_rate: learning_rate_value,
                   batch_size: batch_size_value,
                   train_epoch: eopch_num_value})))


