import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import xlwt
import numpy as np

# Names of supported activation functions (dispatched by activationFunction).
actFunction =[
    "tanh",
    "sigmoid",
    "relu",
    "crelu",
    "softplus"
]

# Weight-initialization schemes (dispatched by initialFunction).
initializeWay= [
    "zero",
    "gaussian distribution",
    "Xavier",
    "MSRA",
]

# Loss-function names accepted by lossFunction.
lossWay = [
    "meanSquareError",
    "crossEntropy"
]

# Optimizer names accepted by optimizerFunction.
optimizerNames = [
    'Gradient',
    'Adam'
]


# Activation functions. TensorFlow ships many built-ins; after installing,
# see tensorflow -> python -> ops -> nn.py.
def activationFunction(initalParam, name):
    """Apply the activation function selected by `name` to `initalParam`.

    Args:
        initalParam: input tensor.
        name: one of the strings in `actFunction`.

    Returns:
        The activated tensor; falls back to the original zero-tensor
        behavior for unrecognized names.
    """
    if name == "tanh":
        # BUG FIX: the original called tf.tan (trigonometric tangent).
        # The hyperbolic-tangent activation is tf.tanh.
        return tf.tanh(initalParam)
    elif name == "sigmoid":
        return tf.sigmoid(initalParam)
    elif name == "relu":
        return tf.nn.relu(initalParam)
    elif name == "crelu":
        # NOTE: crelu doubles the size of the last dimension of its input.
        return tf.nn.crelu(initalParam)
    elif name == "softplus":
        return tf.nn.softplus(initalParam)
    else:
        # NOTE(review): tf.zeros expects a *shape*, not a tensor — this
        # fallback looks accidental; confirm the intended behavior.
        return tf.zeros(initalParam)

# Initialization background: https://blog.csdn.net/hai008007/article/details/79735491
def initialFunction(name, shape):
    """Create a tf.Variable of `shape` initialized by the named scheme.

    Args:
        name: one of the strings in `initializeWay`.
        shape: variable shape; shape[0] is treated as the fan-in.

    Returns:
        A freshly constructed tf.Variable.

    Raises:
        ValueError: for an unknown scheme name (the original silently
        returned None, which only failed later at graph-build time).
    """
    if name == "zero":
        return tf.Variable(tf.zeros(shape))
    elif name == "gaussian distribution":
        return tf.Variable(tf.random_normal(shape=shape, stddev=0.01))
    elif name == "Xavier":
        # Xavier: stddev = sqrt(1 / fan_in), i.e. 1 / sqrt(fan_in).
        return tf.Variable(tf.random_normal(shape=shape, stddev=1/np.sqrt(shape[0])))
    elif name == "MSRA":
        # BUG FIX: MSRA/He initialization uses stddev = sqrt(2 / fan_in);
        # the original used 2 / sqrt(fan_in), which equals sqrt(4 / fan_in).
        return tf.Variable(tf.random_normal(shape=shape, stddev=np.sqrt(2/shape[0])))
    raise ValueError("unknown initialization scheme: %s" % name)

def lossFunction(y, predict_y, lossName):
    """Build the loss tensor selected by `lossName`.

    Args:
        y: ground-truth one-hot labels.
        predict_y: predicted probabilities (e.g. softmax output).
        lossName: "meanSquareError", otherwise cross entropy is used.

    Returns:
        A scalar loss tensor.
    """
    if lossName == "meanSquareError":
        return tf.reduce_mean(tf.square(y - predict_y))
    # Cross entropy. BUG FIX: clip predictions away from zero so tf.log
    # cannot produce -inf/NaN when the softmax saturates.
    return -tf.reduce_sum(y * tf.log(tf.clip_by_value(predict_y, 1e-10, 1.0)))

def optimizerFunction(loss, name, learning_rate=0.01):
    """Build a minimization op for `loss` with the named optimizer.

    Args:
        loss: scalar loss tensor to minimize.
        name: 'Gradient' for plain gradient descent; anything else uses Adam.
        learning_rate: step size (generalized from the hard-coded 0.01;
            default preserves the original behavior).

    Returns:
        A training op that minimizes `loss`.
    """
    if name == 'Gradient':
        return tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    return tf.train.AdamOptimizer(learning_rate).minimize(loss)

# Download (if needed) and load MNIST into ./data with one-hot labels.
mnist = input_data.read_data_sets("./data",one_hot=True)

# Graph inputs: flattened 28x28 images and one-hot digit labels.
train_x = tf.placeholder("float",[None,784])
train_y = tf.placeholder("float",[None,10])

# Collected results: [config label, test accuracy, per-step training losses].
accuracyS = []



# Train one softmax-regression layer for every (initializer, optimizer)
# combination; record each run's loss curve and final test accuracy.
for initial in initializeWay:
    W1 = initialFunction(shape=[784,10],name=initial)
    b1 = initialFunction(shape=[10],name=initial)

    # Single-layer model: softmax(x W + b).  (Activation functions from
    # actFunction are not used at this stage.)
    predict_y = tf.nn.softmax(tf.matmul(train_x,W1)+b1)

    # Cross-entropy loss. NOTE(review): tf.log(predict_y) yields NaN if the
    # softmax saturates to exactly 0 — consider clipping as in lossFunction.
    # loss = lossFunction(train_y,predict_y,"meanSquareError")
    loss = -tf.reduce_sum(train_y*tf.log(predict_y))

    for optimizerName in optimizerNames:
        train_step = optimizerFunction(loss,optimizerName)
        init = tf.initialize_all_variables()
        # BUG FIX: the original opened a new Session per combination and
        # never closed any of them, leaking resources; a context manager
        # guarantees each session is released.
        with tf.Session() as sess:
            sess.run(init)

            theLoss = []
            for i in range(1000):
                batch_x, batch_y = mnist.train.next_batch(100)
                # Loss is truncated to int — presumably to keep the
                # spreadsheet compact; confirm precision is not needed.
                theLoss.append(int(sess.run(loss,feed_dict={train_x:batch_x,train_y:batch_y})))
                sess.run(train_step,feed_dict={train_x:batch_x,train_y:batch_y})

            # Evaluate accuracy on the full test set.
            correct_prediction = tf.equal(tf.argmax(train_y,1),tf.argmax(predict_y,1))
            theAccuracy = tf.reduce_mean(tf.cast(correct_prediction,'float'))
            tempAccuracy = sess.run(theAccuracy,feed_dict={train_x:mnist.test.images,train_y:mnist.test.labels})
            # (.T on the scalar result was a no-op and has been dropped.)
            accuracyS.append([initial+" "+optimizerName,float(tempAccuracy),theLoss])
# Dump results to a spreadsheet: one column per configuration —
# row 0 = config label, row 1 = test accuracy, rows 2+ = per-step losses.
wb = xlwt.Workbook()
ws = wb.add_sheet("loss")

for col, (label, accuracy, losses) in enumerate(accuracyS):
    ws.write(0, col, label)
    ws.write(1, col, accuracy)
    for row, lossValue in enumerate(losses):
        ws.write(row + 2, col, lossValue)

wb.save("oneLayer不同初始化和优化器对应的准确率.xls")