import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import xlwt
import numpy as np

# Hyperparameter grids for the sweep below: every combination of activation,
# weight initialization, and optimizer is trained and evaluated.

# Activation functions to compare ("crelu" / "softplus" currently disabled).
actFunction = ["tanh", "sigmoid", "relu"]

# Weight-initialization schemes to compare ("zero" currently disabled).
initializeWay = ["gaussian distribution", "Xavier", "MSRA"]

# Available loss names (the sweep below hard-codes cross entropy).
lossWay = ["meanSquareError", "crossEntropy"]

# Optimizers to compare.
optimizerNames = ['Gradient', 'Adam']

# Per-phase training step counts; phases run back to back in one session.
trainCount = [1000, 2000, 3000, 4000]

# Activation functions. TensorFlow ships many built-in activations; after
# installing, see tensorflow/python/ops/nn.py for the full list.
def activationFunction(initalParam, name):
    """Apply the activation named *name* to the tensor *initalParam*.

    Supported names: "tanh", "sigmoid", "relu", "crelu", "softplus".
    Returns None for an unrecognized name (same as the original if/elif
    chain falling through without a return).
    """
    dispatch = {
        "tanh": tf.tanh,
        "sigmoid": tf.sigmoid,
        "relu": tf.nn.relu,
        "crelu": tf.nn.crelu,
        "softplus": tf.nn.softplus,
    }
    activation = dispatch.get(name)
    return activation(initalParam) if activation is not None else None

# Weight-initialization schemes; reference (Chinese blog post):
# https://blog.csdn.net/hai008007/article/details/79735491
def initialFunction(name, shape):
    """Create a tf.Variable of *shape* initialized per the named scheme.

    name: one of "zero", "gaussian distribution", "Xavier", "MSRA".
    shape: variable shape; shape[0] is treated as the fan-in.
        NOTE(review): for the bias shapes ([100], [10]) used by the caller,
        shape[0] is the layer width, not a true fan-in — confirm intent.
    Returns None for an unrecognized name.
    """
    if name == "zero":
        return tf.Variable(tf.zeros(shape))
    elif name == "gaussian distribution":
        # Fixed small stddev, independent of layer size.
        return tf.Variable(tf.random_normal(shape=shape, stddev=0.01))
    elif name == "Xavier":
        # Xavier/Glorot (fan-in form): stddev = sqrt(1 / fan_in).
        return tf.Variable(tf.random_normal(shape=shape, stddev=1 / np.sqrt(shape[0])))
    elif name == "MSRA":
        # MSRA/He: stddev = sqrt(2 / fan_in).  The original used
        # 2 / sqrt(fan_in), which is a factor of sqrt(2) too large.
        return tf.Variable(tf.random_normal(shape=shape, stddev=np.sqrt(2 / shape[0])))

def lossFunction(y, predict_y, lossName):
    """Return a scalar loss tensor between labels *y* and predictions *predict_y*.

    lossName: "meanSquareError" -> mean squared error; any other value ->
    summed cross entropy (predict_y is expected to be softmax probabilities).
    """
    if lossName == "meanSquareError":
        return tf.reduce_mean(tf.square(y - predict_y))
    else:  # cross entropy
        # Clip probabilities away from 0 so tf.log never produces -inf;
        # unclipped, the loss and its gradients can become NaN during
        # training (the commented-out loss logging in the main loop notes
        # exactly this NaN problem).
        return -tf.reduce_sum(y * tf.log(tf.clip_by_value(predict_y, 1e-10, 1.0)))

def optimizerFunction(loss, name, learning_rate=0.01):
    """Return a training op that minimizes *loss* with the named optimizer.

    name: 'Gradient' -> plain gradient descent; any other value -> Adam.
    learning_rate: step size; defaults to 0.01 to match the original
        hard-coded value (NOTE(review): 0.01 is unusually high for Adam —
        consider 1e-3 if training diverges).
    """
    if name == 'Gradient':
        return tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    else:
        return tf.train.AdamOptimizer(learning_rate).minimize(loss)

# Load MNIST with one-hot labels (downloaded to ./data on first run).
mnist = input_data.read_data_sets("./data",one_hot=True)

# Placeholders for a batch of flattened 28x28 images and their one-hot labels.
train_x = tf.placeholder("float",[None,784])
train_y = tf.placeholder("float",[None,10])

# accuracys = []
# Collected results: one [description, accuracy] pair per configuration.
accuracy = []
for initial in initializeWay:
    # Two-layer network 784 -> 100 -> 10.  The Variables are created once per
    # initialization scheme and shared by every activation/optimizer run
    # below; each run re-randomizes them via initialize_all_variables().
    W1 = initialFunction(shape=[784,100],name=initial)
    b1 = initialFunction(shape=[100],name=initial)

    W2 = initialFunction(shape=[100, 10], name=initial)
    b2 = initialFunction(shape=[10], name=initial)


    # h1 = tf.tanh(tf.matmul(train_x, W1) + b1)

    for activateName in actFunction:
        print(activateName)
        # Hidden layer with the activation under test, then a softmax output.
        h1 = activationFunction(tf.matmul(train_x, W1) + b1,activateName)
        predict_y = tf.nn.softmax(tf.matmul(h1, W2) + b2)

        # for lossName in lossWay:
        # loss = lossFunction(train_y,predict_y,"meanSquareError")
        # Summed cross-entropy loss.  NOTE(review): tf.log(predict_y) is not
        # clipped, so a zero probability yields -inf/NaN — see lossFunction.
        loss = -tf.reduce_sum(train_y * tf.log(predict_y))
        # l1_loss = tf.reduce_mean(tf.abs(train_y-predict_y))
        for optimizerName in optimizerNames:
            # Fresh session per optimizer; variables are re-initialized so each
            # configuration starts from scratch.
            # NOTE(review): sessions are never closed — each loop iteration
            # leaks a tf.Session.  Wrap in `with tf.Session() as sess:` or
            # call sess.close() after the inner loops.
            train_step = optimizerFunction(loss, optimizerName)
            init = tf.initialize_all_variables()
            sess = tf.Session()
            sess.run(init)

            # theLoss = []
            trainCounts = 0
            for i in trainCount:

                # NOTE(review): training continues in the SAME session across
                # phases, so after the phase labeled "2000" the model has
                # actually seen 1000+2000=3000 steps; trainCounts records the
                # per-phase increment, not the cumulative total.
                trainCounts = i
                for j in range(i):
                    batch_x, batch_y = mnist.train.next_batch(100)
                    # theLoss.append(int(sess.run(loss, feed_dict={train_x: batch_x, train_y: batch_y})))# loss can become NaN here
                    sess.run(train_step, feed_dict={train_x: batch_x, train_y: batch_y})
                # Evaluate accuracy on the full test set after each phase.
                # NOTE(review): these two graph nodes are re-created on every
                # phase iteration, growing the graph; hoist them above the
                # loops to build them once.
                correct_prediction = tf.equal(tf.argmax(train_y, 1), tf.argmax(predict_y, 1))
                theAccuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
                tempAccuracy = sess.run(theAccuracy, feed_dict={train_x: mnist.test.images, train_y: mnist.test.labels})
                # print("初始化:"+initial + "\n优化器：" + optimizerName +"\n训练step：" +  str(trainCounts)+"\n准确率：" +  str(tempAccuracy)+"\n激活函数："+activateName +"\n\n")
                accuracy.append([initial + " " + optimizerName +" " +  str(trainCounts) + " "+activateName, str(tempAccuracy.T)])


# Dump the collected results to a spreadsheet: a header row, then one row per
# configuration (description string, accuracy string).
wb = xlwt.Workbook()
ws = wb.add_sheet("loss")
ws.write(0, 0, "参数名称")
ws.write(0, 1, "accuracy")

for row, (label, acc) in enumerate(accuracy, start=1):
    print(label + " : " + str(acc))
    ws.write(row, 0, label)
    ws.write(row, 1, acc)

wb.save("twoLayer不同初始化和优化器对应的准确率.xls")