from keras.datasets import mnist
from keras import backend,initializers
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense,Dropout,Activation,Flatten,Convolution2D,MaxPool2D
from keras import regularizers



'''
1、读数据
2、构造卷积神经网络
3、调整参数【a、每个参数每次选择最佳的。b、一次全调。】

参考的代码的疑问：1、第二层没有指定inpt_shape   2、未设定stride

可以调整的参数：
激活函数 
正则化 (正则化因子 )
权重初始化 
卷积 
池化 (kernel的大小、个数，stride）
学习率 
批处理数据大小
'''


# Global hyper-parameter grids for the grid search; the best value of each is selected.
activationS = [
    "tanh",
    "sigmoid",
    "selu",
    "relu",
    "softplus",

               ]
# Each entry pairs a regularizer object with a human-readable label used when logging results.
regularizerS=[
    [regularizers.l1(0.0001), 'l1-0.0001'],
    [regularizers.l1(0), 'l1-0'],#no regularization applied
    [ regularizers.l1_l2(0.0001,0.0001),'l1_l2-0.0001,0.0001'],
    [ regularizers.l2(0.0001),'l2-0.0001'],
]

# Each entry pairs a weight initializer with a label used when logging results.
initializerS = [
    [initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='normal', seed=None),'VarianceScaling'],
    [initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=None), 'TruncatedNormal'],
    [initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=None),'randomUniform']
]

kernelSizeS = [3,4,5,2]  # convolution kernel sizes to try
batchSizeS = [128,256,64]  # training batch sizes to try
epochsS = [10,20,30] # [20,30,40]; experiments showed most models roughly converge by epoch 10, with little gain at 20 or 30
filterS = [32,64,16]  # number of convolution filters (kernels) to try
poolSizeS = [2]  # pooling window sizes to try
strideS = [2,3]# [2,3,4]; accuracy was found to be very poor when stride=4
img_rows, img_cols = 28,28  # MNIST image dimensions
paddingS = ['valid','same']  # convolution padding modes to try
# nb_filter = 32#number of kernels
# pool_size= (2,2)
# kernel_size = (3,3)


# Load the MNIST dataset (training and test splits of 28x28 grayscale digit images).
(X_train,Y_train),(X_test,Y_test) = mnist.load_data()
# Configure the image format as (channel, height, width), i.e. Theano-style ("th") ordering.
backend.set_image_dim_ordering('th')
X_train = X_train.reshape(X_train.shape[0],1,28,28)  # add the single grayscale channel axis
X_test = X_test.reshape(X_test.shape[0],1,28,28)
input_shape = (1,img_rows,img_cols)

# Normalize the training and test pixel values from [0, 255] to [0, 1].
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32')   / 255

# One-hot encode the training and test labels (10 digit classes).
Y_train = np_utils.to_categorical(Y_train,10)
Y_test = np_utils.to_categorical(Y_test,10)



# Exhaustive grid search over every hyper-parameter combination defined above.
# For each combination we build, train and evaluate a fresh two-conv / two-dense
# CNN and append the metrics to statistics.txt.
for activation in activationS:
    for regularizer in regularizerS:
        for initializer in initializerS:
            for kernelSize in kernelSizeS:
                for poolSize in poolSizeS:  # pooling window size
                    for batchSize in batchSizeS:  # training batch size
                        for nbFilter in filterS:  # renamed from 'filter', which shadowed the builtin
                            for stride in strideS:
                                for padding in paddingS:
                                    # Repeated experiments showed that stride=3 with
                                    # 'valid' padding performs poorly, so skip that
                                    # combination. (The original test ran before
                                    # 'padding' was bound, raising a NameError, and
                                    # its 'break' would have aborted the stride loop.)
                                    if stride == 3 and padding == 'valid':
                                        continue
                                    for epochs in epochsS:
                                        # Build the model: two conv layers + two fully-connected layers.
                                        model = Sequential()
                                        model.add(Convolution2D(input_shape=input_shape, filters=nbFilter,
                                                                kernel_size=kernelSize, strides=stride,
                                                                padding=padding, activation=activation,
                                                                kernel_regularizer=regularizer[0],
                                                                kernel_initializer=initializer[0]))
                                        model.add(Convolution2D(filters=nbFilter, kernel_size=kernelSize,
                                                                strides=stride, padding=padding,
                                                                activation=activation,
                                                                kernel_regularizer=regularizer[0],
                                                                kernel_initializer=initializer[0]))
                                        model.add(MaxPool2D(pool_size=poolSize))
                                        model.add(Flatten())
                                        model.add(Dense(128, activation=activation,
                                                        kernel_regularizer=regularizer[0]))
                                        model.add(Dense(10, activation='softmax'))
                                        # Compile the model.
                                        model.compile(
                                            loss='categorical_crossentropy',
                                            optimizer='adadelta',
                                            metrics=['accuracy']
                                        )

                                        # Train the model (validating on the test split, as before);
                                        # validation_data is documented as a tuple, not a list.
                                        model.fit(X_train, Y_train, batch_size=batchSize,
                                                  epochs=epochs, verbose=1,
                                                  validation_data=(X_test, Y_test))

                                        # Evaluate the model.
                                        score = model.evaluate(X_test, Y_test, verbose=0)

                                        print("损失函数值：", score[0])
                                        print("准  确  率：", score[1])
                                        # 'with' guarantees the handle is closed even if a
                                        # write fails (the original could leak the file object).
                                        with open("statistics.txt", "a+") as document:
                                            document.write("激活函数为："+str(activation)+"  ")
                                            document.write("正则函数为："+str(regularizer[1])+"  ")
                                            document.write("初始化方式为："+str(initializer[1])+"  ")
                                            document.write("kernel大小为："+str(kernelSize)+"  ")
                                            document.write("kernel个数为："+str(nbFilter)+"  ")
                                            document.write("stride为："+str(stride)+"  ")
                                            document.write("池化大小为："+str(poolSize)+"  ")
                                            document.write("卷积的方式为："+str(padding)+"  ")
                                            document.write("batch大小为："+str(batchSize)+"  ")
                                            document.write("epoch为："+str(epochs)+"  ")
                                            document.write("损失函数值： %.4f  准确率：%.4f \n" % (score[0], score[1]))






