import tensorflow
from tensorflow import keras
from sklearn.preprocessing import StandardScaler
import numpy as np
from self_mudle import plot_learning_curves

# Fashion-MNIST: 60k train / 10k test grayscale 28x28 images, labels 0-9.
(train_x, train_y), (test_x, test_y) = keras.datasets.fashion_mnist.load_data()

# Carve the first 5,000 training samples off as a validation split.
valid_x, valid_y = train_x[:5000], train_y[:5000]
train_x, train_y = train_x[5000:], train_y[5000:]

# Standardize pixels using statistics fitted on the training set only.
# Each image is flattened to a single column so the scaler treats every
# pixel as one feature, then reshaped back to 28x28 afterwards.
scaler = StandardScaler()

def _standardize(images, transform):
    """Cast to float32, scale via *transform*, restore 28x28 shape."""
    flat = images.astype(np.float32).reshape(-1, 1)
    return transform(flat).reshape(-1, 28, 28)

train_x = _standardize(train_x, scaler.fit_transform)  # fit on train only
valid_x = _standardize(valid_x, scaler.transform)
test_x = _standardize(test_x, scaler.transform)

# Deep MLP: 20 hidden blocks of Dense(100, relu) + BatchNormalization.
#
# Input normalization scales data before it enters the network; batch
# normalization re-normalizes each layer's output before it feeds the
# next layer. Deep stacks often train very slowly at first (many
# parameters, vanishing gradients); BN keeps activation distributions
# stable so gradients stay usable and early training moves.
#
# Whether BN belongs before or after the activation is debated — to put
# it before, use a linear Dense followed by a separate
# keras.layers.Activation('relu') layer.
hidden_layers = []
for _ in range(20):
    hidden_layers.append(keras.layers.Dense(100, activation='relu'))
    hidden_layers.append(keras.layers.BatchNormalization())

model = keras.Sequential(
    [keras.layers.Flatten()]
    + hidden_layers
    + [keras.layers.Dense(10, activation='softmax')]
)

model.build(input_shape=[None, 28, 28])
model.summary()

# Labels are integer class ids, hence sparse_categorical_crossentropy
# (rather than the one-hot categorical variant).
model.compile(
    optimizer='sgd',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'],
)

history = model.fit(train_x, train_y, epochs=3, validation_data=(valid_x, valid_y))

# evaluate() returns [loss, accuracy] for the test set — it does NOT
# return predictions, so the old name `predict` was misleading.
# (Previously observed test accuracy: ~78%.)
test_loss, test_accuracy = model.evaluate(test_x, test_y)

plot_learning_curves.plot_learning_curves(history)