import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras

# Record the exact runtime environment (interpreter + library versions)
# so results can be reproduced later.
print(tf.__version__)
print(sys.version_info)
for lib in (mpl, np, pd, tf, keras):
    print(lib.__name__, lib.__version__)

# NOTE(review): `dir` shadows the builtin of the same name; kept unchanged
# because later code (the logdir setup) reads this variable.
dir = r'E:\testDir\ml\mnist'
# makedirs(exist_ok=True) replaces the exists()/mkdir pair: it creates any
# missing parent directories and avoids the check-then-create race.
os.makedirs(dir, exist_ok=True)
# Load (or cache) the MNIST archive at the data directory; the joined path
# is identical to the previously hard-coded literal.
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data(
    os.path.join(dir, 'mnist.npz'))
print(x_train.shape, x_test.shape)
# Carve the first 5000 training samples out as a validation set.
x_valid = x_train[0:5000]
y_valid = y_train[0:5000]
# Remaining samples stay as the training set.
x_train = x_train[5000:]
y_train = y_train[5000:]

# Standardize pixel values to zero mean / unit variance. The scaler is fit
# on the training pixels only, then applied unchanged to valid/test.
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()


def _to_images(flat_pixels):
    # Restore the scaled flat pixel column to (N, 28, 28, 1) image batches.
    return flat_pixels.reshape(-1, 28, 28, 1)


x_train_scaled = _to_images(
    scaler.fit_transform(x_train.astype(np.float32).reshape(-1, 1)))
x_valid_scaled = _to_images(
    scaler.transform(x_valid.astype(np.float32).reshape(-1, 1)))
x_test_scaled = _to_images(
    scaler.transform(x_test.astype(np.float32).reshape(-1, 1)))


def make_dataset(datas, labels, batch_size, shuffle=False, buffer_size=10000):
    """Build an infinitely repeating, batched tf.data pipeline.

    Args:
        datas: array-like features, sliced along the first axis.
        labels: array-like labels aligned with ``datas``.
        batch_size: number of (data, label) pairs per batch.
        shuffle: when True, shuffle examples with a buffer of ``buffer_size``.
        buffer_size: size of the shuffle buffer (only used when shuffling).

    Returns:
        A ``tf.data.Dataset`` yielding batches forever (``repeat()`` has no
        count), so callers must bound iteration with ``steps_per_epoch``.
    """
    dataset = tf.data.Dataset.from_tensor_slices((datas, labels))
    if shuffle:
        # BUG FIX: Dataset.shuffle returns a NEW dataset; the original code
        # discarded the return value, so shuffle=True silently did nothing.
        dataset = dataset.shuffle(buffer_size=buffer_size)
    dataset = dataset.repeat().batch(batch_size)
    return dataset



model = tf.keras.Sequential()

# Feature extractor: three stages, each with two same-padded 3x3 SELU convs
# followed by a 2x2 max-pool, doubling the filter count per stage.
for stage_index, n_filters in enumerate((32, 64, 128)):
    # Only the very first layer declares the input shape (28x28 grayscale).
    first_layer_kwargs = {'input_shape': (28, 28, 1)} if stage_index == 0 else {}
    model.add(keras.layers.Conv2D(filters=n_filters, kernel_size=3,
                                  padding='same', activation='selu',
                                  **first_layer_kwargs))
    model.add(keras.layers.Conv2D(filters=n_filters, kernel_size=3,
                                  padding='same', activation='selu'))
    model.add(keras.layers.MaxPool2D(pool_size=2))

# Classifier head: flatten features, one hidden SELU layer, 10-way softmax.
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='selu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))

# Integer class labels -> sparse categorical cross-entropy with plain SGD.
model.compile(optimizer="sgd",
              loss="sparse_categorical_crossentropy",
              metrics=['accuracy'])

batch_size = 32
train_dataset = make_dataset(x_train_scaled, y_train, batch_size, shuffle=True)
valid_dataset = make_dataset(x_valid_scaled, y_valid, batch_size)
test_dataset = make_dataset(x_test_scaled, y_test, batch_size)

# Since the datasets repeat forever, these step counts bound one pass
# over each split.
train_steps_per_epoch = x_train_scaled.shape[0] // batch_size
valid_steps_per_epoch = x_valid_scaled.shape[0] // batch_size
test_steps_per_epoch = x_test_scaled.shape[0] // batch_size

logdir = os.path.join(dir, 'cnn-selu-callbacks')
if not os.path.exists(logdir):
    os.mkdir(logdir)

# Restore previously trained weights instead of training from scratch.
model.load_weights(os.path.join(logdir, "mnist_weights_2.h5"))
# model.evaluate(test_dataset,steps=test_steps_per_epoch)

# Sanity check: run inference on a single batch and print the argmax
# predictions next to the ground-truth labels.
for batch_images, batch_labels in test_dataset.take(1):
    print(batch_images.shape, batch_labels.shape)
    batch_probs = model.predict(batch_images)
    print(tf.argmax(batch_probs, 1))
    print(batch_labels)




