import tensorflow as tf
import keras
import numpy as np
from keras.datasets import mnist

# 定义Vgg网络中的卷积块数，卷积层的个数，卷积层中卷积核的个数
def vgg_block(num_conv, num_filters):
    """Build one VGG block: `num_conv` 3x3 same-padded ReLU convolutions
    followed by a single 2x2 max-pool with stride 2 (halves spatial size).

    Args:
        num_conv: number of Conv2D layers in this block.
        num_filters: number of filters used by every conv layer in the block.

    Returns:
        A keras Sequential sub-model containing the conv stack and the pool.
    """
    conv_stack = [
        keras.layers.Conv2D(num_filters, (3, 3), padding='same', activation='relu')
        for _ in range(num_conv)
    ]
    conv_stack.append(keras.layers.MaxPool2D(pool_size=(2, 2), strides=2))
    return keras.models.Sequential(conv_stack)


def vgg(conv_arch):
    """Assemble a VGG-style network.

    Args:
        conv_arch: iterable of (num_convs, num_filters) pairs, one per
            convolutional block.

    Returns:
        A keras Sequential model: the conv blocks followed by a
        Flatten -> 4096 -> 4096 -> 10-way softmax classifier head,
        with dropout(0.5) after each hidden dense layer.
    """
    net = keras.models.Sequential()
    # Feature extractor: one vgg_block per architecture entry.
    for block_convs, block_filters in conv_arch:
        net.add(vgg_block(block_convs, block_filters))
    # Fully connected classifier head, kept as its own sub-model.
    classifier = keras.models.Sequential([
        keras.layers.Flatten(),
        keras.layers.Dense(4096, activation='relu'),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(4096, activation='relu'),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(10, activation='softmax'),
    ])
    net.add(classifier)
    return net


# VGG architecture spec: (num_convs, num_filters) for each block.
conv_arch = ((2, 64), (2, 128), (3, 256), (3, 512), (3, 512))
net = vgg(conv_arch)
# One dummy forward pass builds the layers' weights so summary() can report shapes.
x = tf.random.normal([1, 224, 224, 1])
y = net(x)
# Model.summary() prints the table itself and returns None, so wrapping it in
# print() only added a stray "None" line — call it directly instead.
net.summary()

# Fetch the MNIST handwritten-digit dataset (downloaded on first use).
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Append a trailing channel axis so samples are laid out N x H x W x C
# (C = 1 for grayscale) — equivalent to reshaping to (N, 28, 28, 1).
train_images = train_images[..., np.newaxis]
test_images = test_images[..., np.newaxis]


# 定义两个方法随机抽取部分样本演示
# 获取训练集数据
def get_train(size):
    """Randomly sample `size` training images/labels for the demo.

    The 28x28 MNIST images are padded/resized up to the 224x224 input
    the VGG network expects.

    Returns:
        (images, labels): float ndarray of shape (size, 224, 224, 1)
        and the matching label array.
    """
    # Random indices into the training set (sampled with replacement).
    idx = np.random.randint(0, train_images.shape[0], size)
    # Resize the selected batch to 224x224 with padding.
    resized = tf.image.resize_with_pad(train_images[idx], 224, 224)
    return resized.numpy(), train_labels[idx]


# 获取测试集数据
def get_test(size):
    """Randomly sample `size` test images/labels for the demo.

    Mirrors get_train: pads/resizes the 28x28 images to 224x224.

    Returns:
        (images, labels): float ndarray of shape (size, 224, 224, 1)
        and the matching label array.
    """
    # Random indices into the test set (sampled with replacement).
    idx = np.random.randint(0, test_images.shape[0], size)
    # Resize the selected batch to 224x224 with padding.
    resized = tf.image.resize_with_pad(test_images[idx], 224, 224)
    return resized.numpy(), test_labels[idx]


# Draw the demo training and test samples (128 images each).
# NOTE(review): pixel values stay in the raw 0-255 range — inputs are usually
# scaled to [0, 1] before training; confirm this is intentional for the demo.
train_images, train_labels = get_train(128)
test_images, test_labels = get_test(128)
# Configure optimizer, loss and metric. Labels are integer class ids, so
# sparse_categorical_crossentropy pairs with the model's softmax output.
net.compile(optimizer=keras.optimizers.SGD(learning_rate=0.01),
            loss="sparse_categorical_crossentropy",
            metrics=['accuracy'])
# Train: batch size 128, 3 epochs, last 10% of the sample held out for validation.
net.fit(train_images, train_labels, batch_size=128, epochs=3, verbose=1, validation_split=0.1)
# Evaluate on the held-out test sample.
net.evaluate(test_images, test_labels, verbose=1)
