"""
3.	使用tensorflow2.0+keras接口，按照下面要求，完成VGG16网络模型cifar10图像分类（共30分）
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, activations, losses, optimizers, metrics, callbacks
import os
import numpy as np

# --- Hyperparameters and logging configuration ---
BATCH_SIZE = 64  # mini-batch size used for training, validation and testing
N_EPOCHS = 2  # number of training epochs
ALPHA = 0.001  # learning rate for the Adam optimizer
VER = 'v1.0'  # version tag used to separate TensorBoard runs
FILE_NAME = os.path.basename(__file__)  # this script's file name, used in the log path
LOG_DIR = os.path.join('_log', FILE_NAME, VER)  # TensorBoard log directory

# ① Load the dataset and apply reasonable preprocessing
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
# Scale pixel values from [0, 255] to [0.0, 1.0]
x_train = x_train.astype(np.float32) / 255.
x_test = x_test.astype(np.float32) / 255.
# Training-set dimensions: sample count, image height, width, channels
M_TRAIN, PIC_H, PIC_W, PIC_CH = x_train.shape
# Number of distinct class labels (10 for CIFAR-10)
N_CLS = len(np.unique(y_train))
print('x_train', np.shape(x_train))
print('y_train', np.shape(y_train))
print('x_test', np.shape(x_test))
print('y_test', np.shape(y_test))
print('M_TRAIN, PIC_H, PIC_W, PIC_CH', M_TRAIN, PIC_H, PIC_W, PIC_CH)
print('N_CLS', N_CLS)

# ② Convolution unit ConvCell, composed of convolution, BN layer and ReLU activation
def ConvCell(filters, ksize=(3, 3), strides=(1, 1), padding='same'):
    """Build a Conv2D -> BatchNormalization -> ReLU unit as a Sequential model."""
    cell = keras.Sequential()
    cell.add(layers.Conv2D(filters, ksize, strides, padding))
    cell.add(layers.BatchNormalization())
    cell.add(layers.ReLU())
    return cell

def MaxPooling(psize=(3, 3), strides=(2, 2), padding='same'):
    """Return a MaxPooling2D layer with the given pool size, strides and padding."""
    pool = layers.MaxPooling2D(psize, strides, padding)
    return pool

# ③ Build the VGG16 model (conv stages of 2-2-3-3-3 conv layers, VGG config D)
inputs = keras.Input(shape=[PIC_H, PIC_W, PIC_CH])
# Stage 1: two 64-filter conv units
x = ConvCell(64)(inputs)
x = ConvCell(64)(x)
x = MaxPooling()(x)
# Stage 2: two 128-filter conv units
x = ConvCell(128)(x)
x = ConvCell(128)(x)
x = MaxPooling()(x)
# Stage 3: three 256-filter conv units. VGG16 uses 3 convs in this stage;
# the previous version had only 2, giving 15 weight layers instead of 16.
x = ConvCell(256)(x)
x = ConvCell(256)(x)
x = ConvCell(256)(x)
x = MaxPooling()(x)
# Stage 4: three 512-filter conv units
x = ConvCell(512)(x)
x = ConvCell(512)(x)
x = ConvCell(512)(x)
x = MaxPooling()(x)
# Stage 5: three 512-filter conv units
x = ConvCell(512)(x)
x = ConvCell(512)(x)
x = ConvCell(512)(x)
x = MaxPooling()(x)
# Classifier head: 13 conv + 3 dense = 16 weight layers. The dense widths are
# reduced from the paper's 4096-4096 to suit CIFAR-10's small 32x32 inputs.
x = layers.Flatten()(x)
x = layers.Dense(128, activation=activations.relu)(x)
x = layers.Dense(64, activation=activations.relu)(x)
# Softmax output over the N_CLS classes
x = layers.Dense(N_CLS, activation=activations.softmax)(x)
model = keras.Model(inputs, x)

# ④ Print the list of output shapes of the VGG16 network
model.summary()

# ⑤ Train the model on the training set and validate on the test set
model.compile(
    loss=losses.sparse_categorical_crossentropy,
    optimizer=optimizers.Adam(learning_rate=ALPHA),
    # The Keras API specifies `metrics` as a list of metrics, not a bare function.
    metrics=[metrics.sparse_categorical_accuracy],
)
model.fit(x_train, y_train,
          batch_size=BATCH_SIZE, epochs=N_EPOCHS,
          validation_data=(x_test, y_test),
          validation_batch_size=BATCH_SIZE,
          # `callbacks` must be a list of Callback instances per the Keras API.
          # profile_batch=0 disables profiling; update_freq='batch' logs per batch.
          callbacks=[callbacks.TensorBoard(LOG_DIR, update_freq='batch', profile_batch=0)],
          )

# ⑥ Compute the test-set loss and accuracy (no credit if accuracy is below 30%)
print('Testing...')
model.evaluate(x_test, y_test, batch_size=BATCH_SIZE)
