# 导入tensorflow、tensorflow_datasets、layers模块包
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras import layers

# Load the tf_flowers dataset via tensorflow_datasets and carve it into
# training (80%), validation (10%) and test (10%) subsets.
splits = ['train[:80%]', 'train[80%:90%]', 'train[90%:]']
(train_ds, val_ds, test_ds), metadata = tfds.load(
    'tf_flowers',
    split=splits,
    with_info=True,
    as_supervised=True,
)

# The dataset metadata reports how many flower classes the labels cover.
num_classes = metadata.features['label'].num_classes
print(num_classes)

# Number of images per batch, and the square size (in pixels) every
# image is resized to before entering the network.
batch_size = 64
IMG_SIZE = 32


# Per-example preprocessing applied through Dataset.map.
def resize_and_rescale(image, label):
    """Resize an image to IMG_SIZE x IMG_SIZE and rescale pixels to [0, 1].

    Args:
        image: image tensor (uint8 pixels in [0, 255] from tf_flowers).
        label: integer class label, passed through unchanged.

    Returns:
        (image, label) where image is float32 with values in [0, 1].
    """
    image = tf.cast(image, tf.float32)
    image = tf.image.resize(image, [IMG_SIZE, IMG_SIZE])
    # Bug fix: despite its name, the original never rescaled, so the CNN
    # trained on raw [0, 255] values. Normalize to the unit range.
    image = image / 255.0
    return image, label

# AUTOTUNE lets tf.data choose buffering/parallelism values at runtime.
AUTOTUNE = tf.data.AUTOTUNE

# Build the input pipelines: shuffle (training split only), preprocess,
# batch, and prefetch. The original wrapped resize_and_rescale in a
# redundant lambda — the function already has the (image, label)
# signature Dataset.map expects — and mapped serially; map it directly
# with num_parallel_calls=AUTOTUNE (order stays deterministic by default).
train_ds = (
    train_ds
        .shuffle(1000)
        .map(resize_and_rescale, num_parallel_calls=AUTOTUNE)
        .batch(batch_size)
        .prefetch(AUTOTUNE)
)
val_ds = (
    val_ds
        .map(resize_and_rescale, num_parallel_calls=AUTOTUNE)
        .batch(batch_size)
        .prefetch(AUTOTUNE)
)
test_ds = (
    test_ds
        .map(resize_and_rescale, num_parallel_calls=AUTOTUNE)
        .batch(batch_size)
        .prefetch(AUTOTUNE)
)

# Sequential CNN: three Conv2D + MaxPooling stages, then a flatten and
# two dense layers. The final Dense has no activation, so it emits raw
# logits — one per flower class.
model = tf.keras.Sequential()
model.add(layers.Conv2D(16, 3, padding='same', activation='relu'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(32, 3, padding='same', activation='relu'))
model.add(layers.MaxPooling2D())
model.add(layers.Conv2D(64, 3, padding='same', activation='relu'))
model.add(layers.MaxPooling2D())
model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(num_classes))

# Compile the CNN. SparseCategoricalCrossentropy with from_logits=True
# matches the integer labels and the logit-producing final Dense layer.
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(
    optimizer='adam',
    loss=loss_fn,
    metrics=['accuracy'],
)

# Train for a fixed number of epochs, tracking metrics on the
# validation split after each epoch.
epochs = 10
history = model.fit(train_ds, validation_data=val_ds, epochs=epochs)

# Evaluate on the held-out test split. model.evaluate returns a list of
# [loss, accuracy] (one entry per compiled metric); the original printed
# that whole list under the label "Accuracy", which was misleading.
loss, acc = model.evaluate(test_ds)
print("Loss", loss)
print("Accuracy", acc)
