text stringlengths 0 4.99k |
|---|
def display(array1, array2, n=10, image_shape=(28, 28)):
    """
    Display ``n`` random images from each of the supplied arrays.

    The same random indices are used for both arrays, so row 1 and row 2
    show corresponding images (e.g. a clean digit above its noisy version).

    Parameters
    ----------
    array1, array2 : np.ndarray
        Arrays of flattened images; both must be indexable by the same
        positions (assumed equal length — TODO confirm at call sites).
    n : int, optional
        Number of image pairs to show (default 10).
    image_shape : tuple of int, optional
        Shape each flat image is reshaped to before plotting (default (28, 28)).
    """
    # Sample without replacement so the same image is not shown twice
    # (fall back to replacement only if the array is shorter than n).
    indices = np.random.choice(len(array1), size=n, replace=len(array1) < n)
    images1 = array1[indices, :]
    images2 = array2[indices, :]

    plt.figure(figsize=(20, 4))
    for i, (image1, image2) in enumerate(zip(images1, images2)):
        # Top row: images from array1.
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(image1.reshape(image_shape))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)

        # Bottom row: the paired images from array2.
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(image2.reshape(image_shape))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    plt.show()
Prepare the data
# Since we only need images from the dataset to encode and decode, we
# won't use the labels.
(train_data, _), (test_data, _) = mnist.load_data()

# Normalize and reshape the data.
# NOTE(review): `preprocess` is defined elsewhere in this file — presumably it
# scales pixel values to [0, 1] and adds a channel dimension; confirm there.
train_data = preprocess(train_data)
test_data = preprocess(test_data)

# Create a copy of the data with added noise (clean `train_data`/`test_data`
# are kept as the reconstruction targets; `noise` is defined elsewhere).
noisy_train_data = noise(train_data)
noisy_test_data = noise(test_data)

# Display the train data and a version of it with added noise.
display(train_data, noisy_train_data)
png
Build the autoencoder
We are going to use the Functional API to build our convolutional autoencoder.
# Input: 28x28 grayscale images with a single channel.
# (Renamed from `input`, which shadowed the Python builtin.)
encoder_input = layers.Input(shape=(28, 28, 1))

# Encoder: two Conv2D + MaxPooling2D stages downsample 28x28 -> 14x14 -> 7x7.
x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(encoder_input)
x = layers.MaxPooling2D((2, 2), padding="same")(x)
x = layers.Conv2D(32, (3, 3), activation="relu", padding="same")(x)
x = layers.MaxPooling2D((2, 2), padding="same")(x)

# Decoder: strided transposed convolutions upsample 7x7 -> 14x14 -> 28x28.
x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x)
x = layers.Conv2DTranspose(32, (3, 3), strides=2, activation="relu", padding="same")(x)
# Final sigmoid keeps outputs in [0, 1], matching the binary cross-entropy loss.
x = layers.Conv2D(1, (3, 3), activation="sigmoid", padding="same")(x)

# Autoencoder: maps an input image to its reconstruction.
autoencoder = Model(encoder_input, x)
autoencoder.compile(optimizer="adam", loss="binary_crossentropy")
autoencoder.summary()
Model: \"model\" |
_________________________________________________________________ |
Layer (type) Output Shape Param # |
================================================================= |
input_1 (InputLayer) [(None, 28, 28, 1)] 0 |
_________________________________________________________________ |
conv2d (Conv2D) (None, 28, 28, 32) 320 |
_________________________________________________________________ |
max_pooling2d (MaxPooling2D) (None, 14, 14, 32) 0 |
_________________________________________________________________ |
conv2d_1 (Conv2D) (None, 14, 14, 32) 9248 |
_________________________________________________________________ |
max_pooling2d_1 (MaxPooling2 (None, 7, 7, 32) 0 |
_________________________________________________________________ |
conv2d_transpose (Conv2DTran (None, 14, 14, 32) 9248 |
_________________________________________________________________ |
conv2d_transpose_1 (Conv2DTr (None, 28, 28, 32) 9248 |
_________________________________________________________________ |
conv2d_2 (Conv2D) (None, 28, 28, 1) 289 |
================================================================= |
Total params: 28,353 |
Trainable params: 28,353 |
Non-trainable params: 0 |
_________________________________________________________________ |
Now we can train our autoencoder using train_data as both our input data and target. Notice we are setting up the validation data using the same format.
# Train as a plain (non-denoising) autoencoder first: the clean images serve
# as both the input and the reconstruction target. Validation uses the test
# split in the same input == target form.
autoencoder.fit(
    x=train_data,
    y=train_data,
    epochs=50,
    batch_size=128,
    shuffle=True,  # reshuffle training samples each epoch
    validation_data=(test_data, test_data),
)
Epoch 1/50 |
469/469 [==============================] - 20s 43ms/step - loss: 0.1354 - val_loss: 0.0735 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.