## Data augmentation
# Initialize `RandAugment` object with 2 layers of
# augmentation transforms and strength of 9.
augmenter = RandAugment(num_layers=2, magnitude=9)
For training the teacher model, we will only be using two geometric augmentation transforms: random horizontal flip and random crop.
def preprocess_train(image, label, noisy=True):
    """Augment a single training example.

    Args:
        image: image tensor; resized to (RESIZE_TO, RESIZE_TO) and then
            randomly cropped to (CROP_TO, CROP_TO, 3).
        label: passed through unchanged.
        noisy: when True, additionally apply RandAugment distortions
            (the "noisy" pipeline used to train the student model).

    Returns:
        A `(image, label)` tuple.
    """
    image = tf.image.random_flip_left_right(image)
    # We first resize the original image to a larger dimension
    # and then we take random crops from it.
    image = tf.image.resize(image, [RESIZE_TO, RESIZE_TO])
    image = tf.image.random_crop(image, [CROP_TO, CROP_TO, 3])
    if noisy:
        image = augmenter.distort(image)
    return image, label
def preprocess_test(image, label):
    """Resize an evaluation example to the crop size; no augmentation.

    Args:
        image: image tensor, resized to (CROP_TO, CROP_TO).
        label: passed through unchanged.

    Returns:
        A `(image, label)` tuple.
    """
    image = tf.image.resize(image, [CROP_TO, CROP_TO])
    return image, label
# Wrap the in-memory splits as `tf.data` pipelines.
train_ds = tf.data.Dataset.from_tensor_slices((new_train_x, new_y_train))
validation_ds = tf.data.Dataset.from_tensor_slices((val_x, val_y))
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
We make sure `train_clean_ds` and `train_noisy_ds` are shuffled using the same seed so that their example orders are exactly the same. This will be helpful when training the student model.
# This dataset will be used to train the first (teacher) model.
# NOTE: both training pipelines shuffle with the same buffer size and seed so
# that corresponding batches contain the same examples in the same order.
train_clean_ds = (
    train_ds.shuffle(BATCH_SIZE * 10, seed=42)
    .map(lambda x, y: preprocess_train(x, y, noisy=False), num_parallel_calls=AUTO)
    .batch(BATCH_SIZE)
    .prefetch(AUTO)
)

# This prepares the `Dataset` object to use RandAugment.
train_noisy_ds = (
    train_ds.shuffle(BATCH_SIZE * 10, seed=42)
    .map(preprocess_train, num_parallel_calls=AUTO)
    .batch(BATCH_SIZE)
    .prefetch(AUTO)
)

validation_ds = (
    validation_ds.map(preprocess_test, num_parallel_calls=AUTO)
    .batch(BATCH_SIZE)
    .prefetch(AUTO)
)

test_ds = (
    test_ds.map(preprocess_test, num_parallel_calls=AUTO)
    .batch(BATCH_SIZE)
    .prefetch(AUTO)
)

# This dataset will be used to train the second (student) model: each element
# pairs a clean batch with the correspondingly-ordered noisy batch.
consistency_training_ds = tf.data.Dataset.zip((train_clean_ds, train_noisy_ds))
Visualize the datasets
def _show_batch(images):
    """Plot the first nine images of a batch in a 3x3 grid."""
    plt.figure(figsize=(10, 10))
    for i, image in enumerate(images[:9]):
        plt.subplot(3, 3, i + 1)
        plt.imshow(image.numpy().astype("int"))
        plt.axis("off")


# One batch from the clean pipeline, one from the noisy (RandAugment) pipeline.
sample_images, sample_labels = next(iter(train_clean_ds))
_show_batch(sample_images)

sample_images, sample_labels = next(iter(train_noisy_ds))
_show_batch(sample_images)
![Nine sample images from the clean training pipeline](png)
![Nine sample images from the noisy (RandAugment) training pipeline](png)
Define a model building utility function
We now define our model building utility. Our model is based on the ResNet50V2 architecture.
def get_training_model(num_classes=10):
    """Build a ResNet50V2-based image classifier, trained from scratch.

    Args:
        num_classes: number of output units of the final `Dense` layer
            (default 10).

    Returns:
        An uncompiled `tf.keras.Sequential` model that maps images with
        pixel values in [0, 255] to unnormalized class scores (the final
        `Dense` layer has no activation, so outputs are logits).
    """
    resnet50_v2 = tf.keras.applications.ResNet50V2(
        weights=None,  # train from scratch — no pre-trained weights
        include_top=False,
        input_shape=(CROP_TO, CROP_TO, 3),
    )
    model = tf.keras.Sequential(
        [
            layers.Input((CROP_TO, CROP_TO, 3)),
            # Scale pixel values from [0, 255] to [-1, 1].
            layers.Rescaling(scale=1.0 / 127.5, offset=-1),
            resnet50_v2,
            layers.GlobalAveragePooling2D(),
            layers.Dense(num_classes),
        ]
    )
    return model
In the interest of reproducibility, we serialize the initial random weights of the teacher network.
# Serialize the freshly-initialized weights so the experiment can be
# reproduced from the same starting point.
initial_teacher_model = get_training_model()
initial_teacher_model.save_weights("initial_teacher_model.h5")
Train the teacher model
As noted in Noisy Student Training, if the teacher model is trained with geometric ensembling and the student model is forced to mimic that, it leads to better performance. The original work uses Stochastic Depth and Dropout to bring in the ensembling part, but for this example we will use Stochastic Weight Averaging (SWA), which also has a similar regularization effect.
# Define the callbacks.
# Halve-style LR schedule: reduce the learning rate when the monitored
# metric plateaus for 3 epochs (ReduceLROnPlateau defaults otherwise).
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(patience=3)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.