text
stringlengths
0
4.99k
# Train the teacher model on the clean dataset, using Stochastic Weight
# Averaging (SWA) to improve generalization of the final weights.
early_stopping = tf.keras.callbacks.EarlyStopping(
    patience=10, restore_best_weights=True
)

# SWA optimizer wrapper from TensorFlow Addons (tfa), not TF-Hub.
SWA = tfa.optimizers.SWA

# Compile and train the teacher model.
teacher_model = get_training_model()
teacher_model.load_weights("initial_teacher_model.h5")
teacher_model.compile(
    # Notice that we are wrapping our optimizer within SWA.
    optimizer=SWA(tf.keras.optimizers.Adam()),
    # The model outputs raw logits, hence from_logits=True.
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
history = teacher_model.fit(
    train_clean_ds,
    epochs=EPOCHS,
    validation_data=validation_ds,
    callbacks=[reduce_lr, early_stopping],
)

# Evaluate the teacher model on the test set.
_, acc = teacher_model.evaluate(test_ds, verbose=0)
print(f"Test accuracy: {acc*100}%")
Epoch 1/5
387/387 [==============================] - 73s 78ms/step - loss: 1.7785 - accuracy: 0.3582 - val_loss: 2.0589 - val_accuracy: 0.3920
Epoch 2/5
387/387 [==============================] - 28s 71ms/step - loss: 1.2493 - accuracy: 0.5542 - val_loss: 1.4228 - val_accuracy: 0.5380
Epoch 3/5
387/387 [==============================] - 28s 73ms/step - loss: 1.0294 - accuracy: 0.6350 - val_loss: 1.4422 - val_accuracy: 0.5900
Epoch 4/5
387/387 [==============================] - 28s 73ms/step - loss: 0.8954 - accuracy: 0.6864 - val_loss: 1.2189 - val_accuracy: 0.6520
Epoch 5/5
387/387 [==============================] - 28s 73ms/step - loss: 0.7879 - accuracy: 0.7231 - val_loss: 0.9790 - val_accuracy: 0.6500
Test accuracy: 65.83999991416931%
Define a self-training utility
For this part, we will borrow the `Distiller` class from the official Keras knowledge-distillation example (linked below).
# Majority of the code is taken from:
# https://keras.io/examples/vision/knowledge_distillation/
class SelfTrainer(tf.keras.Model):
def __init__(self, student, teacher):
    """Hold a trainable student model and a frozen teacher model."""
    super().__init__()
    self.student = student
    self.teacher = teacher
def compile(
    self, optimizer, metrics, student_loss_fn, distillation_loss_fn, temperature=3,
):
    """Configure the optimizer, metrics, both loss functions, and the
    softmax temperature used when distilling the teacher's predictions."""
    super().compile(optimizer=optimizer, metrics=metrics)
    self.temperature = temperature
    self.student_loss_fn = student_loss_fn
    self.distillation_loss_fn = distillation_loss_fn
def train_step(self, data):
    """Run one self-training step.

    `data` is a zip of two independent datasets: a (clean_images, labels)
    pair and a (noisy_images, labels) pair. The teacher predicts on the
    clean images while the student is optimized on the noisy ones.
    """
    # Unpack the zipped batch; the clean labels are unused.
    (clean_images, _), (noisy_images, labels) = data

    # Teacher runs in inference mode and is never updated here.
    teacher_logits = self.teacher(clean_images, training=False)

    with tf.GradientTape() as tape:
        student_logits = self.student(noisy_images, training=True)

        # Hard-label loss against the ground-truth labels.
        hard_loss = self.student_loss_fn(labels, student_logits)
        # Soft-label loss between temperature-scaled distributions.
        soft_loss = self.distillation_loss_fn(
            tf.nn.softmax(teacher_logits / self.temperature, axis=1),
            tf.nn.softmax(student_logits / self.temperature, axis=1),
        )
        total_loss = (hard_loss + soft_loss) / 2

    # Only the student's variables receive gradient updates.
    student_vars = self.student.trainable_variables
    grads = tape.gradient(total_loss, student_vars)
    self.optimizer.apply_gradients(zip(grads, student_vars))

    # Update the metrics configured in `compile()` on softmaxed outputs.
    self.compiled_metrics.update_state(labels, tf.nn.softmax(student_logits, axis=1))

    # Report all compiled metrics plus the combined loss.
    results = {m.name: m.result() for m in self.metrics}
    results.update({"total_loss": total_loss})
    return results
def test_step(self, data):
# During inference, we only pass a dataset consisting images and labels.
x, y = data