import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers


class MyModel(keras.Model):
    def train_step(self, data):
        inputs, targets = data
        trainable_vars = self.trainable_variables
        with tf.GradientTape() as tape2:
            with tf.GradientTape() as tape1:
                preds = self(inputs, training=True)  # Forward pass
                # Compute the loss value
                # (the loss function is configured in `compile()`)
                loss = self.compiled_loss(targets, preds)
            # Compute first-order gradients
            dl_dw = tape1.gradient(loss, trainable_vars)
        # Compute second-order gradients
        d2l_dw2 = tape2.gradient(dl_dw, trainable_vars)

        # Combine first-order and second-order gradients
        grads = [0.5 * w1 + 0.5 * w2 for (w1, w2) in zip(d2l_dw2, dl_dw)]

        # Update weights
        self.optimizer.apply_gradients(zip(grads, trainable_vars))

        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(targets, preds)

        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics}
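If the nested-tape pattern above is unfamiliar, here is a minimal standalone sanity check (not part of the model above) of how stacking two tf.GradientTape contexts yields second-order gradients:

# Sketch: second-order gradient of y = x**3 at x = 2.0.
# dy/dx = 3x^2 = 12.0 and d2y/dx2 = 6x = 12.0, so both prints show 12.0.
x = tf.Variable(2.0)
with tf.GradientTape() as tape2:
    with tf.GradientTape() as tape1:
        y = x ** 3
    # tape1.gradient(...) runs inside tape2's context, so tape2 records it
    dy_dx = tape1.gradient(y, x)
d2y_dx2 = tape2.gradient(dy_dx, x)
print(dy_dx.numpy(), d2y_dx2.numpy())  # 12.0 12.0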
Let's train a simple two-layer model on MNIST with this custom training loop.
We pick, somewhat at random, a batch size of 1024 and a learning rate of 0.1. The general idea is to use larger batches and a larger learning rate than usual, since our "improved" gradients should lead us to quicker convergence.
import numpy as np

# Construct an instance of MyModel
def get_model():
    inputs = keras.Input(shape=(784,))
    intermediate = layers.Dense(256, activation="relu")(inputs)
    outputs = layers.Dense(10, activation="softmax")(intermediate)
    model = MyModel(inputs, outputs)
    return model

# Prepare data
(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784)) / 255
model = get_model()
model.compile(
    optimizer=keras.optimizers.SGD(learning_rate=0.1),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)
model.fit(x_train, y_train, epochs=3, batch_size=1024, validation_split=0.1)
Epoch 1/3
53/53 [==============================] - 1s 15ms/step - loss: 2.2960 - accuracy: 0.1580 - val_loss: 2.3071 - val_accuracy: 0.0963
Epoch 2/3
53/53 [==============================] - 1s 13ms/step - loss: 2.3246 - accuracy: 0.0995 - val_loss: 2.3454 - val_accuracy: 0.0960
Epoch 3/3
53/53 [==============================] - 1s 12ms/step - loss: 2.3578 - accuracy: 0.0995 - val_loss: 2.3767 - val_accuracy: 0.0960
<tensorflow.python.keras.callbacks.History at 0x151cbf0d0>
Oh no, it doesn't converge! Something is not working as planned.
Time for some step-by-step printing of what's going on with our gradients.
We add various print statements in the train_step method, and we make sure to pass run_eagerly=True to compile() to run our code step by step, eagerly.
class MyModel(keras.Model):
    def train_step(self, data):
        print()
        print("----Start of step: %d" % (self.step_counter,))
        # `step_counter` is not a built-in Keras attribute; it must be set
        # on the model instance before training (see below)
        self.step_counter += 1

        inputs, targets = data
        trainable_vars = self.trainable_variables
        with tf.GradientTape() as tape2:
            with tf.GradientTape() as tape1:
                preds = self(inputs, training=True)  # Forward pass
                # Compute the loss value
                # (the loss function is configured in `compile()`)
                loss = self.compiled_loss(targets, preds)
            # Compute first-order gradients
            dl_dw = tape1.gradient(loss, trainable_vars)
        # Compute second-order gradients
        d2l_dw2 = tape2.gradient(dl_dw, trainable_vars)

        print("Max of dl_dw[0]: %.4f" % tf.reduce_max(dl_dw[0]))
        print("Min of dl_dw[0]: %.4f" % tf.reduce_min(dl_dw[0]))
        print("Mean of dl_dw[0]: %.4f" % tf.reduce_mean(dl_dw[0]))
        print("-")
        print("Max of d2l_dw2[0]: %.4f" % tf.reduce_max(d2l_dw2[0]))
        print("Min of d2l_dw2[0]: %.4f" % tf.reduce_min(d2l_dw2[0]))
        print("Mean of d2l_dw2[0]: %.4f" % tf.reduce_mean(d2l_dw2[0]))

        # Combine first-order and second-order gradients
        grads = [0.5 * w1 + 0.5 * w2 for (w1, w2) in zip(d2l_dw2, dl_dw)]

        # Update weights
        self.optimizer.apply_gradients(zip(grads, trainable_vars))

        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(targets, preds)

        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics}
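Since self.step_counter has to be initialized on the model instance before fitting, one plausible way to wire this up, with run_eagerly=True as mentioned above (a sketch; the learning rate and step count here are illustrative):

model = get_model()
model.compile(
    optimizer=keras.optimizers.SGD(learning_rate=0.1),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
    run_eagerly=True,  # execute train_step eagerly so the print() calls run
)
model.step_counter = 0  # consumed by the train_step above
# A handful of steps is enough to inspect the gradient statistics.
model.fit(x_train, y_train, epochs=1, batch_size=1024, steps_per_epoch=3, verbose=0)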