text stringlengths 0 4.99k |
|---|
if batch_normalization: |
x = keras.layers.BatchNormalization()(x) |
if activation is not None: |
x = keras.layers.Activation(activation)(x) |
else: |
if batch_normalization: |
x = keras.layers.BatchNormalization()(x) |
if activation is not None: |
x = keras.layers.Activation(activation)(x) |
x = conv(x) |
return x |
def resnet_v20(input_shape, depth, num_classes=10):
    """Build a ResNet v1 image classifier of the given depth.

    Args:
        input_shape: Shape of the input images, e.g. ``(32, 32, 3)``.
        depth: Total number of conv layers; must satisfy ``depth = 6n + 2``
            (e.g. 20, 32, 44).
        num_classes: Number of output classes for the softmax head.

    Returns:
        A ``keras.models.Model`` mapping input images to class probabilities.

    Raises:
        ValueError: If ``depth - 2`` is not divisible by 6.
    """
    if (depth - 2) % 6 != 0:
        raise ValueError("depth should be 6n+2 (eg 20, 32, 44 in [a])")
    # Start model definition.
    num_filters = 16
    num_res_blocks = (depth - 2) // 6

    inputs = keras.layers.Input(shape=input_shape)
    x = resnet_layer(inputs=inputs)
    # Three stages of residual units; filter count doubles per stage.
    for stage in range(3):
        for block in range(num_res_blocks):
            # Downsample at the first block of every stage except the first.
            block_strides = 2 if (stage > 0 and block == 0) else 1
            y = resnet_layer(
                inputs=x, num_filters=num_filters, strides=block_strides
            )
            y = resnet_layer(inputs=y, num_filters=num_filters, activation=None)
            if stage > 0 and block == 0:
                # Linear 1x1 projection on the shortcut so its dims match
                # the downsampled residual branch.
                x = resnet_layer(
                    inputs=x,
                    num_filters=num_filters,
                    kernel_size=1,
                    strides=block_strides,
                    activation=None,
                    batch_normalization=False,
                )
            x = keras.layers.add([x, y])
            x = keras.layers.Activation("relu")(x)
        num_filters *= 2

    # Classifier head. v1 does not use BN after the last shortcut-ReLU.
    x = keras.layers.AveragePooling2D(pool_size=8)(x)
    y = keras.layers.Flatten()(x)
    outputs = keras.layers.Dense(
        num_classes, activation="softmax", kernel_initializer="he_normal"
    )(y)
    # Instantiate and return the model.
    return keras.models.Model(inputs=inputs, outputs=outputs)
def training_model():
    """Construct a fresh ResNet-20 for 32x32 RGB inputs (CIFAR-10 sized)."""
    model = resnet_v20((32, 32, 3), 20)
    return model
# Build one model and snapshot its randomly initialized weights so every
# training run below can start from exactly the same initialization.
initial_model = training_model()
initial_model.save_weights("initial_weights.h5")
Train the model with the dataset augmented by CutMix
# Train from the saved initialization on the CutMix-augmented dataset,
# then report held-out accuracy.
model = training_model()
model.load_weights("initial_weights.h5")
model.compile(
    loss="categorical_crossentropy",
    optimizer="adam",
    metrics=["accuracy"],
)
model.fit(train_ds_cmu, validation_data=test_ds, epochs=15)

test_loss, test_accuracy = model.evaluate(test_ds)
print("Test accuracy: {:.2f}%".format(test_accuracy * 100))
Epoch 1/15 |
1563/1563 [==============================] - 62s 24ms/step - loss: 1.9216 - accuracy: 0.4090 - val_loss: 1.9737 - val_accuracy: 0.4061 |
Epoch 2/15 |
1563/1563 [==============================] - 37s 24ms/step - loss: 1.6549 - accuracy: 0.5325 - val_loss: 1.5033 - val_accuracy: 0.5061 |
Epoch 3/15 |
1563/1563 [==============================] - 38s 24ms/step - loss: 1.5536 - accuracy: 0.5840 - val_loss: 1.2913 - val_accuracy: 0.6112 |
Epoch 4/15 |
1563/1563 [==============================] - 38s 24ms/step - loss: 1.4988 - accuracy: 0.6097 - val_loss: 1.0587 - val_accuracy: 0.7033 |
Epoch 5/15 |
1563/1563 [==============================] - 38s 24ms/step - loss: 1.4531 - accuracy: 0.6291 - val_loss: 1.0681 - val_accuracy: 0.6841 |
Epoch 6/15 |
1563/1563 [==============================] - 37s 24ms/step - loss: 1.4173 - accuracy: 0.6464 - val_loss: 1.0265 - val_accuracy: 0.7085 |
Epoch 7/15 |
1563/1563 [==============================] - 37s 24ms/step - loss: 1.3932 - accuracy: 0.6572 - val_loss: 0.9540 - val_accuracy: 0.7331 |
Epoch 8/15 |
1563/1563 [==============================] - 37s 24ms/step - loss: 1.3736 - accuracy: 0.6680 - val_loss: 0.9877 - val_accuracy: 0.7240 |
Epoch 9/15 |
1563/1563 [==============================] - 38s 24ms/step - loss: 1.3575 - accuracy: 0.6782 - val_loss: 0.8944 - val_accuracy: 0.7570 |
Epoch 10/15 |
1563/1563 [==============================] - 38s 24ms/step - loss: 1.3398 - accuracy: 0.6886 - val_loss: 0.8598 - val_accuracy: 0.7649 |
Epoch 11/15 |
1563/1563 [==============================] - 38s 24ms/step - loss: 1.3277 - accuracy: 0.6939 - val_loss: 0.9032 - val_accuracy: 0.7603 |
Epoch 12/15 |
1563/1563 [==============================] - 38s 24ms/step - loss: 1.3131 - accuracy: 0.6964 - val_loss: 0.7934 - val_accuracy: 0.7926 |
Epoch 13/15 |
1563/1563 [==============================] - 37s 24ms/step - loss: 1.3050 - accuracy: 0.7029 - val_loss: 0.8737 - val_accuracy: 0.7552 |
Epoch 14/15 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.