text
stringlengths
0
4.99k
activation=\"relu\",
kernel_initializer=\"he_normal\",
)
)
self.conv_model.add(layers.ZeroPadding2D(padding))
self.conv_model.add(
layers.MaxPool2D(pooling_kernel_size, pooling_stride, \"same\")
)
self.positional_emb = positional_emb
def call(self, images):
    """Tokenize a batch of images into a sequence of feature vectors.

    The images are passed through the convolutional mini-network and the
    resulting spatial feature map is flattened so that each spatial
    location becomes one token in the output sequence.
    """
    feature_map = self.conv_model(images)
    dims = tf.shape(feature_map)
    # Collapse the height and width axes into a single sequence axis.
    num_tokens = dims[1] * dims[2]
    return tf.reshape(feature_map, (-1, num_tokens, dims[-1]))
def positional_embedding(self, image_size):
    """Build the optional positional-embedding layer for this tokenizer.

    Positional embeddings are optional in CCT. A dummy batch is pushed
    through `call` to discover the sequence length and projection
    dimension, and an `Embedding` layer is sized accordingly.

    Returns `(embedding_layer, sequence_length)` when positional
    embeddings are enabled, otherwise `None`.
    """
    if not self.positional_emb:
        return None
    # Probe with a dummy image to infer the tokenized sequence shape.
    probe = tf.ones((1, image_size, image_size, 3))
    tokens = self.call(probe)
    seq_len = tf.shape(tokens)[1]
    proj_dim = tf.shape(tokens)[-1]
    embedding = layers.Embedding(input_dim=seq_len, output_dim=proj_dim)
    return embedding, seq_len
Stochastic depth for regularization
Stochastic depth is a regularization technique that randomly drops a set of layers. During inference, the layers are kept as they are. It is similar to Dropout, except that it operates on a block of layers rather than on individual nodes within a layer. In CCT, stochastic depth is used just before the residual blocks of the Transformer encoder.
# Referred from: github.com:rwightman/pytorch-image-models.
class StochasticDepth(layers.Layer):
    """Randomly drops the whole residual branch during training.

    Unlike Dropout, which zeroes individual activations, stochastic depth
    zeroes entire examples in the batch with probability `drop_prop` and
    rescales the survivors so the expected activation is unchanged. At
    inference time the input passes through untouched.
    """

    def __init__(self, drop_prop, **kwargs):
        super().__init__(**kwargs)
        # Probability of dropping the branch for a given example.
        self.drop_prob = drop_prop

    def call(self, x, training=None):
        if not training:
            # Inference: identity mapping.
            return x
        survival_rate = 1 - self.drop_prob
        # One Bernoulli sample per example; broadcast over all other axes.
        noise_shape = (tf.shape(x)[0],) + (1,) * (len(tf.shape(x)) - 1)
        mask = tf.floor(survival_rate + tf.random.uniform(noise_shape, 0, 1))
        # Rescale survivors to keep the expected magnitude constant.
        return (x / survival_rate) * mask
MLP for the Transformer encoder
def mlp(x, hidden_units, dropout_rate):
    """Feed-forward block of the Transformer encoder.

    Applies a stack of GELU-activated Dense layers, each followed by
    Dropout with the given rate, and returns the transformed tensor.
    """
    for num_units in hidden_units:
        dense = layers.Dense(num_units, activation=tf.nn.gelu)
        x = layers.Dropout(dropout_rate)(dense(x))
    return x
Data augmentation
In the original paper, the authors use AutoAugment to induce stronger regularization. For this example, we will be using the standard geometric augmentations like random cropping and flipping.
# Note the rescaling layer. These layers have pre-defined inference behavior.
_augmentation_layers = [
    layers.Rescaling(scale=1.0 / 255),
    layers.RandomCrop(image_size, image_size),
    layers.RandomFlip("horizontal"),
]
data_augmentation = keras.Sequential(_augmentation_layers, name="data_augmentation")
The final CCT model
Another recipe introduced in CCT is attention pooling, or sequence pooling. In ViT, only the feature map corresponding to the class token is pooled and then used for the subsequent classification task (or any other downstream task). In CCT, the outputs of the Transformer encoder are weighted and then passed on to the final task-specific layer (in this example, classification).
def create_cct_model(
image_size=image_size,
input_shape=input_shape,
num_heads=num_heads,
projection_dim=projection_dim,
transformer_units=transformer_units,
):
inputs = layers.Input(input_shape)
# Augment data.
augmented = data_augmentation(inputs)
# Encode patches.
cct_tokenizer = CCTTokenizer()
encoded_patches = cct_tokenizer(augmented)
# Apply positional embedding.
if positional_emb:
pos_embed, seq_length = cct_tokenizer.positional_embedding(image_size)
positions = tf.range(start=0, limit=seq_length, delta=1)
position_embeddings = pos_embed(positions)
encoded_patches += position_embeddings
# Calculate Stochastic Depth probabilities.