Create model inputs
Now, define the inputs for the models as a dictionary, where the key is the feature name, and the value is a keras.layers.Input tensor with the corresponding feature shape and data type.
def create_model_inputs():
    inputs = {}
    for feature_name in FEATURE_NAMES:
        if feature_name in NUMERIC_FEATURE_NAMES:
            inputs[feature_name] = layers.Input(
                name=feature_name, shape=(), dtype=tf.float32
            )
        else:
            inputs[feature_name] = layers.Input(
                name=feature_name, shape=(), dtype=tf.string
            )
    return inputs
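For illustration, here is roughly what the returned dictionary looks like. The two feature names below are hypothetical stand-ins for entries in the FEATURE_NAMES, NUMERIC_FEATURE_NAMES, and categorical feature constants defined earlier in the tutorial:

# Hypothetical illustration, assuming "Elevation" appears in
# NUMERIC_FEATURE_NAMES and "Soil_Type" is a categorical feature.
inputs = create_model_inputs()
print(inputs["Elevation"])  # KerasTensor: shape=(None,), dtype=float32
print(inputs["Soil_Type"])  # KerasTensor: shape=(None,), dtype=string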
Encode features
We create two representations of our input features: sparse and dense.

1. In the sparse representation, the categorical features are one-hot encoded, using the StringLookup layer in "binary" output mode. This representation can be useful for the model to memorize particular feature values in order to make certain predictions.
2. In the dense representation, the categorical features are encoded with low-dimensional embeddings using the Embedding layer. This representation helps the model to generalize well to unseen feature combinations.
from tensorflow.keras.layers import StringLookup


def encode_inputs(inputs, use_embedding=False):
    encoded_features = []
    for feature_name in inputs:
        if feature_name in CATEGORICAL_FEATURE_NAMES:
            vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
            # Create a lookup to convert string values to integer indices.
            # Since we are not using a mask token nor expecting any out-of-vocabulary
            # (OOV) token, we set mask_token to None and num_oov_indices to 0.
            lookup = StringLookup(
                vocabulary=vocabulary,
                mask_token=None,
                num_oov_indices=0,
                output_mode="int" if use_embedding else "binary",
            )
            if use_embedding:
                # Convert the string input values into integer indices.
                encoded_feature = lookup(inputs[feature_name])
                # Heuristic: use roughly sqrt(vocabulary size) embedding dimensions.
                embedding_dims = int(math.sqrt(len(vocabulary)))
                # Create an embedding layer with the specified dimensions.
                embedding = layers.Embedding(
                    input_dim=len(vocabulary), output_dim=embedding_dims
                )
                # Convert the index values to embedding representations.
                encoded_feature = embedding(encoded_feature)
            else:
                # Convert the string input values into a one-hot encoding.
                encoded_feature = lookup(tf.expand_dims(inputs[feature_name], -1))
        else:
            # Use the numerical features as-is.
            encoded_feature = tf.expand_dims(inputs[feature_name], -1)
        encoded_features.append(encoded_feature)
    all_features = layers.concatenate(encoded_features)
    return all_features
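As a quick sanity check, both representations can be built from the same input dictionary. The sketch below assumes the vocabulary constants from the earlier sections are in scope:

inputs = create_model_inputs()
# Sparse representation: one multi-hot vector per categorical feature.
onehot_features = encode_inputs(inputs)
# Dense representation: learned low-dimensional embeddings.
embedded_features = encode_inputs(inputs, use_embedding=True)
# The dense feature vector is typically much smaller than the one-hot one.
print(onehot_features.shape, embedded_features.shape)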
Experiment 1: a baseline model
In the first experiment, let's create a multi-layer feed-forward network, where the categorical features are one-hot encoded.
def create_baseline_model():
    inputs = create_model_inputs()
    features = encode_inputs(inputs)

    for units in hidden_units:
        features = layers.Dense(units)(features)
        features = layers.BatchNormalization()(features)
        features = layers.ReLU()(features)
        features = layers.Dropout(dropout_rate)(features)

    outputs = layers.Dense(units=NUM_CLASSES, activation="softmax")(features)
    model = keras.Model(inputs=inputs, outputs=outputs)
    return model
baseline_model = create_baseline_model()
keras.utils.plot_model(baseline_model, show_shapes=True, rankdir="LR")
Note that plot_model requires the pydot package (pip install pydot) and the graphviz binaries (see https://graphviz.gitlab.io/download/); the call raises an error if they are missing.
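The run_experiment helper was defined in an earlier section of the tutorial. For reference, here is a minimal sketch of what it does, assuming the Adam optimizer and a sparse categorical crossentropy loss (consistent with the sparse_categorical_accuracy metric in the logs below); the learning_rate, num_epochs, and train_dataset names are assumed to be defined earlier:

def run_experiment(model):
    # A minimal sketch; the tutorial's actual helper also builds the
    # training dataset from the CSV files prepared earlier.
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
        loss=keras.losses.SparseCategoricalCrossentropy(),
        metrics=[keras.metrics.SparseCategoricalAccuracy()],
    )
    print("Start training the model...")
    model.fit(train_dataset, epochs=num_epochs)
    print("Model training finished")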
Let's run it:
run_experiment(baseline_model)
Start training the model...
Epoch 1/50
1862/1862 [==============================] - 10s 5ms/step - loss: 0.9208 - sparse_categorical_accuracy: 0.6334
Epoch 2/50
1862/1862 [==============================] - 5s 3ms/step - loss: 0.6758 - sparse_categorical_accuracy: 0.7081
Epoch 3/50
1862/1862 [==============================] - 5s 3ms/step - loss: 0.6409 - sparse_categorical_accuracy: 0.7225
Epoch 4/50
1862/1862 [==============================] - 5s 3ms/step - loss: 0.6209 - sparse_categorical_accuracy: 0.7316
Epoch 5/50
1862/1862 [==============================] - 5s 3ms/step - loss: 0.6074 - sparse_categorical_accuracy: 0.7371
Epoch 6/50
1862/1862 [==============================] - 5s 3ms/step - loss: 0.5975 - sparse_categorical_accuracy: 0.7419
Epoch 7/50
1862/1862 [==============================] - 5s 3ms/step - loss: 0.5889 - sparse_categorical_accuracy: 0.7458
Epoch 8/50
1862/1862 [==============================] - 5s 3ms/step - loss: 0.5846 - sparse_categorical_accuracy: 0.7474
Epoch 9/50
1862/1862 [==============================] - 5s 3ms/step - loss: 0.5810 - sparse_categorical_accuracy: 0.7502
Epoch 10/50