text
stringlengths
0
4.99k
column_names=CSV_HEADER,
column_defaults=COLUMN_DEFAULTS,
label_name=TARGET_FEATURE_NAME,
num_epochs=1,
header=False,
shuffle=shuffle,
).map(process)
return dataset
Create model inputs
def create_model_inputs():
    """Build a dict of scalar Keras Input layers, one per feature name.

    Features listed in NUMERIC_FEATURE_NAMES get float32 inputs; every
    other feature is treated as categorical and gets a string input.

    Returns:
        dict mapping feature_name -> keras Input tensor with shape=().
    """
    model_inputs = {}
    for name in FEATURE_NAMES:
        # Numeric features are float32; everything else is a raw string.
        dtype = tf.float32 if name in NUMERIC_FEATURE_NAMES else tf.string
        model_inputs[name] = layers.Input(name=name, shape=(), dtype=dtype)
    return model_inputs
Encode input features
For categorical features, we encode them using layers.Embedding, with encoding_size as the embedding dimension. For the numerical features, we apply a linear transformation using layers.Dense to project each feature into an encoding_size-dimensional vector. Thus, all the encoded features will have the same dimensionality.
def encode_inputs(inputs, encoding_size):
    """Encode each input feature into an `encoding_size`-dimensional vector.

    Categorical features are mapped to integer indices via a StringLookup
    and then embedded; numeric features are linearly projected with a Dense
    layer. All encoded features therefore share the same dimensionality.

    Args:
        inputs: dict of feature_name -> Keras Input tensor (see
            create_model_inputs).
        encoding_size: output dimensionality for every encoded feature.

    Returns:
        A list of encoded feature tensors, one per entry in `inputs`.
    """
    encoded_features = []
    for feature_name in inputs:
        if feature_name in CATEGORICAL_FEATURES_WITH_VOCABULARY:
            vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
            # Create a lookup to convert string values to integer indices.
            # Since we are not using a mask token nor expecting any
            # out-of-vocabulary (oov) token, we set mask_token to None and
            # num_oov_indices to 0.
            index = StringLookup(
                vocabulary=vocabulary, mask_token=None, num_oov_indices=0
            )
            # Convert the string input values into integer indices.
            value_index = index(inputs[feature_name])
            # Create an embedding layer with the specified dimensions.
            # (Fixed the original local-variable misspelling "embedding_ecoder".)
            embedding_encoder = layers.Embedding(
                input_dim=len(vocabulary), output_dim=encoding_size
            )
            # Convert the index values to embedding representations.
            encoded_feature = embedding_encoder(value_index)
        else:
            # Project the scalar numeric feature to encoding_size using a
            # linear transformation; expand_dims makes it rank-2 for Dense.
            encoded_feature = tf.expand_dims(inputs[feature_name], -1)
            encoded_feature = layers.Dense(units=encoding_size)(encoded_feature)
        encoded_features.append(encoded_feature)
    return encoded_features
Implement the Gated Linear Unit
Gated Linear Units (GLUs) provide the flexibility to suppress inputs that are not relevant for a given task.
class GatedLinearUnit(layers.Layer):
    """Gated Linear Unit: a linear projection scaled elementwise by a
    sigmoid gate, which lets the layer suppress irrelevant inputs."""

    def __init__(self, units):
        super(GatedLinearUnit, self).__init__()
        # Linear projection of the inputs to `units` dimensions.
        self.linear = layers.Dense(units)
        # Per-dimension gate in [0, 1] computed from the same inputs.
        self.sigmoid = layers.Dense(units, activation="sigmoid")

    def call(self, inputs):
        projected = self.linear(inputs)
        gate = self.sigmoid(inputs)
        return projected * gate
Implement the Gated Residual Network
The Gated Residual Network (GRN) works as follows:
Applies the nonlinear ELU transformation to the inputs.
Applies linear transformation followed by dropout.
Applies GLU and adds the original inputs to the output of the GLU to perform skip (residual) connection.
Applies layer normalization and produces the output.
class GatedResidualNetwork(layers.Layer):
    """Gated Residual Network (GRN).

    Forward pass: ELU dense -> linear dense -> dropout -> GLU, then a
    skip (residual) connection back to the inputs (projected to `units`
    if their last dimension differs), followed by layer normalization.
    """

    def __init__(self, units, dropout_rate):
        super(GatedResidualNetwork, self).__init__()
        self.units = units
        self.elu_dense = layers.Dense(units, activation="elu")
        self.linear_dense = layers.Dense(units)
        self.dropout = layers.Dropout(dropout_rate)
        self.gated_linear_unit = GatedLinearUnit(units)
        self.layer_norm = layers.LayerNormalization()
        # Used only when the residual branch needs reshaping to `units`.
        self.project = layers.Dense(units)

    def call(self, inputs):
        hidden = self.elu_dense(inputs)
        hidden = self.dropout(self.linear_dense(hidden))
        # Width-match the residual branch before the skip connection.
        if inputs.shape[-1] != self.units:
            residual = self.project(inputs)
        else:
            residual = inputs
        summed = residual + self.gated_linear_unit(hidden)
        return self.layer_norm(summed)
Implement the Variable Selection Network
The Variable Selection Network (VSN) works as follows:
Applies a GRN to each feature individually.
Applies a GRN on the concatenation of all the features, followed by a softmax to produce feature weights.
Produces a weighted sum of the output of the individual GRN.
Note that the output of the VSN is [batch_size, encoding_size], regardless of the number of the input features.
class VariableSelection(layers.Layer):
def __init__(self, num_features, units, dropout_rate):
super(VariableSelection, self).__init__()
self.grns = list()