text (string lengths: 0–4.99k)
return {
\"user_id\": layers.Input(name=\"user_id\", shape=(1,), dtype=tf.string),
\"sequence_movie_ids\": layers.Input(
name=\"sequence_movie_ids\", shape=(sequence_length - 1,), dtype=tf.string
),
\"target_movie_id\": layers.Input(
name=\"target_movie_id\", shape=(1,), dtype=tf.string
),
\"sequence_ratings\": layers.Input(
name=\"sequence_ratings\", shape=(sequence_length - 1,), dtype=tf.float32
),
\"sex\": layers.Input(name=\"sex\", shape=(1,), dtype=tf.string),
\"age_group\": layers.Input(name=\"age_group\", shape=(1,), dtype=tf.string),
\"occupation\": layers.Input(name=\"occupation\", shape=(1,), dtype=tf.string),
}
Encode input features
The encode_input_features method works as follows:
Each categorical user feature is encoded using layers.Embedding, with an embedding dimension equal to the square root of the vocabulary size of the feature. The embeddings of these features are concatenated to form a single input tensor.
Each movie in the movie sequence and the target movie is encoded using layers.Embedding, where the dimension size is the square root of the number of movies.
A multi-hot genres vector for each movie is concatenated with its embedding vector, and processed using a non-linear layers.Dense to output a vector of the same movie embedding dimensions.
A positional embedding is added to each movie embedding in the sequence, and then multiplied by its rating from the ratings sequence.
The target movie embedding is concatenated to the sequence movie embeddings, producing a tensor with the shape of [batch size, sequence length, embedding size], as expected by the attention layer for the transformer architecture.
The method returns a tuple of two elements: encoded_transformer_features and encoded_other_features.
def encode_input_features(
inputs,
include_user_id=True,
include_user_features=True,
include_movie_features=True,
):
encoded_transformer_features = []
encoded_other_features = []
other_feature_names = []
if include_user_id:
other_feature_names.append(\"user_id\")
if include_user_features:
other_feature_names.extend(USER_FEATURES)
## Encode user features
for feature_name in other_feature_names:
# Convert the string input values into integer indices.
vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
idx = StringLookup(vocabulary=vocabulary, mask_token=None, num_oov_indices=0)(
inputs[feature_name]
)
# Compute embedding dimensions
embedding_dims = int(math.sqrt(len(vocabulary)))
# Create an embedding layer with the specified dimensions.
embedding_encoder = layers.Embedding(
input_dim=len(vocabulary),
output_dim=embedding_dims,
name=f\"{feature_name}_embedding\",
)
# Convert the index values to embedding representations.
encoded_other_features.append(embedding_encoder(idx))
## Create a single embedding vector for the user features
if len(encoded_other_features) > 1:
encoded_other_features = layers.concatenate(encoded_other_features)
elif len(encoded_other_features) == 1:
encoded_other_features = encoded_other_features[0]
else:
encoded_other_features = None
## Create a movie embedding encoder
movie_vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[\"movie_id\"]
movie_embedding_dims = int(math.sqrt(len(movie_vocabulary)))
# Create a lookup to convert string values to integer indices.
movie_index_lookup = StringLookup(
vocabulary=movie_vocabulary,
mask_token=None,
num_oov_indices=0,
name=\"movie_index_lookup\",
)
# Create an embedding layer with the specified dimensions.
movie_embedding_encoder = layers.Embedding(
input_dim=len(movie_vocabulary),
output_dim=movie_embedding_dims,
name=f\"movie_embedding\",
)
# Create a vector lookup for movie genres.
genre_vectors = movies[genres].to_numpy()
movie_genres_lookup = layers.Embedding(
input_dim=genre_vectors.shape[0],
output_dim=genre_vectors.shape[1],
embeddings_initializer=tf.keras.initializers.Constant(genre_vectors),
trainable=False,
name=\"genres_vector\",
)
# Create a processing layer for genres.
movie_embedding_processor = layers.Dense(
units=movie_embedding_dims,