def create_sequences(values, window_size, step_size):
    sequences = []
    start_index = 0
    while True:
        end_index = start_index + window_size
        seq = values[start_index:end_index]
        # If fewer than window_size items remain, right-align the final window.
        if len(seq) < window_size:
            seq = values[-window_size:]
            if len(seq) == window_size:
                sequences.append(seq)
            break
        sequences.append(seq)
        start_index += step_size
    return sequences
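As a quick sanity check, here is what the function returns for a toy input (the values below are illustrative, not from the dataset):

create_sequences([1, 2, 3, 4, 5, 6, 7], window_size=4, step_size=2)
# [[1, 2, 3, 4], [3, 4, 5, 6], [4, 5, 6, 7]]
# The final window is right-aligned to the end of the list, so no items are dropped.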
ratings_data.movie_ids = ratings_data.movie_ids.apply(
    lambda ids: create_sequences(ids, sequence_length, step_size)
)
ratings_data.ratings = ratings_data.ratings.apply(
    lambda ratings: create_sequences(ratings, sequence_length, step_size)
)
del ratings_data["timestamps"]
After that, we process the output so that each sequence becomes a separate record in the DataFrame. In addition, we join the user features with the ratings data.
ratings_data_movies = ratings_data[["user_id", "movie_ids"]].explode(
    "movie_ids", ignore_index=True
)
ratings_data_rating = ratings_data[["ratings"]].explode("ratings", ignore_index=True)
ratings_data_transformed = pd.concat([ratings_data_movies, ratings_data_rating], axis=1)
ratings_data_transformed = ratings_data_transformed.join(
    users.set_index("user_id"), on="user_id"
)
ratings_data_transformed.movie_ids = ratings_data_transformed.movie_ids.apply(
    lambda x: ",".join(x)
)
ratings_data_transformed.ratings = ratings_data_transformed.ratings.apply(
    lambda x: ",".join([str(v) for v in x])
)
del ratings_data_transformed["zip_code"]
ratings_data_transformed.rename(
    columns={"movie_ids": "sequence_movie_ids", "ratings": "sequence_ratings"},
    inplace=True,
)
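If the explode step above is unfamiliar: it turns each list-valued cell into its own row, repeating the scalar columns alongside it. A minimal sketch with hypothetical values:

import pandas as pd

# One user with two movie-id sequences in a single cell.
df = pd.DataFrame({"user_id": [1], "movie_ids": [[[10, 20], [20, 30]]]})
print(df.explode("movie_ids", ignore_index=True))
#    user_id movie_ids
# 0        1  [10, 20]
# 1        1  [20, 30]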
With a sequence_length of 4 and a step_size of 2, we end up with 498,623 sequences.
Finally, we split the data into training and testing splits, with 85% and 15% of the instances respectively, and save them to CSV files.
random_selection = np.random.rand(len(ratings_data_transformed.index)) <= 0.85
train_data = ratings_data_transformed[random_selection]
test_data = ratings_data_transformed[~random_selection]
train_data.to_csv("train_data.csv", index=False, sep="|", header=False)
test_data.to_csv("test_data.csv", index=False, sep="|", header=False)
Define metadata
CSV_HEADER = list(ratings_data_transformed.columns)
CATEGORICAL_FEATURES_WITH_VOCABULARY = {
    "user_id": list(users.user_id.unique()),
    "movie_id": list(movies.movie_id.unique()),
    "sex": list(users.sex.unique()),
    "age_group": list(users.age_group.unique()),
    "occupation": list(users.occupation.unique()),
}
USER_FEATURES = ["sex", "age_group", "occupation"]
MOVIE_FEATURES = ["genres"]
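These vocabularies feed the lookup and embedding layers built later. As a minimal sketch (the wiring here is illustrative, not the exact layers defined further below), a vocabulary can be mapped to integer indices with keras.layers.StringLookup:

import tensorflow as tf

user_id_lookup = tf.keras.layers.StringLookup(
    vocabulary=CATEGORICAL_FEATURES_WITH_VOCABULARY["user_id"], mask_token=None
)
indices = user_id_lookup(["user_1"])  # hypothetical raw id -> integer index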
Create tf.data.Dataset for training and evaluation
def get_dataset_from_csv(csv_file_path, shuffle=False, batch_size=128):
    def process(features):
        movie_ids_string = features["sequence_movie_ids"]
        sequence_movie_ids = tf.strings.split(movie_ids_string, ",").to_tensor()
        # The last movie id in the sequence is the target movie.
        features["target_movie_id"] = sequence_movie_ids[:, -1]
        features["sequence_movie_ids"] = sequence_movie_ids[:, :-1]
        ratings_string = features["sequence_ratings"]
        sequence_ratings = tf.strings.to_number(
            tf.strings.split(ratings_string, ","), tf.dtypes.float32
        ).to_tensor()
        # The last rating in the sequence is the target for the model to predict.
        target = sequence_ratings[:, -1]
        features["sequence_ratings"] = sequence_ratings[:, :-1]
        return features, target

    dataset = tf.data.experimental.make_csv_dataset(
        csv_file_path,
        batch_size=batch_size,
        column_names=CSV_HEADER,
        num_epochs=1,
        header=False,
        field_delim="|",
        shuffle=shuffle,
    ).map(process)
    return dataset
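As a usage sketch, the datasets can then be built from the CSV files written earlier (the batch size below is illustrative, and shuffling the training data is an assumption):

train_dataset = get_dataset_from_csv("train_data.csv", shuffle=True, batch_size=128)
test_dataset = get_dataset_from_csv("test_data.csv", batch_size=128)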
Create model inputs
def create_model_inputs():