# MovieLens recommendation example (notebook export)
## Prepare the data

### Download and process data
# Download the MovieLens 1M dataset and load the ratings table into a DataFrame.
urlretrieve("http://files.grouplens.org/datasets/movielens/ml-1m.zip", "movielens.zip")
# Use a context manager so the zip file handle is closed deterministically.
with ZipFile("movielens.zip", "r") as archive:
    archive.extractall()

ratings_data = pd.read_csv(
    "ml-1m/ratings.dat",
    sep="::",
    names=["user_id", "movie_id", "rating", "unix_timestamp"],
    # Multi-character separators are only supported by the python engine;
    # passing it explicitly avoids the ParserWarning + silent fallback.
    engine="python",
)

# Prefix the ids so the user and movie vocabularies cannot collide.
ratings_data["movie_id"] = ratings_data["movie_id"].apply(lambda x: f"movie_{x}")
ratings_data["user_id"] = ratings_data["user_id"].apply(lambda x: f"user_{x}")
# Ratings are used as a float regression target.
ratings_data["rating"] = ratings_data["rating"].astype(float)
# The timestamp is not used by the model.
del ratings_data["unix_timestamp"]

print(f"Number of users: {len(ratings_data.user_id.unique())}")
print(f"Number of movies: {len(ratings_data.movie_id.unique())}")
print(f"Number of ratings: {len(ratings_data.index)}")
Number of users: 6040
Number of movies: 3706
Number of ratings: 1000209
### Create train and eval data splits
# Route roughly 85% of the ratings to training; the remainder is held out
# for evaluation. (Unseeded, matching the original example.)
random_selection = np.random.rand(len(ratings_data.index)) <= 0.85
train_data = ratings_data[random_selection]
eval_data = ratings_data[~random_selection]

# Persist both splits as "|"-delimited files with no header row, as expected
# by the tf.data pipeline that reads them back.
train_data.to_csv("train_data.csv", index=False, sep="|", header=False)
eval_data.to_csv("eval_data.csv", index=False, sep="|", header=False)

print(f"Train data split: {len(train_data.index)}")
print(f"Eval data split: {len(eval_data.index)}")
print("Train and eval data files are saved.")
Train data split: 850361
Eval data split: 149848
Train and eval data files are saved.
### Define dataset metadata and hyperparameters
# Dataset metadata derived from the loaded ratings table.
csv_header = list(ratings_data.columns)
user_vocabulary = list(ratings_data.user_id.unique())
movie_vocabulary = list(ratings_data.movie_id.unique())
target_feature_name = "rating"

# Training hyperparameters.
learning_rate = 0.001
batch_size = 128
num_epochs = 3
base_embedding_dim = 64
## Train and evaluate the model
def get_dataset_from_csv(csv_file_path, batch_size=128, shuffle=True):
    """Build a batched tf.data pipeline over a "|"-delimited ratings file.

    The file is expected to carry no header row; column names come from the
    module-level `csv_header`, and the `target_feature_name` column is split
    out as the label of each batch.
    """
    dataset = tf.data.experimental.make_csv_dataset(
        csv_file_path,
        batch_size=batch_size,
        column_names=csv_header,
        label_name=target_feature_name,
        num_epochs=1,  # one pass per call; Keras drives the epoch loop
        header=False,
        field_delim="|",
        shuffle=shuffle,
    )
    return dataset
def run_experiment(model):
    """Compile `model`, fit it on the saved CSV splits, and return the History.

    Uses the module-level `learning_rate`, `batch_size`, and `num_epochs`
    hyperparameters; MSE loss with MAE reported as a metric.
    """
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate),
        loss=tf.keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanAbsoluteError(name="mae")],
    )

    # Training pipeline is shuffled; the validation pipeline is not.
    train_dataset = get_dataset_from_csv("train_data.csv", batch_size)
    eval_dataset = get_dataset_from_csv("eval_data.csv", batch_size, shuffle=False)

    return model.fit(
        train_dataset,
        epochs=num_epochs,
        validation_data=eval_dataset,
    )
### Experiment 1: baseline collaborative filtering model

#### Implement embedding encoder
def embedding_encoder(vocabulary, embedding_dim, num_oov_indices=0, name=None):
    """Return a Sequential model mapping string ids to trainable embeddings.

    A StringLookup first converts each id to an integer index over
    `vocabulary` (with `num_oov_indices` extra out-of-vocabulary slots);
    an Embedding layer then maps that index to a dense vector of size
    `embedding_dim`.
    """
    lookup = StringLookup(
        vocabulary=vocabulary,
        mask_token=None,
        num_oov_indices=num_oov_indices,
    )
    embedding = layers.Embedding(
        input_dim=len(vocabulary) + num_oov_indices,
        output_dim=embedding_dim,
    )
    return keras.Sequential(
        [lookup, embedding],
        name=f"{name}_embedding" if name else None,
    )
#### Implement the baseline model
def create_baseline_model(): |
# Receive the user as an input. |
user_input = layers.Input(name=\"user_id\", shape=(), dtype=tf.string) |
# Get user embedding. |
user_embedding = embedding_encoder( |
vocabulary=user_vocabulary, embedding_dim=base_embedding_dim, name=\"user\" |
)(user_input) |
# Receive the movie as an input. |
movie_input = layers.Input(name=\"movie_id\", shape=(), dtype=tf.string) |
# Get embedding. |
movie_embedding = embedding_encoder( |
vocabulary=movie_vocabulary, embedding_dim=base_embedding_dim, name=\"movie\" |
(The remainder of `create_baseline_model` and the rest of the example are truncated in this export; the trailing dataset-viewer boilerplate has been removed.)