class RecommenderNet(keras.Model):
    def __init__(self, num_users, num_movies, embedding_size, **kwargs):
        super(RecommenderNet, self).__init__(**kwargs)
        self.num_users = num_users
        self.num_movies = num_movies
        self.embedding_size = embedding_size
        self.user_embedding = layers.Embedding(
            num_users,
            embedding_size,
            embeddings_initializer="he_normal",
            embeddings_regularizer=keras.regularizers.l2(1e-6),
        )
        self.user_bias = layers.Embedding(num_users, 1)
        self.movie_embedding = layers.Embedding(
            num_movies,
            embedding_size,
            embeddings_initializer="he_normal",
            embeddings_regularizer=keras.regularizers.l2(1e-6),
        )
        self.movie_bias = layers.Embedding(num_movies, 1)

    def call(self, inputs):
        user_vector = self.user_embedding(inputs[:, 0])
        user_bias = self.user_bias(inputs[:, 0])
        movie_vector = self.movie_embedding(inputs[:, 1])
        movie_bias = self.movie_bias(inputs[:, 1])
        dot_user_movie = tf.tensordot(user_vector, movie_vector, 2)
        # Add all the components (including bias)
        x = dot_user_movie + user_bias + movie_bias
        # The sigmoid activation forces the rating to between 0 and 1
        return tf.nn.sigmoid(x)
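Conceptually, the model follows the standard matrix-factorization recipe: with U[u] and M[m] denoting the user and movie embedding vectors and b_u, b_m their learned bias terms, the predicted (scaled) rating is

    rating(u, m) = sigmoid(U[u] · M[m] + b_u + b_m)

so the output always lands in the (0, 1) range of the normalized ratings.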
model = RecommenderNet(num_users, num_movies, EMBEDDING_SIZE)
model.compile(
    loss=tf.keras.losses.BinaryCrossentropy(),
    optimizer=keras.optimizers.Adam(learning_rate=0.001),
)
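The sigmoid output and the binary cross-entropy loss both assume that the target ratings were min-max scaled into [0, 1] during data preparation. As a hedged reminder of that assumed earlier step (min_rating, max_rating, and y are not defined in this excerpt), the scaling would look roughly like:

min_rating = df["rating"].min()
max_rating = df["rating"].max()
# Scale ratings into [0, 1] so they are directly comparable with the sigmoid output.
y = df["rating"].apply(lambda r: (r - min_rating) / (max_rating - min_rating)).values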
Train the model based on the data split
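For context, each row of x_train and x_val is assumed to pair an encoded user id with an encoded movie id, with the scaled ratings as targets. A minimal sketch of that split, assuming 'user' and 'movie' columns produced by the id-encoding step and a 90/10 train/validation ratio:

x = df[["user", "movie"]].values  # assumed columns holding the encoded ids
train_indices = int(0.9 * df.shape[0])
x_train, x_val = x[:train_indices], x[train_indices:]
y_train, y_val = y[:train_indices], y[train_indices:]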
history = model.fit(
    x=x_train,
    y=y_train,
    batch_size=64,
    epochs=5,
    verbose=1,
    validation_data=(x_val, y_val),
)
Epoch 1/5
1418/1418 [==============================] - 6s 4ms/step - loss: 0.6368 - val_loss: 0.6206
Epoch 2/5
1418/1418 [==============================] - 7s 5ms/step - loss: 0.6131 - val_loss: 0.6176
Epoch 3/5
1418/1418 [==============================] - 6s 4ms/step - loss: 0.6083 - val_loss: 0.6146
Epoch 4/5
1418/1418 [==============================] - 6s 4ms/step - loss: 0.6072 - val_loss: 0.6131
Epoch 5/5
1418/1418 [==============================] - 6s 4ms/step - loss: 0.6075 - val_loss: 0.6150
Plot training and validation loss

plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
[Figure: training loss and validation loss plotted against epoch.]
Show top 10 movie recommendations to a user
movie_df = pd.read_csv(movielens_dir / "movies.csv")

# Let us get a user and see the top recommendations.
user_id = df.userId.sample(1).iloc[0]
movies_watched_by_user = df[df.userId == user_id]
# Candidate movies: everything the user has not rated yet and that has an encoding.
movies_not_watched = movie_df[
    ~movie_df["movieId"].isin(movies_watched_by_user.movieId.values)
]["movieId"]
movies_not_watched = list(
    set(movies_not_watched).intersection(set(movie2movie_encoded.keys()))
)
movies_not_watched = [[movie2movie_encoded.get(x)] for x in movies_not_watched]
user_encoder = user2user_encoded.get(user_id)
# Pair the encoded user id with every candidate movie id.
user_movie_array = np.hstack(
    ([[user_encoder]] * len(movies_not_watched), movies_not_watched)
)
ratings = model.predict(user_movie_array).flatten()
# Indices of the 10 highest predicted ratings, in descending order.
top_ratings_indices = ratings.argsort()[-10:][::-1]
recommended_movie_ids = [
    movie_encoded2movie.get(movies_not_watched[x][0]) for x in top_ratings_indices
]

print("Showing recommendations for user: {}".format(user_id))
print("====" * 9)
print("Movies with high ratings from user")
print("----" * 8)
top_movies_user = (
    movies_watched_by_user.sort_values(by="rating", ascending=False)
    .head(5)
    .movieId.values
)
movie_df_rows = movie_df[movie_df["movieId"].isin(top_movies_user)]
for row in movie_df_rows.itertuples():
    print(row.title, ":", row.genres)
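The excerpt cuts off before the recommended titles held in recommended_movie_ids are printed. A minimal, assumed continuation that lists the top-10 titles from movie_df could look like:

print("----" * 8)
print("Top 10 movie recommendations")
print("----" * 8)
recommended_movies = movie_df[movie_df["movieId"].isin(recommended_movie_ids)]
for row in recommended_movies.itertuples():
    print(row.title, ":", row.genres)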