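"""Streamlit book recommendation app.

Loads the books/ratings datasets, two pickled TF-IDF vectorizers (authors and
titles), and a Keras collaborative-filtering model, then serves content-based,
collaborative, and hybrid recommendations.
"""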
import pickle
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from tensorflow.keras.models import load_model
import streamlit as st
# Load datasets
books = pd.read_csv("./dataset/books.csv")
ratings = pd.read_csv("./dataset/ratings.csv")
# Preprocess data
user_encoder = LabelEncoder()
book_encoder = LabelEncoder()
ratings["user_id"] = ratings["user_id"].astype(str)
ratings["user_id"] = user_encoder.fit_transform(ratings["user_id"])
ratings["book_id"] = book_encoder.fit_transform(ratings["book_id"])
# Load TF-IDF models
with open("tfidf_model_authors.pkl", "rb") as f:
tfidf_model_authors = pickle.load(f)
with open("tfidf_model_titles.pkl", "rb") as f:
tfidf_model_titles = pickle.load(f)
# Load collaborative filtering model
model_cf = load_model("recommendation_model.keras")
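# The loaded model is assumed to be a two-input Keras network that takes
# (encoded user id, encoded book id) pairs and returns a predicted rating,
# matching how model_cf.predict() is called below.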
# Content-Based Recommendation
def content_based_recommendation(
    query, books, tfidf_model_authors, tfidf_model_titles, num_recommendations=10
):
    # Transform the query with the author and title TF-IDF vectorizers
    query_author_tfidf = tfidf_model_authors.transform([query])
    query_title_tfidf = tfidf_model_titles.transform([query])
    # Compute cosine similarity against authors and titles separately
    # (fillna guards against missing values in the catalogue)
    similarity_scores_authors = cosine_similarity(
        query_author_tfidf, tfidf_model_authors.transform(books["authors"].fillna(""))
    )
    similarity_scores_titles = cosine_similarity(
        query_title_tfidf, tfidf_model_titles.transform(books["original_title"].fillna(""))
    )
    # Average the author and title similarity scores
    similarity_scores_combined = (
        similarity_scores_authors + similarity_scores_titles
    ) / 2
    # Indices of the highest-scoring books, best first
    recommended_indices = np.argsort(similarity_scores_combined.flatten())[
        -num_recommendations:
    ][::-1]
    # Look up the recommended books
    recommended_books = books.iloc[recommended_indices]
    return recommended_books
# Collaborative Recommendation
def collaborative_recommendation(user_id, model_cf, ratings, num_recommendations=10):
    # Encoded ids of books the user has not rated yet
    unrated_books = ratings[
        ~ratings["book_id"].isin(ratings[ratings["user_id"] == user_id]["book_id"])
    ]["book_id"].unique()
    # Predict a rating for every unrated book
    predictions = model_cf.predict(
        [np.full_like(unrated_books, user_id), unrated_books]
    ).flatten()
    # Take the encoded ids with the highest predicted ratings, best first
    top_encoded_ids = unrated_books[np.argsort(predictions)[-num_recommendations:][::-1]]
    # Map encoded ids back to original book ids and look them up in the catalogue
    top_book_ids = book_encoder.inverse_transform(top_encoded_ids)
    recommended_books = books[books["book_id"].isin(top_book_ids)][
        ["book_id", "original_title", "authors"]
    ]
    return recommended_books
# Hybrid Recommendation
def hybrid_recommendation(
    user_id,
    query,
    model_cf,
    books,
    ratings,
    tfidf_model_authors,
    tfidf_model_titles,
    num_recommendations=10,
):
    content_based_rec = content_based_recommendation(
        query,
        books,
        tfidf_model_authors,
        tfidf_model_titles,
        num_recommendations=num_recommendations,
    )
    collaborative_rec = collaborative_recommendation(
        user_id, model_cf, ratings, num_recommendations=num_recommendations
    )
    # Combine recommendations from the two approaches and drop duplicate books
    hybrid_rec = pd.concat([content_based_rec, collaborative_rec]).drop_duplicates(
        subset="book_id", keep="first"
    )
    return hybrid_rec
# Streamlit App
st.title("Book Recommendation System")
# Text input for a book title or author
user_input = st.text_input("Enter book name or author:", "")
# Get recommendations on button click
if st.button("Get Recommendations"):
st.write("Content-Based Recommendation:")
content_based_rec = content_based_recommendation(
user_input, books, tfidf_model_authors, tfidf_model_titles
)
st.write(content_based_rec)
# Example user ID for collaborative recommendation
USER_ID = 0
st.write("Collaborative Recommendation:")
collaborative_rec = collaborative_recommendation(USER_ID, model_cf, ratings)
st.write(collaborative_rec)
st.write("Hybrid Recommendation:")
hybrid_rec = hybrid_recommendation(
USER_ID,
user_input,
model_cf,
books,
ratings,
tfidf_model_authors,
tfidf_model_titles,
)
st.write(hybrid_rec)