Spaces:
Runtime error
Runtime error
Commit
•
d9ab710
1
Parent(s):
9264801
Added Files
Browse files- Movie_Recommender_System.ipynb +0 -0
- app.py +62 -0
- movie_dict.pkl +3 -0
- movie_recommender_system.py +195 -0
- movies.pkl +3 -0
- similarity.pkl +3 -0
Movie_Recommender_System.ipynb
ADDED
The diff for this file is too large to render.
See raw diff
|
|
app.py
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pickle
|
2 |
+
import streamlit as st
|
3 |
+
import requests
|
4 |
+
|
5 |
+
def fetch_poster(movie_id):
    """Fetch the TMDB poster image URL for a movie.

    Parameters
    ----------
    movie_id : int
        TMDB movie identifier (the ``movie_id`` column of the movies frame).

    Returns
    -------
    str
        Full URL of the w500 poster, or a placeholder image URL when TMDB
        has no poster for this movie.
    """
    url = "https://api.themoviedb.org/3/movie/{}?api_key=8265bd1679663a7ea12ac168da84d2e8&language=en-US".format(movie_id)
    # A timeout keeps the Streamlit app from hanging forever if TMDB is slow.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    data = response.json()
    # 'poster_path' can be null in the TMDB payload; the original code then
    # crashed on string concatenation. Fall back to a placeholder instead.
    poster_path = data.get('poster_path')
    if not poster_path:
        return "https://via.placeholder.com/500x750?text=No+Poster"
    return "https://image.tmdb.org/t/p/w500/" + poster_path
|
12 |
+
|
13 |
+
def recommend(movie):
    """Return the five titles most similar to *movie* plus their poster URLs.

    Looks *movie* up in the global ``movies`` frame, ranks every other movie
    by its precomputed ``similarity`` score, and resolves each winner's
    poster via :func:`fetch_poster`.
    """
    row = movies[movies['title'] == movie].index[0]
    ranked = sorted(enumerate(similarity[row]), key=lambda pair: pair[1], reverse=True)

    names, posters = [], []
    # ranked[0] is the queried movie itself (self-similarity), so skip it.
    for position, _score in ranked[1:6]:
        entry = movies.iloc[position]
        posters.append(fetch_poster(entry.movie_id))
        names.append(entry.title)

    return names, posters
|
25 |
+
|
26 |
+
# ---------------------------------------------------------------------------
# Page header
# ---------------------------------------------------------------------------
st.markdown("<h1 style='text-align: center; color: black;'>Movie Recommender System</h1>", unsafe_allow_html=True)
st.markdown("<h4 style='text-align: center; color: black;'>Find a similar movie from a dataset of 5,000 movies!</h4>", unsafe_allow_html=True)
st.markdown("<h4 style='text-align: center; color: black;'>Web App created by Sagar Bapodara</h4>", unsafe_allow_html=True)

# ---------------------------------------------------------------------------
# Model artefacts (pickled by movie_recommender_system.py)
# ---------------------------------------------------------------------------
# Context managers close the file handles deterministically; the original
# bare open() calls leaked them until garbage collection.
with open('movies.pkl', 'rb') as fh:
    movies = pickle.load(fh)
with open('similarity.pkl', 'rb') as fh:
    similarity = pickle.load(fh)

movie_list = movies['title'].values
selected_movie = st.selectbox(
    "Type or select a movie you like :",
    movie_list
)

if st.button('Show Recommendation'):
    st.write("Recommended Movies based on your interests are :")
    recommended_movie_names, recommended_movie_posters = recommend(selected_movie)
    # One column per recommendation: title on top, poster underneath.
    # Replaces five copy-pasted `with colN:` blocks with a single loop.
    for col, name, poster in zip(st.columns(5), recommended_movie_names, recommended_movie_posters):
        with col:
            st.text(name)
            st.image(poster)

st.title(" ")
# st.write("The code for this recommender system is available [here](https://share.streamlit.io/mesmith027/streamlit_webapps/main/MC_pi/streamlit_app.py)")
|
movie_dict.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b1535685f9d4110518d42756d790bd151969e3b5f9c289d78192ed67da672b85
|
3 |
+
size 2261950
|
movie_recommender_system.py
ADDED
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# -*- coding: utf-8 -*-
|
2 |
+
"""Movie_Recommender_System.ipynb
|
3 |
+
|
4 |
+
import os
|
5 |
+
import numpy as np
|
6 |
+
import pandas as pd
|
7 |
+
import matplotlib.pyplot as plt
|
8 |
+
import seaborn as sns
|
9 |
+
|
10 |
+
"""## Loading Data"""
|
11 |
+
|
12 |
+
movies = pd.read_csv('tmdb_5000_movies.csv')
credits = pd.read_csv('tmdb_5000_credits.csv')

# Notebook-style inspection expressions; they have no effect when this file
# runs as a plain script.
movies.head(5)
movies.info()
credits.info()
credits.head(5)

"""## Merging both dataframes : Movies & Credits"""

movies = movies.merge(credits,on='title')
movies.shape
movies.head(1)

"""## Data Pre-Processing"""

# Columns used by the recommendation system:
#   genres, id, keywords, title, overview, cast, crew
movies = movies[['movie_id','title','overview','genres','cast','keywords','crew']]
movies.head(5)

# Rows with missing values are dropped; duplicates are only counted here.
movies.isnull().sum()
movies.dropna(inplace=True)
movies.isnull().sum()
movies.duplicated().sum()

movies.iloc[0].genres
|
56 |
+
|
57 |
+
import ast
|
58 |
+
|
59 |
+
# extracting genres from raw data for the creation of tags
|
60 |
+
|
61 |
+
def convert(obj):
    """Parse a stringified list of dicts and collect every 'name' value.

    TMDB stores genres/keywords like "[{'id': 28, 'name': 'Action'}, ...]";
    this returns ['Action', ...].
    """
    return [entry['name'] for entry in ast.literal_eval(obj)]
|
66 |
+
|
67 |
+
# 'genres' and 'keywords' share the same raw JSON-list format, so the same
# parser applies to both columns.
for _col in ('genres', 'keywords'):
    movies[_col] = movies[_col].apply(convert)

movies.head(5)
|
72 |
+
|
73 |
+
#function for extracting top 3 actors from the movie
|
74 |
+
|
75 |
+
def convert3(obj):
    """Return the names of at most the first three cast members.

    Parameters
    ----------
    obj : str
        Stringified list of dicts, each with a 'name' key (TMDB cast column).

    Returns
    -------
    list[str]
        Up to three names, in their original order.
    """
    # ast.literal_eval yields a list, so slicing the first three entries
    # replaces the original manual counter/break loop.
    return [entry['name'] for entry in ast.literal_eval(obj)[:3]]
|
85 |
+
|
86 |
+
# Keep only the leading (top-billed) cast members for the tag soup.
movies['cast'] = movies['cast'].apply(convert3)

movies.head(5)
|
89 |
+
|
90 |
+
# Extract the director's name from the raw 'crew' column.
def fetch_director(obj):
    """Return a one-element list holding the movie's director.

    Only the first crew entry whose job is 'Director' is kept; an empty
    list is returned when no director is listed.
    """
    for member in ast.literal_eval(obj):
        if member['job'] == 'Director':
            return [member['name']]
    return []
|
98 |
+
|
99 |
+
movies['crew'] = movies['crew'].apply(fetch_director)
movies.head(5)

# Tokenise the plot summary so it can be concatenated with the other lists.
movies['overview'] = movies['overview'].apply(lambda x:x.split())
movies.head()

# Collapse multi-word entities ("Sam Worthington" -> "SamWorthington") so the
# vectorizer treats each person/genre/keyword as one token.
for _col in ('genres', 'keywords', 'cast', 'crew'):
    movies[_col] = movies[_col].apply(lambda entries: [e.replace(" ", "") for e in entries])
movies.head()

# One combined bag-of-words per movie.
movies['tags'] = movies['overview'] + movies['genres'] + movies['keywords'] + movies['cast'] + movies['crew']
movies.head()

new_df = movies[['movie_id','title','tags']]
new_df['tags'] = new_df['tags'].apply(lambda tokens: " ".join(tokens))
new_df.head()
new_df['tags'][0]

new_df['tags'] = new_df['tags'].apply(lambda text: text.lower())
new_df['tags'][0]

"""## Text Vectorization"""

from sklearn.feature_extraction.text import CountVectorizer

cv = CountVectorizer(max_features=5000,stop_words='english')
vectors = cv.fit_transform(new_df['tags']).toarray()

# Most frequent 5000 words
# cv.get_feature_names()

"""## Applying Stemming Process"""

import nltk  # for stemming process

from nltk.stem.porter import PorterStemmer
ps = PorterStemmer()
|
149 |
+
|
150 |
+
# Stemming normalises inflected forms ("loving"/"loved" -> "love") so they
# collapse into a single vectorizer token.
def stem(text):
    """Return *text* with every whitespace-separated token Porter-stemmed."""
    return " ".join(ps.stem(token) for token in text.split())
|
158 |
+
|
159 |
+
# Sanity check of the stemmer on one raw tag string.
stem('In the 22nd century, a paraplegic Marine is dispatched to the moon Pandora on a unique mission, but becomes torn between following orders and protecting an alien civilization. Action Adventure Fantasy ScienceFiction cultureclash future spacewar spacecolony society spacetravel futuristic romance space alien tribe alienplanet cgi marine soldier battle loveaffair antiwar powerrelations mindandsoul 3d SamWorthington ZoeSaldana SigourneyWeaver JamesCameron')

new_df['tags'] = new_df['tags'].apply(stem)

"""## Similarity Measures"""

# Cosine distance between the count vectors is used as the similarity measure.

from sklearn.metrics.pairwise import cosine_similarity

similarity = cosine_similarity(vectors)
|
170 |
+
|
171 |
+
"""## Making the recommendation function"""
|
172 |
+
|
173 |
+
def recommend(movie):
    """Print the titles of the five movies most similar to *movie*.

    *movie* must be a title present in ``new_df``; otherwise ``index[0]``
    raises IndexError.
    """
    movie_index = new_df[new_df['title'] == movie].index[0]
    ranked = sorted(enumerate(similarity[movie_index]), key=lambda pair: pair[1], reverse=True)

    # Skip position 0: it is the queried movie itself (similarity == 1).
    for position, _score in ranked[1:6]:
        print(new_df.iloc[position].title)
|
180 |
+
|
181 |
+
"""## Recommendation"""

# Works only for titles present in the dataset; unknown titles raise IndexError.
recommend('Batman Begins')

new_df.iloc[1216]

"""## Exporting the Model"""

import pickle

# Context managers flush and close the file handles deterministically; the
# original `pickle.dump(obj, open(path, 'wb'))` pattern leaked them.
with open('movies.pkl', 'wb') as fh:
    pickle.dump(new_df, fh)

with open('movie_dict.pkl', 'wb') as fh:
    pickle.dump(new_df.to_dict(), fh)

with open('similarity.pkl', 'wb') as fh:
    pickle.dump(similarity, fh)
|
movies.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:0a1399c7408299a483c9d831468ce07cf13408322d23eadc363e3d6d600da446
|
3 |
+
size 2281025
|
similarity.pkl
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:4053da393ab855feb54e69f352748b13a0ed34bcdece4512abd055c9fc1d4c52
|
3 |
+
size 184781248
|