seyia92coding committed on
Commit
524a705
1 Parent(s): 984b6e1

Upload app.py

Files changed (1)
  1. app.py +158 -0
app.py ADDED
@@ -0,0 +1,158 @@
+ # -*- coding: utf-8 -*-
+ """HS_Text-based_Recom_Metacritic.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+ https://colab.research.google.com/drive/1MmWRwRJT04GVAO2SKCpwSqQ2bWghVGtQ
+ """
+
+ import pandas as pd
+ import numpy as np
+ # fuzzywuzzy must already be installed (e.g. via requirements.txt);
+ # the notebook magic `!pip install fuzzywuzzy` does not run in a plain .py script.
+ from fuzzywuzzy import fuzz
+ from sklearn.feature_extraction.text import TfidfVectorizer
+ from sklearn.metrics.pairwise import cosine_similarity
+
+ # Load the Metacritic reviews; on_bad_lines='skip' replaces error_bad_lines=False, which was removed in pandas 2.0
+ df = pd.read_csv("/content/Metacritic_Reviews_Only.csv", on_bad_lines='skip', encoding='utf-8')
+
+ # Remove the game's own title from its review text
+ def remove_title(row):
+     game_title = row['Game Title']
+     body_text = row['Reviews']
+     new_doc = body_text.replace(game_title, "")
+     return new_doc
+
+ df['Reviews'] = df.apply(remove_title, axis=1)
+ # Drop the redundant index column left over from the CSV export
+ df = df.drop(['Unnamed: 0'], axis=1)
+
+ df.dropna(inplace=True)  # Drop null reviews
+
+ # Instantiate the TF-IDF vectorizer:
+ # a word must appear in at least 2 documents (min_df=2) and in no more than 70% of them (max_df=0.7)
+ vectorizer = TfidfVectorizer(min_df=2, max_df=0.7)
+
+ # Fit and transform the Reviews column
+ vectorized_data = vectorizer.fit_transform(df['Reviews'])
+
+ # Create a DataFrame from the TF-IDF array
+ # (get_feature_names_out replaces get_feature_names, which was removed in scikit-learn 1.2)
+ tfidf_df = pd.DataFrame(vectorized_data.toarray(), columns=vectorizer.get_feature_names_out())
+
+ # Assign the game titles to the index
+ tfidf_df.index = df['Game Title']
+
+ # Find the cosine similarity measures between all games and assign the results to cosine_similarity_array
+ cosine_similarity_array = cosine_similarity(tfidf_df)
+
+ # Create a DataFrame from cosine_similarity_array with tfidf_df.index as its rows and columns
+ cosine_similarity_df = pd.DataFrame(cosine_similarity_array, index=tfidf_df.index, columns=tfidf_df.index)
+
+ # Print the top 5 rows of the DataFrame
+ # cosine_similarity_df.head()
+
+ # # Find the values for the game Batman: Arkham City
+ # cosine_similarity_series = cosine_similarity_df.loc['Batman: Arkham City']
+
+ # # Sort these values highest to lowest
+ # ordered_similarities = cosine_similarity_series.sort_values(ascending=False)
+
+ # # Print the results
+ # print(ordered_similarities)
+
+ # Create a function to score how closely two titles match
+ def matching_score(a, b):
+     # fuzz.ratio(a, b) calculates a Levenshtein-distance-based score between a and b;
+     # if the strings are exactly the same, the score is 100
+     return fuzz.ratio(a, b)
+
+ # Convert a DataFrame index back into the game title
+ def get_title_from_index(index):
+     return df[df.index == index]['Game Title'].values[0]
+
+ # Return the title in the dataset most similar to the words a user types.
+ # Without this, the recommender only works when a user enters the exact title as it appears in the data.
+ def find_closest_title(title):
+     # matching_score(a, b) -> a is the current row's title, b is the title we're trying to match
+     leven_scores = list(enumerate(df['Game Title'].apply(matching_score, b=title)))  # [(0, 30), (1, 95), (2, 19), ...] a score per index
+     sorted_leven_scores = sorted(leven_scores, key=lambda x: x[1], reverse=True)  # sort the tuples by score, e.g. [(1, 95), (3, 49), (0, 30), ...]
+     closest_title = get_title_from_index(sorted_leven_scores[0][0])
+     distance_score = sorted_leven_scores[0][1]
+     return closest_title, distance_score  # e.g. ('Bejeweled Twist', 100)
+
+ find_closest_title('Batman Arkham Knight')
+
+ """# Build Recommender Function
+
+ Our recommender function takes two kinds of input: the games a user has enjoyed and keywords to exclude. The keyword exclusion was added after I realised the recommendations were returning a lot of DLCs and sequels, which doesn't make for a very useful recommender.
+
+ By combining everything we've done, from building the user profile onwards, we pull out the top games to recommend:
+
+ 1. Text-match the closest title in the dataset
+ 2. Assign a number for the final ranking
+ 3. Create the user profile based on the chosen games
+ 4. Create a TF-IDF subset without the previously mentioned titles
+ 5. Calculate cosine similarity against the selected titles and convert back into a DataFrame
+ 6. Sort the DataFrame by similarity
+ 7. Return the most similar game titles that don't contain the excluded keywords
+ """
+
+ def recommend_games(game1, game2, game3, keyword1, keyword2, keyword3, max_results):
+     # Match each input to the closest title in the dataset
+     title1, distance_score1 = find_closest_title(game1)
+     title2, distance_score2 = find_closest_title(game2)
+     title3, distance_score3 = find_closest_title(game3)
+     print('Recommended because you played {}, {} and {}:\n'.format(title1, title2, title3))
+
+     # Build the user profile as the mean TF-IDF vector of the games enjoyed
+     list_of_games_enjoyed = [title1, title2, title3]
+     games_enjoyed_df = tfidf_df.reindex(list_of_games_enjoyed)
+     user_prof = games_enjoyed_df.mean()
+
+     # Compare the user profile against every other game in the dataset
+     tfidf_subset_df = tfidf_df.drop([title1, title2, title3], axis=0)
+     similarity_array = cosine_similarity(user_prof.values.reshape(1, -1), tfidf_subset_df)
+     similarity_df = pd.DataFrame(similarity_array.T, index=tfidf_subset_df.index, columns=["similarity_score"])
+
+     # Sort the values from high to low by similarity_score
+     sorted_similarity_df = similarity_df.sort_values(by="similarity_score", ascending=False)
+
+     # Inspect the titles most similar to the user preferences
+     print("Without Keyword Exclusions:")
+     print(sorted_similarity_df.head())
+     print("\n")
+     print("With Keyword Exclusions:\n")
+
+     # Collect the top matches that don't contain any of the excluded keywords.
+     # Looking the score up by label (.loc[n]) avoids the off-by-one error that a
+     # positional counter introduces whenever a title is skipped by the filter.
+     rank = 1
+     results = []
+     for n in sorted_similarity_df.index:
+         if rank > max_results:
+             break
+         if keyword1.lower() not in n.lower() and keyword2.lower() not in n.lower() and keyword3.lower() not in n.lower():
+             score = round(sorted_similarity_df.loc[n, 'similarity_score'] * 100, 2)
+             print("#" + str(rank) + ": " + n + ", " + str(score) + "% match")
+             results.append([rank, n, score])
+             rank += 1
+
+     # Return a DataFrame so the Gradio "dataframe" output below has something to display
+     return pd.DataFrame(results, columns=["Rank", "Game Title", "Match %"])
+
+
+ recommend_games('Mortal Kombat', 'Street Fighter', 'Overwatch', 'Kombat', 'Fighter', 'Overwatch', 5)
+
+ # gradio must already be installed (e.g. via requirements.txt);
+ # the notebook magic `!pip install gradio` does not run in a plain .py script.
+ import gradio as gr
+
+ recommender_interface = gr.Interface(fn=recommend_games,
+                                      inputs=["text", "text", "text", "text", "text", "text", gr.Slider(1, 20, step=1)],
+                                      title="Text-based Recommendation Engine for Video Games",
+                                      description="""This is a recommendation engine based on the review texts of Metacritic critics for games released between 2011 and 2019.
+                                      Enter 3 games you've enjoyed playing, followed by 3 keywords from those game titles, so that the engine can avoid recommending the same games back to you.""",
+                                      examples=[['Mortal Kombat', 'Street Fighter', 'Overwatch', 'Kombat', 'Fighter', 'Overwatch', 5],
+                                                ["Batman Arkham Knight", "Dying Light", "Left 4 Dead", "Batman", "Dying", "Left", 10],
+                                                ["Mario Kart", "Zelda", "Final Fantasy", "Mario", "Zelda", "Final", 7]],
+                                      outputs=["dataframe"])
+
+ recommender_interface.launch(debug=True)
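
A note on dependencies: because the `!pip install` notebook magics cannot run inside a plain app.py, a Hugging Face Space would normally declare the packages this script imports in a requirements.txt next to it. A minimal sketch, assuming only the imports used above (unpinned package names; the actual file in the repo, if any, may differ):

  pandas
  numpy
  scikit-learn
  fuzzywuzzy
  gradio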