brendenc commited on
Commit
b5feb1d
·
1 Parent(s): 1833452

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +191 -0
app.py ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import gradio as gr
3
+ import pandas as pd
4
+ from sklearn.metrics.pairwise import cosine_similarity
5
+ import plotly.express as px
6
+ import plotly.graph_objects as go
7
+ import umap
8
+
9
# Pre-computed sentence-embedding vectors for each song's lyrics:
# one row per song, an 'id' column plus the embedding dimensions.
embedding_df = pd.read_csv('all-MiniLM-L12-v2_embeddings.csv')
embeddings = np.array(embedding_df.drop('id', axis=1))

# Engineered lyric features, indexed by song id; z-scored so that
# euclidean distances weight every feature equally.
feature_df = pd.read_csv('feature_df.csv', index_col=0)
feature_df= (feature_df - feature_df.mean() ) / feature_df.std() #standardize

# Song metadata (artist_name, song_title, song_id), sorted so the
# UI dropdowns list artists and titles alphabetically.
info_df = pd.read_csv('song_info_df.csv')
info_df.sort_values(['artist_name','song_title'], inplace=True)
17
+
18
def feature_similarity(song_id):
    """Score every other song's similarity to `song_id` in engineered-feature space.

    Distances are euclidean over the standardized module-level `feature_df`,
    then rescaled so the closest song gets the highest score (max 1.0, the
    farthest song 0.0).

    Parameters
    ----------
    song_id : hashable
        Index label of the query song in `feature_df`.

    Returns
    -------
    pandas.DataFrame
        Columns 'song_id' and 'feature_similarity'.
    """
    std_drop = 4  # drop songs with strange values (any |z-score| beyond this)

    song_vec = feature_df[feature_df.index.isin([song_id])].to_numpy()
    songs_matrix = feature_df[~feature_df.index.isin([song_id])].copy()
    # BUG FIX: the original filter `(songs_matrix < std_drop).any(axis=1)` kept
    # any row with at least one feature below 4 (i.e. nearly every row) and
    # never excluded large-negative outliers. Keep only songs whose every
    # standardized feature lies strictly within +/- std_drop.
    songs_matrix = songs_matrix[(songs_matrix.abs() < std_drop).all(axis=1)]
    song_ids = list(songs_matrix.index)
    songs_matrix = songs_matrix.to_numpy()

    # Euclidean distance from every remaining song to the query song.
    distances = np.linalg.norm(songs_matrix - song_vec, axis=1)
    max_distance = np.nanmax(distances)
    similarities = (max_distance - distances) / max_distance  # low distance -> high similarity

    return pd.DataFrame({'song_id': song_ids, 'feature_similarity': similarities})
35
+
36
+
37
def embedding_similarity(song_id):
    """Cosine similarity between `song_id`'s lyric embedding and every other song.

    Looks the query song up in the module-level `embedding_df` / `embeddings`
    pair and returns a DataFrame with columns 'song_id' and 'cosine_similarity'.
    """
    # Positional row of the query song inside the embeddings matrix.
    query_row = embedding_df[embedding_df.id == song_id].index.values[0]
    other_ids = embedding_df[embedding_df.id != song_id].id.to_list()

    # Every embedding except the query's, compared against the query vector.
    other_vecs = np.delete(np.copy(embeddings), query_row, axis=0)
    query_vec = np.copy(embeddings[query_row, :]).reshape(1, -1)
    sims = cosine_similarity(other_vecs, query_vec)

    return pd.DataFrame({'song_id': other_ids, 'cosine_similarity': sims[:, 0]})
45
+
46
def decode(song_id):
    """Render a song id as an HTML snippet linking to a YouTube search for it.

    Parameters
    ----------
    song_id : value matching `info_df.song_id`.

    Returns
    -------
    str
        '<a ...>{song}</a> by {artist}' where the anchor opens a YouTube
        search for "<artist>+<song>" in a new tab.
    """
    temp_df = info_df[info_df.song_id == song_id]
    artist = temp_df.artist_name.values[0]
    song = temp_df.song_title.values[0]
    # BUG FIX: the original also built an unused `youtube_url` variable whose
    # f-string was malformed (`.replace` outside the braces); removed as dead code.
    url = f'''<a href="https://www.youtube.com/results?search_query=
{artist.strip().replace(' ','+')}+{song.strip().replace(' ','+')}" target="_blank" style="color:blue; text-decoration: underline">
{song} </a> by {artist}'''
    return url
56
+
57
def plot(artist, song):
    """Scatter plot of the UMAP projection with the selected song highlighted.

    Draws every song in the module-level `plot_df` as small blue markers and
    the (artist, song) selection as a single larger red marker.

    Returns
    -------
    plotly.graph_objects.Figure
    """
    selected = (plot_df.artist_name == artist) & (plot_df.song_title == song)
    # BUG FIX: the original mutated the global `plot_df` with 'color'/'size'
    # columns that no figure ever read, and ran `fig2.data = []` inside a
    # try/except before `fig2` existed (always a NameError swallowed by a
    # bare except). Both removed as dead code.

    # Background: every song except the selected one.
    fig2 = px.scatter(plot_df[~selected],
                      'x',
                      'y',
                      template='simple_white',
                      hover_data=['artist_name', 'song_title']).update_traces(marker_size=1.5, marker_opacity=0.7)
    # Foreground: the selected song as one red marker with a custom hover label.
    fig2.add_trace(go.Scatter(x=[plot_df.loc[selected, 'x'].values[0]],
                              y=[plot_df.loc[selected, 'y'].values[0]],
                              mode='markers',
                              marker_color='red',
                              hovertemplate="Your selected song<extra></extra>",
                              marker_size=4))
    fig2.update_xaxes(visible=False)
    fig2.update_yaxes(visible=False).update_layout(height=800,
                                                   width=1500,
                                                   showlegend=False,
                                                   title={
                                                       'text': "UMAP Projection of Lyric Embeddings",
                                                       'y': 0.9,
                                                       'x': 0.5,
                                                       'xanchor': 'center',
                                                       'yanchor': 'top'
                                                   })
    # Swap trace order so the red marker renders on top of the background points.
    fig2.data = [fig2.data[1], fig2.data[0]]
    return fig2
91
+
92
def recommend(artist, song_title, embedding_importance, topk=5):
    """Return (HTML list of top-`topk` recommendations, highlight plot).

    Blends the embedding-based and feature-based similarity rankings with
    weights `embedding_importance` and `1 - embedding_importance`.
    """
    feature_importance = 1 - embedding_importance
    song_id = info_df[(info_df.artist_name == artist)
                      & (info_df.song_title == song_title)]['song_id'].values[0]

    # Join the two rankings on song_id; drop songs missing from either side.
    scores = embedding_similarity(song_id).merge(feature_similarity(song_id),
                                                 how='left', on='song_id').dropna()

    # Min-max scale each similarity column to [0, 1] before blending.
    for col in ('cosine_similarity', 'feature_similarity'):
        lo, hi = scores[col].min(), scores[col].max()
        scores[col] = (scores[col] - lo) / (hi - lo)

    scores['score'] = (embedding_importance * scores.cosine_similarity
                       + feature_importance * scores.feature_similarity)

    # Filter out alternate cuts of songs (clean edits, intros, extended mixes...).
    exclude_phrases = [r'clean', 'interlude', 'acoustic', r'mix', 'intro', r'original', 'version',\
                       'edited', 'extended']
    banned_ids = info_df[info_df.song_title.str.lower().str.contains('|'.join(exclude_phrases))].song_id
    scores = scores[~scores.song_id.isin(banned_ids)]

    top_ids = scores.sort_values('score', ascending=False).head(topk).song_id.to_list()
    body = '<br>'.join(decode(sid) for sid in top_ids)
    fig = plot(artist, song_title)
    return f'<h3 style="text-align: center;">Recommendations</h3><p style="text-align: center;"><br>{body}</p>', fig
116
+
117
+
118
# 2-D UMAP projection of the lyric embeddings for the background scatter plot.
# NOTE(review): iloc[:, :-1] assumes the 'id' column is the LAST column of
# embedding_df — elsewhere the code drops 'id' by name; confirm against the CSV.
out = umap.UMAP(n_neighbors=30, min_dist=0.2).fit_transform(embedding_df.iloc[:,:-1])
plot_df = pd.DataFrame({'x':out[:,0],'y':out[:,1],'id':embedding_df.id, 'size':0.1})
# Standardize coordinates, attach song metadata, and clip points beyond 4
# standard deviations so a few outliers don't dominate the plot extents.
plot_df['x'] = ((plot_df['x'] - plot_df['x'].mean())/plot_df['x'].std())
plot_df['y'] = ((plot_df['y'] - plot_df['y'].mean())/plot_df['y'].std())
plot_df = plot_df.merge(info_df, left_on='id', right_on='song_id')
plot_df = plot_df[(plot_df.x.abs()<4) & (plot_df.y.abs()<4)]

# Initial figure shown before the user picks a song (no highlight yet).
fig = px.scatter(plot_df,
                 'x',
                 'y',
                 template='simple_white',
                 hover_data=['artist_name', 'song_title']
                 ).update_traces(marker_size=1.5,
                                 opacity=0.7,

                                 )
fig.update_xaxes(visible=False)
fig.update_yaxes(visible=False).update_layout(height = 800,
                                              width =1500,
                                              title = {
                                                  'text': "UMAP Projection of Lyric Embeddings",
                                                  'y':0.9,
                                                  'x':0.5,
                                                  'xanchor': 'center',
                                                  'yanchor': 'top'
                                              })
144
+
145
# Build the Gradio UI: song pickers and weight slider on the left,
# recommendation list on the right, UMAP scatter plot underneath.
app = gr.Blocks()


with app:
    gr.Markdown("# Hip Hop gRadio - A Lyric Based Recommender")

    # Typo fixes in user-facing text: "dimensionalty" -> "dimensionality",
    # "repitition" -> "repetition".
    gr.Markdown("""### About this space
    The goal of this space is to provide recommendations for hip-hop/rap songs strictly by utilizing lyrics. The recommendations
    are a combination of ranked similarity scores. We calculate euclidean distances between our engineered feature vectors for each song,
    as well as a cosine distance between document embeddings of the lyrics themselves. A weighted average of these two results in our
    final similarity score that we use for recommendation. (feature importance = (1 - embedding importance))

    Additionally, we provide a 2-D projection of all document embeddings below. After entering a song of your choice, you will see it as
    a red dot, allowing you to explore both near and far. This projection reduces 384-dimensional embeddings down to 2-d, allowing visualization.
    This is done using Uniform Manifold Approximation and Projection [(UMAP)](https://umap-learn.readthedocs.io/en/latest/), a very interesting approach to dimensionality
    reduction, I encourage you to look into it if you are interested! ([paper](https://arxiv.org/abs/1802.03426))

    The engineered features used are the following: song duration, number of lines, syllables per line, variance in syllables per line,
    total unique tokens, lexical diversity (measure of repetition), sentiment (using nltk VADER), tokens per second, and syllables per second.

    **Model used for embedding**: [all-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2)<br/>
    **Lyrics**: from [genius](https://genius.com/)
    """)
    with gr.Row():
        with gr.Column():
            artist = gr.Dropdown(choices = list(info_df.artist_name.unique()),
                                 value = 'Kanye West',
                                 label='Artist')
            song = gr.Dropdown(choices = list(info_df.loc[info_df.artist_name=='Kanye West','song_title']),
                               label = 'Song Title')
            slider = gr.Slider(0,1,value=0.5, label='Embedding Importance')
            but = gr.Button()
        with gr.Column():
            # Placeholder heading, replaced by recommend()'s HTML on click.
            # Typo fix: "Recomendations" -> "Recommendations".
            t = gr.Markdown('<h3 style="text-align: center;">Recommendations</h3>')

    with gr.Row():
        p = gr.Plot(fig)

    def artist_songs(artist):
        # Refresh the song dropdown whenever a new artist is selected.
        # NOTE(review): gr.components.Dropdown.update is the pre-4.x gradio
        # API — confirm the pinned gradio version before upgrading.
        return gr.components.Dropdown.update(choices=info_df[info_df.artist_name == artist]['song_title'].to_list())


    artist.change(artist_songs, artist, outputs=song)
    but.click(recommend, inputs=[artist, song,slider], outputs=[t, p])

app.launch()