brendenc committed
Commit a95a987 · 1 Parent(s): 7c0eb70

Upload app.py

Files changed (1)
  1. app.py +183 -0
app.py ADDED
@@ -0,0 +1,183 @@
+ import numpy as np
+ import gradio as gr
+ import pandas as pd
+ from sklearn.metrics.pairwise import cosine_similarity
+ import plotly.express as px
+ import plotly.graph_objects as go
+ import umap
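+
+ # Precomputed inputs: document-embedding vectors, engineered lyric features, and song/artist metadata.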
+ embedding_df = pd.read_csv('all-MiniLM-L12-v2_embeddings.csv')
+ embeddings = np.array(embedding_df.drop('id', axis=1))
+
+ feature_df = pd.read_csv('feature_df.csv', index_col=0)
+ feature_df = (feature_df - feature_df.mean()) / feature_df.std()  # standardize each feature
+
+ info_df = pd.read_csv('song_info_df.csv')
+ info_df.sort_values(['artist_name', 'song_title'], inplace=True)
+
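+ # Similarity from engineered lyric features: Euclidean distance in standardized feature space,
+ # mapped to [0, 1] so that larger values mean more similar.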
+ def feature_similarity(song_id):
+
+     std_drop = 4  # drop songs with any standardized feature beyond +/- 4 std (outliers)
+
+     song_vec = feature_df[feature_df.index.isin([song_id])].to_numpy()
+     songs_matrix = feature_df[~feature_df.index.isin([song_id])].copy()
+     songs_matrix = songs_matrix[(songs_matrix.abs() < std_drop).all(axis=1)]
+     song_ids = list(songs_matrix.index)
+     songs_matrix = songs_matrix.to_numpy()
+
+     num_dims = songs_matrix.shape[1]
+     distances = np.sqrt(np.square(songs_matrix - song_vec) @ np.ones(num_dims))  # Euclidean distance
+     max_distance = np.nanmax(distances)
+     similarities = (max_distance - distances) / max_distance  # low distance -> high similarity
+
+     return pd.DataFrame({'song_id': song_ids, 'feature_similarity': similarities})
+
+
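+ # Similarity from lyric embeddings: cosine similarity between the selected song and every other song.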
+ def embedding_similarity(song_id):
+
+     song_index = embedding_df[embedding_df.id == song_id].index.values[0]
+     song_ids = embedding_df[embedding_df.id != song_id].id.to_list()
+     emb_matrix = np.delete(np.copy(embeddings), song_index, axis=0)
+     similarities = cosine_similarity(emb_matrix, np.expand_dims(np.copy(embeddings[song_index, :]), axis=0))
+
+     return pd.DataFrame({'song_id': song_ids, 'cosine_similarity': similarities[:, 0]})
+
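+ # Turn a song_id into an HTML link that searches YouTube for "<artist> <song>".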
+ def decode(song_id):
+     temp_df = info_df[info_df.song_id == song_id]
+     artist = temp_df.artist_name.values[0]
+     song = temp_df.song_title.values[0]
+     query = f"{artist.strip().replace(' ', '+')}+{song.strip().replace(' ', '+')}"
+     url = (f'<a href="https://www.youtube.com/results?search_query={query}" '
+            f'target="_blank" style="color:blue; text-decoration: underline">{song}</a> by {artist}')
+     return url
+
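+ # Re-draw the UMAP scatter with the selected song highlighted as a red marker.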
+ def plot(artist, song):
+     selected = (plot_df.artist_name == artist) & (plot_df.song_title == song)
+
+     fig2 = px.scatter(plot_df[~selected],
+                       'x',
+                       'y',
+                       template='simple_white',
+                       hover_data=['artist_name', 'song_title']).update_traces(marker_size=1.5, marker_opacity=0.7)
+     fig2.add_trace(go.Scatter(x=[plot_df.loc[selected, 'x'].values[0]],
+                               y=[plot_df.loc[selected, 'y'].values[0]],
+                               mode='markers',
+                               marker_color='red',
+                               hovertemplate="Your selected song<extra></extra>",
+                               marker_size=4))
+     fig2.update_xaxes(visible=False)
+     fig2.update_yaxes(visible=False)
+     fig2.update_layout(showlegend=False, height=800, width=1600)
+     fig2.data = [fig2.data[1], fig2.data[0]]  # reorder traces so the highlighted point is drawn first
+     return fig2
+
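+ # Blend the two similarity scores with the user-selected weight and return the top-k recommendations plus the plot.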
+ def recommend(artist, song_title, embedding_importance, topk=5):
+
+     feature_importance = 1 - embedding_importance
+     song_id = info_df[(info_df.artist_name == artist) & (info_df.song_title == song_title)]['song_id'].values[0]
+
+     feature_sim = feature_similarity(song_id)
+     embedding_sim = embedding_similarity(song_id)
+     result = embedding_sim.merge(feature_sim, how='left', on='song_id').dropna()
+
+     # min-max scale both similarity columns so they are comparable before weighting
+     result['cosine_similarity'] = (result['cosine_similarity'] - result['cosine_similarity'].min()) / \
+                                   (result['cosine_similarity'].max() - result['cosine_similarity'].min())
+     result['feature_similarity'] = (result['feature_similarity'] - result['feature_similarity'].min()) / \
+                                    (result['feature_similarity'].max() - result['feature_similarity'].min())
+
+     result['score'] = embedding_importance*result.cosine_similarity + feature_importance*result.feature_similarity
+
+     # drop alternate cuts of the same song (clean edits, interludes, remixes, etc.)
+     exclude_phrases = ['clean', 'interlude', 'acoustic', 'mix', 'intro', 'original', 'version',
+                        'edited', 'extended']
+     result = result[~result.song_id.isin(info_df[info_df.song_title.str.lower().str.contains('|'.join(exclude_phrases))].song_id)]
+
+     body = '<br>'.join([decode(x) for x in result.sort_values('score', ascending=False).head(topk).song_id.to_list()])
+     fig = plot(artist, song_title)
+     return f'<h3 style="text-align: center;">Recommendations</h3><p style="text-align: center;"><br>{body}</p>', fig
+
+
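+ # Project the 384-dimensional embeddings down to 2-D with UMAP for the background scatter plot.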
+ out = umap.UMAP(n_neighbors=30, min_dist=0.2).fit_transform(embedding_df.drop('id', axis=1))
+ plot_df = pd.DataFrame({'x': out[:, 0], 'y': out[:, 1], 'id': embedding_df.id, 'size': 0.1})
+ plot_df['x'] = (plot_df['x'] - plot_df['x'].mean()) / plot_df['x'].std()
+ plot_df['y'] = (plot_df['y'] - plot_df['y'].mean()) / plot_df['y'].std()
+ plot_df = plot_df.merge(info_df, left_on='id', right_on='song_id')
+ plot_df = plot_df[(plot_df.x.abs() < 4) & (plot_df.y.abs() < 4)]  # trim extreme outliers from the projection
+
+ fig = px.scatter(plot_df,
+                  'x',
+                  'y',
+                  template='simple_white',
+                  hover_data=['artist_name', 'song_title']
+                  ).update_traces(marker_size=1.5,
+                                  opacity=0.7)
+ fig.update_xaxes(visible=False)
+ fig.update_yaxes(visible=False).update_layout(height=800,
+                                               width=1600,
+                                               title={
+                                                   'text': "UMAP Projection of Lyric Embeddings",
+                                                   'y': 0.9,
+                                                   'x': 0.5,
+                                                   'xanchor': 'center',
+                                                   'yanchor': 'top'
+                                               })
+
+ app = gr.Blocks()
+
+
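+ # Lay out the Gradio UI: artist/song dropdowns, an embedding-importance slider, the recommendation list, and the UMAP plot.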
+ with app:
+     gr.Markdown("# Hip Hop gRadio - A Lyric Based Recommender")
+
+     gr.Markdown("""### About this space
+     The goal of this space is to provide recommendations for hip-hop/rap songs based strictly on their lyrics. The recommendations
+     combine two ranked similarity scores: Euclidean distances between our engineered feature vectors for each song, and cosine
+     similarities between document embeddings of the lyrics themselves. A weighted average of the two gives the final similarity
+     score used for recommendation (feature importance = 1 - embedding importance).
+
+     Additionally, we provide a 2-D projection of all document embeddings below. After entering a song of your choice, you will see it as
+     a red dot, allowing you to explore both near and far. The projection reduces the 384-dimensional embeddings down to 2-D for
+     visualization using Uniform Manifold Approximation and Projection [(UMAP)](https://umap-learn.readthedocs.io/en/latest/), a very
+     interesting approach to dimensionality reduction; I encourage you to look into it if you are interested! ([paper](https://arxiv.org/abs/1802.03426))
+
+     The engineered features are: song duration, number of lines, syllables per line, variance in syllables per line,
+     total unique tokens, lexical diversity (a measure of repetition), sentiment (using nltk VADER), tokens per second, and syllables per second.
+
+     **Model used for embedding**: [all-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2)<br/>
+     **Lyrics**: from [genius](https://genius.com/)
+     """)
+     with gr.Row():
+         with gr.Column():
+             artist = gr.Dropdown(choices=list(info_df.artist_name.unique()),
+                                  value='Kanye West',
+                                  label='Artist')
+             song = gr.Dropdown(choices=list(info_df.loc[info_df.artist_name == 'Kanye West', 'song_title']),
+                                label='Song Title')
+             slider = gr.Slider(0, 1, value=0.5, label='Embedding Importance')
+             but = gr.Button()
+         with gr.Column():
+             t = gr.Markdown()
+
+     with gr.Row():
+         p = gr.Plot(fig)
+
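+     # When the artist selection changes, repopulate the song dropdown with that artist's songs.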
+     def artist_songs(artist):
+         return gr.components.Dropdown.update(choices=info_df[info_df.artist_name == artist]['song_title'].to_list())
+
+     artist.change(artist_songs, artist, outputs=song)
+     but.click(recommend, inputs=[artist, song, slider], outputs=[t, p])
+
+ app.launch()