helliun committed on
Commit
8c171ec
1 Parent(s): 0a9e596

Update app.py

Files changed (1)
  1. app.py +5 -67
app.py CHANGED
@@ -106,44 +106,9 @@ def get_songs_from_spotify(playlist_link, songs_df, previews_df):
     previews_df = pd.concat([previews_df,new_df])
     new_df = new_df[["elo_score", "artist", "song_title"]]
     songs_df = pd.concat([songs_df,new_df])
-    return songs_df, previews_df
-
-def get_songs_from_spotify(playlist_link, songs_df, previews_df):
-    if match := re.match(r"https://open.spotify.com/playlist/(.*)\?", playlist_link):
-        playlist_uri = match.groups()[0]
-    else:
-        raise ValueError("Expected format: https://open.spotify.com/playlist/...")
-
-    # Get playlist track information
-    tracks = sp.playlist_tracks(playlist_uri)["items"]
-
-    # For efficiency, collect new rows in a list before converting it to a DataFrame
-    new_rows = []
-    for track in tracks:
-        track_info = {
-            'artist': track['track']['artists'][0]['name'],
-            'song_title': track['track']['name'],
-            'preview_url': track['track']['preview_url'],
-            'elo_score': 1000
-        }
-        new_rows.append(track_info)
-
-    new_df = pd.DataFrame(new_rows)
-
-    # Merge original DataFrames to ensure we don't have duplicates across them
-    consolidated_df = pd.concat([songs_df[['artist', 'song_title']], previews_df[['artist', 'song_title']]]).drop_duplicates()
-
-    # Identify new, unique songs not in the original datasets
-    unique_songs = pd.merge(new_df, consolidated_df, on=['artist', 'song_title'], how='left', indicator=True).query('_merge=="left_only"').drop('_merge', axis=1)
-
-    # Update previews_df if there is a 'preview_url' column in the new unique songs DataFrame
-    if 'preview_url' in unique_songs:
-        previews_df = pd.concat([previews_df, unique_songs])
-
-    # Drop the 'preview_url' for songs_df update
-    unique_songs_simplified = unique_songs.drop(['preview_url'], axis=1, errors='ignore')
-    songs_df = pd.concat([songs_df, unique_songs_simplified])

+    songs_df = songs_df.drop_duplicates(subset=['artist', 'song_title'])
+    previews_df = previews_df.drop_duplicates(subset=['artist', 'song_title'])
     return songs_df, previews_df

 def update_scores(winner, loser, k_factor=100):
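The replacement concatenates first and deduplicates afterwards, relying on pandas' default keep="first" so that rows already in songs_df and previews_df (and their accumulated Elo scores) win over freshly imported duplicates. A minimal sketch on invented data (artists, titles, and scores are hypothetical):

```python
import pandas as pd

# Hypothetical data: "Karma Police" is already tracked with an adjusted Elo
# score, and a playlist import re-adds it at the baseline 1000.
songs_df = pd.DataFrame(
    {"elo_score": [1120], "artist": ["Radiohead"], "song_title": ["Karma Police"]}
)
new_df = pd.DataFrame(
    {
        "elo_score": [1000, 1000],
        "artist": ["Radiohead", "Björk"],
        "song_title": ["Karma Police", "Army of Me"],
    }
)

songs_df = pd.concat([songs_df, new_df])
# Default keep="first" retains the pre-existing row, so the 1120 score survives
# and only the genuinely new song is appended at 1000.
songs_df = songs_df.drop_duplicates(subset=["artist", "song_title"])
print(songs_df)  # two rows: Karma Police at 1120, Army of Me at 1000
```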
@@ -297,38 +262,11 @@ def import_csv(file, songs_df, previews_df):
     previews_df = pd.concat([previews_df,new_df])
     new_df = new_df[["elo_score", "artist", "song_title"]]
     songs_df = pd.concat([songs_df,new_df])
-    return songs_df, previews_df

-def import_csv(file, songs_df, previews_df):
-    if file is not None:
-        new_df = pd.read_csv(file)
-
-        # Ensure the input DataFrames have the correct structure, creating them if they don't exist
-        if 'elo_score' not in songs_df.columns or 'artist' not in songs_df.columns or 'song_title' not in songs_df.columns:
-            songs_df = pd.DataFrame(columns=['elo_score', 'artist', 'song_title'])
-        if 'preview_url' not in previews_df.columns:
-            previews_df = pd.DataFrame(columns=["elo_score", "artist", "song_title", "preview_url"])
-
-        # Ensure the new DataFrame has the correct columns
-        if all(column in new_df.columns for column in ["elo_score", "artist", "song_title", "preview_url"]):
-            new_df = new_df[["elo_score", "artist", "song_title", "preview_url"]]
-        else:
-            raise ValueError("New CSV file does not contain required columns.")
-
-        # Merge original DataFrames to ensure we don't have duplicates across them
-        consolidated_df = pd.concat([songs_df[['artist', 'song_title']], previews_df[['artist', 'song_title']]]).drop_duplicates()
-
-        # Identify new, unique songs not in the original datasets
-        unique_songs = pd.merge(new_df, consolidated_df, on=['artist', 'song_title'], how='left', indicator=True).query('_merge=="left_only"').drop('_merge', axis=1)
-
-        # Update previews_df with new unique songs including their preview URLs
-        previews_df = pd.concat([previews_df, unique_songs])
-
-        # Separate the unique songs data without the 'preview_url' for songs_df
-        unique_songs_simplified = unique_songs.drop(['preview_url'], axis=1, errors='ignore')
-        songs_df = pd.concat([songs_df, unique_songs_simplified])
-
+    songs_df = songs_df.drop_duplicates(subset=['artist', 'song_title'])
+    previews_df = previews_df.drop_duplicates(subset=['artist', 'song_title'])
     return songs_df, previews_df
+

 # Function to remove a song
 def remove_song(artist, song_title, songs_df):
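For comparison, the deleted import_csv body filtered incoming rows with a merge "anti-join" (how='left', indicator=True, then keeping only '_merge == "left_only"') before concatenating. A minimal sketch of that pattern on toy data (rows invented for illustration):

```python
import pandas as pd

# Toy data: one incoming song is already known.
known = pd.DataFrame({"artist": ["Björk"], "song_title": ["Army of Me"]})
incoming = pd.DataFrame(
    {
        "elo_score": [1000, 1000],
        "artist": ["Björk", "Radiohead"],
        "song_title": ["Army of Me", "Karma Police"],
        "preview_url": ["https://example.com/a.mp3", "https://example.com/b.mp3"],
    }
)

# Left-merge with an indicator column, then keep rows found only on the left side.
only_new = (
    incoming.merge(known, on=["artist", "song_title"], how="left", indicator=True)
    .query('_merge == "left_only"')
    .drop(columns="_merge")
)
print(only_new)  # just the Radiohead row; already-known songs are filtered out
```

The commit replaces this pre-filter with the simpler concat-then-drop_duplicates shown in both hunks.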
 
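The body of update_scores sits outside both hunks, so this diff only shows its signature. For context, a conventional Elo update consistent with a (winner, loser, k_factor=100) parameterization could look like the hypothetical helper below; this is not the app's implementation, and it operates directly on the two ratings rather than on song rows.

```python
# Hypothetical helper (not from app.py): textbook Elo rating update.
def elo_update(winner_rating, loser_rating, k_factor=100):
    # Expected score of the winner given the current rating gap.
    expected_win = 1 / (1 + 10 ** ((loser_rating - winner_rating) / 400))
    winner_new = winner_rating + k_factor * (1 - expected_win)
    loser_new = loser_rating - k_factor * (1 - expected_win)
    return winner_new, loser_new

print(elo_update(1000, 1000))  # (1050.0, 950.0) with the default k_factor of 100
```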