import os
import torch
# import spacy
import spotipy
import numpy as np
import gradio as gr
import torch.nn as nn
import torch.nn.functional as F
from sklearn.preprocessing import StandardScaler
from transformers import AutoModel, AutoTokenizer
from spotipy.oauth2 import SpotifyClientCredentials
# from dotenv import load_dotenv
# load_dotenv()
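# Static configuration: HF model repo, genre label mapping, the Spotify audio
# features used as model inputs, and Spotify credentials read from the environment.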
class ConfigApp:
    REPO_NAME = "PunGrumpy/music-genre-classification"
    GENRE_MAPPING = {"pop": 0, "rap": 1, "rock": 2, "r&b": 3, "edm": 4}
    AUDIO_FEATURES = {
        "acousticness": 0,
        "danceability": 0,
        "energy": 0,
        "instrumentalness": 0,
        "key": 0,
        "liveness": 0,
        "loudness": 0,
        "mode": 0,
        "speechiness": 0,
        "tempo": 0,
        "valence": 0,
    }
    SPOTIFY_CLIENT_ID = os.getenv("SPOTIFY_CLIENT_ID")
    SPOTIFY_ACCESS_TOKEN = os.getenv("SPOTIFY_ACCESS_TOKEN")
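# Wraps the pretrained text encoder, a linear genre head, and a Spotify client
# used to fetch per-track audio features at inference time.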
class LyricsAudioModelInference:
    def __init__(self, model_name, num_labels=5):
        self.model = AutoModel.from_pretrained(model_name)
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.num_labels = num_labels
        # Linear head over the concatenated lyrics embedding and audio features.
        # Note: AutoModel.from_pretrained loads only the base encoder, so this
        # head starts from random weights unless trained weights are loaded.
        self.classifier = nn.Linear(
            self.model.config.hidden_size + len(ConfigApp.AUDIO_FEATURES), num_labels
        )
        self.scaler = StandardScaler()
        # self.nlp = spacy.load("en_core_web_sm")
        # SPOTIFY_ACCESS_TOKEN holds the app's client secret for the
        # client-credentials flow.
        self.sp = spotipy.Spotify(
            client_credentials_manager=SpotifyClientCredentials(
                client_id=ConfigApp.SPOTIFY_CLIENT_ID,
                client_secret=ConfigApp.SPOTIFY_ACCESS_TOKEN,
            )
        )
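    # Resolve the track ID from a Spotify share link and return its audio
    # features in the order defined by ConfigApp.AUDIO_FEATURES.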
    def get_audio_features(self, spotify_track_link: str) -> list:
        track_id = spotify_track_link.split("/")[-1].split("?")[0]
        audio_features = self.sp.audio_features(track_id)
        audio_features = [
            audio_features[0][feature] for feature in ConfigApp.AUDIO_FEATURES
        ]
        return audio_features
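    # Look up the track's title and artist names for display.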
    def get_track_info(self, spotify_track_link: str) -> dict:
        track_id = spotify_track_link.split("/")[-1].split("?")[0]
        track_info = self.sp.track(track_id)
        song_name = track_info.get("name", "Unknown")
        artist_name = ", ".join(
            [artist["name"] for artist in track_info.get("artists", [])]
        )
        return {"Song Name": song_name, "Artist Name": artist_name}
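    # Combine the mean-pooled lyrics embedding with the track's audio features
    # and return the top-1 genre with its softmax probability.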
    def predict_genre(self, lyrics: str, spotify_track_link: str) -> dict:
        with torch.no_grad():
            self.model.eval()
            # lyrics = self._preprocess_lyrics(lyrics)
            input_lyrics = self.tokenizer(
                lyrics,
                None,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=512,
            )
            audio_features = self.get_audio_features(spotify_track_link)
            # Caveat: fitting the scaler on a single sample centers every
            # feature to zero; a scaler fitted on the training data should be
            # used here for the audio inputs to stay meaningful.
            audio_features = self.scaler.fit_transform(
                np.array(audio_features).reshape(1, -1)
            )
            outputs = self.model(**input_lyrics)
            # Mean-pool token embeddings into one lyrics vector per input.
            lyrics_embedding = outputs.last_hidden_state.mean(dim=1)
            if audio_features is not None:
                input_features = torch.cat(
                    [lyrics_embedding, torch.from_numpy(audio_features).float()],
                    dim=1,
                )
            else:
                input_features = lyrics_embedding
            logits = self.classifier(input_features)
            probs = F.softmax(logits, dim=1)
            top1_genre = torch.argmax(probs, dim=1).item()
            genre_label = [
                key.capitalize()
                for key, value in ConfigApp.GENRE_MAPPING.items()
                if value == top1_genre
            ][0]
            return {genre_label: probs[0][top1_genre].item()}
    # def _preprocess_lyrics(self, text):
    #     doc = self.nlp(text)
    #     processed_text = " ".join(
    #         [
    #             token.lemma_.lower().strip()
    #             for token in doc
    #             if not token.is_stop and token.lemma_.isalpha() and not token.is_punct
    #         ]
    #     )
    #     return processed_text
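# Gradio UI: two text inputs (lyrics and a Spotify track link) mapped to a
# single predicted-genre label, with a few example tracks.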
inference = LyricsAudioModelInference(model_name=ConfigApp.REPO_NAME)
# gr.Interface is itself a Blocks app, so it can be launched directly.
demo = gr.Interface(
    api_name="Music Genre Classifier",
    fn=inference.predict_genre,
    inputs=[
        gr.Textbox(
            lines=5,
            placeholder="Enter lyrics here...",
            label="Lyrics",
        ),
        gr.Textbox(
            lines=1,
            placeholder="Enter Spotify Track Link here...",
            label="Spotify Track Link",
        ),
    ],
    outputs=[
        gr.Label(
            num_top_classes=1,
            label="Predicted Genre",
            elem_id="genre",
        ),
    ],
    title="🎷 Music Genre Classifier",
    description="This model predicts the genre of a song based on its lyrics and audio features.",
    examples=[
        [
            "Standing in the rain, with his head hung low Couldn't get a ticket, it was a sold out show Heard the roar of the crowd, he could picture the scene Put his ear to the wall, then like a distant scream He heard one guitar, just blew him away He saw stars in his eyes, and the very next day Bought a beat up six string in a secondhand store",
            "https://open.spotify.com/track/00qOE7OjRl0BpYiCiweZB2",
        ],
        [
            "Intro They say, or at least they say that they say That reality is only what your mind chooses to believe If that's true, believe this I don't rap",
            "https://open.spotify.com/track/2j3JzMfQI2cGw4w2juWoD4",
        ],
        [
            "Mmmm, ay-oh Hey... Ratatat, yeah! Na-na-na-nah Na-na-na-nah Crush a bit, little bit, roll it up, take a hit Feelin' lit, feelin' right, 2 AM, summer night I don't care, hand on the wheel Driving drunk, I'm doing my thing Rolling in the Midwest side and out Living my life, getting out dreams People told me slow my roll, I'm screaming out 'Fuck that'",
            "https://open.spotify.com/track/3Uqn6QvA1IzVjhY0ngcZ9B",
        ],
    ],
    analytics_enabled=True,
    cache_examples=False,
)
demo.launch(debug=True, show_api=True)