import gradio as gr
import librosa
import numpy as np
import random
from tensorflow.keras.models import load_model

# Load the pre-trained BBNN genre-classification model
model = load_model("BBNN_model.hdf5")
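# The saved model is assumed to take a single log-mel spectrogram of shape
# (431, 128, 1) and return a probability for each of the 10 genres listed below.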

# Mel-spectrogram parameters (should match the settings the model was trained with)
target_sr = 22050       # target sample rate in Hz
frame_size = 1024       # FFT window size (n_fft)
frame_shift_len = 512   # hop length between frames
n_mels = 128            # number of mel bands

genre_classes = {
    0: "Blues",
    1: "Classical",
    2: "Country",
    3: "Disco",
    4: "Hiphop",
    5: "Jazz",
    6: "Metal",
    7: "Pop",
    8: "Reggae",
    9: "Rock"
}

def get_melspec_feature(X, target_sr, frame_size, frame_shift_len, n_mels):
    # Compute a log-scaled (dB) mel spectrogram and return it as (frames, n_mels)
    audio_melspec = librosa.feature.melspectrogram(
        y=X, sr=target_sr, n_fft=frame_size, hop_length=frame_shift_len, n_mels=n_mels
    )
    audio_melspec = librosa.power_to_db(audio_melspec)
    audio_melspec = audio_melspec.T  # transpose to (frames, n_mels)
    return np.array(audio_melspec, dtype=np.float32)


def predict_genre(audio):
    # Analyse a random 10-second excerpt (assumes the clip is at least ~25 s long)
    start = random.randint(1, 15)
    x, sr = librosa.load(audio, sr=target_sr, offset=start, duration=10.0)
    melspec = get_melspec_feature(x, target_sr, frame_size, frame_shift_len, n_mels)
    # 10 s at 22050 Hz with a hop of 512 samples yields 431 frames -> (1, 431, 128, 1)
    melspec = np.reshape(melspec, (-1, 431, 128, 1))
    prediction = model.predict(melspec)[0]
    return {genre_classes[i]: float(prediction[i]) for i in range(len(genre_classes))}


iface = gr.Interface(
    fn=predict_genre,
    inputs=gr.Audio(type="filepath", label="Upload Music file"),
    outputs=gr.Label(num_top_classes=10),
    title="Music Genre Classifier",
    live=True
)
iface.launch()
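
# launch() serves the app locally (Gradio's default address is http://127.0.0.1:7860);
# pass share=True to iface.launch() for a temporary public link.
# Minimal offline sanity check (hypothetical file name; clip should be ~25 s or longer):
# print(predict_genre("example_song.wav"))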