from transformers import AutoModelForAudioClassification
import gradio as gr
import librosa
import torch
import numpy as np


# Waveform normalization statistics and output attribute labels for the SER model.
mean, std = -8.278621631819787e-05, 0.08485510250851999
id2label = {0: 'arousal', 1: 'dominance', 2: 'valence'}

# Load the pretrained model once at import time instead of on every request.
model = AutoModelForAudioClassification.from_pretrained(
    "3loi/SER-Odyssey-Baseline-WavLM-Multi-Attributes", trust_remote_code=True
)


def classify_audio(audio_file):
    """Predict arousal/dominance/valence scores for a Gradio audio input."""
    # Gradio delivers (sample_rate, numpy_array); the array may be int16 and
    # may be stereo with shape (samples, channels).
    sr, raw_wav = audio_file

    y = raw_wav.astype(np.float32)
    if y.ndim > 1:
        y = y.mean(axis=1)  # downmix stereo to mono

    # The WavLM backbone is typically trained on 16 kHz audio; resampling
    # other rates to 16 kHz is an assumption about the expected input.
    if sr != 16000:
        y = librosa.resample(y, orig_sr=sr, target_sr=16000)

    # Peak-normalize; the small epsilon guards against an all-silent clip.
    y /= (np.max(np.abs(y)) + 1e-9)

    # Standardize with the statistics the model was trained with.
    norm_wav = (y - mean) / (std + 1e-6)

    # Batch of one waveform plus an all-ones attention mask.
    mask = torch.ones(1, len(norm_wav))
    wavs = torch.tensor(norm_wav).unsqueeze(0)

    with torch.no_grad():
        pred = model(wavs, mask).numpy()

    # One "attribute: score" line per predicted dimension; pred is expected
    # to have shape (1, 3).
    output = ''
    for att_i, att_val in enumerate(pred[0]):
        output += "{}: \t{:0.4f}\n".format(id2label[att_i], att_val)

    return output
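
# A minimal sketch of exercising classify_audio without the UI, assuming a
# synthetic one-second 440 Hz tone at 16 kHz (handy as a smoke test):
#
#   sr = 16000
#   t = np.linspace(0, 1, sr, endpoint=False)
#   tone = (0.5 * np.sin(2 * np.pi * 440 * t)).astype(np.float32)
#   print(classify_audio((sr, tone)))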


def main():
    iface = gr.Interface(
        fn=classify_audio,
        inputs=gr.Audio(sources=["upload", "microphone"], label="Audio file"),
        outputs=gr.Text(),
        title="Speech Emotion Recognition App",
        description="Upload or record an audio file and hit the 'Submit' button",
    )

    iface.launch()


if __name__ == '__main__':
    main()
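
# To run locally (assumption: the gradio, transformers, torch, librosa, and
# numpy packages are installed):
#   $ python app.py
# Gradio prints a local URL; open it, upload or record a clip, and press
# Submit to see the arousal, dominance, and valence scores.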