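"""Gradio Space: keyword spotting with a wav2vec2-conformer-rel-pos-large model
fine-tuned on the speech_commands dataset."""
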
import gradio as gr
from transformers import pipeline

model_name = "juliensimon/wav2vec2-conformer-rel-pos-large-finetuned-speech-commands"

p = pipeline("audio-classification", model=model_name)
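# The audio-classification pipeline returns a list of {"label", "score"} dicts,
# sorted by descending score (top 5 by default).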


def process(file):
    """Run the classifier on an audio file and map each label to its score."""
    pred = p(file)
    return {x["label"]: x["score"] for x in pred}
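
# Illustrative only: process("up16k.wav") might return something like
# {"up": 0.97, "go": 0.01, "stop": 0.01, ...} (hypothetical scores).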


# Gradio inputs
# (gr.inputs was removed in Gradio 4; the top-level gr.Audio component
# replaces gr.inputs.Audio, with `sources` taking a list)
mic = gr.Audio(sources=["microphone"], type="filepath", label="Speech input")

# Gradio outputs
# (gr.outputs.Label is likewise replaced by the top-level gr.Label)
labels = gr.Label(num_top_classes=3)

description = "This Space showcases a wav2vec2-conformer-rel-pos-large model fine-tuned for audio classification on the speech_commands dataset. \n \n It can spot one of the following keywords: 'Yes', 'No', 'Up', 'Down', 'Left', 'Right', 'On', 'Off', 'Stop', 'Go', 'Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Bed', 'Bird', 'Cat', 'Dog', 'Happy', 'House', 'Marvin', 'Sheila', 'Tree', 'Wow', 'Backward', 'Forward', 'Follow', 'Learn', 'Visual'."

iface = gr.Interface(
    theme="huggingface",
    description=description,
    fn=process,
    inputs=[mic],
    outputs=[labels],
    examples=[
        ["backward16k.wav"],
        ["happy16k.wav"],
        ["marvin16k.wav"],
        ["seven16k.wav"],
        ["stop16k.wav"],
        ["up16k.wav"],
    ],
    allow_flagging="never",
)
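# launch() starts the web server (http://127.0.0.1:7860 by default when run
# locally); on a hosted Space this is what serves the demo UI.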

iface.launch()