Spaces:
Runtime error
Runtime error
OthmaneJ
committed on
Commit
•
c7a9e39
1
Parent(s):
4800fde
with doc (no quantization)
Browse files
app.py
CHANGED
@@ -11,8 +11,8 @@ processor = Wav2Vec2Processor.from_pretrained(model_name)
|
|
11 |
model = Wav2Vec2ForCTC.from_pretrained(model_name)
|
12 |
|
13 |
# quantization
|
14 |
-
model.eval()
|
15 |
-
model_int8 = torch.quantization.quantize_dynamic(model, dtype=torch.qint8,inplace = True,)
|
16 |
|
17 |
# define function to read in sound file
|
18 |
# def map_to_array(file):
|
@@ -33,8 +33,8 @@ def inference(audio):
|
|
33 |
|
34 |
inputs = gr.inputs.Audio(label="Input Audio", type="file")
|
35 |
outputs = gr.outputs.Textbox(label="Output Text")
|
36 |
-
title = "distilled wav2vec 2.0
|
37 |
-
description = "Gradio demo for
|
38 |
-
article = "<p style='text-align: center'><a href='https://
|
39 |
examples=[['poem.wav']]
|
40 |
gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples).launch()
|
|
|
11 |
model = Wav2Vec2ForCTC.from_pretrained(model_name)
|
12 |
|
13 |
# quantization
|
14 |
+
# model.eval()
|
15 |
+
# model_int8 = torch.quantization.quantize_dynamic(model, dtype=torch.qint8,inplace = True,)
|
16 |
|
17 |
# define function to read in sound file
|
18 |
# def map_to_array(file):
|
|
|
33 |
|
34 |
inputs = gr.inputs.Audio(label="Input Audio", type="file")
|
35 |
outputs = gr.outputs.Textbox(label="Output Text")
|
36 |
+
title = "distilled wav2vec 2.0"
|
37 |
+
description = "Gradio demo for a distilled wav2vec 2.0 (4x faster than large wav2vec 2.0, and 16x times smaller than base wav2vec 2.0 if combined with quantization). To use it, simply upload your audio, or click one of the examples to load them. Read more at the links below. Currently supports .wav and .flac files"
|
38 |
+
article = "<p style='text-align: center'><a href='https://github.com/OthmaneJ/distil-wav2vec2' target='_blank'> Github repo for demonstration </a> | <a href='https://huggingface.co/OthmaneJ/distil-wav2vec2' target='_blank'>Pretrained model</a></p>"
|
39 |
examples=[['poem.wav']]
|
40 |
gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=examples).launch()
|