OthmaneJ committed on
Commit
4800fde
β€’
1 Parent(s): dd89e75

adding quantization

Browse files
Files changed (1) hide show
  1. app.py +5 -1
app.py CHANGED
@@ -10,6 +10,10 @@ model_name = "OthmaneJ/distil-wav2vec2"
10
  processor = Wav2Vec2Processor.from_pretrained(model_name)
11
  model = Wav2Vec2ForCTC.from_pretrained(model_name)
12
 
 
 
 
 
13
  # define function to read in sound file
14
  # def map_to_array(file):
15
  # speech, sample_rate = sf.read(file)
@@ -29,7 +33,7 @@ def inference(audio):
29
 
30
  inputs = gr.inputs.Audio(label="Input Audio", type="file")
31
  outputs = gr.outputs.Textbox(label="Output Text")
32
- title = "Distilled wav2vec 2.0"
33
  description = "Gradio demo for Robust wav2vec 2.0. To use it, simply upload your audio, or click one of the examples to load them. Read more at the links below. Currently supports .wav and .flac files"
34
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2104.01027' target='_blank'>Robust wav2vec 2.0: Analyzing Domain Shift in Self-Supervised Pre-Training</a> | <a href='https://github.com/pytorch/fairseq' target='_blank'>Github Repo</a></p>"
35
  examples=[['poem.wav']]
 
10
  processor = Wav2Vec2Processor.from_pretrained(model_name)
11
  model = Wav2Vec2ForCTC.from_pretrained(model_name)
12
 
13
+ # quantization
14
+ model.eval()
15
+ model_int8 = torch.quantization.quantize_dynamic(model, dtype=torch.qint8,inplace = True,)
16
+
17
  # define function to read in sound file
18
  # def map_to_array(file):
19
  # speech, sample_rate = sf.read(file)
 
33
 
34
  inputs = gr.inputs.Audio(label="Input Audio", type="file")
35
  outputs = gr.outputs.Textbox(label="Output Text")
36
+ title = "distilled wav2vec 2.0 (with quantization)"
37
  description = "Gradio demo for Robust wav2vec 2.0. To use it, simply upload your audio, or click one of the examples to load them. Read more at the links below. Currently supports .wav and .flac files"
38
  article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2104.01027' target='_blank'>Robust wav2vec 2.0: Analyzing Domain Shift in Self-Supervised Pre-Training</a> | <a href='https://github.com/pytorch/fairseq' target='_blank'>Github Repo</a></p>"
39
  examples=[['poem.wav']]