patrickvonplaten committed on
Commit
f1f1647
1 Parent(s): 67fb590

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -36,13 +36,13 @@ The original model can be found under https://github.com/pytorch/fairseq/tree/ma
36
  To transcribe audio files the model can be used as a standalone acoustic model as follows:
37
 
38
  ```python
39
- from transformers import Wav2Vec2Tokenizer, Wav2Vec2ForCTC
40
  from datasets import load_dataset
41
  import soundfile as sf
42
  import torch
43
 
44
  # load model and tokenizer
45
- tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
46
  model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
47
 
48
  # define function to read in sound file
@@ -81,7 +81,7 @@ from jiwer import wer
81
  librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
82
 
83
  model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
84
- tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
85
 
86
  def map_to_array(batch):
87
  speech, _ = sf.read(batch["file"])
36
  To transcribe audio files the model can be used as a standalone acoustic model as follows:
37
 
38
  ```python
39
+ from transformers import Wav2Vec2CTCTokenizer, Wav2Vec2ForCTC
40
  from datasets import load_dataset
41
  import soundfile as sf
42
  import torch
43
 
44
  # load model and tokenizer
45
+ tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
46
  model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
47
 
48
  # define function to read in sound file
81
  librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
82
 
83
  model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
84
+ tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("facebook/wav2vec2-base-960h")
85
 
86
  def map_to_array(batch):
87
  speech, _ = sf.read(batch["file"])