patrickvonplaten committed on
Commit
bce13e0
1 Parent(s): 1672565

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -31,14 +31,14 @@ The original model can be found under https://github.com/pytorch/fairseq/tree/ma
31
  To transcribe audio files the model can be used as a standalone acoustic model as follows:
32
 
33
  ```python
34
- from transformers import Wav2Vec2Tokenizer, Wav2Vec2ForMaskedLM
35
  from datasets import load_dataset
36
  import soundfile as sf
37
  import torch
38
 
39
  # load model and tokenizer
40
  tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-large-960h-lv60")
41
- model = Wav2Vec2ForMaskedLM.from_pretrained("facebook/wav2vec2-large-960h-lv60")
42
 
43
  # define function to read in sound file
44
  def map_to_array(batch):
@@ -67,7 +67,7 @@ This code snippet shows how to evaluate **facebook/wav2vec2-large-960h-lv60** on
67
 
68
  ```python
69
  from datasets import load_dataset
70
- from transformers import Wav2Vec2ForMaskedLM, Wav2Vec2Tokenizer
71
  import soundfile as sf
72
  import torch
73
  from jiwer import wer
@@ -75,7 +75,7 @@ from jiwer import wer
75
 
76
  librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
77
 
78
- model = Wav2Vec2ForMaskedLM.from_pretrained("facebook/wav2vec2-large-960h-lv60").to("cuda")
79
  tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-large-960h-lv60")
80
 
81
  def map_to_array(batch):
 
31
  To transcribe audio files the model can be used as a standalone acoustic model as follows:
32
 
33
  ```python
34
+ from transformers import Wav2Vec2Tokenizer, Wav2Vec2ForCTC
35
  from datasets import load_dataset
36
  import soundfile as sf
37
  import torch
38
 
39
  # load model and tokenizer
40
  tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-large-960h-lv60")
41
+ model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60")
42
 
43
  # define function to read in sound file
44
  def map_to_array(batch):
 
67
 
68
  ```python
69
  from datasets import load_dataset
70
+ from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
71
  import soundfile as sf
72
  import torch
73
  from jiwer import wer
 
75
 
76
  librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")
77
 
78
+ model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60").to("cuda")
79
  tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-large-960h-lv60")
80
 
81
  def map_to_array(batch):