Patrick von Platen committed on
Commit 8a84f07
1 Parent(s): dd244af
README.md ADDED
@@ -0,0 +1,74 @@
+ ---
+ language: en
+ datasets:
+ - librispeech_asr
+ tags:
+ - audio
+ - automatic-speech-recognition
+ - hf-asr-leaderboard
+ license: apache-2.0
+ widget:
+ - example_title: LibriSpeech sample 1
+   src: https://cdn-media.huggingface.co/speech_samples/sample1.flac
+ - example_title: LibriSpeech sample 2
+   src: https://cdn-media.huggingface.co/speech_samples/sample2.flac
+ model-index:
+ - name: patrickvonplaten/wav2vec2-large-960h-lv60-self-4-gram
+   results:
+   - task:
+       name: Automatic Speech Recognition
+       type: automatic-speech-recognition
+     dataset:
+       name: LibriSpeech (clean)
+       type: librispeech_asr
+       args: en
+     metrics:
+     - name: Test WER
+       type: wer
+       value: 1.84
+ ---
+
+ # Wav2Vec2-Large-960h-lv60-self + 4-gram
+
+ This model is identical to [Facebook's Wav2Vec2-Large-960h-lv60-self](https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self), but augmented with an English 4-gram language model for beam-search CTC decoding. The `4-gram.arpa.gz` from [LibriSpeech's official ngrams](https://www.openslr.org/11) is used.
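+
+ The snippet below is a minimal sketch of how such an LM-boosted processor can be assembled with [pyctcdecode](https://github.com/kensho-technologies/pyctcdecode); the local file name `4-gram.arpa` (the decompressed `4-gram.arpa.gz`) is an assumption for illustration, not a file shipped in this repository.
+
+ ```python
+ from pyctcdecode import build_ctcdecoder
+ from transformers import AutoProcessor, Wav2Vec2ProcessorWithLM
+
+ # Start from the plain (LM-free) processor of the base model.
+ base = AutoProcessor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self")
+
+ # pyctcdecode needs the CTC labels sorted by their vocabulary index.
+ vocab = base.tokenizer.get_vocab()
+ labels = [token for token, _ in sorted(vocab.items(), key=lambda item: item[1])]
+
+ # "4-gram.arpa" is an assumed local path to the decompressed LibriSpeech LM.
+ decoder = build_ctcdecoder(labels=labels, kenlm_model_path="4-gram.arpa")
+
+ processor = Wav2Vec2ProcessorWithLM(
+     feature_extractor=base.feature_extractor,
+     tokenizer=base.tokenizer,
+     decoder=decoder,
+ )
+ ```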
+
+ ## Evaluation
+
+ This code snippet shows how to evaluate **patrickvonplaten/wav2vec2-large-960h-lv60-self-4-gram** on LibriSpeech's test data. It loads the "other" test split; replace `"other"` with `"clean"` to evaluate on the clean test split.
+
+ ```python
+ import torch
+ from datasets import load_dataset
+ from jiwer import wer
+ from transformers import AutoModelForCTC, AutoProcessor
+
+ model_id = "patrickvonplaten/wav2vec2-large-960h-lv60-self-4-gram"
+
+ # Use "clean" instead of "other" to evaluate on the clean test split.
+ librispeech_eval = load_dataset("librispeech_asr", "other", split="test")
+
+ model = AutoModelForCTC.from_pretrained(model_id).to("cuda")
+ processor = AutoProcessor.from_pretrained(model_id)
+
+ def map_to_pred(batch):
+     # The feature extractor normalizes the raw 16 kHz waveform.
+     inputs = processor(batch["audio"]["array"], sampling_rate=16_000, return_tensors="pt")
+     inputs = {k: v.to("cuda") for k, v in inputs.items()}
+
+     with torch.no_grad():
+         logits = model(**inputs).logits
+
+     # Beam-search decode with the 4-gram LM; `.text` holds one string per batch item.
+     transcription = processor.batch_decode(logits.cpu().numpy()).text[0]
+     batch["transcription"] = transcription
+     return batch
+
+ result = librispeech_eval.map(map_to_pred, remove_columns=["audio"])
+
+ # Word error rate: references first, hypotheses second.
+ print(wer(result["text"], result["transcription"]))
+ ```
+
+ *Result (WER)*:
+
+ | test "clean" | test "other" |
+ |---|---|
+ | 1.84 | 3.71 |
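+
+ ## Usage
+
+ For quick transcription without writing an evaluation loop, the model can also be used with the `pipeline` API. A minimal sketch (decoding the remote audio file assumes `ffmpeg` is installed); beam-search decoding with the shipped 4-gram LM is applied automatically because the repository contains a `Wav2Vec2ProcessorWithLM`:
+
+ ```python
+ from transformers import pipeline
+
+ asr = pipeline(
+     "automatic-speech-recognition",
+     model="patrickvonplaten/wav2vec2-large-960h-lv60-self-4-gram",
+ )
+
+ # Accepts a local path or a URL to an audio file.
+ print(asr("https://cdn-media.huggingface.co/speech_samples/sample1.flac"))
+ ```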
alphabet.json ADDED
@@ -0,0 +1 @@
+ {"labels": ["", "<s>", "</s>", "\u2047", " ", "E", "T", "A", "O", "N", "I", "H", "S", "R", "D", "L", "U", "M", "W", "C", "F", "G", "Y", "P", "B", "V", "K", "'", "X", "J", "Q", "Z"], "is_bpe": false}
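`alphabet.json` stores the label list that pyctcdecode aligns, index by index, with the CTC logits and with `vocab.json` below: `<pad>` becomes the empty string, `<unk>` becomes the `⁇` (U+2047) unknown marker, and the word delimiter `|` becomes a plain space. A quick check, assuming the file has been downloaded locally:

```python
import json

# "alphabet.json" is assumed to be a local copy of the file above.
with open("alphabet.json") as f:
    labels = json.load(f)["labels"]

print(labels[3])  # '⁇' -- pyctcdecode's unknown-word marker (vocab.json index 3 is <unk>)
print(labels[4])  # ' ' -- the tokenizer's word delimiter '|' mapped to a space
```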
config.json ADDED
@@ -0,0 +1,80 @@
+ {
+   "_name_or_path": "facebook/hubert-xlarge-ls960-ft",
+   "activation_dropout": 0.0,
+   "apply_spec_augment": true,
+   "architectures": [
+     "HubertForCTC"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "conv_bias": true,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "do_stable_layer_norm": true,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_dropout": 0.0,
+   "feat_extract_norm": "layer",
+   "feat_proj_dropout": 0.1,
+   "feat_proj_layer_norm": true,
+   "final_dropout": 0.0,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 1280,
+   "initializer_range": 0.02,
+   "intermediate_size": 5120,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.1,
+   "mask_channel_length": 10,
+   "mask_channel_min_space": 1,
+   "mask_channel_other": 0.0,
+   "mask_channel_prob": 0.0,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_min_space": 1,
+   "mask_time_other": 0.0,
+   "mask_time_prob": 0.075,
+   "model_type": "hubert",
+   "num_attention_heads": 16,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 48,
+   "pad_token_id": 0,
+   "torch_dtype": "float32",
+   "transformers_version": "4.19.0.dev0",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32
+ }
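Note that, despite the repository name, this `config.json` declares `"model_type": "hubert"` and the `HubertForCTC` architecture, with `"_name_or_path"` pointing at `facebook/hubert-xlarge-ls960-ft`. `AutoModelForCTC` dispatches on `model_type`, so loading the checkpoint should yield a HuBERT CTC model; a quick sanity check:

```python
from transformers import AutoModelForCTC

# AutoModelForCTC reads "model_type" from config.json ("hubert" here),
# so the instantiated class is HubertForCTC.
model = AutoModelForCTC.from_pretrained(
    "patrickvonplaten/wav2vec2-large-960h-lv60-self-4-gram"
)
print(type(model).__name__)  # expected: HubertForCTC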
language_model/4-gram.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e674d4a61df15bef37cd49183dc4fb087aaa3d7819d0ff8347068a880f033c61
+ size 3124591979
language_model/attrs.json ADDED
@@ -0,0 +1 @@
+ {"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
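`attrs.json` holds the pyctcdecode decoder weights: `alpha` scales the language-model score, `beta` is the word-insertion bonus, and `score_boundary` corresponds to the `lm_score_boundary` argument. `Wav2Vec2ProcessorWithLM.batch_decode` accepts these as per-call overrides; a sketch reusing `processor` and `logits` from the evaluation snippet above (the values shown are arbitrary examples, not tuned settings):

```python
# Override the stored decoder weights for a single decode call.
transcription = processor.batch_decode(
    logits.cpu().numpy(),
    alpha=0.6,               # LM weight (stored default: 0.5)
    beta=1.0,                # word-insertion bonus (stored default: 1.5)
    unk_score_offset=-10.0,  # penalty for unknown words
    lm_score_boundary=True,  # "score_boundary" above
).text
```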
language_model/unigrams.txt ADDED
The diff for this file is too large to render. See raw diff
preprocessor_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "return_attention_mask": true,
+   "processor_class": "Wav2Vec2ProcessorWithLM",
+   "sampling_rate": 16000
+ }
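The `"processor_class": "Wav2Vec2ProcessorWithLM"` entry is what makes `AutoProcessor.from_pretrained` return the LM-aware processor, and `"sampling_rate": 16000` means input audio must be 16 kHz. A sketch of resampling a dataset to match (LibriSpeech is already 16 kHz, so the cast is a no-op there; it matters for, e.g., 44.1 kHz audio):

```python
from datasets import Audio, load_dataset

ds = load_dataset("librispeech_asr", "clean", split="test")

# Decode the audio column at the 16 kHz rate the feature extractor expects.
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
```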
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16dfe0b4aa7dc3c0e1bce84ec1505a3fecd1aefa600440a5c355b43d4b4dea1b
+ size 3850416049
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false, "word_delimiter_token": "|", "replace_word_delimiter_char": " ", "special_tokens_map_file": "/home/patrick_huggingface_co/.cache/huggingface/transformers/0dd9f421c5b57d7bce23fb1d1c182fd28779f145cd3b2dada47f3d2e2ecff47b.9d6cd81ef646692fb1c169a880161ea1cb95f49694f220aced9b704b457e51dd", "name_or_path": "facebook/hubert-xlarge-ls960-ft", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
vocab.json ADDED
@@ -0,0 +1 @@
+ {"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "E": 5, "T": 6, "A": 7, "O": 8, "N": 9, "I": 10, "H": 11, "S": 12, "R": 13, "D": 14, "L": 15, "U": 16, "M": 17, "W": 18, "C": 19, "F": 20, "G": 21, "Y": 22, "P": 23, "B": 24, "V": 25, "K": 26, "'": 27, "X": 28, "J": 29, "Q": 30, "Z": 31}