jonatasgrosman committed
Commit 8f49fca
1 Parent(s): 24f0df7

first commit

README.md ADDED
@@ -0,0 +1,157 @@
+ ---
+ language: de
+ datasets:
+ - common_voice
+ metrics:
+ - wer
+ - cer
+ tags:
+ - audio
+ - automatic-speech-recognition
+ - speech
+ - xlsr-fine-tuning-week
+ license: apache-2.0
+ model-index:
+ - name: XLSR Wav2Vec2 German by Jonatas Grosman
+   results:
+   - task:
+       name: Speech Recognition
+       type: automatic-speech-recognition
+     dataset:
+       name: Common Voice de
+       type: common_voice
+       args: de
+     metrics:
+     - name: Test WER
+       type: wer
+       value: 13.32
+     - name: Test CER
+       type: cer
+       value: 3.71
+ 
+ ---
+ 
+ # Wav2Vec2-Large-XLSR-53-German
+ 
+ Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on German using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
+ When using this model, make sure that your speech input is sampled at 16kHz.
+ 
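+ If your audio is stored at a different rate, it can be resampled while loading; below is a minimal sketch using librosa (the file name `audio.wav` is a hypothetical example):
+ 
+ ```python
+ import librosa
+ 
+ # librosa.load resamples to the rate given via `sr` while decoding the file
+ speech_array, sampling_rate = librosa.load("audio.wav", sr=16_000)
+ assert sampling_rate == 16_000  # the model expects 16 kHz mono input
+ ```
+ 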
+ The script used for training can be found here: https://github.com/jonatasgrosman/wav2vec2-sprint
+ 
+ ## Usage
+ 
+ The model can be used directly (without a language model) as follows:
+ 
+ ```python
+ import torch
+ import librosa
+ from datasets import load_dataset
+ from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+ 
+ LANG_ID = "de"
+ MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-german"
+ SAMPLES = 5
+ 
+ test_dataset = load_dataset("common_voice", LANG_ID, split=f"test[:{SAMPLES}]")
+ 
+ processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
+ model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
+ 
+ # Preprocessing the datasets.
+ # We need to read the audio files as arrays
+ def speech_file_to_array_fn(batch):
+     speech_array, sampling_rate = librosa.load(batch["path"], sr=16_000)
+     batch["speech"] = speech_array
+     batch["sentence"] = batch["sentence"].upper()
+     return batch
+ 
+ test_dataset = test_dataset.map(speech_file_to_array_fn)
+ inputs = processor(test_dataset["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
+ 
+ with torch.no_grad():
+     logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
+ 
+ # Greedy CTC decoding: pick the most likely token at each time step
+ predicted_ids = torch.argmax(logits, dim=-1)
+ predicted_sentences = processor.batch_decode(predicted_ids)
+ 
+ for i, predicted_sentence in enumerate(predicted_sentences):
+     print("-" * 100)
+     print("Reference:", test_dataset[i]["sentence"])
+     print("Prediction:", predicted_sentence)
+ ```
+ 
+ | Reference | Prediction |
+ | ------------- | ------------- |
+ | ZIEHT EUCH BITTE DRAUSSEN DIE SCHUHE AUS. | ZIEHT EUCH BITTE DRAUSSEN DIE SCHUHE AUS |
+ | ES KOMMT ZUM SHOWDOWN IN GSTAAD. | ES GRONTEHILSCHONDEBAR ENBESTACDEN |
+ | IHRE FOTOSTRECKEN ERSCHIENEN IN MODEMAGAZINEN WIE DER VOGUE, HARPER’S BAZAAR UND MARIE CLAIRE. | IHRE FROTESTRECKEN ERSCHIENEN IN MODEMAGAZINEN WIE DER VOLKE-APERS BASAR VAREQER |
+ | FELIPE HAT EINE AUCH FÜR MONARCHEN UNGEWÖHNLICH LANGE TITELLISTE. | FIELIPPE HATE EINE AUCH FÜR MONACHEN UNGEWÖHNLICH LANGE TITELLISTE |
+ | ER WURDE ZU EHREN DES REICHSKANZLERS OTTO VON BISMARCK ERRICHTET. | ER WURDE ZU EHREN DES REICHSKANZLERS OTTO VON BISMARK ERRICHTET |
+ 
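+ For quick experiments, transcription can also be sketched in a few lines, assuming your installed version of Transformers includes the `automatic-speech-recognition` pipeline (`audio.wav` is again a hypothetical file name):
+ 
+ ```python
+ from transformers import pipeline
+ 
+ # the pipeline wraps feature extraction, inference, and CTC decoding
+ asr = pipeline("automatic-speech-recognition", model="jonatasgrosman/wav2vec2-large-xlsr-53-german")
+ print(asr("audio.wav"))  # e.g. {'text': '...'}
+ ```
+ 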
+ ## Evaluation
+ 
+ The model can be evaluated as follows on the German test data of Common Voice.
+ 
+ ```python
+ import torch
+ import re
+ import warnings
+ import librosa
+ from datasets import load_dataset, load_metric
+ from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+ 
+ LANG_ID = "de"
+ MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-german"
+ DEVICE = "cuda"
+ MAX_SAMPLES = 8000
+ 
+ CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞",
+                    "؟", "،", "।", "॥", "«", "»", "„", "“", "”", "「", "」", "‘", "’", "《", "》", "(", ")", "[", "]",
+                    "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。"]
+ 
+ test_dataset = load_dataset("common_voice", LANG_ID, split="test")
+ if len(test_dataset) > MAX_SAMPLES:
+     test_dataset = test_dataset.select(range(MAX_SAMPLES))
+ 
+ wer = load_metric("wer.py")  # https://github.com/jonatasgrosman/wav2vec2-sprint/blob/main/wer.py
+ cer = load_metric("cer.py")  # https://github.com/jonatasgrosman/wav2vec2-sprint/blob/main/cer.py
+ 
+ chars_to_ignore_regex = f"[{re.escape(''.join(CHARS_TO_IGNORE))}]"
+ 
+ processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
+ model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
+ model.to(DEVICE)
+ 
+ # Preprocessing the datasets.
+ # We need to read the audio files as arrays
+ def speech_file_to_array_fn(batch):
+     with warnings.catch_warnings():
+         warnings.simplefilter("ignore")
+         speech_array, sampling_rate = librosa.load(batch["path"], sr=16_000)
+     batch["speech"] = speech_array
+     batch["sentence"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).upper()
+     return batch
+ 
+ test_dataset = test_dataset.map(speech_file_to_array_fn)
+ 
+ # Run batched inference and decode the predictions
+ def evaluate(batch):
+     inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
+ 
+     with torch.no_grad():
+         logits = model(inputs.input_values.to(DEVICE), attention_mask=inputs.attention_mask.to(DEVICE)).logits
+ 
+     pred_ids = torch.argmax(logits, dim=-1)
+     batch["pred_strings"] = processor.batch_decode(pred_ids)
+     return batch
+ 
+ result = test_dataset.map(evaluate, batched=True, batch_size=8)
+ 
+ print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"], chunk_size=1000)))
+ print("CER: {:.2f}".format(100 * cer.compute(predictions=result["pred_strings"], references=result["sentence"], chunk_size=1000)))
+ ```
+ 
+ **Test Result**:
+ 
+ - WER: 13.32%
+ - CER: 3.71%
config.json ADDED
@@ -0,0 +1,69 @@
+ {
+   "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
+   "activation_dropout": 0.05,
+   "apply_spec_augment": true,
+   "architectures": [
+     "Wav2Vec2ForCTC"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "conv_bias": true,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "mean",
+   "ctc_zero_infinity": true,
+   "do_stable_layer_norm": true,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_dropout": 0.0,
+   "feat_extract_norm": "layer",
+   "feat_proj_dropout": 0.05,
+   "final_dropout": 0.1,
+   "gradient_checkpointing": true,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.05,
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.05,
+   "mask_feature_length": 10,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_prob": 0.05,
+   "model_type": "wav2vec2",
+   "num_attention_heads": 16,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "transformers_version": "4.5.0.dev0",
+   "vocab_size": 36
+ }
preprocessor_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "do_normalize": true,
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:230f7682c6576a1c855a884b6faf1d52e21ca70f86e426a7c2c1744cd0100b08
+ size 1262081431
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
vocab.json ADDED
@@ -0,0 +1 @@
+ {"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "E": 5, "N": 6, "I": 7, "S": 8, "R": 9, "T": 10, "A": 11, "H": 12, "D": 13, "U": 14, "L": 15, "C": 16, "G": 17, "M": 18, "O": 19, "B": 20, "W": 21, "F": 22, "K": 23, "Z": 24, "V": 25, "Ü": 26, "P": 27, "Ä": 28, "Ö": 29, "J": 30, "Y": 31, "'": 32, "X": 33, "Q": 34, "-": 35}