jonatasgrosman committed on
Commit 8433ab8
1 Parent(s): 701381f

first commit

README.md ADDED
@@ -0,0 +1,128 @@
+ ---
+ language: pt
+ datasets:
+ - common_voice
+ metrics:
+ - wer
+ tags:
+ - audio
+ - automatic-speech-recognition
+ - speech
+ - xlsr-fine-tuning-week
+ license: apache-2.0
+ model-index:
+ - name: XLSR Wav2Vec2 Portuguese by Jonatas Grosman
+   results:
+   - task:
+       name: Speech Recognition
+       type: automatic-speech-recognition
+     dataset:
+       name: Common Voice pt
+       type: common_voice
+       args: pt
+     metrics:
+     - name: Test WER
+       type: wer
+       value: 13.45
+ ---
+ 
+ # Wav2Vec2-Large-XLSR-53-Portuguese
+ 
+ Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Portuguese using the [Common Voice](https://huggingface.co/datasets/common_voice) dataset.
+ When using this model, make sure that your speech input is sampled at 16kHz.
+ 
+ The script used for training can be found here: https://github.com/jonatasgrosman/wav2vec2-sprint
+ 
+ ## Usage
+ 
+ The model can be used directly (without a language model) as follows:
+ 
+ ```python
+ import torch
+ import librosa
+ from datasets import load_dataset
+ from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+ 
+ LANG_ID = "pt"
+ MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-portuguese"
+ 
+ test_dataset = load_dataset("common_voice", LANG_ID, split="test[:2%]")
+ 
+ processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
+ model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
+ 
+ # Preprocessing the datasets.
+ # We need to read the audio files as arrays;
+ # librosa.load already resamples to 16 kHz and returns a numpy array
+ def speech_file_to_array_fn(batch):
+     speech_array, sampling_rate = librosa.load(batch["path"], sr=16_000)
+     batch["speech"] = speech_array
+     return batch
+ 
+ test_dataset = test_dataset.map(speech_file_to_array_fn)
+ inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True)
+ 
+ with torch.no_grad():
+     logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
+ 
+ predicted_ids = torch.argmax(logits, dim=-1)
+ 
+ print("Prediction:", processor.batch_decode(predicted_ids))
+ print("Reference:", test_dataset["sentence"][:2])
+ ```
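+ 
+ Since `librosa.load(..., sr=16_000)` resamples on the fly, the same pipeline also works for your own recordings at any source sampling rate. A minimal sketch (the file name `my_audio.wav` is only a placeholder):
+ 
+ ```python
+ import torch
+ import librosa
+ from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+ 
+ MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-portuguese"
+ 
+ processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
+ model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
+ 
+ # decode and resample to the 16 kHz rate the model expects
+ # ("my_audio.wav" is a placeholder path)
+ speech, _ = librosa.load("my_audio.wav", sr=16_000)
+ 
+ inputs = processor(speech, sampling_rate=16_000, return_tensors="pt", padding=True)
+ 
+ with torch.no_grad():
+     logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits
+ 
+ # greedy (argmax) CTC decoding, as in the example above
+ print(processor.batch_decode(torch.argmax(logits, dim=-1))[0])
+ ```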
+ 
+ ## Evaluation
+ 
+ The model can be evaluated as follows on the Portuguese test data of Common Voice.
+ 
+ ```python
+ import re
+ 
+ import librosa
+ import torch
+ from datasets import load_dataset, load_metric
+ from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+ 
+ LANG_ID = "pt"
+ MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-portuguese"
+ DEVICE = "cuda"
+ 
+ CHARS_TO_IGNORE = [",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�", "ʿ", "·", "჻", "¿", "¡", "~", "՞",
+                    "؟", "،", "।", "॥", "«", "»", "„", "“", "”", "「", "」", "‘", "’", "《", "》", "(", ")", "[", "]",
+                    "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", ""]
+ 
+ test_dataset = load_dataset("common_voice", LANG_ID, split="test")
+ wer = load_metric("wer")
+ 
+ chars_to_ignore_regex = f'[{re.escape("".join(CHARS_TO_IGNORE))}]'
+ 
+ processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
+ model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)
+ model.to(DEVICE)
+ 
+ # Preprocessing the datasets.
+ # We need to read the audio files as arrays and normalize the
+ # references to the model's upper-case, punctuation-free alphabet
+ def speech_file_to_array_fn(batch):
+     batch["sentence"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).upper()
+     speech_array, sampling_rate = librosa.load(batch["path"], sr=16_000)
+     batch["speech"] = speech_array
+     return batch
+ 
+ test_dataset = test_dataset.map(speech_file_to_array_fn)
+ 
+ # Run batched inference with greedy CTC decoding
+ def evaluate(batch):
+     inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
+ 
+     with torch.no_grad():
+         logits = model(inputs.input_values.to(DEVICE), attention_mask=inputs.attention_mask.to(DEVICE)).logits
+ 
+     pred_ids = torch.argmax(logits, dim=-1)
+     batch["pred_strings"] = processor.batch_decode(pred_ids)
+     return batch
+ 
+ result = test_dataset.map(evaluate, batched=True, batch_size=32)
+ 
+ print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
+ ```
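+ 
+ The references are upper-cased and stripped of punctuation before scoring because the model's vocabulary (see `vocab.json`) contains only upper-case characters. A quick illustration of that normalization (the sample sentence is made up and the character list abbreviated):
+ 
+ ```python
+ import re
+ 
+ # abbreviated version of CHARS_TO_IGNORE from the script above
+ chars_to_ignore_regex = f'[{re.escape(",?.!;:")}]'
+ 
+ print(re.sub(chars_to_ignore_regex, "", "Olá, tudo bem?").upper())
+ # OLÁ TUDO BEM
+ ```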
+ 
+ **Test Result**: 13.45%
config.json ADDED
@@ -0,0 +1,69 @@
+ {
+   "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
+   "activation_dropout": 0.05,
+   "apply_spec_augment": true,
+   "architectures": [
+     "Wav2Vec2ForCTC"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "conv_bias": true,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "mean",
+   "ctc_zero_infinity": true,
+   "do_stable_layer_norm": true,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_dropout": 0.0,
+   "feat_extract_norm": "layer",
+   "feat_proj_dropout": 0.05,
+   "final_dropout": 0.1,
+   "gradient_checkpointing": true,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.05,
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.05,
+   "mask_feature_length": 10,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_prob": 0.05,
+   "model_type": "wav2vec2",
+   "num_attention_heads": 16,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 24,
+   "pad_token_id": 0,
+   "transformers_version": "4.5.0.dev0",
+   "vocab_size": 47
+ }
preprocessor_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "do_normalize": true,
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0.0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e26bfd51de967cc8e0b5eb55916e2d36d3b3a5fb41699b150cb25a15cd801374
+ size 1262126551
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
vocab.json ADDED
@@ -0,0 +1 @@
+ {"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "A": 5, "E": 6, "O": 7, "S": 8, "R": 9, "I": 10, "D": 11, "N": 12, "M": 13, "U": 14, "T": 15, "C": 16, "L": 17, "P": 18, "V": 19, "H": 20, "G": 21, "Q": 22, "F": 23, "B": 24, "Ã": 25, "Ç": 26, "Z": 27, "Á": 28, "É": 29, "J": 30, "X": 31, "-": 32, "Í": 33, "Ó": 34, "Ê": 35, "À": 36, "Õ": 37, "Ú": 38, "Ô": 39, "Â": 40, "'": 41, "Y": 42, "Ü": 43, "K": 44, "W": 45, "Ò": 46}