shivam committed on
Commit 3799f00
1 Parent(s): e5253bd

Training in progress, step 500
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
.ipynb_checkpoints/added_tokens-checkpoint.json ADDED
@@ -0,0 +1 @@
+ {"<s>": 95, "</s>": 96}
.ipynb_checkpoints/run-checkpoint.sh ADDED
@@ -0,0 +1,34 @@
+ python3 run_speech_recognition_ctc.py \
+ --dataset_name="mozilla-foundation/common_voice_7_0" \
+ --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
+ --dataset_config_name="hi" \
+ --output_dir="./" \
+ --overwrite_output_dir \
+ --num_train_epochs="50" \
+ --per_device_train_batch_size="8" \
+ --per_device_eval_batch_size="8" \
+ --gradient_accumulation_steps="4" \
+ --learning_rate="7.5e-5" \
+ --warmup_steps="2000" \
+ --length_column_name="input_length" \
+ --evaluation_strategy="steps" \
+ --text_column_name="sentence" \
+ --chars_to_ignore , ? . ! \- \; \: \" “ % ‘ ” � — ’ … – \
+ --save_steps="500" \
+ --eval_steps="500" \
+ --logging_steps="100" \
+ --layerdrop="0.0" \
+ --activation_dropout="0.1" \
+ --save_total_limit="3" \
+ --freeze_feature_encoder \
+ --feat_proj_dropout="0.0" \
+ --mask_time_prob="0.75" \
+ --mask_time_length="10" \
+ --mask_feature_prob="0.25" \
+ --mask_feature_length="64" \
+ --gradient_checkpointing \
+ --use_auth_token \
+ --fp16 \
+ --group_by_length \
+ --do_train --do_eval \
+ --push_to_hub
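For reference, the flags above imply an effective train batch size of per_device_train_batch_size × gradient_accumulation_steps × number of devices; a minimal sketch of that arithmetic (the single-GPU count is an assumption, the launch command does not pin it down):

# Back-of-the-envelope effective batch size for the launch flags above.
per_device_train_batch_size = 8
gradient_accumulation_steps = 4
num_devices = 1  # assumption: a single GPU; not specified in run.sh

effective_batch_size = per_device_train_batch_size * gradient_accumulation_steps * num_devices
print(effective_batch_size)  # 32 samples per optimizer step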
.ipynb_checkpoints/run_speech_recognition_ctc-checkpoint.py ADDED
@@ -0,0 +1,741 @@
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+
16
+ """ Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""
17
+
18
+ import functools
19
+ import json
20
+ import logging
21
+ import os
22
+ import re
23
+ import sys
24
+ import warnings
25
+ from dataclasses import dataclass, field
26
+ from typing import Dict, List, Optional, Union
27
+
28
+ import datasets
29
+ import numpy as np
30
+ import torch
31
+ from datasets import DatasetDict, load_dataset, load_metric
32
+
33
+ import transformers
34
+ from transformers import (
35
+ AutoConfig,
36
+ AutoFeatureExtractor,
37
+ AutoModelForCTC,
38
+ AutoProcessor,
39
+ AutoTokenizer,
40
+ HfArgumentParser,
41
+ Trainer,
42
+ TrainingArguments,
43
+ Wav2Vec2Processor,
44
+ set_seed,
45
+ )
46
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
47
+ from transformers.utils import check_min_version
48
+ from transformers.utils.versions import require_version
49
+
50
+
51
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
52
+ check_min_version("4.16.0.dev0")
53
+
54
+ require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
55
+
56
+
57
+ logger = logging.getLogger(__name__)
58
+
59
+
60
+ def list_field(default=None, metadata=None):
61
+ return field(default_factory=lambda: default, metadata=metadata)
62
+
63
+
64
+ @dataclass
65
+ class ModelArguments:
66
+ """
67
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
68
+ """
69
+
70
+ model_name_or_path: str = field(
71
+ metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
72
+ )
73
+ tokenizer_name_or_path: Optional[str] = field(
74
+ default=None,
75
+ metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
76
+ )
77
+ cache_dir: Optional[str] = field(
78
+ default=None,
79
+ metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
80
+ )
81
+ freeze_feature_encoder: bool = field(
82
+ default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
83
+ )
84
+ attention_dropout: float = field(
85
+ default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
86
+ )
87
+ activation_dropout: float = field(
88
+ default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
89
+ )
90
+ feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
91
+ hidden_dropout: float = field(
92
+ default=0.0,
93
+ metadata={
94
+ "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
95
+ },
96
+ )
97
+ final_dropout: float = field(
98
+ default=0.0,
99
+ metadata={"help": "The dropout probability for the final projection layer."},
100
+ )
101
+ mask_time_prob: float = field(
102
+ default=0.05,
103
+ metadata={
104
+ "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector"
105
+ "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
106
+ "vectors will be masked along the time axis."
107
+ },
108
+ )
109
+ mask_time_length: int = field(
110
+ default=10,
111
+ metadata={"help": "Length of vector span to mask along the time axis."},
112
+ )
113
+ mask_feature_prob: float = field(
114
+ default=0.0,
115
+ metadata={
116
+ "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector"
117
+ "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the time axis."
118
+ },
119
+ )
120
+ mask_feature_length: int = field(
121
+ default=10,
122
+ metadata={"help": "Length of vector span to mask along the feature axis."},
123
+ )
124
+ layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
125
+ ctc_loss_reduction: Optional[str] = field(
126
+ default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
127
+ )
128
+
129
+
130
+ @dataclass
131
+ class DataTrainingArguments:
132
+ """
133
+ Arguments pertaining to what data we are going to input our model for training and eval.
134
+
135
+ Using `HfArgumentParser` we can turn this class
136
+ into argparse arguments to be able to specify them on
137
+ the command line.
138
+ """
139
+
140
+ dataset_name: str = field(
141
+ metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
142
+ )
143
+ dataset_config_name: str = field(
144
+ default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
145
+ )
146
+ train_split_name: str = field(
147
+ default="train+validation",
148
+ metadata={
149
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
150
+ },
151
+ )
152
+ eval_split_name: str = field(
153
+ default="test",
154
+ metadata={
155
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
156
+ },
157
+ )
158
+ audio_column_name: str = field(
159
+ default="audio",
160
+ metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
161
+ )
162
+ text_column_name: str = field(
163
+ default="text",
164
+ metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
165
+ )
166
+ overwrite_cache: bool = field(
167
+ default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
168
+ )
169
+ preprocessing_num_workers: Optional[int] = field(
170
+ default=None,
171
+ metadata={"help": "The number of processes to use for the preprocessing."},
172
+ )
173
+ max_train_samples: Optional[int] = field(
174
+ default=None,
175
+ metadata={
176
+ "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
177
+ "value if set."
178
+ },
179
+ )
180
+ max_eval_samples: Optional[int] = field(
181
+ default=None,
182
+ metadata={
183
+ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
184
+ "value if set."
185
+ },
186
+ )
187
+ chars_to_ignore: Optional[List[str]] = list_field(
188
+ default=None,
189
+ metadata={"help": "A list of characters to remove from the transcripts."},
190
+ )
191
+ eval_metrics: List[str] = list_field(
192
+ default=["wer"],
193
+ metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
194
+ )
195
+ max_duration_in_seconds: float = field(
196
+ default=20.0,
197
+ metadata={
198
+ "help": "Filter audio files that are longer than `max_duration_in_seconds` seconds to 'max_duration_in_seconds`"
199
+ },
200
+ )
201
+ min_duration_in_seconds: float = field(
202
+ default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
203
+ )
204
+ preprocessing_only: bool = field(
205
+ default=False,
206
+ metadata={
207
+ "help": "Whether to only do data preprocessing and skip training. "
208
+ "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
209
+ "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
210
+ "so that the cached datasets can consequently be loaded in distributed training"
211
+ },
212
+ )
213
+ use_auth_token: bool = field(
214
+ default=False,
215
+ metadata={
216
+ "help": "If :obj:`True`, will use the token generated when running"
217
+ ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
218
+ },
219
+ )
220
+ unk_token: str = field(
221
+ default="[UNK]",
222
+ metadata={"help": "The unk token for the tokenizer"},
223
+ )
224
+ pad_token: str = field(
225
+ default="[PAD]",
226
+ metadata={"help": "The padding token for the tokenizer"},
227
+ )
228
+ word_delimiter_token: str = field(
229
+ default="|",
230
+ metadata={"help": "The word delimiter token for the tokenizer"},
231
+ )
232
+ phoneme_language: Optional[str] = field(
233
+ default=None,
234
+ metadata={
235
+ "help": "The target language that should be used be"
236
+ " passed to the tokenizer for tokenization. Note that"
237
+ " this is only relevant if the model classifies the"
238
+ " input audio to a sequence of phoneme sequences."
239
+ },
240
+ )
241
+
242
+
243
+ @dataclass
244
+ class DataCollatorCTCWithPadding:
245
+ """
246
+ Data collator that will dynamically pad the inputs received.
247
+ Args:
248
+ processor (:class:`~transformers.AutoProcessor`)
249
+ The processor used for processing the data.
250
+ padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
251
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
252
+ among:
253
+ * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
254
+ sequence if provided).
255
+ * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
256
+ maximum acceptable input length for the model if that argument is not provided.
257
+ * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
258
+ different lengths).
259
+ max_length (:obj:`int`, `optional`):
260
+ Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
261
+ max_length_labels (:obj:`int`, `optional`):
262
+ Maximum length of the ``labels`` returned list and optionally padding length (see above).
263
+ pad_to_multiple_of (:obj:`int`, `optional`):
264
+ If set will pad the sequence to a multiple of the provided value.
265
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
266
+ 7.5 (Volta).
267
+ """
268
+
269
+ processor: AutoProcessor
270
+ padding: Union[bool, str] = "longest"
271
+ pad_to_multiple_of: Optional[int] = None
272
+ pad_to_multiple_of_labels: Optional[int] = None
273
+
274
+ def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
275
+ # split inputs and labels since they have to be of different lengths and need
276
+ # different padding methods
277
+ input_features = [{"input_values": feature["input_values"]} for feature in features]
278
+ label_features = [{"input_ids": feature["labels"]} for feature in features]
279
+
280
+ batch = self.processor.pad(
281
+ input_features,
282
+ padding=self.padding,
283
+ pad_to_multiple_of=self.pad_to_multiple_of,
284
+ return_tensors="pt",
285
+ )
286
+
287
+ with self.processor.as_target_processor():
288
+ labels_batch = self.processor.pad(
289
+ label_features,
290
+ padding=self.padding,
291
+ pad_to_multiple_of=self.pad_to_multiple_of_labels,
292
+ return_tensors="pt",
293
+ )
294
+
295
+ # replace padding with -100 to ignore loss correctly
296
+ labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
297
+
298
+ batch["labels"] = labels
299
+
300
+ return batch
301
+
302
+
303
+ def create_vocabulary_from_data(
304
+ datasets: DatasetDict,
305
+ word_delimiter_token: Optional[str] = None,
306
+ unk_token: Optional[str] = None,
307
+ pad_token: Optional[str] = None,
308
+ ):
309
+ # Given training and test labels create vocabulary
310
+ def extract_all_chars(batch):
311
+ all_text = " ".join(batch["target_text"])
312
+ vocab = list(set(all_text))
313
+ return {"vocab": [vocab], "all_text": [all_text]}
314
+
315
+ vocabs = datasets.map(
316
+ extract_all_chars,
317
+ batched=True,
318
+ batch_size=-1,
319
+ keep_in_memory=True,
320
+ remove_columns=datasets["train"].column_names,
321
+ )
322
+
323
+ # take union of all unique characters in each dataset
324
+ vocab_set = functools.reduce(
325
+ lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
326
+ )
327
+
328
+ # remove | if it's already present in the dataset
329
+ vocab_list = list(vocab_set)
330
+ vocab_list.remove("|")
331
+
332
+ vocab_dict = {v: k for k, v in enumerate(sorted(vocab_list))}
333
+
334
+ # replace white space with delimiter token
335
+ if word_delimiter_token is not None:
336
+ vocab_dict[word_delimiter_token] = vocab_dict[" "]
337
+ del vocab_dict[" "]
338
+
339
+ # add unk and pad token
340
+ if unk_token is not None:
341
+ vocab_dict[unk_token] = len(vocab_dict)
342
+
343
+ if pad_token is not None:
344
+ vocab_dict[pad_token] = len(vocab_dict)
345
+
346
+ return vocab_dict
347
+
348
+
349
+ def main():
350
+ # See all possible arguments in src/transformers/training_args.py
351
+ # or by passing the --help flag to this script.
352
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
353
+
354
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
355
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
356
+ # If we pass only one argument to the script and it's the path to a json file,
357
+ # let's parse it to get our arguments.
358
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
359
+ else:
360
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
361
+
362
+ # Detecting last checkpoint.
363
+ last_checkpoint = None
364
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
365
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
366
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
367
+ raise ValueError(
368
+ f"Output directory ({training_args.output_dir}) already exists and is not empty. "
369
+ "Use --overwrite_output_dir to overcome."
370
+ )
371
+ elif last_checkpoint is not None:
372
+ logger.info(
373
+ f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
374
+ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
375
+ )
376
+
377
+ # Setup logging
378
+ logging.basicConfig(
379
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
380
+ datefmt="%m/%d/%Y %H:%M:%S",
381
+ handlers=[logging.StreamHandler(sys.stdout)],
382
+ )
383
+ logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
384
+
385
+ # Log on each process the small summary:
386
+ logger.warning(
387
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
388
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
389
+ )
390
+ # Set the verbosity to info of the Transformers logger (on main process only):
391
+ if is_main_process(training_args.local_rank):
392
+ transformers.utils.logging.set_verbosity_info()
393
+ logger.info("Training/evaluation parameters %s", training_args)
394
+
395
+ # Set seed before initializing model.
396
+ set_seed(training_args.seed)
397
+
398
+ # 1. First, let's load the dataset
399
+ raw_datasets = DatasetDict()
400
+
401
+ if training_args.do_train:
402
+ raw_datasets["train"] = load_dataset(
403
+ data_args.dataset_name,
404
+ data_args.dataset_config_name,
405
+ split=data_args.train_split_name,
406
+ use_auth_token=data_args.use_auth_token,
407
+ )
408
+
409
+ if data_args.audio_column_name not in raw_datasets["train"].column_names:
410
+ raise ValueError(
411
+ f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
412
+ "Make sure to set `--audio_column_name` to the correct audio column - one of "
413
+ f"{', '.join(raw_datasets['train'].column_names)}."
414
+ )
415
+
416
+ if data_args.text_column_name not in raw_datasets["train"].column_names:
417
+ raise ValueError(
418
+ f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
419
+ "Make sure to set `--text_column_name` to the correct text column - one of "
420
+ f"{', '.join(raw_datasets['train'].column_names)}."
421
+ )
422
+
423
+ if data_args.max_train_samples is not None:
424
+ raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
425
+
426
+ if training_args.do_eval:
427
+ raw_datasets["eval"] = load_dataset(
428
+ data_args.dataset_name,
429
+ data_args.dataset_config_name,
430
+ split=data_args.eval_split_name,
431
+ use_auth_token=data_args.use_auth_token,
432
+ )
433
+
434
+ if data_args.max_eval_samples is not None:
435
+ raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
436
+
437
+ # 2. We remove some special characters from the datasets
438
+ # that make training complicated and do not help in transcribing the speech
439
+ # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
440
+ # that could be easily picked up by the model
441
+ chars_to_ignore_regex = (
442
+ f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
443
+ )
444
+ text_column_name = data_args.text_column_name
445
+
446
+ def remove_special_characters(batch):
447
+ if chars_to_ignore_regex is not None:
448
+ batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
449
+ else:
450
+ batch["target_text"] = batch[text_column_name].lower() + " "
451
+ return batch
452
+
453
+ with training_args.main_process_first(desc="dataset map special characters removal"):
454
+ raw_datasets = raw_datasets.map(
455
+ remove_special_characters,
456
+ remove_columns=[text_column_name],
457
+ desc="remove special characters from datasets",
458
+ )
459
+
460
+ # save special tokens for tokenizer
461
+ word_delimiter_token = data_args.word_delimiter_token
462
+ unk_token = data_args.unk_token
463
+ pad_token = data_args.pad_token
464
+
465
+ # 3. Next, let's load the config as we might need it to create
466
+ # the tokenizer
467
+ # load config
468
+ config = AutoConfig.from_pretrained(
469
+ model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
470
+ )
471
+
472
+ # 4. Next, if no tokenizer file is defined,
473
+ # we create the vocabulary of the model by extracting all unique characters from
474
+ # the training and evaluation datasets
475
+ # We need to make sure that only first rank saves vocabulary
476
+ # make sure all processes wait until vocab is created
477
+ tokenizer_name_or_path = model_args.tokenizer_name_or_path
478
+ tokenizer_kwargs = {}
479
+ if tokenizer_name_or_path is None:
480
+ # save vocab in training output dir
481
+ tokenizer_name_or_path = training_args.output_dir
482
+
483
+ vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
484
+
485
+ with training_args.main_process_first():
486
+ if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
487
+ os.remove(vocab_file)
488
+
489
+ with training_args.main_process_first(desc="dataset map vocabulary creation"):
490
+ if not os.path.isfile(vocab_file):
491
+ os.makedirs(tokenizer_name_or_path, exist_ok=True)
492
+ vocab_dict = create_vocabulary_from_data(
493
+ raw_datasets,
494
+ word_delimiter_token=word_delimiter_token,
495
+ unk_token=unk_token,
496
+ pad_token=pad_token,
497
+ )
498
+
499
+ # save vocab dict to be loaded into tokenizer
500
+ with open(vocab_file, "w") as file:
501
+ json.dump(vocab_dict, file)
502
+
503
+ # if tokenizer has just been created
504
+ # it is defined by `tokenizer_class` if present in config else by `model_type`
505
+ tokenizer_kwargs = {
506
+ "config": config if config.tokenizer_class is not None else None,
507
+ "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
508
+ "unk_token": unk_token,
509
+ "pad_token": pad_token,
510
+ "word_delimiter_token": word_delimiter_token,
511
+ }
512
+
513
+ # 5. Now we can instantiate the feature extractor, tokenizer and model
514
+ # Note for distributed training, the .from_pretrained methods guarantee that only
515
+ # one local process can concurrently download model & vocab.
516
+
517
+ # load feature_extractor and tokenizer
518
+ tokenizer = AutoTokenizer.from_pretrained(
519
+ tokenizer_name_or_path,
520
+ use_auth_token=data_args.use_auth_token,
521
+ **tokenizer_kwargs,
522
+ )
523
+ feature_extractor = AutoFeatureExtractor.from_pretrained(
524
+ model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
525
+ )
526
+
527
+ # adapt config
528
+ config.update(
529
+ {
530
+ "feat_proj_dropout": model_args.feat_proj_dropout,
531
+ "attention_dropout": model_args.attention_dropout,
532
+ "hidden_dropout": model_args.hidden_dropout,
533
+ "final_dropout": model_args.final_dropout,
534
+ "mask_time_prob": model_args.mask_time_prob,
535
+ "mask_time_length": model_args.mask_time_length,
536
+ "mask_feature_prob": model_args.mask_feature_prob,
537
+ "mask_feature_length": model_args.mask_feature_length,
538
+ "gradient_checkpointing": training_args.gradient_checkpointing,
539
+ "layerdrop": model_args.layerdrop,
540
+ "ctc_loss_reduction": model_args.ctc_loss_reduction,
541
+ "pad_token_id": tokenizer.pad_token_id,
542
+ "vocab_size": len(tokenizer),
543
+ "activation_dropout": model_args.activation_dropout,
544
+ }
545
+ )
546
+
547
+ # create model
548
+ model = AutoModelForCTC.from_pretrained(
549
+ model_args.model_name_or_path,
550
+ cache_dir=model_args.cache_dir,
551
+ config=config,
552
+ use_auth_token=data_args.use_auth_token,
553
+ )
554
+
555
+ # freeze encoder
556
+ if model_args.freeze_feature_encoder:
557
+ model.freeze_feature_encoder()
558
+
559
+ # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
560
+ # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
561
+ # so that we just need to set the correct target sampling rate and normalize the input
562
+ # via the `feature_extractor`
563
+
564
+ # make sure that dataset decodes audio with correct sampling rate
565
+ dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
566
+ if dataset_sampling_rate != feature_extractor.sampling_rate:
567
+ raw_datasets = raw_datasets.cast_column(
568
+ data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
569
+ )
570
+
571
+ # derive max & min input length for sample rate & max duration
572
+ max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
573
+ min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
574
+ audio_column_name = data_args.audio_column_name
575
+ num_workers = data_args.preprocessing_num_workers
576
+
577
+ # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
578
+ phoneme_language = data_args.phoneme_language
579
+
580
+ # Preprocessing the datasets.
581
+ # We need to read the audio files as arrays and tokenize the targets.
582
+ def prepare_dataset(batch):
583
+ # load audio
584
+ sample = batch[audio_column_name]
585
+
586
+ inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
587
+ batch["input_values"] = inputs.input_values[0]
588
+ batch["input_length"] = len(batch["input_values"])
589
+
590
+ # encode targets
591
+ additional_kwargs = {}
592
+ if phoneme_language is not None:
593
+ additional_kwargs["phonemizer_lang"] = phoneme_language
594
+
595
+ batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
596
+ return batch
597
+
598
+ with training_args.main_process_first(desc="dataset map preprocessing"):
599
+ vectorized_datasets = raw_datasets.map(
600
+ prepare_dataset,
601
+ remove_columns=next(iter(raw_datasets.values())).column_names,
602
+ num_proc=num_workers,
603
+ desc="preprocess datasets",
604
+ )
605
+
606
+ def is_audio_in_length_range(length):
607
+ return length > min_input_length and length < max_input_length
608
+
609
+ # filter data that is shorter than min_input_length
610
+ vectorized_datasets = vectorized_datasets.filter(
611
+ is_audio_in_length_range,
612
+ num_proc=num_workers,
613
+ input_columns=["input_length"],
614
+ )
615
+
616
+ # 7. Next, we can prepare the training.
617
+ # Let's use word error rate (WER) as our evaluation metric,
618
+ # instantiate a data collator and the trainer
619
+
620
+ # Define evaluation metrics during training, *i.e.* word error rate, character error rate
621
+ eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}
622
+
623
+ # for large datasets it is advised to run the preprocessing on a
624
+ # single machine first with ``args.preprocessing_only`` since there will most likely
625
+ # be a timeout when running the script in distributed mode.
626
+ # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
627
+ # cached dataset
628
+ if data_args.preprocessing_only:
629
+ logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
630
+ return
631
+
632
+ def compute_metrics(pred):
633
+ pred_logits = pred.predictions
634
+ pred_ids = np.argmax(pred_logits, axis=-1)
635
+
636
+ pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
637
+
638
+ pred_str = tokenizer.batch_decode(pred_ids)
639
+ # we do not want to group tokens when computing the metrics
640
+ label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
641
+
642
+ metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}
643
+
644
+ return metrics
645
+
646
+ # Now save everything to be able to create a single processor later
647
+ if is_main_process(training_args.local_rank):
648
+ # save feature extractor, tokenizer and config
649
+ feature_extractor.save_pretrained(training_args.output_dir)
650
+ tokenizer.save_pretrained(training_args.output_dir)
651
+ config.save_pretrained(training_args.output_dir)
652
+
653
+ try:
654
+ processor = AutoProcessor.from_pretrained(training_args.output_dir)
655
+ except (OSError, KeyError):
656
+ warnings.warn(
657
+ "Loading a processor from a feature extractor config that does not"
658
+ " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
659
+ " attribute to your `preprocessor_config.json` file to suppress this warning: "
660
+ " `'processor_class': 'Wav2Vec2Processor'`",
661
+ FutureWarning,
662
+ )
663
+ processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)
664
+
665
+ # Instantiate custom data collator
666
+ data_collator = DataCollatorCTCWithPadding(processor=processor)
667
+
668
+ # Initialize Trainer
669
+ trainer = Trainer(
670
+ model=model,
671
+ data_collator=data_collator,
672
+ args=training_args,
673
+ compute_metrics=compute_metrics,
674
+ train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
675
+ eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
676
+ tokenizer=feature_extractor,
677
+ )
678
+
679
+ # 8. Finally, we can start training
680
+
681
+ # Training
682
+ if training_args.do_train:
683
+
684
+ # use last checkpoint if exist
685
+ if last_checkpoint is not None:
686
+ checkpoint = last_checkpoint
687
+ elif os.path.isdir(model_args.model_name_or_path):
688
+ checkpoint = model_args.model_name_or_path
689
+ else:
690
+ checkpoint = None
691
+
692
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
693
+ trainer.save_model()
694
+
695
+ metrics = train_result.metrics
696
+ max_train_samples = (
697
+ data_args.max_train_samples
698
+ if data_args.max_train_samples is not None
699
+ else len(vectorized_datasets["train"])
700
+ )
701
+ metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
702
+
703
+ trainer.log_metrics("train", metrics)
704
+ trainer.save_metrics("train", metrics)
705
+ trainer.save_state()
706
+
707
+ # Evaluation
708
+ results = {}
709
+ if training_args.do_eval:
710
+ logger.info("*** Evaluate ***")
711
+ metrics = trainer.evaluate()
712
+ max_eval_samples = (
713
+ data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
714
+ )
715
+ metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
716
+
717
+ trainer.log_metrics("eval", metrics)
718
+ trainer.save_metrics("eval", metrics)
719
+
720
+ # Write model card and (optionally) push to hub
721
+ config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
722
+ kwargs = {
723
+ "finetuned_from": model_args.model_name_or_path,
724
+ "tasks": "speech-recognition",
725
+ "tags": ["automatic-speech-recognition", data_args.dataset_name],
726
+ "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
727
+ "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
728
+ }
729
+ if "common_voice" in data_args.dataset_name:
730
+ kwargs["language"] = config_name
731
+
732
+ if training_args.push_to_hub:
733
+ trainer.push_to_hub(**kwargs)
734
+ else:
735
+ trainer.create_model_card(**kwargs)
736
+
737
+ return results
738
+
739
+
740
+ if __name__ == "__main__":
741
+ main()
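The label handling in DataCollatorCTCWithPadding above (pad the label ids, then mask padded positions with -100 so the loss ignores them) can be seen in isolation with a toy tensor; this sketch uses made-up label ids and an attention mask rather than real processor output:

import torch

# Two label sequences padded to length 5 with the pad id 94 (as in vocab.json).
padded_label_ids = torch.tensor([[12, 7, 30, 94, 94],
                                 [5, 94, 94, 94, 94]])
attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 0, 0, 0, 0]])

# Same masking step as in the collator: padded positions become -100.
labels = padded_label_ids.masked_fill(attention_mask.ne(1), -100)
print(labels)
# tensor([[  12,    7,   30, -100, -100],
#         [   5, -100, -100, -100, -100]])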
.ipynb_checkpoints/vocab-checkpoint.json ADDED
@@ -0,0 +1 @@
+ {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "g": 7, "h": 8, "i": 9, "j": 10, "k": 11, "l": 12, "m": 13, "n": 14, "o": 15, "p": 16, "r": 17, "s": 18, "t": 19, "u": 20, "v": 21, "w": 22, "x": 23, "y": 24, "z": 25, "ँ": 26, "ं": 27, "ः": 28, "अ": 29, "आ": 30, "इ": 31, "ई": 32, "उ": 33, "ऊ": 34, "ऋ": 35, "ए": 36, "ऐ": 37, "ऑ": 38, "ओ": 39, "औ": 40, "क": 41, "ख": 42, "ग": 43, "घ": 44, "च": 45, "छ": 46, "ज": 47, "झ": 48, "ञ": 49, "ट": 50, "ठ": 51, "ड": 52, "ढ": 53, "ण": 54, "त": 55, "थ": 56, "द": 57, "ध": 58, "न": 59, "प": 60, "फ": 61, "ब": 62, "भ": 63, "म": 64, "य": 65, "र": 66, "ल": 67, "व": 68, "श": 69, "ष": 70, "स": 71, "ह": 72, "़": 73, "ा": 74, "ि": 75, "ी": 76, "ु": 77, "ू": 78, "ृ": 79, "ॅ": 80, "े": 81, "ै": 82, "ॉ": 83, "ो": 84, "ौ": 85, "्": 86, "क़": 87, "ग़": 88, "ज़": 89, "ड़": 90, "ढ़": 91, "।": 92, "|": 0, "[UNK]": 93, "[PAD]": 94}
added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<s>": 95, "</s>": 96}
config.json ADDED
@@ -0,0 +1,107 @@
+ {
+ "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+ "activation_dropout": 0.1,
+ "adapter_kernel_size": 3,
+ "adapter_stride": 2,
+ "add_adapter": false,
+ "apply_spec_augment": true,
+ "architectures": [
+ "Wav2Vec2ForCTC"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "classifier_proj_size": 256,
+ "codevector_dim": 768,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": true,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "mean",
+ "ctc_zero_infinity": false,
+ "diversity_loss_weight": 0.1,
+ "do_stable_layer_norm": true,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_dropout": 0.0,
+ "feat_extract_norm": "layer",
+ "feat_proj_dropout": 0.0,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.0,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.0,
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.0,
+ "mask_feature_length": 64,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.25,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_prob": 0.75,
+ "model_type": "wav2vec2",
+ "num_adapter_layers": 3,
+ "num_attention_heads": 16,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 24,
+ "num_negatives": 100,
+ "output_hidden_size": 1024,
+ "pad_token_id": 94,
+ "proj_codevector_dim": 768,
+ "tdnn_dilation": [
+ 1,
+ 2,
+ 3,
+ 1,
+ 1
+ ],
+ "tdnn_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 1500
+ ],
+ "tdnn_kernel": [
+ 5,
+ 3,
+ 3,
+ 1,
+ 1
+ ],
+ "torch_dtype": "float32",
+ "transformers_version": "4.16.0.dev0",
+ "use_weighted_layer_sum": false,
+ "vocab_size": 97,
+ "xvector_output_dim": 512
+ }
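The SpecAugment-style values above (mask_time_prob 0.75, mask_time_length 10) are aggressive; per the help text in run_speech_recognition_ctc.py, roughly mask_time_prob * sequence_length // mask_time_length mask-span starts are drawn along the time axis. A rough sketch for a hypothetical 5-second clip (about 250 encoder frames at wav2vec 2.0's roughly 20 ms frame rate; both numbers are assumptions for illustration):

seq_len_frames = 250            # assumed ~5 s utterance after the convolutional feature encoder
mask_time_prob, mask_time_length = 0.75, 10

approx_span_starts = int(mask_time_prob * seq_len_frames // mask_time_length)
print(approx_span_starts)       # ~18 time-mask spans of up to 10 frames each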
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "do_normalize": true,
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0,
+ "return_attention_mask": true,
+ "sampling_rate": 16000
+ }
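The values above describe a Wav2Vec2FeatureExtractor that expects mono 16 kHz input, normalizes it to zero mean and unit variance, and returns an attention mask when padding; a minimal usage sketch built directly from these values (synthetic audio, nothing downloaded):

import numpy as np
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor(
    feature_size=1,
    sampling_rate=16000,
    padding_value=0.0,
    do_normalize=True,
    return_attention_mask=True,
)

speech = np.random.randn(16000).astype(np.float32)  # 1 s of stand-in 16 kHz audio
inputs = feature_extractor(speech, sampling_rate=16000, return_tensors="np")
print(inputs.input_values.shape)  # (1, 16000), normalized to zero mean / unit variance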
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cec4c3e7444113f1603ddfbbe386041a843f027f27baddcbd7382acec7f1253
+ size 1262321393
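This is a Git LFS pointer; the roughly 1.26 GB of weights live out of band and are fetched by git lfs pull or the Hub client. Once downloaded, the file can be checked against the oid and size recorded above; a small sketch, assuming pytorch_model.bin sits in the working directory:

import hashlib
import os

path = "pytorch_model.bin"  # assumed local copy of the LFS object
expected_oid = "0cec4c3e7444113f1603ddfbbe386041a843f027f27baddcbd7382acec7f1253"
expected_size = 1262321393

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

print(os.path.getsize(path) == expected_size)
print(sha256.hexdigest() == expected_oid)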
run.sh ADDED
@@ -0,0 +1,34 @@
+ python3 run_speech_recognition_ctc.py \
+ --dataset_name="mozilla-foundation/common_voice_7_0" \
+ --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
+ --dataset_config_name="hi" \
+ --output_dir="./" \
+ --overwrite_output_dir \
+ --num_train_epochs="50" \
+ --per_device_train_batch_size="8" \
+ --per_device_eval_batch_size="8" \
+ --gradient_accumulation_steps="4" \
+ --learning_rate="7.5e-5" \
+ --warmup_steps="2000" \
+ --length_column_name="input_length" \
+ --evaluation_strategy="steps" \
+ --text_column_name="sentence" \
+ --chars_to_ignore , ? . ! \- \; \: \" “ % ‘ ” � — ’ … – \
+ --save_steps="500" \
+ --eval_steps="500" \
+ --logging_steps="100" \
+ --layerdrop="0.0" \
+ --activation_dropout="0.1" \
+ --save_total_limit="3" \
+ --freeze_feature_encoder \
+ --feat_proj_dropout="0.0" \
+ --mask_time_prob="0.75" \
+ --mask_time_length="10" \
+ --mask_feature_prob="0.25" \
+ --mask_feature_length="64" \
+ --gradient_checkpointing \
+ --use_auth_token \
+ --fp16 \
+ --group_by_length \
+ --do_train --do_eval \
+ --push_to_hub
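With --save_steps=500 and --push_to_hub, this commit corresponds to the first saved checkpoint. A hedged sketch of loading the pushed model back for greedy CTC decoding; the repo id is a placeholder (substitute this repository's actual id), and the processor load assumes the tokenizer and feature-extractor files saved by the script are present in the repo:

import torch
from transformers import AutoModelForCTC, AutoProcessor

repo_id = "<this-repo-id>"  # placeholder for this repository on the Hub or a local clone

processor = AutoProcessor.from_pretrained(repo_id)
model = AutoModelForCTC.from_pretrained(repo_id)
model.eval()

# `speech` should be a 16 kHz float waveform; random noise here only keeps the sketch runnable.
speech = torch.randn(16000)
inputs = processor(speech.numpy(), sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(inputs.input_values).logits
pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids))  # greedy CTC transcription (gibberish for random input)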
run_speech_recognition_ctc.py ADDED
@@ -0,0 +1,741 @@
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+
16
+ """ Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""
17
+
18
+ import functools
19
+ import json
20
+ import logging
21
+ import os
22
+ import re
23
+ import sys
24
+ import warnings
25
+ from dataclasses import dataclass, field
26
+ from typing import Dict, List, Optional, Union
27
+
28
+ import datasets
29
+ import numpy as np
30
+ import torch
31
+ from datasets import DatasetDict, load_dataset, load_metric
32
+
33
+ import transformers
34
+ from transformers import (
35
+ AutoConfig,
36
+ AutoFeatureExtractor,
37
+ AutoModelForCTC,
38
+ AutoProcessor,
39
+ AutoTokenizer,
40
+ HfArgumentParser,
41
+ Trainer,
42
+ TrainingArguments,
43
+ Wav2Vec2Processor,
44
+ set_seed,
45
+ )
46
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
47
+ from transformers.utils import check_min_version
48
+ from transformers.utils.versions import require_version
49
+
50
+
51
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
52
+ check_min_version("4.16.0.dev0")
53
+
54
+ require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
55
+
56
+
57
+ logger = logging.getLogger(__name__)
58
+
59
+
60
+ def list_field(default=None, metadata=None):
61
+ return field(default_factory=lambda: default, metadata=metadata)
62
+
63
+
64
+ @dataclass
65
+ class ModelArguments:
66
+ """
67
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
68
+ """
69
+
70
+ model_name_or_path: str = field(
71
+ metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
72
+ )
73
+ tokenizer_name_or_path: Optional[str] = field(
74
+ default=None,
75
+ metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
76
+ )
77
+ cache_dir: Optional[str] = field(
78
+ default=None,
79
+ metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
80
+ )
81
+ freeze_feature_encoder: bool = field(
82
+ default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
83
+ )
84
+ attention_dropout: float = field(
85
+ default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
86
+ )
87
+ activation_dropout: float = field(
88
+ default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
89
+ )
90
+ feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
91
+ hidden_dropout: float = field(
92
+ default=0.0,
93
+ metadata={
94
+ "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
95
+ },
96
+ )
97
+ final_dropout: float = field(
98
+ default=0.0,
99
+ metadata={"help": "The dropout probability for the final projection layer."},
100
+ )
101
+ mask_time_prob: float = field(
102
+ default=0.05,
103
+ metadata={
104
+ "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector"
105
+ "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
106
+ "vectors will be masked along the time axis."
107
+ },
108
+ )
109
+ mask_time_length: int = field(
110
+ default=10,
111
+ metadata={"help": "Length of vector span to mask along the time axis."},
112
+ )
113
+ mask_feature_prob: float = field(
114
+ default=0.0,
115
+ metadata={
116
+ "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector"
117
+ "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the time axis."
118
+ },
119
+ )
120
+ mask_feature_length: int = field(
121
+ default=10,
122
+ metadata={"help": "Length of vector span to mask along the feature axis."},
123
+ )
124
+ layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
125
+ ctc_loss_reduction: Optional[str] = field(
126
+ default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
127
+ )
128
+
129
+
130
+ @dataclass
131
+ class DataTrainingArguments:
132
+ """
133
+ Arguments pertaining to what data we are going to input our model for training and eval.
134
+
135
+ Using `HfArgumentParser` we can turn this class
136
+ into argparse arguments to be able to specify them on
137
+ the command line.
138
+ """
139
+
140
+ dataset_name: str = field(
141
+ metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
142
+ )
143
+ dataset_config_name: str = field(
144
+ default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
145
+ )
146
+ train_split_name: str = field(
147
+ default="train+validation",
148
+ metadata={
149
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
150
+ },
151
+ )
152
+ eval_split_name: str = field(
153
+ default="test",
154
+ metadata={
155
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
156
+ },
157
+ )
158
+ audio_column_name: str = field(
159
+ default="audio",
160
+ metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
161
+ )
162
+ text_column_name: str = field(
163
+ default="text",
164
+ metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
165
+ )
166
+ overwrite_cache: bool = field(
167
+ default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
168
+ )
169
+ preprocessing_num_workers: Optional[int] = field(
170
+ default=None,
171
+ metadata={"help": "The number of processes to use for the preprocessing."},
172
+ )
173
+ max_train_samples: Optional[int] = field(
174
+ default=None,
175
+ metadata={
176
+ "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
177
+ "value if set."
178
+ },
179
+ )
180
+ max_eval_samples: Optional[int] = field(
181
+ default=None,
182
+ metadata={
183
+ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
184
+ "value if set."
185
+ },
186
+ )
187
+ chars_to_ignore: Optional[List[str]] = list_field(
188
+ default=None,
189
+ metadata={"help": "A list of characters to remove from the transcripts."},
190
+ )
191
+ eval_metrics: List[str] = list_field(
192
+ default=["wer"],
193
+ metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
194
+ )
195
+ max_duration_in_seconds: float = field(
196
+ default=20.0,
197
+ metadata={
198
+ "help": "Filter audio files that are longer than `max_duration_in_seconds` seconds to 'max_duration_in_seconds`"
199
+ },
200
+ )
201
+ min_duration_in_seconds: float = field(
202
+ default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
203
+ )
204
+ preprocessing_only: bool = field(
205
+ default=False,
206
+ metadata={
207
+ "help": "Whether to only do data preprocessing and skip training. "
208
+ "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
209
+ "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
210
+ "so that the cached datasets can consequently be loaded in distributed training"
211
+ },
212
+ )
213
+ use_auth_token: bool = field(
214
+ default=False,
215
+ metadata={
216
+ "help": "If :obj:`True`, will use the token generated when running"
217
+ ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
218
+ },
219
+ )
220
+ unk_token: str = field(
221
+ default="[UNK]",
222
+ metadata={"help": "The unk token for the tokenizer"},
223
+ )
224
+ pad_token: str = field(
225
+ default="[PAD]",
226
+ metadata={"help": "The padding token for the tokenizer"},
227
+ )
228
+ word_delimiter_token: str = field(
229
+ default="|",
230
+ metadata={"help": "The word delimiter token for the tokenizer"},
231
+ )
232
+ phoneme_language: Optional[str] = field(
233
+ default=None,
234
+ metadata={
235
+ "help": "The target language that should be used be"
236
+ " passed to the tokenizer for tokenization. Note that"
237
+ " this is only relevant if the model classifies the"
238
+ " input audio to a sequence of phoneme sequences."
239
+ },
240
+ )
241
+
242
+
243
+ @dataclass
244
+ class DataCollatorCTCWithPadding:
245
+ """
246
+ Data collator that will dynamically pad the inputs received.
247
+ Args:
248
+ processor (:class:`~transformers.AutoProcessor`)
249
+ The processor used for processing the data.
250
+ padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
251
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
252
+ among:
253
+ * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
254
+ sequence if provided).
255
+ * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
256
+ maximum acceptable input length for the model if that argument is not provided.
257
+ * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
258
+ different lengths).
259
+ max_length (:obj:`int`, `optional`):
260
+ Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
261
+ max_length_labels (:obj:`int`, `optional`):
262
+ Maximum length of the ``labels`` returned list and optionally padding length (see above).
263
+ pad_to_multiple_of (:obj:`int`, `optional`):
264
+ If set will pad the sequence to a multiple of the provided value.
265
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
266
+ 7.5 (Volta).
267
+ """
268
+
269
+ processor: AutoProcessor
270
+ padding: Union[bool, str] = "longest"
271
+ pad_to_multiple_of: Optional[int] = None
272
+ pad_to_multiple_of_labels: Optional[int] = None
273
+
274
+ def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
275
+ # split inputs and labels since they have to be of different lengths and need
276
+ # different padding methods
277
+ input_features = [{"input_values": feature["input_values"]} for feature in features]
278
+ label_features = [{"input_ids": feature["labels"]} for feature in features]
279
+
280
+ batch = self.processor.pad(
281
+ input_features,
282
+ padding=self.padding,
283
+ pad_to_multiple_of=self.pad_to_multiple_of,
284
+ return_tensors="pt",
285
+ )
286
+
287
+ with self.processor.as_target_processor():
288
+ labels_batch = self.processor.pad(
289
+ label_features,
290
+ padding=self.padding,
291
+ pad_to_multiple_of=self.pad_to_multiple_of_labels,
292
+ return_tensors="pt",
293
+ )
294
+
295
+ # replace padding with -100 to ignore loss correctly
296
+ labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
297
+
298
+ batch["labels"] = labels
299
+
300
+ return batch
301
+
302
+
303
+ def create_vocabulary_from_data(
304
+ datasets: DatasetDict,
305
+ word_delimiter_token: Optional[str] = None,
306
+ unk_token: Optional[str] = None,
307
+ pad_token: Optional[str] = None,
308
+ ):
309
+ # Given training and test labels create vocabulary
310
+ def extract_all_chars(batch):
311
+ all_text = " ".join(batch["target_text"])
312
+ vocab = list(set(all_text))
313
+ return {"vocab": [vocab], "all_text": [all_text]}
314
+
315
+ vocabs = datasets.map(
316
+ extract_all_chars,
317
+ batched=True,
318
+ batch_size=-1,
319
+ keep_in_memory=True,
320
+ remove_columns=datasets["train"].column_names,
321
+ )
322
+
323
+ # take union of all unique characters in each dataset
324
+ vocab_set = functools.reduce(
325
+ lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
326
+ )
327
+
328
+ # remove | if it's already present in the dataset
329
+ vocab_list = list(vocab_set)
330
+ vocab_list.remove("|")
331
+
332
+ vocab_dict = {v: k for k, v in enumerate(sorted(vocab_list))}
333
+
334
+ # replace white space with delimiter token
335
+ if word_delimiter_token is not None:
336
+ vocab_dict[word_delimiter_token] = vocab_dict[" "]
337
+ del vocab_dict[" "]
338
+
339
+ # add unk and pad token
340
+ if unk_token is not None:
341
+ vocab_dict[unk_token] = len(vocab_dict)
342
+
343
+ if pad_token is not None:
344
+ vocab_dict[pad_token] = len(vocab_dict)
345
+
346
+ return vocab_dict
347
+
348
+
349
+ def main():
350
+ # See all possible arguments in src/transformers/training_args.py
351
+ # or by passing the --help flag to this script.
352
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
353
+
354
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
355
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
356
+ # If we pass only one argument to the script and it's the path to a json file,
357
+ # let's parse it to get our arguments.
358
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
359
+ else:
360
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
361
+
362
+ # Detecting last checkpoint.
363
+ last_checkpoint = None
364
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
365
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
366
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
367
+ raise ValueError(
368
+ f"Output directory ({training_args.output_dir}) already exists and is not empty. "
369
+ "Use --overwrite_output_dir to overcome."
370
+ )
371
+ elif last_checkpoint is not None:
372
+ logger.info(
373
+ f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
374
+ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
375
+ )
376
+
377
+ # Setup logging
378
+ logging.basicConfig(
379
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
380
+ datefmt="%m/%d/%Y %H:%M:%S",
381
+ handlers=[logging.StreamHandler(sys.stdout)],
382
+ )
383
+ logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
384
+
385
+ # Log on each process the small summary:
386
+ logger.warning(
387
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
388
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
389
+ )
390
+ # Set the verbosity to info of the Transformers logger (on main process only):
391
+ if is_main_process(training_args.local_rank):
392
+ transformers.utils.logging.set_verbosity_info()
393
+ logger.info("Training/evaluation parameters %s", training_args)
394
+
395
+ # Set seed before initializing model.
396
+ set_seed(training_args.seed)
397
+
398
+ # 1. First, let's load the dataset
399
+ raw_datasets = DatasetDict()
400
+
401
+ if training_args.do_train:
402
+ raw_datasets["train"] = load_dataset(
403
+ data_args.dataset_name,
404
+ data_args.dataset_config_name,
405
+ split=data_args.train_split_name,
406
+ use_auth_token=data_args.use_auth_token,
407
+ )
408
+
409
+ if data_args.audio_column_name not in raw_datasets["train"].column_names:
410
+ raise ValueError(
411
+ f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
412
+ "Make sure to set `--audio_column_name` to the correct audio column - one of "
413
+ f"{', '.join(raw_datasets['train'].column_names)}."
414
+ )
415
+
416
+ if data_args.text_column_name not in raw_datasets["train"].column_names:
417
+ raise ValueError(
418
+ f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
419
+ "Make sure to set `--text_column_name` to the correct text column - one of "
420
+ f"{', '.join(raw_datasets['train'].column_names)}."
421
+ )
422
+
423
+ if data_args.max_train_samples is not None:
424
+ raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
425
+
426
+ if training_args.do_eval:
427
+ raw_datasets["eval"] = load_dataset(
428
+ data_args.dataset_name,
429
+ data_args.dataset_config_name,
430
+ split=data_args.eval_split_name,
431
+ use_auth_token=data_args.use_auth_token,
432
+ )
433
+
434
+ if data_args.max_eval_samples is not None:
435
+ raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
436
+
437
+ # 2. We remove some special characters from the datasets
438
+ # that make training complicated and do not help in transcribing the speech
439
+ # E.g., characters such as `,` and `.` do not really have an acoustic characteristic
440
+ # that could be easily picked up by the model
441
+ chars_to_ignore_regex = (
442
+ f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
443
+ )
444
+ text_column_name = data_args.text_column_name
445
+
446
+ def remove_special_characters(batch):
447
+ if chars_to_ignore_regex is not None:
448
+ batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
449
+ else:
450
+ batch["target_text"] = batch[text_column_name].lower() + " "
451
+ return batch
452
+
453
+ with training_args.main_process_first(desc="dataset map special characters removal"):
454
+ raw_datasets = raw_datasets.map(
455
+ remove_special_characters,
456
+ remove_columns=[text_column_name],
457
+ desc="remove special characters from datasets",
458
+ )
459
+
460
+ # save special tokens for tokenizer
461
+ word_delimiter_token = data_args.word_delimiter_token
462
+ unk_token = data_args.unk_token
463
+ pad_token = data_args.pad_token
464
+
465
+ # 3. Next, let's load the config as we might need it to create
466
+ # the tokenizer
467
+ # load config
468
+ config = AutoConfig.from_pretrained(
469
+ model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
470
+ )
471
+
472
+ # 4. Next, if no tokenizer file is defined,
473
+ # we create the vocabulary of the model by extracting all unique characters from
474
+ # the training and evaluation datasets
475
+ # We need to make sure that only the first rank saves the vocabulary
476
+ # make sure all processes wait until vocab is created
477
+ tokenizer_name_or_path = model_args.tokenizer_name_or_path
478
+ tokenizer_kwargs = {}
479
+ if tokenizer_name_or_path is None:
480
+ # save vocab in training output dir
481
+ tokenizer_name_or_path = training_args.output_dir
482
+
483
+ vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
484
+
485
+ with training_args.main_process_first():
486
+ if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
487
+ os.remove(vocab_file)
488
+
489
+ with training_args.main_process_first(desc="dataset map vocabulary creation"):
490
+ if not os.path.isfile(vocab_file):
491
+ os.makedirs(tokenizer_name_or_path, exist_ok=True)
492
+ vocab_dict = create_vocabulary_from_data(
493
+ raw_datasets,
494
+ word_delimiter_token=word_delimiter_token,
495
+ unk_token=unk_token,
496
+ pad_token=pad_token,
497
+ )
498
+
499
+ # save vocab dict to be loaded into tokenizer
500
+ with open(vocab_file, "w") as file:
501
+ json.dump(vocab_dict, file)
502
+
503
+ # if tokenizer has just been created
504
+ # it is defined by `tokenizer_class` if present in config else by `model_type`
505
+ tokenizer_kwargs = {
506
+ "config": config if config.tokenizer_class is not None else None,
507
+ "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
508
+ "unk_token": unk_token,
509
+ "pad_token": pad_token,
510
+ "word_delimiter_token": word_delimiter_token,
511
+ }
512
+
513
+ # 5. Now we can instantiate the feature extractor, tokenizer and model
514
+ # Note for distributed training, the .from_pretrained methods guarantee that only
515
+ # one local process can concurrently download model & vocab.
516
+
517
+ # load feature_extractor and tokenizer
518
+ tokenizer = AutoTokenizer.from_pretrained(
519
+ tokenizer_name_or_path,
520
+ use_auth_token=data_args.use_auth_token,
521
+ **tokenizer_kwargs,
522
+ )
523
+ feature_extractor = AutoFeatureExtractor.from_pretrained(
524
+ model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
525
+ )
526
+
527
+ # adapt config
528
+ config.update(
529
+ {
530
+ "feat_proj_dropout": model_args.feat_proj_dropout,
531
+ "attention_dropout": model_args.attention_dropout,
532
+ "hidden_dropout": model_args.hidden_dropout,
533
+ "final_dropout": model_args.final_dropout,
534
+ "mask_time_prob": model_args.mask_time_prob,
535
+ "mask_time_length": model_args.mask_time_length,
536
+ "mask_feature_prob": model_args.mask_feature_prob,
537
+ "mask_feature_length": model_args.mask_feature_length,
538
+ "gradient_checkpointing": training_args.gradient_checkpointing,
539
+ "layerdrop": model_args.layerdrop,
540
+ "ctc_loss_reduction": model_args.ctc_loss_reduction,
541
+ "pad_token_id": tokenizer.pad_token_id,
542
+ "vocab_size": len(tokenizer),
543
+ "activation_dropout": model_args.activation_dropout,
544
+ }
545
+ )
546
+
547
+ # create model
548
+ model = AutoModelForCTC.from_pretrained(
549
+ model_args.model_name_or_path,
550
+ cache_dir=model_args.cache_dir,
551
+ config=config,
552
+ use_auth_token=data_args.use_auth_token,
553
+ )
554
+
555
+ # freeze encoder
556
+ if model_args.freeze_feature_encoder:
557
+ model.freeze_feature_encoder()
558
+
559
+ # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
560
+ # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
561
+ # so that we just need to set the correct target sampling rate and normalize the input
562
+ # via the `feature_extractor`
563
+
564
+ # make sure that dataset decodes audio with correct sampling rate
565
+ dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
566
+ if dataset_sampling_rate != feature_extractor.sampling_rate:
567
+ raw_datasets = raw_datasets.cast_column(
568
+ data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
569
+ )
570
+
571
+ # derive max & min input length for sample rate & max duration
572
+ max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
573
+ min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
574
+ audio_column_name = data_args.audio_column_name
575
+ num_workers = data_args.preprocessing_num_workers
576
+
577
+ # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
578
+ phoneme_language = data_args.phoneme_language
579
+
580
+ # Preprocessing the datasets.
581
+ # We need to read the audio files as arrays and tokenize the targets.
582
+ def prepare_dataset(batch):
583
+ # load audio
584
+ sample = batch[audio_column_name]
585
+
586
+ inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
587
+ batch["input_values"] = inputs.input_values[0]
588
+ batch["input_length"] = len(batch["input_values"])
589
+
590
+ # encode targets
591
+ additional_kwargs = {}
592
+ if phoneme_language is not None:
593
+ additional_kwargs["phonemizer_lang"] = phoneme_language
594
+
595
+ batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
596
+ return batch
597
+
598
+ with training_args.main_process_first(desc="dataset map preprocessing"):
599
+ vectorized_datasets = raw_datasets.map(
600
+ prepare_dataset,
601
+ remove_columns=next(iter(raw_datasets.values())).column_names,
602
+ num_proc=num_workers,
603
+ desc="preprocess datasets",
604
+ )
605
+
606
+ def is_audio_in_length_range(length):
607
+ return length > min_input_length and length < max_input_length
608
+
609
+ # filter out data that is shorter than min_input_length or longer than max_input_length
610
+ vectorized_datasets = vectorized_datasets.filter(
611
+ is_audio_in_length_range,
612
+ num_proc=num_workers,
613
+ input_columns=["input_length"],
614
+ )
615
+
616
+ # 7. Next, we can prepare the training.
617
+ # Let's use word error rate (WER) as our evaluation metric,
618
+ # instantiate a data collator and the trainer
619
+
620
+ # Define evaluation metrics during training, *i.e.* word error rate, character error rate
621
+ eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}
622
+
623
+ # for large datasets it is advised to run the preprocessing on a
624
+ # single machine first with ``args.preprocessing_only`` since there will most likely
625
+ # be a timeout when running the script in distributed mode.
626
+ # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
627
+ # cached dataset
628
+ if data_args.preprocessing_only:
629
+ logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
630
+ return
631
+
632
+ def compute_metrics(pred):
633
+ pred_logits = pred.predictions
634
+ pred_ids = np.argmax(pred_logits, axis=-1)
635
+
636
+ pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
637
+
638
+ pred_str = tokenizer.batch_decode(pred_ids)
639
+ # we do not want to group tokens when computing the metrics
640
+ label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
641
+
642
+ metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}
643
+
644
+ return metrics
645
+
646
+ # Now save everything to be able to create a single processor later
647
+ if is_main_process(training_args.local_rank):
648
+ # save feature extractor, tokenizer and config
649
+ feature_extractor.save_pretrained(training_args.output_dir)
650
+ tokenizer.save_pretrained(training_args.output_dir)
651
+ config.save_pretrained(training_args.output_dir)
652
+
653
+ try:
654
+ processor = AutoProcessor.from_pretrained(training_args.output_dir)
655
+ except (OSError, KeyError):
656
+ warnings.warn(
657
+ "Loading a processor from a feature extractor config that does not"
658
+ " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
659
+ " attribute to your `preprocessor_config.json` file to suppress this warning: "
660
+ " `'processor_class': 'Wav2Vec2Processor'`",
661
+ FutureWarning,
662
+ )
663
+ processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)
664
+
665
+ # Instantiate custom data collator
666
+ data_collator = DataCollatorCTCWithPadding(processor=processor)
667
+
668
+ # Initialize Trainer
669
+ trainer = Trainer(
670
+ model=model,
671
+ data_collator=data_collator,
672
+ args=training_args,
673
+ compute_metrics=compute_metrics,
674
+ train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
675
+ eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
676
+ tokenizer=feature_extractor,
677
+ )
678
+
679
+ # 8. Finally, we can start training
680
+
681
+ # Training
682
+ if training_args.do_train:
683
+
684
+ # use the last checkpoint if it exists
685
+ if last_checkpoint is not None:
686
+ checkpoint = last_checkpoint
687
+ elif os.path.isdir(model_args.model_name_or_path):
688
+ checkpoint = model_args.model_name_or_path
689
+ else:
690
+ checkpoint = None
691
+
692
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
693
+ trainer.save_model()
694
+
695
+ metrics = train_result.metrics
696
+ max_train_samples = (
697
+ data_args.max_train_samples
698
+ if data_args.max_train_samples is not None
699
+ else len(vectorized_datasets["train"])
700
+ )
701
+ metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
702
+
703
+ trainer.log_metrics("train", metrics)
704
+ trainer.save_metrics("train", metrics)
705
+ trainer.save_state()
706
+
707
+ # Evaluation
708
+ results = {}
709
+ if training_args.do_eval:
710
+ logger.info("*** Evaluate ***")
711
+ metrics = trainer.evaluate()
712
+ max_eval_samples = (
713
+ data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
714
+ )
715
+ metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
716
+
717
+ trainer.log_metrics("eval", metrics)
718
+ trainer.save_metrics("eval", metrics)
719
+
720
+ # Write model card and (optionally) push to hub
721
+ config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
722
+ kwargs = {
723
+ "finetuned_from": model_args.model_name_or_path,
724
+ "tasks": "speech-recognition",
725
+ "tags": ["automatic-speech-recognition", data_args.dataset_name],
726
+ "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
727
+ "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
728
+ }
729
+ if "common_voice" in data_args.dataset_name:
730
+ kwargs["language"] = config_name
731
+
732
+ if training_args.push_to_hub:
733
+ trainer.push_to_hub(**kwargs)
734
+ else:
735
+ trainer.create_model_card(**kwargs)
736
+
737
+ return results
738
+
739
+
740
+ if __name__ == "__main__":
741
+ main()
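A minimal inference sketch for the checkpoint this script saves. It assumes training has finished and that the processor and model were written to the training output directory ("./" here, matching `name_or_path` in tokenizer_config.json below); the silence array is only a placeholder for real 16 kHz Hindi audio.

import numpy as np
import torch
from transformers import AutoModelForCTC, Wav2Vec2Processor

output_dir = "./"  # assumption: the directory the run wrote its processor and model to
processor = Wav2Vec2Processor.from_pretrained(output_dir)
model = AutoModelForCTC.from_pretrained(output_dir)
model.eval()

speech = np.zeros(16_000, dtype=np.float32)  # placeholder: one second of 16 kHz audio
inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits
pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids))  # greedy CTC decoding of the predicted ids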
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
1
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7e6f20268ea2a36d0f7d53ad7e7529dc214aa6f70665bf8d7159e5ed2da2b72a
3
+ size 2991
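The three lines above are only a Git LFS pointer; the real training_args.bin is a torch-pickled TrainingArguments object. A sketch of inspecting it, assuming the LFS object has actually been fetched:

import torch

# Newer PyTorch releases default torch.load to weights_only=True; unpickling a
# TrainingArguments object may then require passing weights_only=False explicitly.
args = torch.load("training_args.bin")
print(args.learning_rate, args.num_train_epochs, args.warmup_steps)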
vocab.json ADDED
@@ -0,0 +1 @@
 
1
+ {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "g": 7, "h": 8, "i": 9, "j": 10, "k": 11, "l": 12, "m": 13, "n": 14, "o": 15, "p": 16, "r": 17, "s": 18, "t": 19, "u": 20, "v": 21, "w": 22, "x": 23, "y": 24, "z": 25, "ँ": 26, "ं": 27, "ः": 28, "अ": 29, "आ": 30, "इ": 31, "ई": 32, "उ": 33, "ऊ": 34, "ऋ": 35, "ए": 36, "ऐ": 37, "ऑ": 38, "ओ": 39, "औ": 40, "क": 41, "ख": 42, "ग": 43, "घ": 44, "च": 45, "छ": 46, "ज": 47, "झ": 48, "ञ": 49, "ट": 50, "ठ": 51, "ड": 52, "ढ": 53, "ण": 54, "त": 55, "थ": 56, "द": 57, "ध": 58, "न": 59, "प": 60, "फ": 61, "ब": 62, "भ": 63, "म": 64, "य": 65, "र": 66, "ल": 67, "व": 68, "श": 69, "ष": 70, "स": 71, "ह": 72, "़": 73, "ा": 74, "ि": 75, "ी": 76, "ु": 77, "ू": 78, "ृ": 79, "ॅ": 80, "े": 81, "ै": 82, "ॉ": 83, "ो": 84, "ौ": 85, "्": 86, "क़": 87, "ग़": 88, "ज़": 89, "ड़": 90, "ढ़": 91, "।": 92, "|": 0, "[UNK]": 93, "[PAD]": 94}