arampacha committed on
Commit
28582fe
1 Parent(s): 0291150

processor and training script

added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<s>": 42, "</s>": 43}
alphabet.json ADDED
@@ -0,0 +1 @@
+ {"labels": [" ", "\u0561", "\u0562", "\u0563", "\u0564", "\u0565", "\u0566", "\u0567", "\u0568", "\u0569", "\u056a", "\u056b", "\u056c", "\u056d", "\u056e", "\u056f", "\u0570", "\u0571", "\u0572", "\u0573", "\u0574", "\u0575", "\u0576", "\u0577", "\u0578", "\u0579", "\u057a", "\u057b", "\u057c", "\u057d", "\u057e", "\u057f", "\u0580", "\u0581", "\u0582", "\u0583", "\u0584", "\u0585", "\u0586", "\u0587", "\u2047", "", "<s>", "</s>"], "is_bpe": false}
language_model/5gram.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39bcfd7d6afa5cc9c6286594ac121b9900d9e2f0934bd3da54b21373a303f064
+ size 879915787
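
Note: the 5-gram model is stored through Git LFS (the pointer above stands in for an ~880 MB KenLM binary). A minimal sketch of querying it directly, assuming the kenlm Python bindings are installed and using an arbitrary example sentence:

    import kenlm  # pip install kenlm

    lm = kenlm.Model("language_model/5gram.bin")
    print(lm.order)                                  # 5
    print(lm.score("բարև ձեզ", bos=True, eos=True))  # log10 probability
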
language_model/attrs.json ADDED
@@ -0,0 +1 @@
+ {"alpha": 0.5, "beta": 1.5, "unk_score_offset": -10.0, "score_boundary": true}
language_model/unigrams.txt ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "do_normalize": true,
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "processor_class": "Wav2Vec2ProcessorWithLM",
+ "return_attention_mask": true,
+ "sampling_rate": 16000
+ }
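
Note: this config says the model consumes normalized mono waveforms at 16 kHz and returns an attention mask (recommended for XLS-R-style models). A minimal sketch of what loading it gives you, using a dummy one-second waveform:

    import numpy as np
    from transformers import Wav2Vec2FeatureExtractor

    fe = Wav2Vec2FeatureExtractor.from_pretrained(".")  # reads preprocessor_config.json
    wave = np.zeros(16000, dtype=np.float32)            # 1 s of silence at 16 kHz
    inputs = fe(wave, sampling_rate=16000, return_tensors="pt")
    print(inputs.input_values.shape)                    # torch.Size([1, 16000])
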
run.sh ADDED
@@ -0,0 +1,40 @@
+ python run_speech_recognition_ctc.py \
+ 	--dataset_name="mozilla-foundation/common_voice_8_0" \
+ 	--dataset_config_name="hy-AM" \
+ 	--model_name_or_path="facebook/wav2vec2-xls-r-1b" \
+ 	--tokenizer_name_or_path="./" \
+ 	--output_dir="./" \
+ 	--max_steps 1400 \
+ 	--per_device_train_batch_size="16" \
+ 	--per_device_eval_batch_size="64" \
+ 	--gradient_accumulation_steps="8" \
+ 	--dataloader_num_workers 8 \
+ 	--learning_rate="8e-5" \
+ 	--adam_beta2 0.98 \
+ 	--warmup_ratio 0.1 \
+ 	--evaluation_strategy="steps" \
+ 	--text_column_name="sentence" \
+ 	--chars_to_ignore \, \? \. \! \- \; \: \" \“ \% \‘ \” \� \' « » \( \) ։ ՝ ՞ ՛ ՚ \
+ 	--save_steps="100" \
+ 	--eval_steps="100" \
+ 	--logging_steps="100" \
+ 	--eval_metrics="wer cer" \
+ 	--save_total_limit="2" \
+ 	--freeze_feature_encoder \
+ 	--layerdrop="0.1" \
+ 	--activation_dropout="0.1" \
+ 	--feat_proj_dropout="0.0" \
+ 	--mask_time_prob="0.75" \
+ 	--mask_time_length="10" \
+ 	--mask_feature_prob="0.25" \
+ 	--mask_feature_length="64" \
+ 	--gradient_checkpointing \
+ 	--use_auth_token \
+ 	--fp16 \
+ 	--group_by_length \
+ 	--do_train --do_eval \
+ 	--load_best_model_at_end \
+ 	--report_to all \
+ 	--run_name xlsr-hy-cv-1b-1 \
+ 	--wandb_project xlsr-hy \
+ 	--bnb --tristage_sched
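
Note: with these flags the effective batch size is 16 × 8 = 128 samples per optimizer step per device, and since --tristage_sched is set, the script's tri-stage scheduler (not --warmup_ratio, which only affects the default HF scheduler) splits the 1400 steps using its default ratios:

    # default ratios [0.1, 0.4, 0.5] from get_tri_stage_schedule
    warmup = int(0.1 * 1400)      # 140 steps: linear warmup
    hold = int(0.4 * 1400)        # 560 steps: constant peak LR (8e-5)
    decay = 1400 - warmup - hold  # 700 steps: linear decay to 5% of peak
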
run_speech_recognition_ctc.py ADDED
@@ -0,0 +1,813 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""
+
+ import functools
+ import json
+ import logging
+ import os
+ import re
+ import sys
+ import warnings
+ from dataclasses import dataclass, field
+ from typing import Dict, List, Optional, Union
+
+ import datasets
+ import numpy as np
+ import torch
+ from torch.optim.lr_scheduler import LambdaLR
+ from datasets import DatasetDict, load_dataset, load_metric
+
+ import bitsandbytes as bnb
+ import transformers
+ from transformers import (
+     AutoConfig,
+     AutoFeatureExtractor,
+     AutoModelForCTC,
+     AutoProcessor,
+     AutoTokenizer,
+     HfArgumentParser,
+     Trainer,
+     TrainingArguments,
+     Wav2Vec2Processor,
+     set_seed,
+ )
+ from transformers.trainer_pt_utils import get_parameter_names
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
+ from transformers.utils import check_min_version
+ from transformers.utils.versions import require_version
+
+
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risk.
+ check_min_version("4.16.0.dev0")
+
+ require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
+
+
+ logger = logging.getLogger(__name__)
+
+
+ def list_field(default=None, metadata=None):
+     return field(default_factory=lambda: default, metadata=metadata)
+
+
+ @dataclass
+ class ModelArguments:
+     """
+     Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
+     """
+
+     model_name_or_path: str = field(
+         metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
+     )
+     tokenizer_name_or_path: Optional[str] = field(
+         default=None,
+         metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
+     )
+     cache_dir: Optional[str] = field(
+         default=None,
+         metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
+     )
+     freeze_feature_encoder: bool = field(
+         default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
+     )
+     attention_dropout: float = field(
+         default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
+     )
+     activation_dropout: float = field(
+         default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
+     )
+     feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
+     hidden_dropout: float = field(
+         default=0.0,
+         metadata={
+             "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
+         },
+     )
+     final_dropout: float = field(
+         default=0.0,
+         metadata={"help": "The dropout probability for the final projection layer."},
+     )
+     mask_time_prob: float = field(
+         default=0.05,
+         metadata={
+             "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector "
+             "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
+             "vectors will be masked along the time axis."
+         },
+     )
+     mask_time_length: int = field(
+         default=10,
+         metadata={"help": "Length of vector span to mask along the time axis."},
+     )
+     mask_feature_prob: float = field(
+         default=0.0,
+         metadata={
+             "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector "
+             "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the feature axis."
+         },
+     )
+     mask_feature_length: int = field(
+         default=10,
+         metadata={"help": "Length of vector span to mask along the feature axis."},
+     )
+     layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
+     ctc_loss_reduction: Optional[str] = field(
+         default="mean", metadata={"help": "The way the CTC loss should be reduced. Should be one of 'mean' or 'sum'."}
+     )
+
+
+ @dataclass
+ class DataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+
+     Using `HfArgumentParser` we can turn this class
+     into argparse arguments to be able to specify them on
+     the command line.
+     """
+
+     dataset_name: str = field(
+         metadata={"help": "The name of the dataset to use (via the datasets library)."}
+     )
+     dataset_config_name: Optional[str] = field(
+         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+     )
+     train_split_name: str = field(
+         default="train+validation",
+         metadata={
+             "help": "The name of the training dataset split to use (via the datasets library). Defaults to 'train+validation'"
+         },
+     )
+     eval_split_name: str = field(
+         default="test",
+         metadata={
+             "help": "The name of the evaluation dataset split to use (via the datasets library). Defaults to 'test'"
+         },
+     )
+     audio_column_name: str = field(
+         default="audio",
+         metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
+     )
+     text_column_name: str = field(
+         default="text",
+         metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
+     )
+     preprocessing_num_workers: Optional[int] = field(
+         default=None,
+         metadata={"help": "The number of processes to use for the preprocessing."},
+     )
+     max_train_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
+             "value if set."
+         },
+     )
+     max_eval_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
+             "value if set."
+         },
+     )
+     chars_to_ignore: Optional[List[str]] = list_field(
+         default=None,
+         metadata={"help": "A list of characters to remove from the transcripts."},
+     )
+     eval_metrics: List[str] = list_field(
+         default=["wer"],
+         metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
+     )
+     max_duration_in_seconds: float = field(
+         default=20.0,
+         metadata={
+             "help": "Filter out audio files that are longer than `max_duration_in_seconds` seconds"
+         },
+     )
+     min_duration_in_seconds: float = field(
+         default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
+     )
+     preprocessing_only: bool = field(
+         default=False,
+         metadata={
+             "help": "Whether to only do data preprocessing and skip training. "
+             "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
+             "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
+             "so that the cached datasets can consequently be loaded in distributed training"
+         },
+     )
+     use_auth_token: bool = field(
+         default=False,
+         metadata={
+             "help": "If :obj:`True`, will use the token generated when running "
+             ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
+         },
+     )
+     unk_token: str = field(
+         default="[UNK]",
+         metadata={"help": "The unk token for the tokenizer"},
+     )
+     pad_token: str = field(
+         default="[PAD]",
+         metadata={"help": "The padding token for the tokenizer"},
+     )
+     word_delimiter_token: str = field(
+         default="|",
+         metadata={"help": "The word delimiter token for the tokenizer"},
+     )
+     phoneme_language: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": "The target language that should be passed to the"
+             " tokenizer for tokenization. Note that this is only relevant if"
+             " the model classifies the input audio to a sequence of phonemes."
+         },
+     )
+
+ @dataclass
+ class ExtraArguments:
+     "Additional training arguments"
+     bnb: bool = field(
+         default=False,
+         metadata={"help": "If true, uses 8-bit Adam"},
+     )
+     tristage_sched: bool = field(
+         default=False,
+         metadata={"help": "If true, uses a tri-stage LR scheduler (refer to the XLS-R paper)"},
+     )
+     wandb_project: Optional[str] = field(
+         default=None,
+         metadata={"help": "Name of the wandb project to log into"},
+     )
+
+
+ @dataclass
+ class DataCollatorCTCWithPadding:
+     """
+     Data collator that will dynamically pad the inputs received.
+     Args:
+         processor (:class:`~transformers.AutoProcessor`)
+             The processor used for processing the data.
+         padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
+             Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
+             among:
+             * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+               sequence is provided).
+             * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
+               maximum acceptable input length for the model if that argument is not provided.
+             * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
+               different lengths).
+         max_length (:obj:`int`, `optional`):
+             Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
+         max_length_labels (:obj:`int`, `optional`):
+             Maximum length of the ``labels`` returned list and optionally padding length (see above).
+         pad_to_multiple_of (:obj:`int`, `optional`):
+             If set will pad the sequence to a multiple of the provided value.
+             This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
+             7.5 (Volta).
+     """
+
+     processor: AutoProcessor
+     padding: Union[bool, str] = "longest"
+     pad_to_multiple_of: Optional[int] = None
+     pad_to_multiple_of_labels: Optional[int] = None
+
+     def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
+         # split inputs and labels since they have to be of different lengths and need
+         # different padding methods
+         input_features = [{"input_values": feature["input_values"]} for feature in features]
+         label_features = [{"input_ids": feature["labels"]} for feature in features]
+
+         batch = self.processor.pad(
+             input_features,
+             padding=self.padding,
+             pad_to_multiple_of=self.pad_to_multiple_of,
+             return_tensors="pt",
+         )
+
+         with self.processor.as_target_processor():
+             labels_batch = self.processor.pad(
+                 label_features,
+                 padding=self.padding,
+                 pad_to_multiple_of=self.pad_to_multiple_of_labels,
+                 return_tensors="pt",
+             )
+
+         # replace padding with -100 to ignore loss correctly
+         labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
+
+         batch["labels"] = labels
+
+         return batch
+
+
+ def get_tri_stage_schedule(
+     optimizer, num_training_steps, ratios=[0.1, 0.4, 0.5], num_warmup_steps=None, num_hold_steps=None, start_ratio=0.01, end_ratio=0.05
+ ):
+     assert (num_warmup_steps is None) == (num_hold_steps is None)
+     if num_warmup_steps is None:
+         num_warmup_steps = int(ratios[0] * num_training_steps)
+         num_hold_steps = int(ratios[1] * num_training_steps)
+     start_decay_step = num_warmup_steps + num_hold_steps
+     a_w, b_w = (1 - start_ratio) / num_warmup_steps, start_ratio
+     num_decay_steps = num_training_steps - start_decay_step
+     a_d, b_d = (end_ratio - 1) / num_decay_steps, 1.0
+
+     def lr_lambda(current_step):
+         # linear warmup to peak LR, hold at peak, then linear decay to end_ratio * peak
+         if current_step < num_warmup_steps:
+             return a_w * float(current_step) + b_w
+         if current_step < start_decay_step:
+             return 1.0
+         return max(end_ratio, a_d * float(current_step - start_decay_step) + b_d)
+
+     return LambdaLR(optimizer, lr_lambda)
+
+ def create_vocabulary_from_data(
+     datasets: DatasetDict,
+     word_delimiter_token: Optional[str] = None,
+     unk_token: Optional[str] = None,
+     pad_token: Optional[str] = None,
+ ):
+     # Given training and test labels create vocabulary
+     def extract_all_chars(batch):
+         all_text = " ".join(batch["target_text"])
+         vocab = list(set(all_text))
+         return {"vocab": [vocab], "all_text": [all_text]}
+
+     vocabs = datasets.map(
+         extract_all_chars,
+         batched=True,
+         batch_size=-1,
+         keep_in_memory=True,
+         remove_columns=datasets["train"].column_names,
+     )
+
+     # take union of all unique characters in each dataset
+     vocab_set = functools.reduce(
+         lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
+     )
+
+     vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}
+
+     # replace white space with delimiter token
+     if word_delimiter_token is not None:
+         vocab_dict[word_delimiter_token] = vocab_dict[" "]
+         del vocab_dict[" "]
+
+     # add unk and pad token
+     if unk_token is not None:
+         vocab_dict[unk_token] = len(vocab_dict)
+
+     if pad_token is not None:
+         vocab_dict[pad_token] = len(vocab_dict)
+
+     return vocab_dict
+
+
+ def main():
+     # See all possible arguments in src/transformers/training_args.py
+     # or by passing the --help flag to this script.
+     # We now keep distinct sets of args, for a cleaner separation of concerns.
+
+     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, ExtraArguments))
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # If we pass only one argument to the script and it's the path to a json file,
+         # let's parse it to get our arguments.
+         model_args, data_args, training_args, extra_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+     else:
+         model_args, data_args, training_args, extra_args = parser.parse_args_into_dataclasses()
+
+     # Detecting last checkpoint.
+     last_checkpoint = None
+     if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+         last_checkpoint = get_last_checkpoint(training_args.output_dir)
+         if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+             raise ValueError(
+                 f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                 "Use --overwrite_output_dir to overcome."
+             )
+         elif last_checkpoint is not None:
+             logger.info(
+                 f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                 "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+             )
+
+     # Setup logging
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         handlers=[logging.StreamHandler(sys.stdout)],
+     )
+     logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
+
+     # Log on each process the small summary:
+     logger.warning(
+         f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+         f"distributed training: {bool(training_args.local_rank != -1)}, 16-bit training: {training_args.fp16}"
+     )
+     # Set the verbosity to info of the Transformers logger (on main process only):
+     if is_main_process(training_args.local_rank):
+         transformers.utils.logging.set_verbosity_info()
+     logger.info("Training/evaluation parameters %s", training_args)
+
+     # Set seed before initializing model.
+     set_seed(training_args.seed)
+
+     # configure wandb run
+     if extra_args.wandb_project is not None:
+         os.environ["WANDB_PROJECT"] = extra_args.wandb_project
+
+     # 1. First, let's load the dataset
+     raw_datasets = DatasetDict()
+
+     if training_args.do_train:
+         raw_datasets["train"] = load_dataset(
+             data_args.dataset_name,
+             data_args.dataset_config_name,
+             split=data_args.train_split_name,
+             use_auth_token=data_args.use_auth_token,
+         )
+
+         if data_args.audio_column_name not in raw_datasets["train"].column_names:
+             raise ValueError(
+                 f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
+                 "Make sure to set `--audio_column_name` to the correct audio column - one of "
+                 f"{', '.join(raw_datasets['train'].column_names)}."
+             )
+
+         if data_args.text_column_name not in raw_datasets["train"].column_names:
+             raise ValueError(
+                 f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
+                 "Make sure to set `--text_column_name` to the correct text column - one of "
+                 f"{', '.join(raw_datasets['train'].column_names)}."
+             )
+
+         if data_args.max_train_samples is not None:
+             raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
+
+     if training_args.do_eval:
+         raw_datasets["eval"] = load_dataset(
+             data_args.dataset_name,
+             data_args.dataset_config_name,
+             split=data_args.eval_split_name,
+             use_auth_token=data_args.use_auth_token,
+         )
+
+         if data_args.max_eval_samples is not None:
+             raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
+
+     # 2. We remove some special characters from the datasets
+     # that make training complicated and do not help in transcribing the speech
+     # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
+     # that could be easily picked up by the model
+     chars_to_ignore_regex = (
+         f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
+     )
+     text_column_name = data_args.text_column_name
+
+     def remove_special_characters(batch):
+         if chars_to_ignore_regex is not None:
+             batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
+         else:
+             batch["target_text"] = batch[text_column_name].lower() + " "
+         return batch
+
+     with training_args.main_process_first(desc="dataset map special characters removal"):
+         raw_datasets = raw_datasets.map(
+             remove_special_characters,
+             remove_columns=[text_column_name],
+             desc="remove special characters from datasets",
+         )
+
+     # save special tokens for tokenizer
+     word_delimiter_token = data_args.word_delimiter_token
+     unk_token = data_args.unk_token
+     pad_token = data_args.pad_token
+
+     # 3. Next, let's load the config as we might need it to create
+     # the tokenizer
+     # load config
+     config = AutoConfig.from_pretrained(
+         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
+     )
+
+     # 4. Next, if no tokenizer file is defined,
+     # we create the vocabulary of the model by extracting all unique characters from
+     # the training and evaluation datasets
+     # We need to make sure that only first rank saves vocabulary
+     # make sure all processes wait until vocab is created
+     tokenizer_name_or_path = model_args.tokenizer_name_or_path
+     tokenizer_kwargs = {}
+     if tokenizer_name_or_path is None:
+         # save vocab in training output dir
+         tokenizer_name_or_path = training_args.output_dir
+
+         vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
+
+         with training_args.main_process_first():
+             if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
+                 os.remove(vocab_file)
+
+         with training_args.main_process_first(desc="dataset map vocabulary creation"):
+             if not os.path.isfile(vocab_file):
+                 os.makedirs(tokenizer_name_or_path, exist_ok=True)
+                 vocab_dict = create_vocabulary_from_data(
+                     raw_datasets,
+                     word_delimiter_token=word_delimiter_token,
+                     unk_token=unk_token,
+                     pad_token=pad_token,
+                 )
+
+                 # save vocab dict to be loaded into tokenizer
+                 with open(vocab_file, "w") as file:
+                     json.dump(vocab_dict, file)
+
+         # if tokenizer has just been created
+         # it is defined by `tokenizer_class` if present in config else by `model_type`
+         tokenizer_kwargs = {
+             "config": config if config.tokenizer_class is not None else None,
+             "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
+             "unk_token": unk_token,
+             "pad_token": pad_token,
+             "word_delimiter_token": word_delimiter_token,
+         }
+
+     # 5. Now we can instantiate the feature extractor, tokenizer and model
+     # Note for distributed training, the .from_pretrained methods guarantee that only
+     # one local process can concurrently download model & vocab.
+
+     # load feature_extractor and tokenizer
+     tokenizer = AutoTokenizer.from_pretrained(
+         tokenizer_name_or_path,
+         use_auth_token=data_args.use_auth_token,
+         **tokenizer_kwargs,
+     )
+     feature_extractor = AutoFeatureExtractor.from_pretrained(
+         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
+     )
+
+     # adapt config
+     config.update(
+         {
+             "feat_proj_dropout": model_args.feat_proj_dropout,
+             "attention_dropout": model_args.attention_dropout,
+             "hidden_dropout": model_args.hidden_dropout,
+             "final_dropout": model_args.final_dropout,
+             "mask_time_prob": model_args.mask_time_prob,
+             "mask_time_length": model_args.mask_time_length,
+             "mask_feature_prob": model_args.mask_feature_prob,
+             "mask_feature_length": model_args.mask_feature_length,
+             "gradient_checkpointing": training_args.gradient_checkpointing,
+             "layerdrop": model_args.layerdrop,
+             "ctc_loss_reduction": model_args.ctc_loss_reduction,
+             "pad_token_id": tokenizer.pad_token_id,
+             "vocab_size": len(tokenizer),
+             "activation_dropout": model_args.activation_dropout,
+         }
+     )
+
+     # create model
+     model = AutoModelForCTC.from_pretrained(
+         model_args.model_name_or_path,
+         cache_dir=model_args.cache_dir,
+         config=config,
+         use_auth_token=data_args.use_auth_token,
+     )
+
+     # freeze encoder
+     if model_args.freeze_feature_encoder:
+         model.freeze_feature_encoder()
+
+     # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
+     # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
+     # so that we just need to set the correct target sampling rate and normalize the input
+     # via the `feature_extractor`
+
+     # make sure that dataset decodes audio with correct sampling rate
+     dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
+     if dataset_sampling_rate != feature_extractor.sampling_rate:
+         raw_datasets = raw_datasets.cast_column(
+             data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
+         )
+
+     # derive max & min input length for sample rate & max duration
+     max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
+     min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
+     audio_column_name = data_args.audio_column_name
+     num_workers = data_args.preprocessing_num_workers
+
+     # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
+     phoneme_language = data_args.phoneme_language
+
+     # Preprocessing the datasets.
+     # We need to read the audio files as arrays and tokenize the targets.
+     def prepare_dataset(batch):
+         # load audio
+         sample = batch[audio_column_name]
+
+         inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
+         batch["input_values"] = inputs.input_values[0]
+         batch["length"] = len(batch["input_values"])
+
+         # encode targets
+         additional_kwargs = {}
+         if phoneme_language is not None:
+             additional_kwargs["phonemizer_lang"] = phoneme_language
+
+         batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
+         return batch
+
+     with training_args.main_process_first(desc="dataset map preprocessing"):
+         vectorized_datasets = raw_datasets.map(
+             prepare_dataset,
+             remove_columns=next(iter(raw_datasets.values())).column_names,
+             num_proc=num_workers,
+             desc="preprocess datasets",
+         )
+
+     def is_audio_in_length_range(length):
+         return min_input_length < length < max_input_length
+
+     # filter data that is shorter than min_input_length or longer than max_input_length
+     vectorized_datasets = vectorized_datasets.filter(
+         is_audio_in_length_range,
+         num_proc=num_workers,
+         input_columns=["length"],
+     )
+
+     # 7. Next, we can prepare the training.
+     # Let's use word error rate (WER) as our evaluation metric,
+     # instantiate a data collator and the trainer
+
+     # Define evaluation metrics during training, *i.e.* word error rate, character error rate
+     eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}
+
+     # for large datasets it is advised to run the preprocessing on a
+     # single machine first with ``args.preprocessing_only`` since there will most likely
+     # be a timeout when running the script in distributed mode.
+     # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
+     # cached dataset
+     if data_args.preprocessing_only:
+         logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
+         return
+
+     def compute_metrics(pred):
+         pred_logits = pred.predictions
+         pred_ids = np.argmax(pred_logits, axis=-1)
+
+         pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
+
+         pred_str = tokenizer.batch_decode(pred_ids)
+         # we do not want to group tokens when computing the metrics
+         label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
+
+         metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}
+
+         return metrics
+
+     # Now save everything to be able to create a single processor later
+     if is_main_process(training_args.local_rank):
+         # save feature extractor, tokenizer and config
+         feature_extractor.save_pretrained(training_args.output_dir)
+         tokenizer.save_pretrained(training_args.output_dir)
+         config.save_pretrained(training_args.output_dir)
+
+     try:
+         processor = AutoProcessor.from_pretrained(training_args.output_dir)
+     except (OSError, KeyError):
+         warnings.warn(
+             "Loading a processor from a feature extractor config that does not"
+             " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
+             " attribute to your `preprocessor_config.json` file to suppress this warning: "
+             " `'processor_class': 'Wav2Vec2Processor'`",
+             FutureWarning,
+         )
+         processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)
+
+     # Instantiate custom data collator
+     data_collator = DataCollatorCTCWithPadding(processor=processor)
+
+     # set up weight decay groups: no decay for biases and LayerNorm weights
+     decay_parameters = get_parameter_names(model, [torch.nn.LayerNorm])
+     decay_parameters = [name for name in decay_parameters if "bias" not in name]
+     optimizer_grouped_parameters = [
+         {
+             "params": [p for n, p in model.named_parameters() if n in decay_parameters],
+             "weight_decay": training_args.weight_decay,
+         },
+         {
+             "params": [p for n, p in model.named_parameters() if n not in decay_parameters],
+             "weight_decay": 0.0,
+         },
+     ]
+     if extra_args.bnb:
+         optimizer = bnb.optim.Adam8bit(
+             params=optimizer_grouped_parameters,
+             lr=training_args.learning_rate,
+             betas=(training_args.adam_beta1, training_args.adam_beta2),
+             eps=training_args.adam_epsilon,
+         )
+     else:
+         optimizer = torch.optim.AdamW(
+             params=optimizer_grouped_parameters,
+             lr=training_args.learning_rate,
+             betas=(training_args.adam_beta1, training_args.adam_beta2),
+             eps=training_args.adam_epsilon,
+         )
+     if extra_args.tristage_sched:
+         scheduler = get_tri_stage_schedule(optimizer, training_args.max_steps)
+     else:
+         scheduler = None
+     optimizers = (optimizer, scheduler)
+
+     # Initialize Trainer
+     trainer = Trainer(
+         model=model,
+         data_collator=data_collator,
+         args=training_args,
+         compute_metrics=compute_metrics,
+         train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
+         eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
+         tokenizer=feature_extractor,
+         optimizers=optimizers,
+     )
+
+     # 8. Finally, we can start training
+
+     # Training
+     if training_args.do_train:
+
+         # use the last checkpoint if one exists
+         if last_checkpoint is not None:
+             checkpoint = last_checkpoint
+         elif os.path.isdir(model_args.model_name_or_path):
+             checkpoint = model_args.model_name_or_path
+         else:
+             checkpoint = None
+
+         train_result = trainer.train(resume_from_checkpoint=checkpoint)
+         trainer.save_model()
+
+         metrics = train_result.metrics
+         max_train_samples = (
+             data_args.max_train_samples
+             if data_args.max_train_samples is not None
+             else len(vectorized_datasets["train"])
+         )
+         metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
+
+         trainer.log_metrics("train", metrics)
+         trainer.save_metrics("train", metrics)
+         trainer.save_state()
+
+     # Evaluation
+     results = {}
+     if training_args.do_eval:
+         logger.info("*** Evaluate ***")
+         metrics = trainer.evaluate()
+         max_eval_samples = (
+             data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
+         )
+         metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
+
+         trainer.log_metrics("eval", metrics)
+         trainer.save_metrics("eval", metrics)
+
+     # Write model card and (optionally) push to hub
+     config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
+     kwargs = {
+         "finetuned_from": model_args.model_name_or_path,
+         "tasks": "speech-recognition",
+         "tags": ["automatic-speech-recognition", data_args.dataset_name],
+         "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
+         "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
+     }
+     if "common_voice" in data_args.dataset_name:
+         kwargs["language"] = config_name
+
+     if training_args.push_to_hub:
+         trainer.push_to_hub(**kwargs)
+     else:
+         trainer.create_model_card(**kwargs)
+
+     return results
+
+
+ if __name__ == "__main__":
+     main()
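
Note: a minimal inference sketch tying the committed artifacts together (model weights plus the LM decoder files); `<repo_id>` is a placeholder for wherever this checkpoint is pushed:

    import torch
    from datasets import Audio, load_dataset
    from transformers import AutoModelForCTC, Wav2Vec2ProcessorWithLM

    processor = Wav2Vec2ProcessorWithLM.from_pretrained("<repo_id>")
    model = AutoModelForCTC.from_pretrained("<repo_id>")

    ds = load_dataset("mozilla-foundation/common_voice_8_0", "hy-AM", split="test", use_auth_token=True)
    ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
    sample = ds[0]["audio"]

    inputs = processor(sample["array"], sampling_rate=16_000, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # beam-search decoding with the 5-gram LM shipped in language_model/
    print(processor.batch_decode(logits.numpy()).text[0])
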
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "/workspace/data/wav2vec2-xls-r-300m-hy/", "tokenizer_class": "Wav2Vec2CTCTokenizer", "processor_class": "Wav2Vec2ProcessorWithLM"}
vocab.json ADDED
@@ -0,0 +1 @@
+ {"ա": 1, "բ": 2, "գ": 3, "դ": 4, "ե": 5, "զ": 6, "է": 7, "ը": 8, "թ": 9, "ժ": 10, "ի": 11, "լ": 12, "խ": 13, "ծ": 14, "կ": 15, "հ": 16, "ձ": 17, "ղ": 18, "ճ": 19, "մ": 20, "յ": 21, "ն": 22, "շ": 23, "ո": 24, "չ": 25, "պ": 26, "ջ": 27, "ռ": 28, "ս": 29, "վ": 30, "տ": 31, "ր": 32, "ց": 33, "ւ": 34, "փ": 35, "ք": 36, "օ": 37, "ֆ": 38, "և": 39, "|": 0, "[UNK]": 40, "[PAD]": 41}