sanchit-gandhi committed
Commit 071893d
Parent: 8b752c1
Files changed (3)
  1. create_model.py +30 -0
  2. run.sh +33 -0
  3. run_xtreme_s.py +885 -0
create_model.py ADDED
@@ -0,0 +1,30 @@
+ from transformers import SpeechEncoderDecoderModel, AutoFeatureExtractor, AutoTokenizer
+ import torch
+
+
+ encoder_id = "facebook/wav2vec2-xls-r-300m"
+ decoder_id = "facebook/bart-large"
+
+ model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_id, decoder_id, encoder_add_adapter=True)
+ # configure encoder regularisation, special tokens, generation defaults and the processor class
+ model.config.encoder.feat_proj_dropout = 0.0
+ model.config.encoder.final_dropout = 0.0
+ model.config.encoder.mask_time_prob = 0.1
+ model.config.decoder_start_token_id = model.decoder.config.bos_token_id
+ model.config.pad_token_id = model.decoder.config.pad_token_id
+ model.config.eos_token_id = model.decoder.config.eos_token_id
+ model.config.max_length = 40
+ model.config.num_beams = 1
+ model.config.encoder.layerdrop = 0.0
+ model.config.use_cache = False
+ model.config.processor_class = "Wav2Vec2Processor"
+
+ # check if generation works
+ out = model.generate(torch.ones((1, 2000)))
+
+ model.save_pretrained("./")
+
+ feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
+ feature_extractor.save_pretrained("./")
+ tokenizer = AutoTokenizer.from_pretrained(decoder_id)
+ tokenizer.save_pretrained("./")
+
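As a quick sanity check (not part of the commit), the warm-started checkpoint and processor files that create_model.py saves to "./" can be loaded back and run end-to-end. The snippet below is a minimal sketch: the one-second silent waveform is a stand-in for real audio, and the as-yet-untrained cross-attention will produce meaningless text until the model is fine-tuned.

import numpy as np
from transformers import AutoFeatureExtractor, AutoTokenizer, SpeechEncoderDecoderModel

# load the model, feature extractor and tokenizer saved by create_model.py
model = SpeechEncoderDecoderModel.from_pretrained("./")
feature_extractor = AutoFeatureExtractor.from_pretrained("./")
tokenizer = AutoTokenizer.from_pretrained("./")

# one second of silence at 16 kHz stands in for a real recording
waveform = np.zeros(16000, dtype=np.float32)
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

# greedy decoding, mirroring the max_length=40 / num_beams=1 defaults set above
generated_ids = model.generate(inputs.input_values, max_length=40)
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))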
run.sh ADDED
@@ -0,0 +1,33 @@
+ #!/usr/bin/env bash
+ CUDA_VISIBLE_DEVICES=1 python run_xtreme_s.py \
+ --model_name_or_path="./" \
+ --task="covost2" \
+ --language="fr.en" \
+ --eval_split_name="test" \
+ --output_dir="./" \
+ --overwrite_output_dir \
+ --num_train_epochs="3" \
+ --per_device_train_batch_size="4" \
+ --per_device_eval_batch_size="2" \
+ --gradient_accumulation_steps="2" \
+ --generation_max_length="40" \
+ --generation_num_beams="1" \
+ --learning_rate="3e-4" \
+ --warmup_steps="500" \
+ --evaluation_strategy="steps" \
+ --max_duration_in_seconds="20" \
+ --save_steps="500" \
+ --eval_steps="500" \
+ --logging_steps="1" \
+ --freeze_feature_encoder \
+ --gradient_checkpointing \
+ --fp16 \
+ --max_train_samples="10" \
+ --group_by_length \
+ --do_train \
+ --do_eval \
+ --metric_for_best_model="bleu" \
+ --greater_is_better=True \
+ --load_best_model_at_end \
+ --push_to_hub \
+ --use_auth_token
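Incidentally, run_xtreme_s.py (below) also accepts a single JSON file of arguments via HfArgumentParser.parse_json_file, so the same run can be configured without a long flag list. A hedged sketch; the file name args.json and the reduced argument set are illustrative only:

import json
import subprocess

# an illustrative subset of the flags used in run.sh, written to a JSON config
args = {
    "model_name_or_path": "./",
    "task": "covost2",
    "language": "fr.en",
    "output_dir": "./",
    "per_device_train_batch_size": 4,
    "learning_rate": 3e-4,
    "do_train": True,
    "do_eval": True,
}
with open("args.json", "w") as f:
    json.dump(args, f, indent=2)

# the script routes a lone *.json argument through parser.parse_json_file (see main() below)
subprocess.run(["python", "run_xtreme_s.py", "args.json"], check=True)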
run_xtreme_s.py ADDED
@@ -0,0 +1,885 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Fine-tuning a 🤗 Transformers pretrained speech model on the XTREME-S benchmark tasks"""
+
+ import json
+ import logging
+ import os
+ import re
+ import sys
+ from collections import OrderedDict, defaultdict
+ from dataclasses import dataclass, field
+ from typing import Dict, List, Optional, Union
+
+ import datasets
+ import numpy as np
+ import torch
+ from datasets import DatasetDict, load_dataset, load_metric
+
+ import transformers
+ from transformers import (
+     AutoConfig,
+     AutoFeatureExtractor,
+     AutoModelForAudioClassification,
+     AutoModelForCTC,
+     AutoModelForSpeechSeq2Seq,
+     AutoProcessor,
+     AutoTokenizer,
+     HfArgumentParser,
+     Seq2SeqTrainer,
+     Seq2SeqTrainingArguments,
+     Trainer,
+     set_seed,
+ )
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
+ from transformers.utils import check_min_version
+ from transformers.utils.versions import require_version
+
+
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
+ check_min_version("4.18.0.dev0")
+
+ require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
+
+
+ logger = logging.getLogger(__name__)
+
+
+ def list_field(default=None, metadata=None):
+     return field(default_factory=lambda: default, metadata=metadata)
+
+
+ TASK_TO_TARGET_COLUMN_NAME = {
+     "fleurs-asr": "transcription",
+     "fleurs-lang_id": "lang_id",
+     "mls": "transcription",
+     "voxpopuli": "transcription",
+     "covost2": "translation",
+     "minds14": "intent_class",
+     "babel": "transcription",
+ }
+
+
+ @dataclass
+ class ModelArguments:
+     """
+     Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
+     """
+
+     model_name_or_path: str = field(
+         metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
+     )
+     tokenizer_name_or_path: Optional[str] = field(
+         default=None,
+         metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
+     )
+     cache_dir: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": "Where do you want to store the pretrained models and datasets downloaded from " "huggingface.co"
+         },
+     )
+     freeze_feature_encoder: bool = field(
+         default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
+     )
+     attention_dropout: float = field(
+         default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
+     )
+     activation_dropout: float = field(
+         default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
+     )
+     feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
+     hidden_dropout: float = field(
+         default=0.0,
+         metadata={
+             "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
+         },
+     )
+     final_dropout: float = field(
+         default=0.0,
+         metadata={"help": "The dropout probability for the final projection layer."},
+     )
+     mask_time_prob: float = field(
+         default=0.05,
+         metadata={
+             "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector "
+             "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
+             "vectors will be masked along the time axis."
+         },
+     )
+     mask_time_length: int = field(
+         default=10,
+         metadata={"help": "Length of vector span to mask along the time axis."},
+     )
+     mask_feature_prob: float = field(
+         default=0.0,
+         metadata={
+             "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector "
+             "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the time axis."
+         },
+     )
+     mask_feature_length: int = field(
+         default=10,
+         metadata={"help": "Length of vector span to mask along the feature axis."},
+     )
+     layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
+     ctc_loss_reduction: Optional[str] = field(
+         default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
+     )
+
+
+ @dataclass
+ class DataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+
+     Using `HfArgumentParser` we can turn this class
+     into argparse arguments to be able to specify them on
+     the command line.
+     """
+
+     dataset_name: str = field(
+         default="google/xtreme_s",
+         metadata={"help": "The name of the dataset to use (via the datasets library). Defaults to 'google/xtreme_s'"},
+     )
+     task: str = field(
+         default=None,
+         metadata={
+             "help": "The task name of the benchmark to use (via the datasets library). Should be one of: "
+             "'fleurs-asr', 'mls', 'voxpopuli', 'covost2', 'minds14', 'fleurs-lang_id', 'babel'."
+         },
+     )
+     language: str = field(
+         default="all",
+         metadata={"help": "The language id as defined in the datasets config name or `all` for all languages."},
+     )
+     train_split_name: str = field(
+         default="train",
+         metadata={
+             "help": "The name of the training dataset split to use (via the datasets library). Defaults to 'train'"
+         },
+     )
+     eval_split_name: str = field(
+         default="validation",
+         metadata={
+             "help": "The name of the evaluation dataset split to use (via the datasets library). "
+             "Defaults to 'validation'"
+         },
+     )
+     predict_split_name: str = field(
+         default="test",
+         metadata={
+             "help": "The name of the prediction dataset split to use (via the datasets library). " "Defaults to 'test'"
+         },
+     )
+     audio_column_name: str = field(
+         default="audio",
+         metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
+     )
+     target_column_name: str = field(
+         default=None,
+         metadata={
+             "help": "The name of the dataset column containing the target data "
+             "(transcription/translation/label). If None, the name will be inferred from the task. Defaults to None."
+         },
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
+     )
+     preprocessing_num_workers: Optional[int] = field(
+         default=None,
+         metadata={"help": "The number of processes to use for the preprocessing."},
+     )
+     max_train_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
+             "value if set."
+         },
+     )
+     max_eval_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
+             "value if set."
+         },
+     )
+     max_predict_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
+             "value if set."
+         },
+     )
+     chars_to_ignore: Optional[List[str]] = list_field(
+         default=', ? . ! - ; : " “ % ‘ ” �'.split(" "),
+         metadata={"help": "A list of characters to remove from the transcripts."},
+     )
+     max_duration_in_seconds: float = field(
+         default=30.0,
+         metadata={
+             "help": "Filter audio files that are longer than `max_duration_in_seconds` seconds to `max_duration_in_seconds`"
+         },
+     )
+     min_duration_in_seconds: float = field(
+         default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
+     )
+     preprocessing_only: bool = field(
+         default=False,
+         metadata={
+             "help": "Whether to only do data preprocessing and skip training. "
+             "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
+             "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
+             "so that the cached datasets can consequently be loaded in distributed training"
+         },
+     )
+     use_auth_token: bool = field(
+         default=False,
+         metadata={
+             "help": "If :obj:`True`, will use the token generated when running"
+             " :obj:`transformers-cli login` as HTTP bearer authorization for remote files."
+         },
+     )
+     unk_token: str = field(
+         default="[UNK]",
+         metadata={"help": "The unk token for the tokenizer"},
+     )
+     pad_token: str = field(
+         default="[PAD]",
+         metadata={"help": "The padding token for the tokenizer"},
+     )
+     word_delimiter_token: str = field(
+         default="|",
+         metadata={"help": "The word delimiter token for the tokenizer"},
+     )
+     phoneme_language: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": "The target language that should be"
+             " passed to the tokenizer for tokenization. Note that"
+             " this is only relevant if the model classifies the"
+             " input audio to a sequence of phoneme sequences."
+         },
+     )
+     per_lang_metrics: bool = field(
+         default=True,
+         metadata={
+             "help": "If `True`, compute the test metrics separately for each language, and average the results. "
+             "If `False` compute the average test metrics in a single pass for all languages at once."
+         },
+     )
+
+
+ @dataclass
+ class SpeechDataCollatorWithPadding:
+
+     processor: AutoProcessor
+     decoder_start_token_id: Optional[int] = None
+     padding: Union[bool, str] = "longest"
+     pad_labels: Optional[int] = True
+     pad_to_multiple_of: Optional[int] = None
+     pad_to_multiple_of_labels: Optional[int] = None
+
+     def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
+         # split inputs and labels since they have to be of different lengths and need
+         # different padding methods
+         input_features = [{"input_values": feature["input_values"]} for feature in features]
+
+         batch = self.processor.pad(
+             input_features,
+             padding=self.padding,
+             pad_to_multiple_of=self.pad_to_multiple_of,
+             return_tensors="pt",
+         )
+
+         if self.pad_labels:
+             label_features = [{"input_ids": feature["labels"]} for feature in features]
+             with self.processor.as_target_processor():
+                 labels_batch = self.processor.pad(
+                     label_features,
+                     padding=self.padding,
+                     pad_to_multiple_of=self.pad_to_multiple_of_labels,
+                     return_tensors="pt",
+                 )
+
+             # replace padding with -100 to ignore loss correctly
+             labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
+
+             # if bos token is appended in previous tokenization step,
+             # cut bos token here as it's appended later anyway
+             if (
+                 self.decoder_start_token_id is not None
+                 and (labels[:, 0] == self.decoder_start_token_id).all().cpu().item()
+             ):
+                 labels = labels[:, 1:]
+
+             batch["labels"] = labels
+         else:
+             batch["labels"] = torch.tensor([feature["labels"] for feature in features])
+
+         return batch
+
+
+ def create_vocabulary_from_data(
+     datasets: DatasetDict,
+     word_delimiter_token: Optional[str] = None,
+     unk_token: Optional[str] = None,
+     pad_token: Optional[str] = None,
+ ):
+     # Given training and test labels create vocabulary
+     def extract_all_chars(batch):
+         all_text = " ".join(batch["target_text"])
+         vocab = list(set(all_text))
+         return {"vocab": [vocab], "all_text": [all_text]}
+
+     vocabs = datasets.map(
+         extract_all_chars,
+         batched=True,
+         batch_size=-1,
+         keep_in_memory=True,
+         remove_columns=datasets["train"].column_names,
+     )
+
+     # take union of all unique characters in each dataset
+     vocab_set = (
+         (set(vocabs["train"]["vocab"][0]) if "train" in vocabs else set())
+         | (set(vocabs["eval"]["vocab"][0]) if "eval" in vocabs else set())
+         | (set(vocabs["predict"]["vocab"][0]) if "predict" in vocabs else set())
+     )
+
+     vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}
+
+     # replace white space with delimiter token
+     if word_delimiter_token is not None:
+         vocab_dict[word_delimiter_token] = vocab_dict[" "]
+         del vocab_dict[" "]
+
+     # add unk and pad token
+     if unk_token is not None:
+         vocab_dict[unk_token] = len(vocab_dict)
+
+     if pad_token is not None:
+         vocab_dict[pad_token] = len(vocab_dict)
+
+     return vocab_dict
+
+
+ def main():
+     # See all possible arguments in src/transformers/training_args.py
+     # or by passing the --help flag to this script.
+     # We now keep distinct sets of args, for a cleaner separation of concerns.
+
+     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # If we pass only one argument to the script and it's the path to a json file,
+         # let's parse it to get our arguments.
+         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+     else:
+         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+     # Detecting last checkpoint.
+     last_checkpoint = None
+     if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+         last_checkpoint = get_last_checkpoint(training_args.output_dir)
+         if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+             raise ValueError(
+                 f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                 "Use --overwrite_output_dir to overcome."
+             )
+         elif last_checkpoint is not None:
+             logger.info(
+                 f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                 "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+             )
+
+     # Setup logging
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         handlers=[logging.StreamHandler(sys.stdout)],
+     )
+     logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
+
+     # Log on each process the small summary:
+     logger.warning(
+         f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+         f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
+     )
+     # Set the verbosity to info of the Transformers logger (on main process only):
+     if is_main_process(training_args.local_rank):
+         transformers.utils.logging.set_verbosity_info()
+     logger.info("Training/evaluation parameters %s", training_args)
+
+     # Set seed before initializing model.
+     set_seed(training_args.seed)
+
+     # 1. First, let's load the dataset
+     raw_datasets = DatasetDict()
+     task_name = data_args.task
+     lang_id = data_args.language
+
+     if task_name is None:
+         raise ValueError(
+             "--task should be set to '<xtreme_s_task>' " "(e.g. 'fleurs-asr', 'mls', 'covost2', 'minds14')"
+         )
+     if lang_id is None:
+         raise ValueError(
+             "--language should be set to the language id of the sub dataset "
+             "config to be used (e.g. 'pl', 'en.tr', 'fr-FR') or 'all'"
+             " for multi-lingual fine-tuning."
+         )
+
+     if data_args.target_column_name is None:
+         target_column_name = TASK_TO_TARGET_COLUMN_NAME[task_name]
+     else:
+         target_column_name = data_args.target_column_name
+
+     # here we differentiate between tasks with text as the target and classification tasks
+     is_text_target = target_column_name in ("transcription", "translation")
+
+     config_name = ".".join([task_name.split("-")[0], lang_id])
+
+     if training_args.do_train:
+         raw_datasets["train"] = load_dataset(
+             data_args.dataset_name,
+             config_name,
+             split=data_args.train_split_name,
+             use_auth_token=data_args.use_auth_token,
+             cache_dir=model_args.cache_dir,
+         )
+
+         if data_args.audio_column_name not in raw_datasets["train"].column_names:
+             raise ValueError(
+                 f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
+                 "Make sure to set `--audio_column_name` to the correct audio column - one of "
+                 f"{', '.join(raw_datasets['train'].column_names)}."
+             )
+
+         if target_column_name not in raw_datasets["train"].column_names:
+             raise ValueError(
+                 f"--target_column_name {target_column_name} not found in dataset '{data_args.dataset_name}'. "
+                 "Make sure to set `--target_column_name` to the correct text column - one of "
+                 f"{', '.join(raw_datasets['train'].column_names)}."
+             )
+
+         if data_args.max_train_samples is not None:
+             raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
+
+     if training_args.do_eval:
+         raw_datasets["eval"] = load_dataset(
+             data_args.dataset_name,
+             config_name,
+             split=data_args.eval_split_name,
+             use_auth_token=data_args.use_auth_token,
+             cache_dir=model_args.cache_dir,
+         )
+
+         if data_args.max_eval_samples is not None:
+             raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
+
+     if training_args.do_predict:
+         raw_datasets["predict"] = load_dataset(
+             data_args.dataset_name,
+             config_name,
+             split=data_args.predict_split_name,
+             use_auth_token=data_args.use_auth_token,
+             cache_dir=model_args.cache_dir,
+         )
+
+         if data_args.max_predict_samples is not None:
+             raw_datasets["predict"] = raw_datasets["predict"].select(range(data_args.max_predict_samples))
+
+     if not is_text_target:
+         label_list = next(iter(raw_datasets.values())).features[target_column_name].names
+         lang_list = next(iter(raw_datasets.values())).features["lang_id"].names
+         num_labels = len(label_list)
+
+     # 2. We remove some special characters from the datasets
+     # that make training complicated and do not help in transcribing the speech
+     # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
+     # that could be easily picked up by the model
+     chars_to_ignore_regex = (
+         f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
+     )
+
+     def remove_special_characters(batch):
+         if chars_to_ignore_regex is not None:
+             batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[target_column_name]).lower() + " "
+         else:
+             batch["target_text"] = batch[target_column_name].lower() + " "
+         return batch
+
+     if is_text_target:
+         with training_args.main_process_first(desc="dataset map special characters removal"):
+             raw_datasets = raw_datasets.map(
+                 remove_special_characters,
+                 remove_columns=[target_column_name],
+                 desc="remove special characters from datasets",
+             )
+
+     # save special tokens for tokenizer
+     word_delimiter_token = data_args.word_delimiter_token
+     unk_token = data_args.unk_token
+     pad_token = data_args.pad_token
+
+     # 3. Next, let's load the config as we might need it to create
+     # the tokenizer
+     config = AutoConfig.from_pretrained(
+         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
+     )
+
+     if is_text_target:
+         # 4. (Optional, for ASR and translation) If no tokenizer file is defined,
+         # we create the vocabulary of the model by extracting all unique characters from
+         # the training and evaluation datasets
+         # We need to make sure that only first rank saves vocabulary
+         # make sure all processes wait until vocab is created
+         tokenizer_name_or_path = model_args.tokenizer_name_or_path
+         tokenizer_kwargs = {}
+         if tokenizer_name_or_path is None:
+             # save vocab in training output dir
+             tokenizer_name_or_path = training_args.output_dir
+
+             vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
+
+             with training_args.main_process_first():
+                 if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
+                     os.remove(vocab_file)
+
+             with training_args.main_process_first(desc="dataset map vocabulary creation"):
+                 if not os.path.isfile(vocab_file):
+                     os.makedirs(tokenizer_name_or_path, exist_ok=True)
+                     vocab_dict = create_vocabulary_from_data(
+                         raw_datasets,
+                         word_delimiter_token=word_delimiter_token,
+                         unk_token=unk_token,
+                         pad_token=pad_token,
+                     )
+
+                     # save vocab dict to be loaded into tokenizer
+                     with open(vocab_file, "w") as file:
+                         json.dump(vocab_dict, file)
+
+             # if tokenizer has just been created
+             # it is defined by `tokenizer_class` if present in config else by `model_type`
+             if not config.is_encoder_decoder:
+                 tokenizer_kwargs = {
+                     "config": config if config.tokenizer_class is not None else None,
+                     "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
+                     "unk_token": unk_token,
+                     "pad_token": pad_token,
+                     "word_delimiter_token": word_delimiter_token,
+                 }
+             else:
+                 tokenizer_kwargs = {}
+
+     # 5. Now we can instantiate the feature extractor, tokenizer and model
+     # Note for distributed training, the .from_pretrained methods guarantee that only
+     # one local process can concurrently download model & vocab.
+
+     # load feature_extractor and tokenizer
+     if is_text_target:
+         tokenizer = AutoTokenizer.from_pretrained(
+             tokenizer_name_or_path,
+             use_auth_token=data_args.use_auth_token,
+             **tokenizer_kwargs,
+         )
+     feature_extractor = AutoFeatureExtractor.from_pretrained(
+         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
+     )
+
+     # adapt config
+     # (speech translation requires pre-configured seq2seq models)
+     if task_name != "covost2":
+         config.update(
+             {
+                 "feat_proj_dropout": model_args.feat_proj_dropout,
+                 "attention_dropout": model_args.attention_dropout,
+                 "hidden_dropout": model_args.hidden_dropout,
+                 "final_dropout": model_args.final_dropout,
+                 "mask_time_prob": model_args.mask_time_prob,
+                 "mask_time_length": model_args.mask_time_length,
+                 "mask_feature_prob": model_args.mask_feature_prob,
+                 "mask_feature_length": model_args.mask_feature_length,
+                 "gradient_checkpointing": training_args.gradient_checkpointing,
+                 "layerdrop": model_args.layerdrop,
+                 "ctc_loss_reduction": model_args.ctc_loss_reduction,
+                 "activation_dropout": model_args.activation_dropout,
+             }
+         )
+         if training_args.do_train:
+             if is_text_target:
+                 config.pad_token_id = tokenizer.pad_token_id
+                 config.vocab_size = len(tokenizer)
+             else:
+                 label_to_id = {v: i for i, v in enumerate(label_list)}
+                 config.label2id = label_to_id
+                 config.id2label = {id: label for label, id in label_to_id.items()}
+                 config.num_labels = num_labels
+     else:
+         config.encoder.update({"hidden_dropout": model_args.hidden_dropout})
+
+     # create model
+     if target_column_name == "transcription":
+         model = AutoModelForCTC.from_pretrained(
+             model_args.model_name_or_path,
+             cache_dir=model_args.cache_dir,
+             config=config,
+             use_auth_token=data_args.use_auth_token,
+         )
+     elif config.is_encoder_decoder:
+         model = AutoModelForSpeechSeq2Seq.from_pretrained(
+             model_args.model_name_or_path,
+             cache_dir=model_args.cache_dir,
+             config=config,
+             use_auth_token=data_args.use_auth_token,
+         )
+         if model.config.decoder_start_token_id is None:
+             raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
+     else:
+         model = AutoModelForAudioClassification.from_pretrained(
+             model_args.model_name_or_path,
+             cache_dir=model_args.cache_dir,
+             config=config,
+             use_auth_token=data_args.use_auth_token,
+         )
+
+     # freeze encoder
+     if model_args.freeze_feature_encoder:
+         model.freeze_feature_encoder()
+
+     # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
+     # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
+     # so that we just need to set the correct target sampling rate and normalize the input
+     # via the `feature_extractor`
+
+     # make sure that dataset decodes audio with correct sampling rate
+     dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
+     if dataset_sampling_rate != feature_extractor.sampling_rate:
+         raw_datasets = raw_datasets.cast_column(
+             data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
+         )
+
+     # derive max & min input length for sample rate & max duration
+     max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
+     min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
+     audio_column_name = data_args.audio_column_name
+     num_workers = data_args.preprocessing_num_workers
+
+     # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
+     phoneme_language = data_args.phoneme_language
+
+     # Preprocessing the datasets.
+     # We need to read the audio files as arrays and tokenize the targets.
+     def prepare_dataset(batch):
+         # load audio
+         sample = batch[audio_column_name]
+
+         inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
+         batch["input_values"] = inputs.input_values[0]
+         batch["length"] = len(batch["input_values"])
+
+         # encode targets
+         additional_kwargs = {}
+         if phoneme_language is not None:
+             additional_kwargs["phonemizer_lang"] = phoneme_language
+
+         if is_text_target:
+             batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
+         else:
+             batch["labels"] = batch[target_column_name]
+
+         batch["lang"] = batch["lang_id"]
+
+         return batch
+
+     with training_args.main_process_first(desc="dataset map preprocessing"):
+         vectorized_datasets = raw_datasets.map(
+             prepare_dataset,
+             remove_columns=next(iter(raw_datasets.values())).column_names,
+             num_proc=num_workers,
+             desc="preprocess datasets",
+         )
+
+     if training_args.do_train:
+
+         def is_audio_in_length_range(length):
+             return length > min_input_length and length < max_input_length
+
+         # filter data that is shorter than min_input_length
+         vectorized_datasets["train"] = vectorized_datasets["train"].filter(
+             is_audio_in_length_range,
+             num_proc=num_workers,
+             input_columns=["length"],
+         )
+
+     # 7. Next, we can prepare for the training step.
+     # Let's use the appropriate XTREME-S evaluation metric,
+     # instantiate a data collator and the trainer
+
+     # Define evaluation metrics during training, *i.e.* word error rate, character error rate
+     eval_metric = load_metric("xtreme_s", task_name)
+
+     # for large datasets it is advised to run the preprocessing on a
+     # single machine first with ``args.preprocessing_only`` since there will most likely
+     # be a timeout when running the script in distributed mode.
+     # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
+     # cached dataset
+     if data_args.preprocessing_only:
+         logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
+         return
+
+     def compute_asr_metric(pred):
+         pred_logits = pred.predictions
+         pred_ids = np.argmax(pred_logits, axis=-1)
+
+         pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
+
+         pred_str = tokenizer.batch_decode(pred_ids)
+         # we do not want to group tokens when computing the metrics
+         label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
+
+         metric = eval_metric.compute(predictions=pred_str, references=label_str)
+         return metric
+
+     def compute_classification_metric(pred):
+         pred_ids = np.argmax(pred.predictions, axis=1)
+         metric = eval_metric.compute(predictions=pred_ids, references=pred.label_ids)
+         return metric
+
+     # Now save everything to be able to create a single processor later
+     if is_main_process(training_args.local_rank):
+         # save feature extractor, tokenizer and config
+         feature_extractor.save_pretrained(training_args.output_dir)
+         if is_text_target:
+             tokenizer.save_pretrained(training_args.output_dir)
+         config.save_pretrained(training_args.output_dir)
+     # wait until configs are saved in the main process before loading the processor
+     if training_args.local_rank != -1:
+         torch.distributed.barrier()
+
+     if is_text_target:
+         processor = AutoProcessor.from_pretrained(training_args.output_dir)
+     else:
+         processor = AutoFeatureExtractor.from_pretrained(training_args.output_dir)
+
+     # Instantiate custom data collator
+     data_collator = SpeechDataCollatorWithPadding(processor=processor, pad_labels=is_text_target)
+
+     # Initialize Trainer
+     if target_column_name == "translation":
+         trainer = Seq2SeqTrainer(
+             model=model,
+             data_collator=data_collator,
+             args=training_args,
+             compute_metrics=compute_asr_metric if training_args.predict_with_generate else None,
+             train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
+             eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
+             tokenizer=feature_extractor,
+         )
+     else:
+         trainer = Trainer(
+             model=model,
+             data_collator=data_collator,
+             args=training_args,
+             compute_metrics=compute_asr_metric if is_text_target else compute_classification_metric,
+             train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
+             eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
+             tokenizer=feature_extractor,
+         )
+
+     # 8. Finally, we can start training
+
+     # Training
+     if training_args.do_train:
+
+         # use last checkpoint if it exists
+         if last_checkpoint is not None:
+             checkpoint = last_checkpoint
+         elif os.path.isdir(model_args.model_name_or_path):
+             checkpoint = model_args.model_name_or_path
+         else:
+             checkpoint = None
+
+         train_result = trainer.train(resume_from_checkpoint=checkpoint)
+         trainer.save_model()
+
+         metrics = train_result.metrics
+         max_train_samples = (
+             data_args.max_train_samples
+             if data_args.max_train_samples is not None
+             else len(vectorized_datasets["train"])
+         )
+         metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
+
+         trainer.log_metrics("train", metrics)
+         trainer.save_metrics("train", metrics)
+         trainer.save_state()
+
+     # Evaluation on the test set
+     results = {}
+     if training_args.do_predict:
+         logger.info(f"*** Evaluating on the `{data_args.predict_split_name}` set ***")
+         if data_args.per_lang_metrics:
+             # separate the `test` dataset into language-specific subsets and compute metrics for each of them
+             metrics = {}
+             average_metrics = defaultdict(list)
+             for lang_id in range(len(lang_list)):
+                 lang_name = lang_list[lang_id]
+                 lang_dataset = vectorized_datasets["predict"].filter(lambda example: example["lang"] == lang_id)
+                 lang_metrics = trainer.evaluate(lang_dataset)
+                 for metric_name, value in lang_metrics.items():
+                     average_metrics[metric_name].append(value)
+                     if metric_name not in ["eval_runtime", "eval_samples_per_second", "eval_steps_per_second"]:
+                         metrics[f"{metric_name}_{lang_name}"] = value
+             for metric_name, value in average_metrics.items():
+                 metrics[metric_name] = np.mean(value)
+         else:
+             metrics = trainer.evaluate(vectorized_datasets["predict"])
+         max_predict_samples = (
+             data_args.max_predict_samples
+             if data_args.max_predict_samples is not None
+             else len(vectorized_datasets["predict"])
+         )
+         metrics["predict_samples"] = min(max_predict_samples, len(vectorized_datasets["predict"]))
+
+         # make sure that the `predict` metrics end up in the log history for the model card
+         trainer.log(OrderedDict(sorted(metrics.items())))
+
+         trainer.log_metrics("predict", metrics)
+         trainer.save_metrics("predict", metrics)
+
+     # Write model card and (optionally) push to hub
+     kwargs = {
+         "finetuned_from": model_args.model_name_or_path,
+         "tasks": task_name,
+         "tags": [task_name, data_args.dataset_name],
+         "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}, Predict split: {data_args.predict_split_name}",
+         "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
+         "language": data_args.language,
+     }
+
+     if training_args.push_to_hub:
+         trainer.push_to_hub(**kwargs)
+     else:
+         trainer.create_model_card(**kwargs)
+
+     return results
+
+
+ if __name__ == "__main__":
+     main()
+
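For reference (not part of the commit), the character-vocabulary helper create_vocabulary_from_data defined above can be exercised on a toy DatasetDict. The two-sentence corpus below is made up, and the exact id mapping depends on which characters appear:

from datasets import Dataset, DatasetDict

toy = DatasetDict(
    {
        "train": Dataset.from_dict({"target_text": ["le chat gris ", "the grey cat "]}),
        "eval": Dataset.from_dict({"target_text": ["un chien "]}),
    }
)

vocab = create_vocabulary_from_data(toy, word_delimiter_token="|", unk_token="[UNK]", pad_token="[PAD]")
print(vocab)  # sorted characters mapped to ids, "|" replacing the space, [UNK]/[PAD] appended last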