chmanoj committed
Commit 84b4dd5
Parent: 29b3287

Update gitignore

.gitignore CHANGED
@@ -1 +1,2 @@
- checkpoint-*/
+ checkpoint-*/
+ .ipynb_checkpoints*/
.ipynb_checkpoints/run-checkpoint.sh CHANGED
@@ -5,8 +5,8 @@ python run_speech_recognition_ctc.py \
  --preprocessing_num_workers="8" \
  --output_dir="./" \
  --overwrite_output_dir \
- --num_train_epochs="5" \
- --per_device_train_batch_size="4" \
+ --num_train_epochs="100" \
+ --per_device_train_batch_size="16" \
  --per_device_eval_batch_size="4" \
  --gradient_accumulation_steps="8" \
  --learning_rate="2e-5" \
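For context on the two edited flags: with gradient accumulation, the per-device batch size multiplies into the effective batch size per optimizer step. A quick sanity check in Python (num_gpus = 1 is an assumption; the commit does not record the hardware used):

# Effective batch size per optimizer step, before and after this commit.
# num_gpus = 1 is an assumption; the hardware is not recorded here.
gradient_accumulation_steps = 8
num_gpus = 1
old = 4 * gradient_accumulation_steps * num_gpus   # 32
new = 16 * gradient_accumulation_steps * num_gpus  # 128
print(old, new)  # 32 128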
.ipynb_checkpoints/run_speech_recognition_ctc-checkpoint.py DELETED
@@ -1,756 +0,0 @@
- #!/usr/bin/env python
- # coding=utf-8
- # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
-
- """ Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""
-
- import functools
- import json
- import logging
- import os
- import re
- import sys
- import warnings
- from dataclasses import dataclass, field
- from typing import Dict, List, Optional, Union
-
- import datasets
- import numpy as np
- import torch
- from datasets import DatasetDict, load_dataset, load_metric
-
- import transformers
- from transformers import (
-     AutoConfig,
-     AutoFeatureExtractor,
-     AutoModelForCTC,
-     AutoProcessor,
-     AutoTokenizer,
-     HfArgumentParser,
-     Trainer,
-     TrainingArguments,
-     Wav2Vec2Processor,
-     set_seed,
- )
- from transformers.trainer_utils import get_last_checkpoint, is_main_process
- from transformers.utils import check_min_version
- from transformers.utils.versions import require_version
-
-
- # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
- check_min_version("4.16.0.dev0")
-
- require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
-
-
- logger = logging.getLogger(__name__)
-
-
- def list_field(default=None, metadata=None):
-     return field(default_factory=lambda: default, metadata=metadata)
-
- def get_telugu_dataset(validation_split=False):
-     dataset = load_dataset('openslr', 'SLR66')
-
-     seed=1242
-
-     if validation_split:
-         train_testvalid = dataset['train'].train_test_split(test_size=0.2, seed=seed)
-         # Split the 10% test + valid in half test, half valid
-         test_valid = train_testvalid['test'].train_test_split(test_size=0.33, seed=seed)
-         # gather everyone if you want to have a single DatasetDict
-         out_dataset = DatasetDict({
-             'train': train_testvalid['train'],
-             'test': test_valid['test'],
-             'valid': test_valid['train']})
-     else:
-         train_testvalid = dataset['train'].train_test_split(test_size=0.25, seed=seed)
-         out_dataset = DatasetDict({
-             'train': train_testvalid['train'],
-             'test': train_testvalid['test']})
-     return out_dataset
-
-
- @dataclass
- class ModelArguments:
-     """
-     Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
-     """
-
-     model_name_or_path: str = field(
-         metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
-     )
-     tokenizer_name_or_path: Optional[str] = field(
-         default=None,
-         metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
-     )
-     cache_dir: Optional[str] = field(
-         default=None,
-         metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
-     )
-     freeze_feature_encoder: bool = field(
-         default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
-     )
-     attention_dropout: float = field(
-         default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
-     )
-     activation_dropout: float = field(
-         default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
-     )
-     feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
-     hidden_dropout: float = field(
-         default=0.0,
-         metadata={
-             "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
-         },
-     )
-     final_dropout: float = field(
-         default=0.0,
-         metadata={"help": "The dropout probability for the final projection layer."},
-     )
-     mask_time_prob: float = field(
-         default=0.05,
-         metadata={
-             "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector"
-             "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
-             "vectors will be masked along the time axis."
-         },
-     )
-     mask_time_length: int = field(
-         default=10,
-         metadata={"help": "Length of vector span to mask along the time axis."},
-     )
-     mask_feature_prob: float = field(
-         default=0.0,
-         metadata={
-             "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector"
-             "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the time axis."
-         },
-     )
-     mask_feature_length: int = field(
-         default=10,
-         metadata={"help": "Length of vector span to mask along the feature axis."},
-     )
-     layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
-     ctc_loss_reduction: Optional[str] = field(
-         default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
-     )
-
-
- @dataclass
- class DataTrainingArguments:
-     """
-     Arguments pertaining to what data we are going to input our model for training and eval.
-
-     Using `HfArgumentParser` we can turn this class
-     into argparse arguments to be able to specify them on
-     the command line.
-     """
-
-     dataset_name: str = field(
-         metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
-     )
-     dataset_config_name: str = field(
-         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
-     )
-     train_split_name: str = field(
-         default="train+validation",
-         metadata={
-             "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
-         },
-     )
-     eval_split_name: str = field(
-         default="test",
-         metadata={
-             "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
-         },
-     )
-     audio_column_name: str = field(
-         default="audio",
-         metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
-     )
-     text_column_name: str = field(
-         default="text",
-         metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
-     )
-     overwrite_cache: bool = field(
-         default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
-     )
-     preprocessing_num_workers: Optional[int] = field(
-         default=None,
-         metadata={"help": "The number of processes to use for the preprocessing."},
-     )
-     max_train_samples: Optional[int] = field(
-         default=None,
-         metadata={
-             "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
-             "value if set."
-         },
-     )
-     max_eval_samples: Optional[int] = field(
-         default=None,
-         metadata={
-             "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
-             "value if set."
-         },
-     )
-     chars_to_ignore: Optional[List[str]] = list_field(
-         default=None,
-         metadata={"help": "A list of characters to remove from the transcripts."},
-     )
-     eval_metrics: List[str] = list_field(
-         default=["wer"],
-         metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
-     )
-     max_duration_in_seconds: float = field(
-         default=20.0,
-         metadata={
-             "help": "Filter audio files that are longer than `max_duration_in_seconds` seconds to 'max_duration_in_seconds`"
-         },
-     )
-     min_duration_in_seconds: float = field(
-         default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
-     )
-     preprocessing_only: bool = field(
-         default=False,
-         metadata={
-             "help": "Whether to only do data preprocessing and skip training. "
-             "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
-             "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
-             "so that the cached datasets can consequently be loaded in distributed training"
-         },
-     )
-     use_auth_token: bool = field(
-         default=False,
-         metadata={
-             "help": "If :obj:`True`, will use the token generated when running"
-             ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
-         },
-     )
-     unk_token: str = field(
-         default="[UNK]",
-         metadata={"help": "The unk token for the tokenizer"},
-     )
-     pad_token: str = field(
-         default="[PAD]",
-         metadata={"help": "The padding token for the tokenizer"},
-     )
-     word_delimiter_token: str = field(
-         default="|",
-         metadata={"help": "The word delimiter token for the tokenizer"},
-     )
-     phoneme_language: Optional[str] = field(
-         default=None,
-         metadata={
-             "help": "The target language that should be used be"
-             " passed to the tokenizer for tokenization. Note that"
-             " this is only relevant if the model classifies the"
-             " input audio to a sequence of phoneme sequences."
-         },
-     )
-
-
- @dataclass
- class DataCollatorCTCWithPadding:
-     """
-     Data collator that will dynamically pad the inputs received.
-     Args:
-         processor (:class:`~transformers.AutoProcessor`)
-             The processor used for proccessing the data.
-         padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
-             Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
-             among:
-             * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
-               sequence if provided).
-             * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
-               maximum acceptable input length for the model if that argument is not provided.
-             * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
-               different lengths).
-         max_length (:obj:`int`, `optional`):
-             Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
-         max_length_labels (:obj:`int`, `optional`):
-             Maximum length of the ``labels`` returned list and optionally padding length (see above).
-         pad_to_multiple_of (:obj:`int`, `optional`):
-             If set will pad the sequence to a multiple of the provided value.
-             This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
-             7.5 (Volta).
-     """
-
-     processor: AutoProcessor
-     padding: Union[bool, str] = "longest"
-     pad_to_multiple_of: Optional[int] = None
-     pad_to_multiple_of_labels: Optional[int] = None
-
-     def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
-         # split inputs and labels since they have to be of different lenghts and need
-         # different padding methods
-         input_features = [{"input_values": feature["input_values"]} for feature in features]
-         label_features = [{"input_ids": feature["labels"]} for feature in features]
-
-         batch = self.processor.pad(
-             input_features,
-             padding=self.padding,
-             pad_to_multiple_of=self.pad_to_multiple_of,
-             return_tensors="pt",
-         )
-
-         with self.processor.as_target_processor():
-             labels_batch = self.processor.pad(
-                 label_features,
-                 padding=self.padding,
-                 pad_to_multiple_of=self.pad_to_multiple_of_labels,
-                 return_tensors="pt",
-             )
-
-         # replace padding with -100 to ignore loss correctly
-         labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
-
-         batch["labels"] = labels
-
-         return batch
-
-
- def create_vocabulary_from_data(
-     datasets: DatasetDict,
-     word_delimiter_token: Optional[str] = None,
-     unk_token: Optional[str] = None,
-     pad_token: Optional[str] = None,
- ):
-     # Given training and test labels create vocabulary
-     def extract_all_chars(batch):
-         all_text = " ".join(batch["target_text"])
-         vocab = list(set(all_text))
-         return {"vocab": [vocab], "all_text": [all_text]}
-
-     vocabs = datasets.map(
-         extract_all_chars,
-         batched=True,
-         batch_size=-1,
-         keep_in_memory=True,
-         remove_columns=datasets["train"].column_names,
-     )
-
-     # take union of all unique characters in each dataset
-     vocab_set = functools.reduce(
-         lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
-     )
-
-     vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}
-
-     # replace white space with delimiter token
-     if word_delimiter_token is not None:
-         vocab_dict[word_delimiter_token] = vocab_dict[" "]
-         del vocab_dict[" "]
-
-     # add unk and pad token
-     if unk_token is not None:
-         vocab_dict[unk_token] = len(vocab_dict)
-
-     if pad_token is not None:
-         vocab_dict[pad_token] = len(vocab_dict)
-
-     return vocab_dict
-
-
- def main():
-     # See all possible arguments in src/transformers/training_args.py
-     # or by passing the --help flag to this script.
-     # We now keep distinct sets of args, for a cleaner separation of concerns.
-
-     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
-     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
-         # If we pass only one argument to the script and it's the path to a json file,
-         # let's parse it to get our arguments.
-         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
-     else:
-         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
-
-     # Setup logging
-     logging.basicConfig(
-         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
-         datefmt="%m/%d/%Y %H:%M:%S",
-         handlers=[logging.StreamHandler(sys.stdout)],
-     )
-     logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
-
-     # Detecting last checkpoint.
-     last_checkpoint = None
-     if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
-         last_checkpoint = get_last_checkpoint(training_args.output_dir)
-         if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
-             raise ValueError(
-                 f"Output directory ({training_args.output_dir}) already exists and is not empty. "
-                 "Use --overwrite_output_dir to overcome."
-             )
-         elif last_checkpoint is not None:
-             logger.info(
-                 f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
-                 "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
-             )
-
-     # Log on each process the small summary:
-     logger.warning(
-         f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
-         f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
-     )
-     # Set the verbosity to info of the Transformers logger (on main process only):
-     if is_main_process(training_args.local_rank):
-         transformers.utils.logging.set_verbosity_info()
-     logger.info("Training/evaluation parameters %s", training_args)
-
-     # Set seed before initializing model.
-     set_seed(training_args.seed)
-
-     # 1. First, let's load the dataset
-     te_dataset = get_telugu_dataset(validation_split=False)
-     def load_te_dataset(split):
-         return te_dataset[split]
-
-     raw_datasets = DatasetDict()
-
-     if training_args.do_train:
-         raw_datasets["train"] = load_te_dataset(
-             split=data_args.train_split_name
-         )
-
-         if data_args.audio_column_name not in raw_datasets["train"].column_names:
-             raise ValueError(
-                 f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
-                 "Make sure to set `--audio_column_name` to the correct audio column - one of "
-                 f"{', '.join(raw_datasets['train'].column_names)}."
-             )
-
-         if data_args.text_column_name not in raw_datasets["train"].column_names:
-             raise ValueError(
-                 f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
-                 "Make sure to set `--text_column_name` to the correct text column - one of "
-                 f"{', '.join(raw_datasets['train'].column_names)}."
-             )
-
-         if data_args.max_train_samples is not None:
-             raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
-
-     if training_args.do_eval:
-         raw_datasets["eval"] = load_te_dataset(
-             split=data_args.eval_split_name
-         )
-
-         if data_args.max_eval_samples is not None:
-             raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
-
-     # 2. We remove some special characters from the datasets
-     # that make training complicated and do not help in transcribing the speech
-     # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
-     # that could be easily picked up by the model
-     chars_to_ignore_regex = (
-         f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
-     )
-     text_column_name = data_args.text_column_name
-
-     def remove_special_characters(batch):
-         if chars_to_ignore_regex is not None:
-             batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
-         else:
-             batch["target_text"] = batch[text_column_name].lower() + " "
-         return batch
-
-     with training_args.main_process_first(desc="dataset map special characters removal"):
-         raw_datasets = raw_datasets.map(
-             remove_special_characters,
-             remove_columns=[text_column_name],
-             desc="remove special characters from datasets",
-         )
-
-     # save special tokens for tokenizer
-     word_delimiter_token = data_args.word_delimiter_token
-     unk_token = data_args.unk_token
-     pad_token = data_args.pad_token
-
-     # 3. Next, let's load the config as we might need it to create
-     # the tokenizer
-     # load config
-     config = AutoConfig.from_pretrained(
-         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
-     )
-
-     # 4. Next, if no tokenizer file is defined,
-     # we create the vocabulary of the model by extracting all unique characters from
-     # the training and evaluation datasets
-     # We need to make sure that only first rank saves vocabulary
-     # make sure all processes wait until vocab is created
-     tokenizer_name_or_path = model_args.tokenizer_name_or_path
-     tokenizer_kwargs = {}
-     if tokenizer_name_or_path is None:
-         # save vocab in training output dir
-         tokenizer_name_or_path = training_args.output_dir
-
-         vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
-
-         with training_args.main_process_first():
-             if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
-                 os.remove(vocab_file)
-
-         with training_args.main_process_first(desc="dataset map vocabulary creation"):
-             if not os.path.isfile(vocab_file):
-                 os.makedirs(tokenizer_name_or_path, exist_ok=True)
-                 vocab_dict = create_vocabulary_from_data(
-                     raw_datasets,
-                     word_delimiter_token=word_delimiter_token,
-                     unk_token=unk_token,
-                     pad_token=pad_token,
-                 )
-
-                 # save vocab dict to be loaded into tokenizer
-                 with open(vocab_file, "w") as file:
-                     json.dump(vocab_dict, file)
-
-         # if tokenizer has just been created
-         # it is defined by `tokenizer_class` if present in config else by `model_type`
-         tokenizer_kwargs = {
-             "config": config if config.tokenizer_class is not None else None,
-             "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
-             "unk_token": unk_token,
-             "pad_token": pad_token,
-             "word_delimiter_token": word_delimiter_token,
-         }
-
-     # 5. Now we can instantiate the feature extractor, tokenizer and model
-     # Note for distributed training, the .from_pretrained methods guarantee that only
-     # one local process can concurrently download model & vocab.
-
-     # load feature_extractor and tokenizer
-     tokenizer = AutoTokenizer.from_pretrained(
-         tokenizer_name_or_path,
-         use_auth_token=data_args.use_auth_token,
-         **tokenizer_kwargs,
-     )
-     feature_extractor = AutoFeatureExtractor.from_pretrained(
-         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
-     )
-
-     # adapt config
-     config.update(
-         {
-             "feat_proj_dropout": model_args.feat_proj_dropout,
-             "attention_dropout": model_args.attention_dropout,
-             "hidden_dropout": model_args.hidden_dropout,
-             "final_dropout": model_args.final_dropout,
-             "mask_time_prob": model_args.mask_time_prob,
-             "mask_time_length": model_args.mask_time_length,
-             "mask_feature_prob": model_args.mask_feature_prob,
-             "mask_feature_length": model_args.mask_feature_length,
-             "gradient_checkpointing": training_args.gradient_checkpointing,
-             "layerdrop": model_args.layerdrop,
-             "ctc_loss_reduction": model_args.ctc_loss_reduction,
-             "pad_token_id": tokenizer.pad_token_id,
-             "vocab_size": len(tokenizer),
-             "activation_dropout": model_args.activation_dropout,
-         }
-     )
-
-     # create model
-     model = AutoModelForCTC.from_pretrained(
-         model_args.model_name_or_path,
-         cache_dir=model_args.cache_dir,
-         config=config,
-         use_auth_token=data_args.use_auth_token,
-     )
-
-     # freeze encoder
-     if model_args.freeze_feature_encoder:
-         model.freeze_feature_encoder()
-
-     # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
-     # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
-     # so that we just need to set the correct target sampling rate and normalize the input
-     # via the `feature_extractor`
-
-     # make sure that dataset decodes audio with correct sampling rate
-     dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
-     if dataset_sampling_rate != feature_extractor.sampling_rate:
-         raw_datasets = raw_datasets.cast_column(
-             data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
-         )
-
-     # derive max & min input length for sample rate & max duration
-     max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
-     min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
-     audio_column_name = data_args.audio_column_name
-     num_workers = data_args.preprocessing_num_workers
-
-     # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
-     phoneme_language = data_args.phoneme_language
-
-     # Preprocessing the datasets.
-     # We need to read the audio files as arrays and tokenize the targets.
-     def prepare_dataset(batch):
-         # load audio
-         sample = batch[audio_column_name]
-
-         inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
-         batch["input_values"] = inputs.input_values[0]
-         batch["input_length"] = len(batch["input_values"])
-
-         # encode targets
-         additional_kwargs = {}
-         if phoneme_language is not None:
-             additional_kwargs["phonemizer_lang"] = phoneme_language
-
-         batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
-         return batch
-
-     with training_args.main_process_first(desc="dataset map preprocessing"):
-         vectorized_datasets = raw_datasets.map(
-             prepare_dataset,
-             remove_columns=next(iter(raw_datasets.values())).column_names,
-             num_proc=num_workers,
-             desc="preprocess datasets",
-         )
-
-         def is_audio_in_length_range(length):
-             return length > min_input_length and length < max_input_length
-
-         # filter data that is shorter than min_input_length
-         vectorized_datasets = vectorized_datasets.filter(
-             is_audio_in_length_range,
-             num_proc=num_workers,
-             input_columns=["input_length"],
-         )
-
-     # 7. Next, we can prepare the training.
-     # Let's use word error rate (WER) as our evaluation metric,
-     # instantiate a data collator and the trainer
-
-     # Define evaluation metrics during training, *i.e.* word error rate, character error rate
-     eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}
-
-     # for large datasets it is advised to run the preprocessing on a
-     # single machine first with ``args.preprocessing_only`` since there will mostly likely
-     # be a timeout when running the script in distributed mode.
-     # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
-     # cached dataset
-     if data_args.preprocessing_only:
-         logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
-         return
-
-     def compute_metrics(pred):
-         pred_logits = pred.predictions
-         pred_ids = np.argmax(pred_logits, axis=-1)
-
-         pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
-
-         pred_str = tokenizer.batch_decode(pred_ids)
-         # we do not want to group tokens when computing the metrics
-         label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
-
-         metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}
-
-         return metrics
-
-     # Now save everything to be able to create a single processor later
-     if is_main_process(training_args.local_rank):
-         # save feature extractor, tokenizer and config
-         feature_extractor.save_pretrained(training_args.output_dir)
-         tokenizer.save_pretrained(training_args.output_dir)
-         config.save_pretrained(training_args.output_dir)
-
-     try:
-         processor = AutoProcessor.from_pretrained(training_args.output_dir)
-     except (OSError, KeyError):
-         warnings.warn(
-             "Loading a processor from a feature extractor config that does not"
-             " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
-             " attribute to your `preprocessor_config.json` file to suppress this warning: "
-             " `'processor_class': 'Wav2Vec2Processor'`",
-             FutureWarning,
-         )
-         processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)
-
-     # Instantiate custom data collator
-     data_collator = DataCollatorCTCWithPadding(processor=processor)
-
-     # Initialize Trainer
-     trainer = Trainer(
-         model=model,
-         data_collator=data_collator,
-         args=training_args,
-         compute_metrics=compute_metrics,
-         train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
-         eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
-         tokenizer=feature_extractor,
-     )
-
-     # 8. Finally, we can start training
-
-     # Training
-     if training_args.do_train:
-
-         # use last checkpoint if exist
-         if last_checkpoint is not None:
-             checkpoint = last_checkpoint
-         elif os.path.isdir(model_args.model_name_or_path):
-             checkpoint = model_args.model_name_or_path
-         else:
-             checkpoint = None
-
-         train_result = trainer.train(resume_from_checkpoint=checkpoint)
-         trainer.save_model()
-
-         metrics = train_result.metrics
-         max_train_samples = (
-             data_args.max_train_samples
-             if data_args.max_train_samples is not None
-             else len(vectorized_datasets["train"])
-         )
-         metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
-
-         trainer.log_metrics("train", metrics)
-         trainer.save_metrics("train", metrics)
-         trainer.save_state()
-
-     # Evaluation
-     results = {}
-     if training_args.do_eval:
-         logger.info("*** Evaluate ***")
-         metrics = trainer.evaluate()
-         max_eval_samples = (
-             data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
-         )
-         metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
-
-         trainer.log_metrics("eval", metrics)
-         trainer.save_metrics("eval", metrics)
-
-     # Write model card and (optionally) push to hub
-     config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
-     kwargs = {
-         "finetuned_from": model_args.model_name_or_path,
-         "tasks": "speech-recognition",
-         "tags": ["automatic-speech-recognition", data_args.dataset_name, "robust-speech-event"],
-         "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
-         "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
-     }
-     if "common_voice" in data_args.dataset_name:
-         kwargs["language"] = config_name
-
-     if training_args.push_to_hub:
-         trainer.push_to_hub(**kwargs)
-     else:
-         trainer.create_model_card(**kwargs)
-
-     return results
-
-
- if __name__ == "__main__":
-     main()
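Two pieces of the deleted script are worth a quick illustration. First, DataCollatorCTCWithPadding pads audio inputs and label sequences separately, then masks label padding with -100 so the CTC loss ignores it. A minimal sketch, assuming the script's definitions are in scope; "processor_dir" and the feature values are placeholders:

from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("processor_dir")  # placeholder path with saved feature extractor + tokenizer
collator = DataCollatorCTCWithPadding(processor=processor)

# Two toy examples of different lengths; values are illustrative only.
features = [
    {"input_values": [0.1, -0.2, 0.3], "labels": [5, 2, 9]},
    {"input_values": [0.4, 0.5], "labels": [7]},
]
batch = collator(features)
# batch["input_values"]: (2, 3) tensor, the shorter clip zero-padded by the processor
# batch["labels"]:       tensor([[5, 2, 9], [7, -100, -100]]), padding masked with -100
# so the model's CTC loss skips the padded label positions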
 
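Second, create_vocabulary_from_data reduces to a few dictionary operations: collect the unique characters, index them in sorted order, remap the space to the word delimiter, and append the unk and pad tokens. A toy run on a made-up corpus (illustrative, not SLR66 data):

corpus = ["abc ba", "cab"]
chars = sorted(set(" ".join(corpus)))        # [' ', 'a', 'b', 'c']
vocab = {c: i for i, c in enumerate(chars)}  # {' ': 0, 'a': 1, 'b': 2, 'c': 3}

vocab["|"] = vocab.pop(" ")   # word delimiter token takes over the space's id
vocab["[UNK]"] = len(vocab)   # 4
vocab["[PAD]"] = len(vocab)   # 5
print(vocab)  # {'a': 1, 'b': 2, 'c': 3, '|': 0, '[UNK]': 4, '[PAD]': 5}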
run.sh CHANGED
@@ -5,8 +5,8 @@ python run_speech_recognition_ctc.py \
  --preprocessing_num_workers="8" \
  --output_dir="./" \
  --overwrite_output_dir \
- --num_train_epochs="5" \
- --per_device_train_batch_size="4" \
+ --num_train_epochs="100" \
+ --per_device_train_batch_size="16" \
  --per_device_eval_batch_size="4" \
  --gradient_accumulation_steps="8" \
  --learning_rate="2e-5" \
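The jump from 5 to 100 epochs mainly changes how many optimizer steps the run takes. A rough estimate, where num_train_examples and num_gpus are placeholders (neither the SLR66 split size nor the hardware is recorded in this diff):

import math

num_train_examples = 3000  # placeholder, not the actual SLR66 train split size
per_device_batch, grad_accum, num_gpus, epochs = 16, 8, 1, 100

steps_per_epoch = math.ceil(num_train_examples / (per_device_batch * grad_accum * num_gpus))
total_steps = steps_per_epoch * epochs
print(steps_per_epoch, total_steps)  # 24 steps/epoch -> 2400 optimizer steps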