versae committed on
Commit
0fdc627
1 Parent(s): 5ce554b

Model save

runs/Feb06_12-59-06_dante/1644148807.511835/events.out.tfevents.1644148807.dante.2790531.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de29a2a4263b81525551cc6a8648135a9470f06fdbc742c6b46db893fe00ab19
+ size 4765
runs/Feb06_12-59-06_dante/events.out.tfevents.1644148807.dante.2790531.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4bd6d012fbd254fed1b5d06c38715b576b2c2c5bb7dd25ae0428906ff3fa494
+ size 5285
special_tokens_map.json CHANGED
@@ -1 +1 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:84558b59d9045c9313d6d29bd2d8508fcf073db25ca405a23758338804be12ea
+ oid sha256:47fe3eeb50c004fe4d7cd558592801961858fa8699c79d6f495be25fb189bdd3
  size 3055
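
Note: the binary files above are stored as Git LFS pointers — a version line, the sha256 oid of the real blob, and its size in bytes. A minimal sketch (not part of this commit) of verifying a downloaded blob against such a pointer; the paths are illustrative:

# Minimal sketch: check a downloaded blob against its Git LFS pointer.
# Paths are illustrative; any pointer/blob pair from this commit would do.
import hashlib
import pathlib

def matches_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    # Parse "key value" lines of the pointer file into a dict.
    fields = dict(
        line.split(" ", 1)
        for line in pathlib.Path(pointer_path).read_text().splitlines()
        if " " in line
    )
    expected_oid = fields["oid"].strip().removeprefix("sha256:")
    blob = pathlib.Path(blob_path).read_bytes()
    return (
        hashlib.sha256(blob).hexdigest() == expected_oid
        and len(blob) == int(fields["size"])
    )
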
wandb/debug-internal.log CHANGED
@@ -1 +1 @@
- run-20220206_003420-34rq7wm8/logs/debug-internal.log
+ run-20220206_130008-2e07dm9k/logs/debug-internal.log
wandb/debug.log CHANGED
@@ -1 +1 @@
- run-20220206_003420-34rq7wm8/logs/debug.log
+ run-20220206_130008-2e07dm9k/logs/debug.log
wandb/latest-run CHANGED
@@ -1 +1 @@
- run-20220206_003420-34rq7wm8
+ run-20220206_130008-2e07dm9k
wandb/run-20220206_130008-2e07dm9k/files/code/run_speech_recognition_ctc.py ADDED
@@ -0,0 +1,792 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """ Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""
+
+ import functools
+ import json
+ import logging
+ import os
+ import re
+ import sys
+ import warnings
+ from dataclasses import dataclass, field
+ from typing import Dict, List, Optional, Union
+
+ import datasets
+ import numpy as np
+ import torch
+ from datasets import DatasetDict, load_dataset, load_metric
+
+ import transformers
+ from transformers import (
+     AutoConfig,
+     AutoFeatureExtractor,
+     AutoModelForCTC,
+     AutoProcessor,
+     AutoTokenizer,
+     HfArgumentParser,
+     Trainer,
+     TrainingArguments,
+     Wav2Vec2Processor,
+     set_seed,
+ )
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
+ from transformers.utils import check_min_version
+ from transformers.utils.versions import require_version
+
+
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risk.
+ check_min_version("4.16.0.dev0")
+
+ require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
+
+
+ logger = logging.getLogger(__name__)
+
+
+ def list_field(default=None, metadata=None):
+     return field(default_factory=lambda: default, metadata=metadata)
+
+
+ @dataclass
+ class ModelArguments:
+     """
+     Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
+     """
+
+     model_name_or_path: str = field(
+         metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
+     )
+     tokenizer_name_or_path: Optional[str] = field(
+         default=None,
+         metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
+     )
+     cache_dir: Optional[str] = field(
+         default=None,
+         metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
+     )
+     freeze_feature_encoder: bool = field(
+         default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
+     )
+     attention_dropout: float = field(
+         default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
+     )
+     activation_dropout: float = field(
+         default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
+     )
+     feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
+     hidden_dropout: float = field(
+         default=0.0,
+         metadata={
+             "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
+         },
+     )
+     final_dropout: float = field(
+         default=0.0,
+         metadata={"help": "The dropout probability for the final projection layer."},
+     )
+     mask_time_prob: float = field(
+         default=0.05,
+         metadata={
+             "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector "
+             "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
+             "vectors will be masked along the time axis."
+         },
+     )
+     mask_time_length: int = field(
+         default=10,
+         metadata={"help": "Length of vector span to mask along the time axis."},
+     )
+     mask_feature_prob: float = field(
+         default=0.0,
+         metadata={
+             "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector "
+             "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the time axis."
+         },
+     )
+     mask_feature_length: int = field(
+         default=10,
+         metadata={"help": "Length of vector span to mask along the feature axis."},
+     )
+     layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
+     ctc_loss_reduction: Optional[str] = field(
+         default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
+     )
+     ctc_zero_infinity: Optional[bool] = field(
+         default=False, metadata={"help": "If True, will try to avoid the CTC loss going to infinity."}
+     )
+
+ @dataclass
+ class DataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+
+     Using `HfArgumentParser` we can turn this class
+     into argparse arguments to be able to specify them on
+     the command line.
+     """
+
+     dataset_name: str = field(
+         metadata={"help": "The name of the dataset to use (via the datasets library)."}
+     )
+     dataset_config_name: str = field(
+         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+     )
+     train_split_name: str = field(
+         default="train+validation",
+         metadata={
+             "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train+validation'."
+         },
+     )
+     eval_split_name: str = field(
+         default="test",
+         metadata={
+             "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'."
+         },
+     )
+     audio_column_name: str = field(
+         default="audio",
+         metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
+     )
+     text_column_name: str = field(
+         default="text",
+         metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
+     )
+     preprocessing_num_workers: Optional[int] = field(
+         default=None,
+         metadata={"help": "The number of processes to use for the preprocessing."},
+     )
+     max_train_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
+             "value if set."
+         },
+     )
+     max_eval_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
+             "value if set."
+         },
+     )
+     chars_to_ignore: Optional[List[str]] = list_field(
+         default=None,
+         metadata={"help": "A list of characters to remove from the transcripts."},
+     )
+     eval_metrics: List[str] = list_field(
+         default=["wer"],
+         metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
+     )
+     max_duration_in_seconds: float = field(
+         default=20.0,
+         metadata={
+             "help": "Filter audio files that are longer than `max_duration_in_seconds` seconds."
+         },
+     )
+     min_duration_in_seconds: float = field(
+         default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
+     )
+     preprocessing_only: bool = field(
+         default=False,
+         metadata={
+             "help": "Whether to only do data preprocessing and skip training. "
+             "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
+             "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
+             "so that the cached datasets can consequently be loaded in distributed training"
+         },
+     )
+     use_auth_token: bool = field(
+         default=False,
+         metadata={
+             "help": "If :obj:`True`, will use the token generated when running "
+             ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
+         },
+     )
+     unk_token: str = field(
+         default="[UNK]",
+         metadata={"help": "The unk token for the tokenizer"},
+     )
+     pad_token: str = field(
+         default="[PAD]",
+         metadata={"help": "The padding token for the tokenizer"},
+     )
+     word_delimiter_token: str = field(
+         default="|",
+         metadata={"help": "The word delimiter token for the tokenizer"},
+     )
+     phoneme_language: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": "The target language that should be passed to the tokenizer"
+             " for tokenization. Note that"
+             " this is only relevant if the model classifies the"
+             " input audio to a sequence of phoneme sequences."
+         },
+     )
+
+
+ @dataclass
+ class DataCollatorCTCWithPadding:
+     """
+     Data collator that will dynamically pad the inputs received.
+     Args:
+         processor (:class:`~transformers.AutoProcessor`)
+             The processor used for processing the data.
+         padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
+             Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
+             among:
+             * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+               sequence is provided).
+             * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
+               maximum acceptable input length for the model if that argument is not provided.
+             * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
+               different lengths).
+         max_length (:obj:`int`, `optional`):
+             Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
+         max_length_labels (:obj:`int`, `optional`):
+             Maximum length of the ``labels`` returned list and optionally padding length (see above).
+         pad_to_multiple_of (:obj:`int`, `optional`):
+             If set will pad the sequence to a multiple of the provided value.
+             This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
+             7.5 (Volta).
+     """
+
+     processor: AutoProcessor
+     padding: Union[bool, str] = "longest"
+     pad_to_multiple_of: Optional[int] = None
+     pad_to_multiple_of_labels: Optional[int] = None
+
+     def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
+         # split inputs and labels since they have to be of different lengths and need
+         # different padding methods
+         input_features = [{"input_values": feature["input_values"]} for feature in features]
+         label_features = [{"input_ids": feature["labels"]} for feature in features]
+
+         batch = self.processor.pad(
+             input_features,
+             padding=self.padding,
+             pad_to_multiple_of=self.pad_to_multiple_of,
+             return_tensors="pt",
+         )
+
+         with self.processor.as_target_processor():
+             labels_batch = self.processor.pad(
+                 label_features,
+                 padding=self.padding,
+                 pad_to_multiple_of=self.pad_to_multiple_of_labels,
+                 return_tensors="pt",
+             )
+
+         # replace padding with -100 to ignore loss correctly
+         labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
+
+         batch["labels"] = labels
+
+         return batch
+
+
+ def create_vocabulary_from_data(
+     datasets: DatasetDict,
+     word_delimiter_token: Optional[str] = None,
+     unk_token: Optional[str] = None,
+     pad_token: Optional[str] = None,
+ ):
+     # Given training and test labels create vocabulary
+     def extract_all_chars(batch):
+         all_text = " ".join(batch["target_text"])
+         vocab = list(set(all_text))
+         return {"vocab": [vocab], "all_text": [all_text]}
+
+     vocabs = datasets.map(
+         extract_all_chars,
+         batched=True,
+         batch_size=-1,
+         keep_in_memory=True,
+         remove_columns=datasets["train"].column_names,
+     )
+
+     # take union of all unique characters in each dataset
+     vocab_set = functools.reduce(
+         lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
+     )
+
+     vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}
+
+     # replace white space with delimiter token
+     if word_delimiter_token is not None:
+         vocab_dict[word_delimiter_token] = vocab_dict[" "]
+         del vocab_dict[" "]
+
+     # add unk and pad token
+     if unk_token is not None:
+         vocab_dict[unk_token] = len(vocab_dict)
+
+     if pad_token is not None:
+         vocab_dict[pad_token] = len(vocab_dict)
+
+     return vocab_dict
+
+
+ def main():
+     # See all possible arguments in src/transformers/training_args.py
+     # or by passing the --help flag to this script.
+     # We now keep distinct sets of args, for a cleaner separation of concerns.
+
+     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # If we pass only one argument to the script and it's the path to a json file,
+         # let's parse it to get our arguments.
+         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+     else:
+         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+     # Detecting last checkpoint.
+     last_checkpoint = None
+     if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+         last_checkpoint = get_last_checkpoint(training_args.output_dir)
+         if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+             raise ValueError(
+                 f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                 "Use --overwrite_output_dir to overcome."
+             )
+         elif last_checkpoint is not None:
+             logger.info(
+                 f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                 "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+             )
+
+     # Setup logging
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         handlers=[logging.StreamHandler(sys.stdout)],
+     )
+     logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
+
+     # Log on each process the small summary:
+     logger.warning(
+         f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+         f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
+     )
+     # Set the verbosity to info of the Transformers logger (on main process only):
+     if is_main_process(training_args.local_rank):
+         transformers.utils.logging.set_verbosity_info()
+     logger.info("Training/evaluation parameters %s", training_args)
+
+     # Set seed before initializing model.
+     set_seed(training_args.seed)
+
+     # Pre-processing dataset
+     def filter_numeric(entry):
+         # keep only entries whose transcript contains no digits
+         return not any(digit in entry["text"] for digit in "0123456789")
+
+     def filter_inaudible(entry):
+         return not re.search(r"\d|<inaudible>", entry["text"], flags=re.IGNORECASE)
+
+     def filter_nynorsk(entry):
+         return re.search("nb-no", entry["sentence_language_code"], flags=re.IGNORECASE)
+
+     def filter_tooshort(entry):
+         # print(f"The audio sample ({entry['audio']['path']}) is too small, and has been omitted. ")
+         return (len(entry["text"]) <= len(entry["audio"]["array"]) // 320) and (len(entry["text"].strip()) >= 3)
+
+     def map_dataset(entry):
+         batch = {"text": entry["text"].lower()}
+         batch["text"] = re.sub('[áàâ]', 'a', batch["text"])
+         batch["text"] = re.sub('[ä]', 'æ', batch["text"])
+         batch["text"] = re.sub('[éèëê]', 'e', batch["text"])
+         batch["text"] = re.sub('[íìïî]', 'i', batch["text"])
+         batch["text"] = re.sub('[óòô]', 'o', batch["text"])  # "ö" is handled below so it maps to "ø", not "o"
+         batch["text"] = re.sub('[ö]', 'ø', batch["text"])
+         batch["text"] = re.sub('[ç]', 'c', batch["text"])
+         batch["text"] = re.sub('[úùüû]', 'u', batch["text"])
+         batch["text"] = re.sub(r'\s', ' ', batch["text"])
+         batch["text"] = re.sub('<ee>', 'eee', batch["text"])
+         batch["text"] = re.sub('<qq>', 'qqq', batch["text"])
+         batch["text"] = re.sub('<mm>', 'mmm', batch["text"])
+         # batch["text"] = re.sub('<inaudible>', '?', batch["text"])
+         if "<" in batch["text"]:
+             raise ValueError(batch["text"])
+         return batch
+
+     # 1. First, let's load the dataset
+     raw_datasets = DatasetDict()
+
+     if training_args.do_train:
+         raw_datasets["train"] = load_dataset(
+             data_args.dataset_name,
+             data_args.dataset_config_name,
+             split=data_args.train_split_name,
+             use_auth_token=data_args.use_auth_token,
+         ).shuffle()
+         raw_datasets["train"] = raw_datasets["train"].filter(filter_numeric).filter(filter_inaudible).filter(filter_nynorsk).filter(filter_tooshort)
+         raw_datasets["train"] = raw_datasets["train"].map(map_dataset)
+
+         if data_args.audio_column_name not in raw_datasets["train"].column_names:
+             raise ValueError(
+                 f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
+                 "Make sure to set `--audio_column_name` to the correct audio column - one of "
+                 f"{', '.join(raw_datasets['train'].column_names)}."
+             )
+
+         if data_args.text_column_name not in raw_datasets["train"].column_names:
+             raise ValueError(
+                 f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
+                 "Make sure to set `--text_column_name` to the correct text column - one of "
+                 f"{', '.join(raw_datasets['train'].column_names)}."
+             )
+
+         if data_args.max_train_samples is not None:
+             raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
+
+     if training_args.do_eval:
+         raw_datasets["eval"] = load_dataset(
+             data_args.dataset_name,
+             data_args.dataset_config_name,
+             split=data_args.eval_split_name,
+             use_auth_token=data_args.use_auth_token,
+         ).shuffle()
+         raw_datasets["eval"] = raw_datasets["eval"].filter(filter_numeric).filter(filter_inaudible).filter(filter_nynorsk).filter(filter_tooshort)
+         raw_datasets["eval"] = raw_datasets["eval"].map(map_dataset)
+
+         if data_args.max_eval_samples is not None:
+             raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
+
+
+     # 2. We remove some special characters from the datasets
+     # that make training complicated and do not help in transcribing the speech
+     # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
+     # that could be easily picked up by the model
+     # chars_to_ignore_regex = (
+     #     f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
+     # )
+     chars_to_ignore_regex = r'[\,\?\.\!\-\;\:\"\“\%\‘\”\�\'\–\_\\\+\#\/]'
+
+     text_column_name = data_args.text_column_name
+
+     def remove_special_characters(batch):
+         if chars_to_ignore_regex is not None:
+             batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
+         else:
+             batch["target_text"] = batch[text_column_name].lower() + " "
+         return batch
+
+     with training_args.main_process_first(desc="dataset map special characters removal"):
+         raw_datasets = raw_datasets.map(
+             remove_special_characters,
+             remove_columns=[text_column_name],
+             desc="remove special characters from datasets",
+         )
+
+     # save special tokens for tokenizer
+     word_delimiter_token = data_args.word_delimiter_token
+     unk_token = data_args.unk_token
+     pad_token = data_args.pad_token
+
+     # 3. Next, let's load the config as we might need it to create
+     # the tokenizer
+     # load config
+     config = AutoConfig.from_pretrained(
+         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
+     )
+
+     # 4. Next, if no tokenizer file is defined,
+     # we create the vocabulary of the model by extracting all unique characters from
+     # the training and evaluation datasets
+     # We need to make sure that only first rank saves vocabulary
+     # make sure all processes wait until vocab is created
+     tokenizer_name_or_path = model_args.tokenizer_name_or_path
+     tokenizer_kwargs = {}
+     if tokenizer_name_or_path is None:
+         # save vocab in training output dir
+         tokenizer_name_or_path = training_args.output_dir
+
+         vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
+
+         with training_args.main_process_first():
+             if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
+                 os.remove(vocab_file)
+
+         with training_args.main_process_first(desc="dataset map vocabulary creation"):
+             if not os.path.isfile(vocab_file):
+                 os.makedirs(tokenizer_name_or_path, exist_ok=True)
+                 vocab_dict = create_vocabulary_from_data(
+                     raw_datasets,
+                     word_delimiter_token=word_delimiter_token,
+                     unk_token=unk_token,
+                     pad_token=pad_token,
+                 )
+
+                 # save vocab dict to be loaded into tokenizer
+                 with open(vocab_file, "w") as file:
+                     json.dump(vocab_dict, file)
+
+         # if tokenizer has just been created
+         # it is defined by `tokenizer_class` if present in config else by `model_type`
+         tokenizer_kwargs = {
+             "config": config if config.tokenizer_class is not None else None,
+             "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
+             "unk_token": unk_token,
+             "pad_token": pad_token,
+             "word_delimiter_token": word_delimiter_token,
+         }
+
+     # 5. Now we can instantiate the feature extractor, tokenizer and model
+     # Note for distributed training, the .from_pretrained methods guarantee that only
+     # one local process can concurrently download model & vocab.
+
+     # load feature_extractor and tokenizer
+     tokenizer = AutoTokenizer.from_pretrained(
+         tokenizer_name_or_path,
+         use_auth_token=data_args.use_auth_token,
+         **tokenizer_kwargs,
+     )
+     feature_extractor = AutoFeatureExtractor.from_pretrained(
+         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
+     )
+
+     # adapt config
+     config.update(
+         {
+             "feat_proj_dropout": model_args.feat_proj_dropout,
+             "attention_dropout": model_args.attention_dropout,
+             "hidden_dropout": model_args.hidden_dropout,
+             "final_dropout": model_args.final_dropout,
+             "mask_time_prob": model_args.mask_time_prob,
+             "mask_time_length": model_args.mask_time_length,
+             "mask_feature_prob": model_args.mask_feature_prob,
+             "mask_feature_length": model_args.mask_feature_length,
+             "gradient_checkpointing": training_args.gradient_checkpointing,
+             "layerdrop": model_args.layerdrop,
+             "ctc_loss_reduction": model_args.ctc_loss_reduction,
+             "ctc_zero_infinity": model_args.ctc_zero_infinity,
+             "pad_token_id": tokenizer.pad_token_id,
+             "vocab_size": len(tokenizer),
+             "activation_dropout": model_args.activation_dropout,
+         }
+     )
+
+     # create model
+     model = AutoModelForCTC.from_pretrained(
+         model_args.model_name_or_path,
+         cache_dir=model_args.cache_dir,
+         config=config,
+         use_auth_token=data_args.use_auth_token,
+     )
+
+     # freeze encoder
+     if model_args.freeze_feature_encoder:
+         model.freeze_feature_encoder()
+
+     # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
+     # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
+     # so that we just need to set the correct target sampling rate and normalize the input
+     # via the `feature_extractor`
+
+     # make sure that dataset decodes audio with correct sampling rate
+     dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
+     if dataset_sampling_rate != feature_extractor.sampling_rate:
+         raw_datasets = raw_datasets.cast_column(
+             data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
+         )
+
+     # derive max & min input length for sample rate & max duration
+     max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
+     min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
+     audio_column_name = data_args.audio_column_name
+     num_workers = data_args.preprocessing_num_workers
+
+     # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
+     phoneme_language = data_args.phoneme_language
+
+     # Preprocessing the datasets.
+     # We need to read the audio files as arrays and tokenize the targets.
+     def prepare_dataset(batch):
+         # load audio
+         sample = batch[audio_column_name]
+
+         inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
+         batch["input_values"] = inputs.input_values[0]
+         batch["input_length"] = len(batch["input_values"])
+
+         # encode targets
+         additional_kwargs = {}
+         if phoneme_language is not None:
+             additional_kwargs["phonemizer_lang"] = phoneme_language
+
+         batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
+         return batch
+
+     with training_args.main_process_first(desc="dataset map preprocessing"):
+         vectorized_datasets = raw_datasets.map(
+             prepare_dataset,
+             remove_columns=next(iter(raw_datasets.values())).column_names,
+             num_proc=num_workers,
+             desc="preprocess datasets",
+         )
+
+     def is_audio_in_length_range(length):
+         return length > min_input_length and length < max_input_length
+
+     # filter data that is shorter than min_input_length
+     vectorized_datasets = vectorized_datasets.filter(
+         is_audio_in_length_range,
+         num_proc=num_workers,
+         input_columns=["input_length"],
+     )
+
+     # 7. Next, we can prepare the training.
+     # Let's use word error rate (WER) as our evaluation metric,
+     # instantiate a data collator and the trainer
+
+     # Define evaluation metrics during training, *i.e.* word error rate, character error rate
+     eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}
+
+     # for large datasets it is advised to run the preprocessing on a
+     # single machine first with ``args.preprocessing_only`` since there will most likely
+     # be a timeout when running the script in distributed mode.
+     # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
+     # cached dataset
+     if data_args.preprocessing_only:
+         logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
+         return
+
+     def compute_metrics(pred):
+         pred_logits = pred.predictions
+         pred_ids = np.argmax(pred_logits, axis=-1)
+
+         pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
+
+         pred_str = tokenizer.batch_decode(pred_ids)
+         # we do not want to group tokens when computing the metrics
+         label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
+
+         metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}
+
+         return metrics
+
+     # Now save everything to be able to create a single processor later
+     if is_main_process(training_args.local_rank):
+         # save feature extractor, tokenizer and config
+         feature_extractor.save_pretrained(training_args.output_dir)
+         tokenizer.save_pretrained(training_args.output_dir)
+         config.save_pretrained(training_args.output_dir)
+
+     try:
+         processor = AutoProcessor.from_pretrained(training_args.output_dir)
+     except (OSError, KeyError):
+         warnings.warn(
+             "Loading a processor from a feature extractor config that does not"
+             " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
+             " attribute to your `preprocessor_config.json` file to suppress this warning: "
+             " `'processor_class': 'Wav2Vec2Processor'`",
+             FutureWarning,
+         )
+         processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)
+
+     # Instantiate custom data collator
+     data_collator = DataCollatorCTCWithPadding(processor=processor)
+
+     # Initialize Trainer
+     trainer = Trainer(
+         model=model,
+         data_collator=data_collator,
+         args=training_args,
+         compute_metrics=compute_metrics,
+         train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
+         eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
+         tokenizer=feature_extractor,
+     )
+
+     # 8. Finally, we can start training
+
+     # Training
+     if training_args.do_train:
+
+         # use last checkpoint if exist
+         if last_checkpoint is not None:
+             checkpoint = last_checkpoint
+         elif os.path.isdir(model_args.model_name_or_path):
+             checkpoint = model_args.model_name_or_path
+         else:
+             checkpoint = None
+
+         train_result = trainer.train(resume_from_checkpoint=checkpoint)
+         trainer.save_model()
+
+         metrics = train_result.metrics
+         max_train_samples = (
+             data_args.max_train_samples
+             if data_args.max_train_samples is not None
+             else len(vectorized_datasets["train"])
+         )
+         metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
+
+         trainer.log_metrics("train", metrics)
+         trainer.save_metrics("train", metrics)
+         trainer.save_state()
+
+     # Evaluation
+     results = {}
+     if training_args.do_eval:
+         logger.info("*** Evaluate ***")
+         metrics = trainer.evaluate()
+         max_eval_samples = (
+             data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
+         )
+         metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
+
+         trainer.log_metrics("eval", metrics)
+         trainer.save_metrics("eval", metrics)
+
+     # Write model card and (optionally) push to hub
+     config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
+     kwargs = {
+         "finetuned_from": model_args.model_name_or_path,
+         "tasks": "speech-recognition",
+         "tags": ["automatic-speech-recognition", data_args.dataset_name],
+         "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
+         "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
+     }
+     if "common_voice" in data_args.dataset_name:
+         kwargs["language"] = config_name
+
+     if training_args.push_to_hub:
+         trainer.push_to_hub(**kwargs)
+     else:
+         trainer.create_model_card(**kwargs)
+
+     return results
+
+
+ if __name__ == "__main__":
+     main()
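
Note: for orientation, a minimal sketch (not part of this commit) of what the script's create_vocabulary_from_data helper produces on toy data; the split names and sentences are illustrative:

# Minimal sketch: exercising create_vocabulary_from_data on toy data.
# Sentences and split names are illustrative only; assumes the script
# above is on sys.path so its helper can be imported.
from datasets import Dataset, DatasetDict
from run_speech_recognition_ctc import create_vocabulary_from_data

toy = DatasetDict({
    "train": Dataset.from_dict({"target_text": ["god dag", "hei"]}),
    "eval": Dataset.from_dict({"target_text": ["takk"]}),
})
# Each unique character is mapped to an index; " " is replaced by the
# word delimiter token, and [UNK]/[PAD] are appended at the end.
vocab = create_vocabulary_from_data(
    toy, word_delimiter_token="|", unk_token="[UNK]", pad_token="[PAD]"
)
print(vocab)
# e.g. {'|': 0, 'a': 1, 'd': 2, 'e': 3, 'g': 4, 'h': 5, 'i': 6,
#       'k': 7, 'o': 8, 't': 9, '[UNK]': 10, '[PAD]': 11}
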
wandb/run-20220206_130008-2e07dm9k/files/config.yaml ADDED
@@ -0,0 +1,699 @@
+ wandb_version: 1
+
+ _n_gpu:
+   desc: null
+   value: 2
+ _name_or_path:
+   desc: null
+   value: ./
+ _wandb:
+   desc: null
+   value:
+     cli_version: 0.12.9
+     code_path: code/run_speech_recognition_ctc.py
+     framework: huggingface
+     huggingface_version: 4.17.0.dev0
+     is_jupyter_run: false
+     is_kaggle_kernel: false
+     m:
+     - 1: train/global_step
+       6:
+       - 3
+     - 1: train/train_runtime
+       5: 1
+       6:
+       - 1
+     - 1: train/train_samples_per_second
+       5: 1
+       6:
+       - 1
+     - 1: train/train_steps_per_second
+       5: 1
+       6:
+       - 1
+     - 1: train/total_flos
+       5: 1
+       6:
+       - 1
+     - 1: train/train_loss
+       5: 1
+       6:
+       - 1
+     - 1: train/epoch
+       5: 1
+       6:
+       - 1
+     python_version: 3.9.7
+     start_time: 1644148808
+     t:
+       1:
+       - 1
+       - 2
+       - 3
+       - 5
+       - 11
+       3:
+       - 13
+       4: 3.9.7
+       5: 0.12.9
+       6: 4.17.0.dev0
+       8:
+       - 5
+ activation_dropout:
+   desc: null
+   value: 0.055
+ adafactor:
+   desc: null
+   value: false
+ adam_beta1:
+   desc: null
+   value: 0.9
+ adam_beta2:
+   desc: null
+   value: 0.999
+ adam_epsilon:
+   desc: null
+   value: 1.0e-08
+ adapter_kernel_size:
+   desc: null
+   value: 3
+ adapter_stride:
+   desc: null
+   value: 2
+ add_adapter:
+   desc: null
+   value: false
+ add_cross_attention:
+   desc: null
+   value: false
+ apply_spec_augment:
+   desc: null
+   value: true
+ architectures:
+   desc: null
+   value:
+   - Wav2Vec2ForCTC
+ attention_dropout:
+   desc: null
+   value: 0.094
+ bad_words_ids:
+   desc: null
+   value: null
+ bf16:
+   desc: null
+   value: false
+ bf16_full_eval:
+   desc: null
+   value: false
+ bos_token_id:
+   desc: null
+   value: 1
+ chunk_size_feed_forward:
+   desc: null
+   value: 0
+ classifier_proj_size:
+   desc: null
+   value: 256
+ codevector_dim:
+   desc: null
+   value: 768
+ contrastive_logits_temperature:
+   desc: null
+   value: 0.1
+ conv_bias:
+   desc: null
+   value: true
+ conv_dim:
+   desc: null
+   value:
+   - 512
+   - 512
+   - 512
+   - 512
+   - 512
+   - 512
+   - 512
+ conv_kernel:
+   desc: null
+   value:
+   - 10
+   - 3
+   - 3
+   - 3
+   - 3
+   - 2
+   - 2
+ conv_stride:
+   desc: null
+   value:
+   - 5
+   - 2
+   - 2
+   - 2
+   - 2
+   - 2
+   - 2
+ cross_attention_hidden_size:
+   desc: null
+   value: null
+ ctc_loss_reduction:
+   desc: null
+   value: mean
+ ctc_zero_infinity:
+   desc: null
+   value: true
+ dataloader_drop_last:
+   desc: null
+   value: false
+ dataloader_num_workers:
+   desc: null
+   value: 0
+ dataloader_pin_memory:
+   desc: null
+   value: true
+ ddp_bucket_cap_mb:
+   desc: null
+   value: None
+ ddp_find_unused_parameters:
+   desc: null
+   value: None
+ debug:
+   desc: null
+   value: '[]'
+ decoder_start_token_id:
+   desc: null
+   value: null
+ deepspeed:
+   desc: null
+   value: None
+ disable_tqdm:
+   desc: null
+   value: false
+ diversity_loss_weight:
+   desc: null
+   value: 0.1
+ diversity_penalty:
+   desc: null
+   value: 0.0
+ do_eval:
+   desc: null
+   value: true
+ do_predict:
+   desc: null
+   value: false
+ do_sample:
+   desc: null
+   value: false
+ do_stable_layer_norm:
+   desc: null
+   value: true
+ do_train:
+   desc: null
+   value: true
+ early_stopping:
+   desc: null
+   value: false
+ encoder_no_repeat_ngram_size:
+   desc: null
+   value: 0
+ eos_token_id:
+   desc: null
+   value: 2
+ eval_accumulation_steps:
+   desc: null
+   value: None
+ eval_batch_size:
+   desc: null
+   value: 32
+ eval_steps:
+   desc: null
+   value: 500
+ evaluation_strategy:
+   desc: null
+   value: steps
+ feat_extract_activation:
+   desc: null
+   value: gelu
+ feat_extract_dropout:
+   desc: null
+   value: 0.0
+ feat_extract_norm:
+   desc: null
+   value: layer
+ feat_proj_dropout:
+   desc: null
+   value: 0.04
+ feat_quantizer_dropout:
+   desc: null
+   value: 0.0
+ final_dropout:
+   desc: null
+   value: 0.0
+ finetuning_task:
+   desc: null
+   value: null
+ forced_bos_token_id:
+   desc: null
+   value: null
+ forced_eos_token_id:
+   desc: null
+   value: null
+ fp16:
+   desc: null
+   value: true
+ fp16_backend:
+   desc: null
+   value: auto
+ fp16_full_eval:
+   desc: null
+   value: false
+ fp16_opt_level:
+   desc: null
+   value: O1
+ gradient_accumulation_steps:
+   desc: null
+   value: 2
+ gradient_checkpointing:
+   desc: null
+   value: true
+ greater_is_better:
+   desc: null
+   value: None
+ group_by_length:
+   desc: null
+   value: true
+ half_precision_backend:
+   desc: null
+   value: amp
+ hidden_act:
+   desc: null
+   value: gelu
+ hidden_dropout:
+   desc: null
+   value: 0.047
+ hidden_size:
+   desc: null
+   value: 1024
+ hub_model_id:
+   desc: null
+   value: NbAiLab/wav2vec2-large-voxrex-npsc-bokmaal
+ hub_strategy:
+   desc: null
+   value: every_save
+ hub_token:
+   desc: null
+   value: <HUB_TOKEN>
+ id2label:
+   desc: null
+   value:
+     '0': LABEL_0
+     '1': LABEL_1
+ ignore_data_skip:
+   desc: null
+   value: false
+ initializer_range:
+   desc: null
+   value: 0.02
+ intermediate_size:
+   desc: null
+   value: 4096
+ is_decoder:
+   desc: null
+   value: false
+ is_encoder_decoder:
+   desc: null
+   value: false
+ label2id:
+   desc: null
+   value:
+     LABEL_0: 0
+     LABEL_1: 1
+ label_names:
+   desc: null
+   value: None
+ label_smoothing_factor:
+   desc: null
+   value: 0.0
+ layer_norm_eps:
+   desc: null
+   value: 1.0e-05
+ layerdrop:
+   desc: null
+   value: 0.041
+ learning_rate:
+   desc: null
+   value: 8.379967082059723e-06
+ length_column_name:
+   desc: null
+   value: input_length
+ length_penalty:
+   desc: null
+   value: 1.0
+ load_best_model_at_end:
+   desc: null
+   value: false
+ local_rank:
+   desc: null
+   value: -1
+ log_level:
+   desc: null
+   value: -1
+ log_level_replica:
+   desc: null
+   value: -1
+ log_on_each_node:
+   desc: null
+   value: true
+ logging_dir:
+   desc: null
+   value: ./runs/Feb06_12-59-06_dante
+ logging_first_step:
+   desc: null
+   value: false
+ logging_nan_inf_filter:
+   desc: null
+   value: true
+ logging_steps:
+   desc: null
+   value: 100
+ logging_strategy:
+   desc: null
+   value: steps
+ lr_scheduler_type:
+   desc: null
+   value: linear
+ mask_channel_length:
+   desc: null
+   value: 10
+ mask_channel_min_space:
+   desc: null
+   value: 1
+ mask_channel_other:
+   desc: null
+   value: 0.0
+ mask_channel_prob:
+   desc: null
+   value: 0.0
+ mask_channel_selection:
+   desc: null
+   value: static
+ mask_feature_length:
+   desc: null
+   value: 64
+ mask_feature_min_masks:
+   desc: null
+   value: 0
+ mask_feature_prob:
+   desc: null
+   value: 0.25
+ mask_time_length:
+   desc: null
+   value: 10
+ mask_time_min_masks:
+   desc: null
+   value: 2
+ mask_time_min_space:
+   desc: null
+   value: 1
+ mask_time_other:
+   desc: null
+   value: 0.0
+ mask_time_prob:
+   desc: null
+   value: 0.082
+ mask_time_selection:
+   desc: null
+   value: static
+ max_grad_norm:
+   desc: null
+   value: 1.0
+ max_length:
+   desc: null
+   value: 20
+ max_steps:
+   desc: null
+   value: -1
+ metric_for_best_model:
+   desc: null
+   value: None
+ min_length:
+   desc: null
+   value: 0
+ model_type:
+   desc: null
+   value: wav2vec2
+ mp_parameters:
+   desc: null
+   value: ''
+ no_cuda:
+   desc: null
+   value: false
+ no_repeat_ngram_size:
+   desc: null
+   value: 0
+ num_adapter_layers:
+   desc: null
+   value: 3
+ num_attention_heads:
+   desc: null
+   value: 16
+ num_beam_groups:
+   desc: null
+   value: 1
+ num_beams:
+   desc: null
+   value: 1
+ num_codevector_groups:
+   desc: null
+   value: 2
+ num_codevectors_per_group:
+   desc: null
+   value: 320
+ num_conv_pos_embedding_groups:
+   desc: null
+   value: 16
+ num_conv_pos_embeddings:
+   desc: null
+   value: 128
+ num_feat_extract_layers:
+   desc: null
+   value: 7
+ num_hidden_layers:
+   desc: null
+   value: 24
+ num_negatives:
+   desc: null
+   value: 100
+ num_return_sequences:
+   desc: null
+   value: 1
+ num_train_epochs:
+   desc: null
+   value: 0.1
+ optim:
+   desc: null
+   value: adamw_hf
+ output_attentions:
+   desc: null
+   value: false
+ output_dir:
+   desc: null
+   value: ./
+ output_hidden_size:
+   desc: null
+   value: 1024
+ output_hidden_states:
+   desc: null
+   value: false
+ output_scores:
+   desc: null
+   value: false
+ overwrite_output_dir:
+   desc: null
+   value: true
+ pad_token_id:
+   desc: null
+   value: 31
+ past_index:
+   desc: null
+   value: -1
+ per_device_eval_batch_size:
+   desc: null
+   value: 16
+ per_device_train_batch_size:
+   desc: null
+   value: 16
+ per_gpu_eval_batch_size:
+   desc: null
+   value: None
+ per_gpu_train_batch_size:
+   desc: null
+   value: None
+ prediction_loss_only:
+   desc: null
+   value: false
+ prefix:
+   desc: null
+   value: null
+ problem_type:
+   desc: null
+   value: null
+ proj_codevector_dim:
+   desc: null
+   value: 768
+ pruned_heads:
+   desc: null
+   value: {}
+ push_to_hub:
+   desc: null
+   value: true
+ push_to_hub_model_id:
+   desc: null
+   value: None
+ push_to_hub_organization:
+   desc: null
+   value: None
+ push_to_hub_token:
+   desc: null
+   value: <PUSH_TO_HUB_TOKEN>
+ remove_invalid_values:
+   desc: null
+   value: false
+ remove_unused_columns:
+   desc: null
+   value: true
+ repetition_penalty:
+   desc: null
+   value: 1.0
+ report_to:
+   desc: null
+   value: '[''tensorboard'', ''wandb'']'
+ resume_from_checkpoint:
+   desc: null
+   value: None
+ return_dict:
+   desc: null
+   value: true
+ return_dict_in_generate:
+   desc: null
+   value: false
+ run_name:
+   desc: null
+   value: ./
+ save_on_each_node:
+   desc: null
+   value: false
+ save_steps:
+   desc: null
+   value: 500
+ save_strategy:
+   desc: null
+   value: steps
+ save_total_limit:
+   desc: null
+   value: 3
+ seed:
+   desc: null
+   value: 42
+ sep_token_id:
+   desc: null
+   value: null
+ sharded_ddp:
+   desc: null
+   value: '[]'
+ skip_memory_metrics:
+   desc: null
+   value: true
+ task_specific_params:
+   desc: null
+   value: null
+ tdnn_dilation:
+   desc: null
+   value:
+   - 1
+   - 2
+   - 3
+   - 1
+   - 1
+ tdnn_dim:
+   desc: null
+   value:
+   - 512
+   - 512
+   - 512
+   - 512
+   - 1500
+ tdnn_kernel:
+   desc: null
+   value:
+   - 5
+   - 3
+   - 3
+   - 1
+   - 1
+ temperature:
+   desc: null
+   value: 1.0
+ tf32:
+   desc: null
+   value: None
+ tie_encoder_decoder:
+   desc: null
+   value: false
+ tie_word_embeddings:
+   desc: null
+   value: true
+ tokenizer_class:
+   desc: null
+   value: null
+ top_k:
+   desc: null
+   value: 50
+ top_p:
+   desc: null
+   value: 1.0
+ torch_dtype:
+   desc: null
+   value: float32
+ torchscript:
+   desc: null
+   value: false
+ tpu_metrics_debug:
+   desc: null
+   value: false
+ tpu_num_cores:
+   desc: null
+   value: None
+ train_batch_size:
+   desc: null
+   value: 32
+ transformers_version:
+   desc: null
+   value: 4.17.0.dev0
+ use_bfloat16:
+   desc: null
+   value: false
+ use_legacy_prediction_loop:
+   desc: null
+   value: false
+ use_weighted_layer_sum:
+   desc: null
+   value: false
+ vocab_size:
+   desc: null
+   value: 34
+ warmup_ratio:
+   desc: null
+   value: 0.0
+ warmup_steps:
+   desc: null
+   value: 0
+ weight_decay:
+   desc: null
+   value: 0.0
+ xpu_backend:
+   desc: null
+   value: None
+ xvector_output_dim:
+   desc: null
+   value: 512
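
Note: each key in the config.yaml above nests a desc/value pair, per wandb convention. A minimal sketch (not part of this commit) of reading the logged hyperparameters back out, assuming PyYAML (pinned in the requirements below):

# Minimal sketch: read the logged hyperparameters back from config.yaml.
# Assumes PyYAML; note the wandb desc/value nesting around every key.
import yaml

with open("wandb/run-20220206_130008-2e07dm9k/files/config.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["learning_rate"]["value"])              # 8.379967082059723e-06
print(cfg["num_train_epochs"]["value"])           # 0.1
print(cfg["per_device_train_batch_size"]["value"])  # 16
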
wandb/run-20220206_130008-2e07dm9k/files/diff.patch ADDED
@@ -0,0 +1,36 @@
+ diff --git a/special_tokens_map.json b/special_tokens_map.json
+ index ac9d0f5..59f35a2 100644
+ --- a/special_tokens_map.json
+ +++ b/special_tokens_map.json
+ @@ -1 +1 @@
+ -{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
+
+ +{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
+
+ diff --git a/wandb/debug-internal.log b/wandb/debug-internal.log
+ index 6427b35..387e9da 120000
+ --- a/wandb/debug-internal.log
+ +++ b/wandb/debug-internal.log
+ @@ -1 +1 @@
+ -run-20220206_003420-34rq7wm8/logs/debug-internal.log
+
+ +run-20220206_130008-2e07dm9k/logs/debug-internal.log
+
+ diff --git a/wandb/debug.log b/wandb/debug.log
+ index f020038..54a71e1 120000
+ --- a/wandb/debug.log
+ +++ b/wandb/debug.log
+ @@ -1 +1 @@
+ -run-20220206_003420-34rq7wm8/logs/debug.log
+
+ +run-20220206_130008-2e07dm9k/logs/debug.log
+
+ diff --git a/wandb/latest-run b/wandb/latest-run
+ index 5911f00..39fcb1d 120000
+ --- a/wandb/latest-run
+ +++ b/wandb/latest-run
+ @@ -1 +1 @@
+ -run-20220206_003420-34rq7wm8
+
+ +run-20220206_130008-2e07dm9k
+
wandb/run-20220206_130008-2e07dm9k/files/output.log ADDED
@@ -0,0 +1,13 @@
+
+ 0%| | 0/78 [00:00<?, ?it/s]
+ {'train_runtime': 12.046, 'train_samples_per_second': 412.128, 'train_steps_per_second': 6.475, 'train_loss': 0.0, 'epoch': 1.5}
+ Training completed. Do not forget to share your model on huggingface.co/models =)
+ 0%| | 0/78 [00:08<?, ?it/s]
+ Skipping the first batches: 0%| | 0/1550 [00:12<?, ?it/s]
+ Saving model checkpoint to ./
+ Configuration saved in ./config.json
+ Model weights saved in ./pytorch_model.bin
+ Configuration saved in ./preprocessor_config.json
+ Saving model checkpoint to ./
+ Configuration saved in ./config.json
+ Model weights saved in ./pytorch_model.bin
wandb/run-20220206_130008-2e07dm9k/files/requirements.txt ADDED
@@ -0,0 +1,137 @@
+ absl-py==1.0.0
+ aiohttp==3.8.1
+ aiosignal==1.2.0
+ appdirs==1.4.4
+ asttokens==2.0.5
+ astunparse==1.6.3
+ async-timeout==4.0.2
+ attrs==21.4.0
+ audioread==2.1.9
+ backcall==0.2.0
+ black==22.1.0
+ cachetools==5.0.0
+ certifi==2021.10.8
+ cffi==1.15.0
+ charset-normalizer==2.0.11
+ click==8.0.3
+ configparser==5.2.0
+ datasets==1.18.4.dev0
+ decorator==5.1.1
+ deepspeed==0.5.10
+ dill==0.3.4
+ docker-pycreds==0.4.0
+ executing==0.8.2
+ fairscale==0.4.5
+ filelock==3.4.2
+ flatbuffers==2.0
+ frozenlist==1.3.0
+ fsspec==2022.1.0
+ gast==0.4.0
+ gitdb==4.0.9
+ gitpython==3.1.26
+ google-auth-oauthlib==0.4.6
+ google-auth==2.6.0
+ google-pasta==0.2.0
+ grpcio==1.43.0
+ h5py==3.6.0
+ hjson==3.0.2
+ huggingface-hub==0.4.0
+ hypothesis==6.36.1
+ idna==3.3
+ importlib-metadata==4.10.1
+ ipython==8.0.1
+ jedi==0.18.1
+ jiwer==2.3.0
+ joblib==1.1.0
+ kenlm==0.0.0
+ keras-preprocessing==1.1.2
+ keras==2.7.0
+ libclang==13.0.0
+ librosa==0.8.1
+ llvmlite==0.38.0
+ markdown==3.3.6
+ matplotlib-inline==0.1.3
+ multidict==6.0.2
+ multiprocess==0.70.12.2
+ mypy-extensions==0.4.3
+ ninja==1.10.2.3
+ numba==0.55.1
+ numpy==1.21.5
+ oauthlib==3.2.0
+ opt-einsum==3.3.0
+ packaging==21.3
+ pandas==1.4.0
+ parso==0.8.3
+ pathspec==0.9.0
+ pathtools==0.1.2
+ pexpect==4.8.0
+ pickleshare==0.7.5
+ pillow==9.0.0
+ pip==20.3.4
+ pkg-resources==0.0.0
+ platformdirs==2.4.1
+ pooch==1.6.0
+ promise==2.3
+ prompt-toolkit==3.0.26
+ protobuf==3.19.4
+ psutil==5.9.0
+ ptyprocess==0.7.0
+ pure-eval==0.2.2
+ py-cpuinfo==8.0.0
+ pyarrow==6.0.1
+ pyasn1-modules==0.2.8
+ pyasn1==0.4.8
+ pycparser==2.21
+ pyctcdecode==0.3.0
+ pygments==2.11.2
+ pygtrie==2.4.2
+ pyparsing==3.0.7
+ python-dateutil==2.8.2
+ python-levenshtein==0.12.2
+ pytz==2021.3
+ pyyaml==6.0
+ regex==2022.1.18
+ requests-oauthlib==1.3.1
+ requests==2.27.1
+ resampy==0.2.2
+ rsa==4.8
+ sacremoses==0.0.47
+ scikit-learn==1.0.2
+ scipy==1.7.3
+ sentry-sdk==1.5.4
+ setuptools==44.1.1
+ shortuuid==1.0.8
+ six==1.16.0
+ smmap==5.0.0
+ sortedcontainers==2.4.0
+ soundfile==0.10.3.post1
+ stack-data==0.1.4
+ subprocess32==3.5.4
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.8.0
+ tensorflow-estimator==2.7.0
+ tensorflow-io-gcs-filesystem==0.23.1
+ tensorflow==2.7.0
+ termcolor==1.1.0
+ threadpoolctl==3.1.0
+ tokenizers==0.11.4
+ tomli==2.0.0
+ torch==1.10.2+cu113
+ torchaudio==0.10.2+cu113
+ torchvision==0.11.3+cu113
+ tqdm==4.62.3
+ traitlets==5.1.1
+ transformers==4.17.0.dev0
+ triton==1.0.0
+ typing-extensions==4.0.1
+ urllib3==1.26.8
+ wandb==0.12.9
+ wcwidth==0.2.5
+ werkzeug==2.0.2
+ wheel==0.37.1
+ wrapt==1.13.3
+ xxhash==2.0.2
+ yarl==1.7.2
+ yaspin==2.1.0
+ zipp==3.7.0
wandb/run-20220206_130008-2e07dm9k/files/wandb-metadata.json ADDED
@@ -0,0 +1,66 @@
+ {
+ "os": "Linux-5.13.0-27-generic-x86_64-with-glibc2.34",
+ "python": "3.9.7",
+ "heartbeatAt": "2022-02-06T12:00:11.012496",
+ "startedAt": "2022-02-06T12:00:08.458039",
+ "docker": null,
+ "gpu": "NVIDIA RTX A6000",
+ "gpu_count": 2,
+ "cpu_count": 96,
+ "cuda": null,
+ "args": [
+ "--dataset_name=NbAiLab/NPSC",
+ "--model_name_or_path=./",
+ "--hub_model_id=NbAiLab/wav2vec2-large-voxrex-npsc-bokmaal",
+ "--dataset_config_name=16K_mp3",
+ "--output_dir=./",
+ "--overwrite_output_dir",
+ "--num_train_epochs=0.1",
+ "--per_device_train_batch_size=16",
+ "--per_device_eval_batch_size=16",
+ "--gradient_accumulation_steps=2",
+ "--learning_rate=8.379967082059723e-06",
+ "--warmup_steps=0",
+ "--length_column_name=input_length",
+ "--evaluation_strategy=steps",
+ "--text_column_name=text",
+ "--save_steps=500",
+ "--eval_steps=500",
+ "--logging_steps=100",
+ "--layerdrop=0.041",
+ "--attention_dropout=0.094",
+ "--activation_dropout=0.055",
+ "--hidden_dropout=0.047",
+ "--ctc_zero_infinity",
+ "--save_total_limit=3",
+ "--freeze_feature_encoder",
+ "--feat_proj_dropout=0.04",
+ "--mask_time_prob=0.082",
+ "--mask_time_length=10",
+ "--mask_feature_prob=0.25",
+ "--mask_feature_length=64",
+ "--gradient_checkpointing",
+ "--min_duration_in_seconds=0.5",
+ "--max_duration_in_seconds=30.0",
+ "--use_auth_token",
+ "--seed=42",
+ "--fp16",
+ "--group_by_length",
+ "--do_train",
+ "--do_eval",
+ "--push_to_hub",
+ "--preprocessing_num_workers=32"
+ ],
+ "state": "running",
+ "program": "/mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/run_speech_recognition_ctc.py",
+ "codePath": "run_speech_recognition_ctc.py",
+ "git": {
+ "remote": "https://huggingface.co/NbAiLab/wav2vec2-large-voxrex-npsc-bokmaal",
+ "commit": "5ce554b48411729e5da1f0032a2558301b2a6f47"
+ },
+ "email": "versae@gmail.com",
+ "root": "/mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal",
+ "host": "dante",
+ "username": "javierr",
+ "executable": "/mnt/lv_ai_1_dante/javierr/audio/bin/python"
+ }
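Note: a quick sanity check (an illustrative sketch, not part of the run) of the effective batch size implied by the args above, assuming both GPUs are used as the "gpu_count": 2 field and the Trainer's '_n_gpu': 2 config below report:

per_device_train_batch_size = 16  # from --per_device_train_batch_size
gradient_accumulation_steps = 2   # from --gradient_accumulation_steps
gpu_count = 2                     # from "gpu_count": 2 in this metadata
# Samples contributing to each optimizer step:
print(per_device_train_batch_size * gradient_accumulation_steps * gpu_count)  # 64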
wandb/run-20220206_130008-2e07dm9k/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"train/train_runtime": 12.046, "train/train_samples_per_second": 412.128, "train/train_steps_per_second": 6.475, "train/total_flos": 1.6280526756920635e+19, "train/train_loss": 0.0, "train/epoch": 1.5, "train/global_step": 2327, "_runtime": 11, "_timestamp": 1644148819, "_step": 0}
wandb/run-20220206_130008-2e07dm9k/logs/debug-internal.log ADDED
@@ -0,0 +1,63 @@
+ 2022-02-06 13:00:09,306 INFO MainThread:2791845 [internal.py:wandb_internal():87] W&B internal server running at pid: 2791845, started at: 2022-02-06 13:00:09.306643
+ 2022-02-06 13:00:09,309 DEBUG HandlerThread:2791845 [handler.py:handle_request():130] handle_request: check_version
+ 2022-02-06 13:00:09,309 INFO WriterThread:2791845 [datastore.py:open_for_write():77] open: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/run-2e07dm9k.wandb
+ 2022-02-06 13:00:09,311 DEBUG SenderThread:2791845 [sender.py:send():234] send: header
+ 2022-02-06 13:00:09,311 DEBUG SenderThread:2791845 [sender.py:send_request():248] send_request: check_version
+ 2022-02-06 13:00:09,449 DEBUG SenderThread:2791845 [sender.py:send():234] send: run
+ 2022-02-06 13:00:09,737 INFO SenderThread:2791845 [dir_watcher.py:__init__():169] watching files in: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/files
+ 2022-02-06 13:00:09,737 INFO SenderThread:2791845 [sender.py:_start_run_threads():804] run started: 2e07dm9k with start time 1644148808
+ 2022-02-06 13:00:09,737 DEBUG SenderThread:2791845 [sender.py:send():234] send: summary
+ 2022-02-06 13:00:09,737 INFO SenderThread:2791845 [sender.py:_save_file():939] saving file wandb-summary.json with policy end
+ 2022-02-06 13:00:09,739 DEBUG HandlerThread:2791845 [handler.py:handle_request():130] handle_request: run_start
+ 2022-02-06 13:00:10,740 INFO Thread-8 :2791845 [dir_watcher.py:_on_file_created():217] file/dir created: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/files/wandb-summary.json
+ 2022-02-06 13:00:11,012 DEBUG HandlerThread:2791845 [meta.py:__init__():40] meta init
+ 2022-02-06 13:00:11,012 DEBUG HandlerThread:2791845 [meta.py:__init__():54] meta init done
+ 2022-02-06 13:00:11,012 DEBUG HandlerThread:2791845 [meta.py:probe():214] probe
+ 2022-02-06 13:00:11,018 DEBUG HandlerThread:2791845 [meta.py:_setup_git():204] setup git
+ 2022-02-06 13:00:11,056 DEBUG HandlerThread:2791845 [meta.py:_setup_git():211] setup git done
+ 2022-02-06 13:00:11,056 DEBUG HandlerThread:2791845 [meta.py:_save_code():92] save code
+ 2022-02-06 13:00:11,074 DEBUG HandlerThread:2791845 [meta.py:_save_code():113] save code done
+ 2022-02-06 13:00:11,074 DEBUG HandlerThread:2791845 [meta.py:_save_patches():130] save patches
+ 2022-02-06 13:00:11,173 DEBUG HandlerThread:2791845 [meta.py:_save_patches():172] save patches done
+ 2022-02-06 13:00:11,174 DEBUG HandlerThread:2791845 [meta.py:_save_pip():58] save pip
+ 2022-02-06 13:00:11,174 DEBUG HandlerThread:2791845 [meta.py:_save_pip():72] save pip done
+ 2022-02-06 13:00:11,174 DEBUG HandlerThread:2791845 [meta.py:probe():252] probe done
+ 2022-02-06 13:00:11,179 DEBUG SenderThread:2791845 [sender.py:send():234] send: files
+ 2022-02-06 13:00:11,180 INFO SenderThread:2791845 [sender.py:_save_file():939] saving file wandb-metadata.json with policy now
+ 2022-02-06 13:00:11,181 INFO SenderThread:2791845 [sender.py:_save_file():939] saving file code/run_speech_recognition_ctc.py with policy now
+ 2022-02-06 13:00:11,181 INFO SenderThread:2791845 [sender.py:_save_file():939] saving file diff.patch with policy now
+ 2022-02-06 13:00:11,191 DEBUG HandlerThread:2791845 [handler.py:handle_request():130] handle_request: stop_status
+ 2022-02-06 13:00:11,192 DEBUG SenderThread:2791845 [sender.py:send_request():248] send_request: stop_status
+ 2022-02-06 13:00:11,432 DEBUG SenderThread:2791845 [sender.py:send():234] send: config
+ 2022-02-06 13:00:11,434 DEBUG SenderThread:2791845 [sender.py:send():234] send: metric
+ 2022-02-06 13:00:11,434 DEBUG SenderThread:2791845 [sender.py:send():234] send: metric
+ 2022-02-06 13:00:11,434 WARNING SenderThread:2791845 [sender.py:send_metric():897] Seen metric with glob (shouldnt happen)
+ 2022-02-06 13:00:11,741 INFO Thread-8 :2791845 [dir_watcher.py:_on_file_created():217] file/dir created: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/files/output.log
+ 2022-02-06 13:00:11,742 INFO Thread-8 :2791845 [dir_watcher.py:_on_file_created():217] file/dir created: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/files/code/run_speech_recognition_ctc.py
+ 2022-02-06 13:00:11,742 INFO Thread-8 :2791845 [dir_watcher.py:_on_file_created():217] file/dir created: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/files/wandb-metadata.json
+ 2022-02-06 13:00:11,742 INFO Thread-8 :2791845 [dir_watcher.py:_on_file_created():217] file/dir created: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/files/requirements.txt
+ 2022-02-06 13:00:11,742 INFO Thread-8 :2791845 [dir_watcher.py:_on_file_created():217] file/dir created: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/files/diff.patch
+ 2022-02-06 13:00:11,743 INFO Thread-8 :2791845 [dir_watcher.py:_on_file_created():217] file/dir created: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/files/code
+ 2022-02-06 13:00:11,866 INFO Thread-11 :2791845 [upload_job.py:push():137] Uploaded file /tmp/tmptftvs6btwandb/1uyos4rs-wandb-metadata.json
+ 2022-02-06 13:00:11,868 INFO Thread-13 :2791845 [upload_job.py:push():137] Uploaded file /tmp/tmptftvs6btwandb/3gb8cxwv-diff.patch
+ 2022-02-06 13:00:11,900 INFO Thread-12 :2791845 [upload_job.py:push():137] Uploaded file /tmp/tmptftvs6btwandb/39sukun6-code/run_speech_recognition_ctc.py
+ 2022-02-06 13:00:13,742 INFO Thread-8 :2791845 [dir_watcher.py:_on_file_modified():230] file/dir modified: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/files/output.log
+ 2022-02-06 13:00:19,550 DEBUG SenderThread:2791845 [sender.py:send():234] send: metric
+ 2022-02-06 13:00:19,551 DEBUG SenderThread:2791845 [sender.py:send():234] send: metric
+ 2022-02-06 13:00:19,551 DEBUG SenderThread:2791845 [sender.py:send():234] send: metric
+ 2022-02-06 13:00:19,551 DEBUG SenderThread:2791845 [sender.py:send():234] send: metric
+ 2022-02-06 13:00:19,551 DEBUG SenderThread:2791845 [sender.py:send():234] send: metric
+ 2022-02-06 13:00:19,551 DEBUG SenderThread:2791845 [sender.py:send():234] send: metric
+ 2022-02-06 13:00:19,551 DEBUG SenderThread:2791845 [sender.py:send():234] send: history
+ 2022-02-06 13:00:19,552 DEBUG SenderThread:2791845 [sender.py:send():234] send: summary
+ 2022-02-06 13:00:19,555 INFO SenderThread:2791845 [sender.py:_save_file():939] saving file wandb-summary.json with policy end
+ 2022-02-06 13:00:19,745 INFO Thread-8 :2791845 [dir_watcher.py:_on_file_modified():230] file/dir modified: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/files/wandb-summary.json
+ 2022-02-06 13:00:21,746 INFO Thread-8 :2791845 [dir_watcher.py:_on_file_modified():230] file/dir modified: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/files/output.log
+ 2022-02-06 13:00:23,748 INFO Thread-8 :2791845 [dir_watcher.py:_on_file_modified():230] file/dir modified: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/files/output.log
+ 2022-02-06 13:00:25,749 INFO Thread-8 :2791845 [dir_watcher.py:_on_file_modified():230] file/dir modified: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/files/output.log
+ 2022-02-06 13:00:26,918 DEBUG HandlerThread:2791845 [handler.py:handle_request():130] handle_request: stop_status
+ 2022-02-06 13:00:26,918 DEBUG SenderThread:2791845 [sender.py:send_request():248] send_request: stop_status
+ 2022-02-06 13:00:39,779 INFO Thread-8 :2791845 [dir_watcher.py:_on_file_modified():230] file/dir modified: /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/files/config.yaml
+ 2022-02-06 13:00:39,993 DEBUG SenderThread:2791845 [sender.py:send():234] send: stats
+ 2022-02-06 13:00:42,146 DEBUG HandlerThread:2791845 [handler.py:handle_request():130] handle_request: stop_status
+ 2022-02-06 13:00:42,147 DEBUG SenderThread:2791845 [sender.py:send_request():248] send_request: stop_status
wandb/run-20220206_130008-2e07dm9k/logs/debug.log ADDED
@@ -0,0 +1,25 @@
+ 2022-02-06 13:00:08,467 INFO MainThread:2790531 [wandb_setup.py:_flush():71] setting env: {'project': 'wav2vec2', 'entity': 'NbAiLab'}
+ 2022-02-06 13:00:08,468 INFO MainThread:2790531 [wandb_setup.py:_flush():71] setting login settings: {}
+ 2022-02-06 13:00:08,468 INFO MainThread:2790531 [wandb_init.py:_log_setup():371] Logging user logs to /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/logs/debug.log
+ 2022-02-06 13:00:08,468 INFO MainThread:2790531 [wandb_init.py:_log_setup():372] Logging internal logs to /mnt/lv_ai_1_dante/javierr/wav2vec2-large-voxrex-npsc-bokmaal/wandb/run-20220206_130008-2e07dm9k/logs/debug-internal.log
+ 2022-02-06 13:00:08,468 INFO MainThread:2790531 [wandb_init.py:init():404] calling init triggers
+ 2022-02-06 13:00:08,468 INFO MainThread:2790531 [wandb_init.py:init():409] wandb.init called with sweep_config: {}
+ config: {}
+ 2022-02-06 13:00:08,468 INFO MainThread:2790531 [wandb_init.py:init():460] starting backend
+ 2022-02-06 13:00:08,468 INFO MainThread:2790531 [backend.py:_multiprocessing_setup():99] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2022-02-06 13:00:08,566 INFO MainThread:2790531 [backend.py:ensure_launched():216] starting backend process...
+ 2022-02-06 13:00:08,658 INFO MainThread:2790531 [backend.py:ensure_launched():221] started backend process with pid: 2791845
+ 2022-02-06 13:00:08,659 INFO MainThread:2790531 [wandb_init.py:init():469] backend started and connected
+ 2022-02-06 13:00:08,667 INFO MainThread:2790531 [wandb_init.py:init():533] updated telemetry
+ 2022-02-06 13:00:08,870 INFO MainThread:2790531 [wandb_init.py:init():563] communicating current version
+ 2022-02-06 13:00:09,447 INFO MainThread:2790531 [wandb_init.py:init():568] got version response upgrade_message: "wandb version 0.12.10 is available! To upgrade, please run:\n $ pip install wandb --upgrade"
+
+ 2022-02-06 13:00:09,447 INFO MainThread:2790531 [wandb_init.py:init():578] communicating run to backend with 30 second timeout
+ 2022-02-06 13:00:09,738 INFO MainThread:2790531 [wandb_init.py:init():606] starting run threads in backend
+ 2022-02-06 13:00:11,187 INFO MainThread:2790531 [wandb_run.py:_console_start():1810] atexit reg
+ 2022-02-06 13:00:11,187 INFO MainThread:2790531 [wandb_run.py:_redirect():1684] redirect: SettingsConsole.REDIRECT
+ 2022-02-06 13:00:11,188 INFO MainThread:2790531 [wandb_run.py:_redirect():1689] Redirecting console.
+ 2022-02-06 13:00:11,191 INFO MainThread:2790531 [wandb_run.py:_redirect():1745] Redirects installed.
+ 2022-02-06 13:00:11,191 INFO MainThread:2790531 [wandb_init.py:init():633] run started, returning control to user process
+ 2022-02-06 13:00:11,195 INFO MainThread:2790531 [wandb_run.py:_config_callback():956] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float32', 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'architectures': ['Wav2Vec2ForCTC'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 31, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': './', 'transformers_version': '4.17.0.dev0', 'feat_extract_dropout': 0.0, 'mask_channel_length': 10, 'mask_channel_min_space': 1, 'mask_channel_other': 0.0, 'mask_channel_prob': 0.0, 'mask_channel_selection': 'static', 'mask_time_min_space': 1, 'mask_time_other': 0.0, 'mask_time_selection': 'static', 'model_type': 'wav2vec2', 'num_feat_extract_layers': 7, 'hidden_size': 1024, 'feat_extract_norm': 'layer', 'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': True, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 24, 'intermediate_size': 4096, 'hidden_act': 'gelu', 'num_attention_heads': 16, 'hidden_dropout': 0.047, 'attention_dropout': 0.094, 'activation_dropout': 0.055, 'feat_proj_dropout': 0.04, 'final_dropout': 0.0, 'layerdrop': 0.041, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 34, 'do_stable_layer_norm': True, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.082, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.25, 'mask_feature_length': 64, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 768, 'proj_codevector_dim': 768, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'mean', 'ctc_zero_infinity': True, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 1024, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': './', 'overwrite_output_dir': True, 'do_train': True, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'per_gpu_train_batch_size': 'None', 'per_gpu_eval_batch_size': 'None', 'gradient_accumulation_steps': 2, 'eval_accumulation_steps': 'None', 'learning_rate': 8.379967082059723e-06, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 0.1, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': -1, 'log_level_replica': -1, 'log_on_each_node': True, 'logging_dir': './runs/Feb06_12-59-06_dante', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 100, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 3, 'save_on_each_node': False, 'no_cuda': False, 'seed': 42, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'amp', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': 'None', 'local_rank': -1, 'xpu_backend': 'None', 'tpu_num_cores': 'None', 'tpu_metrics_debug': False, 'debug': '[]', 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'past_index': -1, 'run_name': './', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': 'None', 'load_best_model_at_end': False, 'metric_for_best_model': 'None', 'greater_is_better': 'None', 'ignore_data_skip': False, 'sharded_ddp': '[]', 'deepspeed': 'None', 'label_smoothing_factor': 0.0, 'optim': 'adamw_hf', 'adafactor': False, 'group_by_length': True, 'length_column_name': 'input_length', 'report_to': "['tensorboard', 'wandb']", 'ddp_find_unused_parameters': 'None', 'ddp_bucket_cap_mb': 'None', 'dataloader_pin_memory': True, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': 'None', 'hub_model_id': 'NbAiLab/wav2vec2-large-voxrex-npsc-bokmaal', 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'gradient_checkpointing': True, 'fp16_backend': 'auto', 'push_to_hub_model_id': 'None', 'push_to_hub_organization': 'None', 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', '_n_gpu': 2, 'mp_parameters': '', 'train_batch_size': 32, 'eval_batch_size': 32}
+ 2022-02-06 13:00:11,200 INFO MainThread:2790531 [wandb_watch.py:watch():43] Watching
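Note: the first entry of this debug.log shows the run's project and entity being picked up from the environment. The equivalent explicit call (a sketch only; not necessarily how the training script initializes the run) would be:

import wandb

# Matches the env settings logged above: project 'wav2vec2', entity 'NbAiLab'.
run = wandb.init(project="wav2vec2", entity="NbAiLab")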
wandb/run-20220206_130008-2e07dm9k/run-2e07dm9k.wandb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d36cee3cf7c131662085ebe71a29d6856678e040e2f0f7466ba989b11513848
+ size 6066