shpotes committed on
Commit
9cb64f6
1 Parent(s): a9c6c15

Training in progress, step 500

.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,74 @@
+ ---
+ language:
+ - et
+ license: apache-2.0
+ tags:
+ - automatic-speech-recognition
+ - mozilla-foundation/common_voice_8_0
+ - generated_from_trainer
+ datasets:
+ - common_voice
+ model-index:
+ - name: ''
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ #
+
+ This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - ET dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4623
+ - Wer: 0.3420
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0003
+ - train_batch_size: 72
+ - eval_batch_size: 72
+ - seed: 42
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 144
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 500
+ - num_epochs: 100.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Wer    |
+ |:-------------:|:-----:|:----:|:---------------:|:------:|
+ | 0.3082        | 12.5  | 500  | 0.3871          | 0.4907 |
+ | 0.1497        | 25.0  | 1000 | 0.4168          | 0.4278 |
+ | 0.1243        | 37.5  | 1500 | 0.4446          | 0.4220 |
+ | 0.0954        | 50.0  | 2000 | 0.4426          | 0.3946 |
+ | 0.0741        | 62.5  | 2500 | 0.4502          | 0.3800 |
+ | 0.0533        | 75.0  | 3000 | 0.4618          | 0.3653 |
+ | 0.0447        | 87.5  | 3500 | 0.4518          | 0.3461 |
+ | 0.0396        | 100.0 | 4000 | 0.4623          | 0.3420 |
+
+
+ ### Framework versions
+
+ - Transformers 4.16.0.dev0
+ - Pytorch 1.10.1+cu102
+ - Datasets 1.18.4.dev0
+ - Tokenizers 0.11.0
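
For reference, a minimal inference sketch for the resulting checkpoint. The model id below is a placeholder (the model card's `name` field is empty), so substitute the real hub id or a local clone of this repo; `sample.wav` is a hypothetical 16 kHz recording.

```python
# Minimal ASR inference sketch for this fine-tuned wav2vec2 CTC checkpoint.
# "<hub-id-or-local-path>" and sample.wav are placeholders, not from the commit.
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="<hub-id-or-local-path>")
print(asr("sample.wav")["text"])
```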
added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<s>": 31, "</s>": 32}
config.json ADDED
@@ -0,0 +1,107 @@
+ {
+   "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+   "activation_dropout": 0.0,
+   "adapter_kernel_size": 3,
+   "adapter_stride": 2,
+   "add_adapter": false,
+   "apply_spec_augment": true,
+   "architectures": [
+     "Wav2Vec2ForCTC"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "codevector_dim": 768,
+   "contrastive_logits_temperature": 0.1,
+   "conv_bias": true,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "mean",
+   "ctc_zero_infinity": false,
+   "diversity_loss_weight": 0.1,
+   "do_stable_layer_norm": true,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_dropout": 0.0,
+   "feat_extract_norm": "layer",
+   "feat_proj_dropout": 0.1,
+   "feat_quantizer_dropout": 0.0,
+   "final_dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.0,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.1,
+   "model_type": "wav2vec2",
+   "num_adapter_layers": 3,
+   "num_attention_heads": 16,
+   "num_codevector_groups": 2,
+   "num_codevectors_per_group": 320,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 24,
+   "num_negatives": 100,
+   "output_hidden_size": 1024,
+   "pad_token_id": 30,
+   "proj_codevector_dim": 768,
+   "tdnn_dilation": [
+     1,
+     2,
+     3,
+     1,
+     1
+   ],
+   "tdnn_dim": [
+     512,
+     512,
+     512,
+     512,
+     1500
+   ],
+   "tdnn_kernel": [
+     5,
+     3,
+     3,
+     1,
+     1
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.16.0.dev0",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 33,
+   "xvector_output_dim": 512
+ }
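
The CTC-specific fields above have to stay in sync with the tokenizer files later in this commit. A small sanity-check sketch, assuming the repository is cloned locally at `./` (hypothetical path):

```python
# Sanity-check sketch: config.json vs. the tokenizer files in this commit.
from transformers import AutoConfig, Wav2Vec2CTCTokenizer

config = AutoConfig.from_pretrained("./")
tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")

# vocab.json holds 31 symbols; <s>/</s> from added_tokens.json bring it to 33
assert config.vocab_size == len(tokenizer) == 33
# [PAD] (id 30) is both the padding token and the CTC blank
assert config.pad_token_id == tokenizer.pad_token_id == 30
```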
eval.py ADDED
@@ -0,0 +1,127 @@
+ #!/usr/bin/env python3
+ from datasets import load_dataset, load_metric, Audio, Dataset
+ from transformers import pipeline, AutoFeatureExtractor
+ import re
+ import argparse
+ import unicodedata
+ from typing import Dict
+
+
+ def log_results(result: Dataset, args: Dict[str, str]):
+     """DO NOT CHANGE. This function computes and logs the result metrics."""
+
+     log_outputs = args.log_outputs
+     dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
+
+     # load metric
+     wer = load_metric("wer")
+     cer = load_metric("cer")
+
+     # compute metrics
+     wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
+     cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
+
+     # print & log results
+     result_str = (
+         f"WER: {wer_result}\n"
+         f"CER: {cer_result}"
+     )
+     print(result_str)
+
+     with open(f"{dataset_id}_eval_results.txt", "w") as f:
+         f.write(result_str)
+
+     # log all results in text file. Possibly interesting for analysis
+     if log_outputs is not None:
+         pred_file = f"log_{dataset_id}_predictions.txt"
+         target_file = f"log_{dataset_id}_targets.txt"
+
+         with open(pred_file, "w") as p, open(target_file, "w") as t:
+
+             # mapping function to write output
+             def write_to_file(batch, i):
+                 p.write(f"{i}" + "\n")
+                 p.write(batch["prediction"] + "\n")
+                 t.write(f"{i}" + "\n")
+                 t.write(batch["target"] + "\n")
+
+             result.map(write_to_file, with_indices=True)
+
+
+ def normalize_text(text: str) -> str:
+     """DO ADAPT FOR YOUR USE CASE. This function normalizes the target text."""
+
+     chars_to_ignore_regex = '[,?.!\-\;\:\"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
+
+     text = text.lower()
+     # normalize non-standard (stylized) unicode characters
+     text = unicodedata.normalize("NFKC", text)
+     # remove punctuation
+     text = re.sub(chars_to_ignore_regex, "", text)
+
+     # Let's also make sure we split on all kinds of newlines, spaces, etc...
+     text = " ".join(text.split())
+
+     return text
+
+
+ def main(args):
+     # load dataset
+     dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
+
+     # for testing: only process the first few examples as a test
+     # dataset = dataset.select(range(10))
+
+     # load processor
+     feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
+     sampling_rate = feature_extractor.sampling_rate
+
+     # resample audio
+     dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
+
+     # load eval pipeline
+     asr = pipeline("automatic-speech-recognition", model=args.model_id)
+
+     # map function to decode audio
+     def map_to_pred(batch):
+         prediction = asr(batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
+
+         batch["prediction"] = prediction["text"]
+         batch["target"] = normalize_text(batch["sentence"])
+         return batch
+
+     # run inference on all examples
+     result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
+
+     # compute and log results
+     # do not change function below
+     log_results(result, args)
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+
+     parser.add_argument(
+         "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
+     )
+     parser.add_argument(
+         "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets"
+     )
+     parser.add_argument(
+         "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
+     )
+     parser.add_argument(
+         "--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`"
+     )
+     parser.add_argument(
+         "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to None. For long audio files a good value would be 5.0 seconds."
+     )
+     parser.add_argument(
+         "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to None. For long audio files a good value would be 1.0 seconds."
+     )
+     parser.add_argument(
+         "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
+     )
+     args = parser.parse_args()
+
+     main(args)
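
As a usage note, this script is normally run from the CLI, e.g. `python eval.py --model_id <hub-id> --dataset mozilla-foundation/common_voice_8_0 --config et --split test --log_outputs`. The sketch below drives `main` programmatically instead; the model id is a hypothetical placeholder.

```python
# Hypothetical programmatic invocation of eval.py's main(), equivalent to the
# CLI call above; run from the repo root so eval.py is importable.
import argparse

from eval import main

args = argparse.Namespace(
    model_id="<hub-id-or-local-path>",  # placeholder, not from the commit
    dataset="mozilla-foundation/common_voice_8_0",
    config="et",
    split="test",
    chunk_length_s=None,
    stride_length_s=None,
    log_outputs=True,
)
main(args)
```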
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
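
A small sketch of what the 16 kHz setting above means in practice: Common Voice audio typically ships at 48 kHz, so it is cast to the extractor's rate before feature extraction, the same cast eval.py performs. The local path is illustrative.

```python
# Sketch: resample Common Voice audio to the extractor's 16 kHz before use.
from datasets import Audio, load_dataset
from transformers import AutoFeatureExtractor

feature_extractor = AutoFeatureExtractor.from_pretrained("./")  # reads preprocessor_config.json
dataset = load_dataset("mozilla-foundation/common_voice_8_0", "et", split="test", use_auth_token=True)
dataset = dataset.cast_column("audio", Audio(sampling_rate=feature_extractor.sampling_rate))

sample = dataset[0]["audio"]  # decoded and resampled to 16 kHz on access
inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt")
print(inputs.input_values.shape)
```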
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90c3fb536f56901decbad9a2a36f4f0cd91baf1ef72980643536c368e56e9730
+ size 1262058993
requierments.txt ADDED
@@ -0,0 +1,7 @@
+ git+https://github.com/huggingface/transformers.git#egg=transformers[torch-speech]
+ git+https://github.com/huggingface/datasets.git#egg=datasets[streaming]
+ torchaudio
+ librosa
+ jiwer
+ bitsandbytes-cuda113
+ wandb
run-300M.sh ADDED
@@ -0,0 +1,38 @@
+ #!/bin/sh
+
+ export WANDB_PROJECT="xls-r-basque"
+ export CUDA_VISIBLE_DEVICES=2
+
+ python src/run_speech_recognition_ctc_bnb.py \
+     --dataset_name="mozilla-foundation/common_voice_8_0" \
+     --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
+     --dataset_config_name="eu" \
+     --output_dir="./" \
+     --overwrite_output_dir \
+     --num_train_epochs=100 \
+     --per_device_train_batch_size=72 \
+     --per_device_eval_batch_size=72 \
+     --gradient_accumulation_steps=2 \
+     --learning_rate=3e-4 \
+     --save_total_limit=1 \
+     --warmup_steps=500 \
+     --evaluation_strategy=steps \
+     --text_column_name=sentence \
+     --length_column_name=input_length \
+     --save_steps=500 \
+     --eval_steps=500 \
+     --logging_steps=100 \
+     --layerdrop=0.0 \
+     --freeze_feature_encoder \
+     --feat_proj_dropout=0.1 \
+     --chars_to_ignore , ? . ! \- \; \: \" “ % ‘ ” � — ’ … – \
+     --gradient_checkpointing \
+     --lr_scheduler_type=cosine \
+     --fp16 \
+     --group_by_length \
+     --mask_time_prob=0.1 \
+     --mask_time_length=10 \
+     --report_to=wandb \
+     --run_name="cosine+drop_proj+low_specaugment-300M+cv_8_0" \
+     --do_train --do_eval \
+     --use_auth_token --push_to_hub
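
The `--chars_to_ignore` list above becomes a regex character class inside the training script (see src/run_speech_recognition_ctc_bnb.py below). A minimal sketch of that step, with the characters `re.escape()`d for safety, whereas the script joins the raw list verbatim; the Basque sentence is a made-up example.

```python
# Sketch of the punctuation-stripping step driven by --chars_to_ignore above.
import re

chars_to_ignore = [",", "?", ".", "!", "-", ";", ":", '"', "“", "%", "‘", "”", "�", "—", "’", "…", "–"]
chars_to_ignore_regex = f"[{re.escape(''.join(chars_to_ignore))}]"

print(re.sub(chars_to_ignore_regex, "", "Kaixo, mundua!").lower() + " ")
# -> 'kaixo mundua '
```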
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
src/__init__.py ADDED
File without changes
src/run_speech_recognition_ctc_bnb.py ADDED
@@ -0,0 +1,760 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""
+
+ import functools
+ import json
+ import logging
+ import os
+ import re
+ import sys
+ import warnings
+ from dataclasses import dataclass, field
+ from typing import Dict, List, Optional, Union
+
+ import datasets
+ import numpy as np
+ import torch
+ from datasets import DatasetDict, load_dataset, load_metric
+
+ import bitsandbytes as bnb
+ import transformers
+ from transformers import (
+     AutoConfig,
+     AutoFeatureExtractor,
+     AutoModelForCTC,
+     AutoProcessor,
+     AutoTokenizer,
+     HfArgumentParser,
+     Trainer,
+     TrainingArguments,
+     Wav2Vec2Processor,
+     set_seed,
+ )
+ from transformers.trainer_pt_utils import get_parameter_names
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
+ from transformers.utils import check_min_version
+ from transformers.utils.versions import require_version
+
+
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risk.
+ check_min_version("4.16.0.dev0")
+
+ require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
+
+
+ logger = logging.getLogger(__name__)
+
+
+ def list_field(default=None, metadata=None):
+     return field(default_factory=lambda: default, metadata=metadata)
+
+
+ @dataclass
+ class ModelArguments:
+     """
+     Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
+     """
+
+     model_name_or_path: str = field(
+         metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
+     )
+     tokenizer_name_or_path: Optional[str] = field(
+         default=None,
+         metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
+     )
+     cache_dir: Optional[str] = field(
+         default=None,
+         metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
+     )
+     freeze_feature_encoder: bool = field(
+         default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
+     )
+     attention_dropout: float = field(
+         default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
+     )
+     activation_dropout: float = field(
+         default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
+     )
+     feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
+     hidden_dropout: float = field(
+         default=0.0,
+         metadata={
+             "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
+         },
+     )
+     final_dropout: float = field(
+         default=0.0,
+         metadata={"help": "The dropout probability for the final projection layer."},
+     )
+     mask_time_prob: float = field(
+         default=0.05,
+         metadata={
+             "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector"
+             "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
+             "vectors will be masked along the time axis."
+         },
+     )
+     mask_time_length: int = field(
+         default=10,
+         metadata={"help": "Length of vector span to mask along the time axis."},
+     )
+     mask_feature_prob: float = field(
+         default=0.0,
+         metadata={
+             "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector"
+             "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the time axis."
+         },
+     )
+     mask_feature_length: int = field(
+         default=10,
+         metadata={"help": "Length of vector span to mask along the feature axis."},
+     )
+     layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
+     ctc_loss_reduction: Optional[str] = field(
+         default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
+     )
+
+
+ @dataclass
+ class DataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+
+     Using `HfArgumentParser` we can turn this class
+     into argparse arguments to be able to specify them on
+     the command line.
+     """
+
+     dataset_name: str = field(
+         metadata={"help": "The name of the dataset to use (via the datasets library)."}
+     )
+     dataset_config_name: str = field(
+         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+     )
+     train_split_name: str = field(
+         default="train+validation",
+         metadata={
+             "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train+validation'"
+         },
+     )
+     eval_split_name: str = field(
+         default="test",
+         metadata={
+             "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"
+         },
+     )
+     audio_column_name: str = field(
+         default="audio",
+         metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
+     )
+     text_column_name: str = field(
+         default="text",
+         metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
+     )
+     preprocessing_num_workers: Optional[int] = field(
+         default=None,
+         metadata={"help": "The number of processes to use for the preprocessing."},
+     )
+     max_train_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
+             "value if set."
+         },
+     )
+     max_eval_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
+             "value if set."
+         },
+     )
+     chars_to_ignore: Optional[List[str]] = list_field(
+         default=None,
+         metadata={"help": "A list of characters to remove from the transcripts."},
+     )
+     eval_metrics: List[str] = list_field(
+         default=["wer"],
+         metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
+     )
+     max_duration_in_seconds: float = field(
+         default=20.0,
+         metadata={
+             "help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"
+         },
+     )
+     min_duration_in_seconds: float = field(
+         default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
+     )
+     preprocessing_only: bool = field(
+         default=False,
+         metadata={
+             "help": "Whether to only do data preprocessing and skip training. "
+             "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
+             "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
+             "so that the cached datasets can consequently be loaded in distributed training"
+         },
+     )
+     use_auth_token: bool = field(
+         default=False,
+         metadata={
+             "help": "If :obj:`True`, will use the token generated when running"
+             ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
+         },
+     )
+     unk_token: str = field(
+         default="[UNK]",
+         metadata={"help": "The unk token for the tokenizer"},
+     )
+     pad_token: str = field(
+         default="[PAD]",
+         metadata={"help": "The padding token for the tokenizer"},
+     )
+     word_delimiter_token: str = field(
+         default="|",
+         metadata={"help": "The word delimiter token for the tokenizer"},
+     )
+     phoneme_language: Optional[str] = field(
+         default=None,
+         metadata={
+             "help": "The target language that should be"
+             " passed to the tokenizer for tokenization. Note that"
+             " this is only relevant if the model classifies the"
+             " input audio to a sequence of phoneme sequences."
+         },
+     )
+
+
+ @dataclass
+ class DataCollatorCTCWithPadding:
+     """
+     Data collator that will dynamically pad the inputs received.
+     Args:
+         processor (:class:`~transformers.AutoProcessor`)
+             The processor used for processing the data.
+         padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
+             Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
+             among:
+             * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+               sequence is provided).
+             * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
+               maximum acceptable input length for the model if that argument is not provided.
+             * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
+               different lengths).
+         max_length (:obj:`int`, `optional`):
+             Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
+         max_length_labels (:obj:`int`, `optional`):
+             Maximum length of the ``labels`` returned list and optionally padding length (see above).
+         pad_to_multiple_of (:obj:`int`, `optional`):
+             If set will pad the sequence to a multiple of the provided value.
+             This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
+             7.5 (Volta).
+     """
+
+     processor: AutoProcessor
+     padding: Union[bool, str] = "longest"
+     pad_to_multiple_of: Optional[int] = None
+     pad_to_multiple_of_labels: Optional[int] = None
+
+     def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
+         # split inputs and labels since they have to be of different lengths and need
+         # different padding methods
+         input_features = [{"input_values": feature["input_values"]} for feature in features]
+         label_features = [{"input_ids": feature["labels"]} for feature in features]
+
+         batch = self.processor.pad(
+             input_features,
+             padding=self.padding,
+             pad_to_multiple_of=self.pad_to_multiple_of,
+             return_tensors="pt",
+         )
+
+         with self.processor.as_target_processor():
+             labels_batch = self.processor.pad(
+                 label_features,
+                 padding=self.padding,
+                 pad_to_multiple_of=self.pad_to_multiple_of_labels,
+                 return_tensors="pt",
+             )
+
+         # replace padding with -100 to ignore loss correctly
+         labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
+
+         batch["labels"] = labels
+
+         return batch
+
+
+ def create_vocabulary_from_data(
+     datasets: DatasetDict,
+     word_delimiter_token: Optional[str] = None,
+     unk_token: Optional[str] = None,
+     pad_token: Optional[str] = None,
+ ):
+     # Given training and test labels create vocabulary
+     def extract_all_chars(batch):
+         all_text = " ".join(batch["target_text"])
+         vocab = list(set(all_text))
+         return {"vocab": [vocab], "all_text": [all_text]}
+
+     vocabs = datasets.map(
+         extract_all_chars,
+         batched=True,
+         batch_size=-1,
+         keep_in_memory=True,
+         remove_columns=datasets["train"].column_names,
+     )
+
+     # take union of all unique characters in each dataset
+     vocab_set = functools.reduce(
+         lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
+     )
+
+     vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}
+
+     # replace white space with delimiter token
+     if word_delimiter_token is not None:
+         vocab_dict[word_delimiter_token] = vocab_dict[" "]
+         del vocab_dict[" "]
+
+     # add unk and pad token
+     if unk_token is not None:
+         vocab_dict[unk_token] = len(vocab_dict)
+
+     if pad_token is not None:
+         vocab_dict[pad_token] = len(vocab_dict)
+
+     return vocab_dict
+
+
+ def main():
+     # See all possible arguments in src/transformers/training_args.py
+     # or by passing the --help flag to this script.
+     # We now keep distinct sets of args, for a cleaner separation of concerns.
+
+     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # If we pass only one argument to the script and it's the path to a json file,
+         # let's parse it to get our arguments.
+         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+     else:
+         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+     # Detecting last checkpoint.
+     last_checkpoint = None
+     if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+         last_checkpoint = get_last_checkpoint(training_args.output_dir)
+         if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+             raise ValueError(
+                 f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                 "Use --overwrite_output_dir to overcome."
+             )
+         elif last_checkpoint is not None:
+             logger.info(
+                 f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                 "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+             )
+
+     # Setup logging
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         handlers=[logging.StreamHandler(sys.stdout)],
+     )
+     logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
+
+     # Log on each process the small summary:
+     logger.warning(
+         f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+         f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
+     )
+     # Set the verbosity to info of the Transformers logger (on main process only):
+     if is_main_process(training_args.local_rank):
+         transformers.utils.logging.set_verbosity_info()
+     logger.info("Training/evaluation parameters %s", training_args)
+
+     # Set seed before initializing model.
+     set_seed(training_args.seed)
+
+     # 1. First, let's load the dataset
+     raw_datasets = DatasetDict()
+
+     if training_args.do_train:
+         raw_datasets["train"] = load_dataset(
+             data_args.dataset_name,
+             data_args.dataset_config_name,
+             split=data_args.train_split_name,
+             use_auth_token=data_args.use_auth_token,
+         )
+
+         if data_args.audio_column_name not in raw_datasets["train"].column_names:
+             raise ValueError(
+                 f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
+                 "Make sure to set `--audio_column_name` to the correct audio column - one of "
+                 f"{', '.join(raw_datasets['train'].column_names)}."
+             )
+
+         if data_args.text_column_name not in raw_datasets["train"].column_names:
+             raise ValueError(
+                 f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
+                 "Make sure to set `--text_column_name` to the correct text column - one of "
+                 f"{', '.join(raw_datasets['train'].column_names)}."
+             )
+
+         if data_args.max_train_samples is not None:
+             raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
+
+     if training_args.do_eval:
+         raw_datasets["eval"] = load_dataset(
+             data_args.dataset_name,
+             data_args.dataset_config_name,
+             split=data_args.eval_split_name,
+             use_auth_token=data_args.use_auth_token,
+         )
+
+         if data_args.max_eval_samples is not None:
+             raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
+
+     # 2. We remove some special characters from the datasets
+     # that make training complicated and do not help in transcribing the speech
+     # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
+     # that could be easily picked up by the model
+     chars_to_ignore_regex = (
+         f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
+     )
+     text_column_name = data_args.text_column_name
+
+     def remove_special_characters(batch):
+         if chars_to_ignore_regex is not None:
+             batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
+         else:
+             batch["target_text"] = batch[text_column_name].lower() + " "
+         return batch
+
+     with training_args.main_process_first(desc="dataset map special characters removal"):
+         raw_datasets = raw_datasets.map(
+             remove_special_characters,
+             remove_columns=[text_column_name],
+             desc="remove special characters from datasets",
+         )
+
+     # save special tokens for tokenizer
+     word_delimiter_token = data_args.word_delimiter_token
+     unk_token = data_args.unk_token
+     pad_token = data_args.pad_token
+
+     # 3. Next, let's load the config as we might need it to create
+     # the tokenizer
+     # load config
+     config = AutoConfig.from_pretrained(
+         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
+     )
+
+     # 4. Next, if no tokenizer file is defined,
+     # we create the vocabulary of the model by extracting all unique characters from
+     # the training and evaluation datasets
+     # We need to make sure that only first rank saves vocabulary
+     # make sure all processes wait until vocab is created
+     tokenizer_name_or_path = model_args.tokenizer_name_or_path
+     tokenizer_kwargs = {}
+     if tokenizer_name_or_path is None:
+         # save vocab in training output dir
+         tokenizer_name_or_path = training_args.output_dir
+
+         vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
+
+         with training_args.main_process_first():
+             if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
+                 os.remove(vocab_file)
+
+         with training_args.main_process_first(desc="dataset map vocabulary creation"):
+             if not os.path.isfile(vocab_file):
+                 os.makedirs(tokenizer_name_or_path, exist_ok=True)
+                 vocab_dict = create_vocabulary_from_data(
+                     raw_datasets,
+                     word_delimiter_token=word_delimiter_token,
+                     unk_token=unk_token,
+                     pad_token=pad_token,
+                 )
+
+                 # save vocab dict to be loaded into tokenizer
+                 with open(vocab_file, "w") as file:
+                     json.dump(vocab_dict, file)
+
+         # if tokenizer has just been created
+         # it is defined by `tokenizer_class` if present in config else by `model_type`
+         tokenizer_kwargs = {
+             "config": config if config.tokenizer_class is not None else None,
+             "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
+             "unk_token": unk_token,
+             "pad_token": pad_token,
+             "word_delimiter_token": word_delimiter_token,
+         }
+
+     # 5. Now we can instantiate the feature extractor, tokenizer and model
+     # Note for distributed training, the .from_pretrained methods guarantee that only
+     # one local process can concurrently download model & vocab.
+
+     # load feature_extractor and tokenizer
+     tokenizer = AutoTokenizer.from_pretrained(
+         tokenizer_name_or_path,
+         use_auth_token=data_args.use_auth_token,
+         **tokenizer_kwargs,
+     )
+     feature_extractor = AutoFeatureExtractor.from_pretrained(
+         model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
+     )
+
+     # adapt config
+     config.update(
+         {
+             "feat_proj_dropout": model_args.feat_proj_dropout,
+             "attention_dropout": model_args.attention_dropout,
+             "hidden_dropout": model_args.hidden_dropout,
+             "final_dropout": model_args.final_dropout,
+             "mask_time_prob": model_args.mask_time_prob,
+             "mask_time_length": model_args.mask_time_length,
+             "mask_feature_prob": model_args.mask_feature_prob,
+             "mask_feature_length": model_args.mask_feature_length,
+             "gradient_checkpointing": training_args.gradient_checkpointing,
+             "layerdrop": model_args.layerdrop,
+             "ctc_loss_reduction": model_args.ctc_loss_reduction,
+             "pad_token_id": tokenizer.pad_token_id,
+             "vocab_size": len(tokenizer),
+             "activation_dropout": model_args.activation_dropout,
+         }
+     )
+
+     # create model
+     model = AutoModelForCTC.from_pretrained(
+         model_args.model_name_or_path,
+         cache_dir=model_args.cache_dir,
+         config=config,
+         use_auth_token=data_args.use_auth_token,
+     )
+
+     # freeze encoder
+     if model_args.freeze_feature_encoder:
+         model.freeze_feature_encoder()
+
+     # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
+     # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
+     # so that we just need to set the correct target sampling rate and normalize the input
+     # via the `feature_extractor`
+
+     # make sure that dataset decodes audio with correct sampling rate
+     dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
+     if dataset_sampling_rate != feature_extractor.sampling_rate:
+         raw_datasets = raw_datasets.cast_column(
+             data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
+         )
+
+     # derive max & min input length for sample rate & max duration
+     max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
+     min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
+     audio_column_name = data_args.audio_column_name
+     num_workers = data_args.preprocessing_num_workers
+
+     # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
+     phoneme_language = data_args.phoneme_language
+
+     # Preprocessing the datasets.
+     # We need to read the audio files as arrays and tokenize the targets.
+     def prepare_dataset(batch):
+         # load audio
+         sample = batch[audio_column_name]
+
+         inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
+         batch["input_values"] = inputs.input_values[0]
+         batch["input_length"] = len(batch["input_values"])
+
+         # encode targets
+         additional_kwargs = {}
+         if phoneme_language is not None:
+             additional_kwargs["phonemizer_lang"] = phoneme_language
+
+         batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
+         return batch
+
+     with training_args.main_process_first(desc="dataset map preprocessing"):
+         vectorized_datasets = raw_datasets.map(
+             prepare_dataset,
+             remove_columns=next(iter(raw_datasets.values())).column_names,
+             num_proc=num_workers,
+             desc="preprocess datasets",
+         )
+
+     def is_audio_in_length_range(length):
+         return length > min_input_length and length < max_input_length
+
+     # filter data that is shorter than min_input_length
+     vectorized_datasets = vectorized_datasets.filter(
+         is_audio_in_length_range,
+         num_proc=num_workers,
+         input_columns=["input_length"],
+     )
+
+     # 7. Next, we can prepare the training.
+     # Let's use word error rate (WER) as our evaluation metric,
+     # instantiate a data collator and the trainer
+
+     # Define evaluation metrics during training, *i.e.* word error rate, character error rate
+     eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}
+
+     # for large datasets it is advised to run the preprocessing on a
+     # single machine first with ``args.preprocessing_only`` since there will most likely
+     # be a timeout when running the script in distributed mode.
+     # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
+     # cached dataset
+     if data_args.preprocessing_only:
+         logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
+         return
+
+     def compute_metrics(pred):
+         pred_logits = pred.predictions
+         pred_ids = np.argmax(pred_logits, axis=-1)
+
+         pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
+
+         pred_str = tokenizer.batch_decode(pred_ids)
+         # we do not want to group tokens when computing the metrics
+         label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
+
+         metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}
+
+         return metrics
+
+     # Now save everything to be able to create a single processor later
+     if is_main_process(training_args.local_rank):
+         # save feature extractor, tokenizer and config
+         feature_extractor.save_pretrained(training_args.output_dir)
+         tokenizer.save_pretrained(training_args.output_dir)
+         config.save_pretrained(training_args.output_dir)
+
+     try:
+         processor = AutoProcessor.from_pretrained(training_args.output_dir)
+     except (OSError, KeyError):
+         warnings.warn(
+             "Loading a processor from a feature extractor config that does not"
+             " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
+             " attribute to your `preprocessor_config.json` file to suppress this warning: "
+             " `'processor_class': 'Wav2Vec2Processor'`",
+             FutureWarning,
+         )
+         processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)
+
+     # Instantiate custom data collator
+     data_collator = DataCollatorCTCWithPadding(processor=processor)
+
+     decay_parameters = get_parameter_names(model, [torch.nn.LayerNorm])
+     decay_parameters = [name for name in decay_parameters if "bias" not in name]
+     optimizer_grouped_parameters = [
+         {
+             "params": [p for n, p in model.named_parameters() if n in decay_parameters],
+             "weight_decay": training_args.weight_decay,
+         },
+         {
+             "params": [p for n, p in model.named_parameters() if n not in decay_parameters],
+             "weight_decay": 0.0,
+         },
+     ]
+     optimizer = bnb.optim.Adam8bit(
+         params=optimizer_grouped_parameters,
+         betas=(training_args.adam_beta1, training_args.adam_beta2),
+         eps=training_args.adam_epsilon,
+     )
+
+     optimizers = (optimizer, None)
+
+     # Initialize Trainer
+     trainer = Trainer(
+         model=model,
+         data_collator=data_collator,
+         args=training_args,
+         compute_metrics=compute_metrics,
+         train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
+         eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
+         tokenizer=feature_extractor,
+         optimizers=optimizers,
+     )
+
+     # 8. Finally, we can start training
+
+     # Training
+     if training_args.do_train:
+
+         # use last checkpoint if it exists
+         if last_checkpoint is not None:
+             checkpoint = last_checkpoint
+         elif os.path.isdir(model_args.model_name_or_path):
+             checkpoint = model_args.model_name_or_path
+         else:
+             checkpoint = None
+
+         train_result = trainer.train(resume_from_checkpoint=checkpoint)
+         trainer.save_model()
+
+         metrics = train_result.metrics
+         max_train_samples = (
+             data_args.max_train_samples
+             if data_args.max_train_samples is not None
+             else len(vectorized_datasets["train"])
+         )
+         metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
+
+         trainer.log_metrics("train", metrics)
+         trainer.save_metrics("train", metrics)
+         trainer.save_state()
+
+     # Evaluation
+     results = {}
+     if training_args.do_eval:
+         logger.info("*** Evaluate ***")
+         metrics = trainer.evaluate()
+         max_eval_samples = (
+             data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
+         )
+         metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
+
+         trainer.log_metrics("eval", metrics)
+         trainer.save_metrics("eval", metrics)
+
+     # Write model card and (optionally) push to hub
+     config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
+     kwargs = {
+         "finetuned_from": model_args.model_name_or_path,
+         "tasks": "speech-recognition",
+         "tags": ["automatic-speech-recognition", data_args.dataset_name],
+         "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
+         "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
+     }
+     if "common_voice" in data_args.dataset_name:
+         kwargs["language"] = config_name
+
+     if training_args.push_to_hub:
+         trainer.push_to_hub(**kwargs)
+     else:
+         trainer.create_model_card(**kwargs)
+
+     return results
+
+
+ if __name__ == "__main__":
+     main()
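
To make the vocabulary step above concrete, here is a toy run of the same logic as `create_vocabulary_from_data`, a sketch on made-up strings that assumes only the `datasets` library:

```python
# Toy sketch of the character-vocabulary construction used above.
from datasets import Dataset, DatasetDict

toy = DatasetDict({
    "train": Dataset.from_dict({"target_text": ["kaixo mundua "]}),
    "eval": Dataset.from_dict({"target_text": ["agur "]}),
})

# union of all characters across splits, sorted into a char->id map
chars = set("".join(toy["train"]["target_text"] + toy["eval"]["target_text"]))
vocab = {c: i for i, c in enumerate(sorted(chars))}
vocab["|"] = vocab.pop(" ")   # the word delimiter replaces the space
vocab["[UNK]"] = len(vocab)   # unknown-token id
vocab["[PAD]"] = len(vocab)   # padding id, also used as the CTC blank
print(vocab)
```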
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c19e96618f39d839667183717b1addf5ff614f17eaf5b2ed4d00159fb275d6fa
+ size 3055
vocab.json ADDED
@@ -0,0 +1 @@
+ {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "g": 7, "h": 8, "i": 9, "j": 10, "k": 11, "l": 12, "m": 13, "n": 14, "o": 15, "p": 16, "q": 17, "r": 18, "s": 19, "t": 20, "u": 21, "v": 22, "w": 23, "x": 24, "y": 25, "z": 26, "í": 27, "ñ": 28, "|": 0, "[UNK]": 29, "[PAD]": 30}
wandb/debug-internal.log ADDED
@@ -0,0 +1 @@
+ run-20220205_233515-2f29fa6z/logs/debug-internal.log
wandb/debug.log ADDED
@@ -0,0 +1 @@
+ run-20220205_233515-2f29fa6z/logs/debug.log
wandb/latest-run ADDED
@@ -0,0 +1 @@
+ run-20220205_233515-2f29fa6z
wandb/run-20220205_233515-2f29fa6z/files/config.yaml ADDED
The diff for this file is too large to render. See raw diff
 
wandb/run-20220205_233515-2f29fa6z/files/output.log ADDED
@@ -0,0 +1,602 @@
+ 1%|▌ | 100/11800 [12:45<25:55:55, 7.98s/it]
+ 2%|█ | 199/11800 [24:54<20:08:45, 6.25s/it]
+ 3%|█▌ | 299/11800 [37:18<29:05:53, 9.11s/it]
+ 3%|██ | 400/11800 [49:53<22:55:28, 7.24s/it]
+ 4%|██▍ | 499/11800 [1:01:50<17:39:50, 5.63s/it]
+ 4%|██▍ | 500/11800 [1:01:55<16:52:37, 5.38s/it]
+ The following columns in the evaluation set don't have a corresponding argument in `Wav2Vec2ForCTC.forward` and have been ignored: input_length.
+ ***** Running Evaluation *****
+ Num examples = 6463
+ Batch size = 72
+ Configuration saved in ./checkpoint-500/config.json
+ Model weights saved in ./checkpoint-500/pytorch_model.bin
+ Configuration saved in ./checkpoint-500/preprocessor_config.json
+ {'eval_loss': 0.2469930201768875, 'eval_wer': 0.36629738582545746, 'eval_runtime': 294.3209, 'eval_samples_per_second': 21.959, 'eval_steps_per_second': 0.306, 'epoch': 4.24}
wandb/run-20220205_233515-2f29fa6z/files/requirements.txt ADDED
@@ -0,0 +1,87 @@
+ aiohttp==3.8.1
+ aiosignal==1.2.0
+ appdirs==1.4.4
+ async-timeout==4.0.2
+ attrs==21.4.0
+ audioread==2.1.9
+ bitsandbytes-cuda113==0.26.0
+ certifi==2021.10.8
+ cffi==1.15.0
+ charset-normalizer==2.0.10
+ click==8.0.3
+ clldutils==3.10.1
+ colorlog==6.6.0
+ configparser==5.2.0
+ csvw==1.11.0
+ datasets==1.18.4.dev0
+ decorator==5.1.1
+ dill==0.3.4
+ dlinfo==1.2.1
+ docker-pycreds==0.4.0
+ filelock==3.4.2
+ frozenlist==1.3.0
+ fsspec==2022.1.0
+ gitdb==4.0.9
+ gitpython==3.1.26
+ huggingface-hub==0.4.0
+ hypothesis==6.36.0
+ idna==3.3
+ isodate==0.6.1
+ jiwer==2.3.0
+ joblib==1.1.0
+ librosa==0.8.1
+ llvmlite==0.38.0
+ multidict==6.0.2
+ multiprocess==0.70.12.2
+ numba==0.55.0
+ numpy==1.21.5
+ packaging==21.3
+ pandas==1.4.0
+ pathtools==0.1.2
+ phonemizer==3.0.1
+ pip==21.3.1
+ pooch==1.6.0
+ promise==2.3
+ protobuf==3.19.3
+ psutil==5.9.0
+ pyarrow==6.0.1
+ pycparser==2.21
+ pyctcdecode==0.3.0
+ pygtrie==2.4.2
+ pyparsing==3.0.7
+ python-dateutil==2.8.2
+ python-levenshtein==0.12.2
+ pytz==2021.3
+ pyyaml==6.0
+ regex==2022.1.18
+ requests==2.27.1
+ resampy==0.2.2
+ rfc3986==2.0.0
+ sacremoses==0.0.47
+ scikit-learn==1.0.2
+ scipy==1.7.3
+ segments==2.2.0
+ sentry-sdk==1.5.4
+ setuptools==60.2.0
+ shortuuid==1.0.8
+ six==1.16.0
+ smmap==5.0.0
+ sortedcontainers==2.4.0
+ soundfile==0.10.3.post1
+ subprocess32==3.5.4
+ tabulate==0.8.9
+ termcolor==1.1.0
+ threadpoolctl==3.0.0
+ tokenizers==0.11.4
+ torch==1.10.1
+ torchaudio==0.10.1
+ tqdm==4.62.3
+ transformers==4.16.0.dev0
+ typing-extensions==4.0.1
+ uritemplate==4.1.1
+ urllib3==1.26.8
+ wandb==0.12.9
+ wheel==0.37.1
+ xxhash==2.0.2
+ yarl==1.7.2
+ yaspin==2.1.0
wandb/run-20220205_233515-2f29fa6z/files/wandb-metadata.json ADDED
@@ -0,0 +1,76 @@
+ {
+   "os": "Linux-4.18.0-305.10.2.el8_4.x86_64-x86_64-with-glibc2.28",
+   "python": "3.9.6",
+   "heartbeatAt": "2022-02-06T04:35:16.820568",
+   "startedAt": "2022-02-06T04:35:15.779968",
+   "docker": null,
+   "gpu": "Tesla V100-PCIE-32GB",
+   "gpu_count": 3,
+   "cpu_count": 64,
+   "cuda": null,
+   "args": [
+     "--dataset_name=mozilla-foundation/common_voice_8_0",
+     "--model_name_or_path=facebook/wav2vec2-xls-r-300m",
+     "--dataset_config_name=eu",
+     "--output_dir=./",
+     "--overwrite_output_dir",
+     "--num_train_epochs=100",
+     "--per_device_train_batch_size=72",
+     "--per_device_eval_batch_size=72",
+     "--gradient_accumulation_steps=2",
+     "--learning_rate=3e-4",
+     "--save_total_limit=1",
+     "--warmup_steps=500",
+     "--evaluation_strategy=steps",
+     "--text_column_name=sentence",
+     "--length_column_name=input_length",
+     "--save_steps=500",
+     "--eval_steps=500",
+     "--logging_steps=100",
+     "--layerdrop=0.0",
+     "--freeze_feature_encoder",
+     "--feat_proj_dropout=0.1",
+     "--chars_to_ignore",
+     ",",
+     "?",
+     ".",
+     "!",
+     "-",
+     ";",
+     ":",
+     "\"",
+     "\u201c",
+     "%",
+     "\u2018",
+     "\u201d",
+     "\ufffd",
+     "\u2014",
+     "\u2019",
+     "\u2026",
+     "\u2013",
+     "--gradient_checkpointing",
+     "--lr_scheduler_type=cosine",
+     "--fp16",
+     "--group_by_length",
+     "--mask_time_prob=0.1",
+     "--mask_time_length=10",
+     "--report_to=wandb",
+     "--run_name=cosine+drop_proj+low_specaugment-300M+cv_8_0",
+     "--do_train",
+     "--do_eval",
+     "--use_auth_token",
+     "--push_to_hub"
+   ],
+   "state": "running",
+   "program": "/home/sagrilaft/Project/audio/xls-r-eus/src/run_speech_recognition_ctc_bnb.py",
+   "codePath": "src/run_speech_recognition_ctc_bnb.py",
+   "git": {
+     "remote": "https://huggingface.co/shpotes/xls-r-eus",
+     "commit": "a9c6c150723de524a89bd71107dad862f6bd86e5"
+   },
+   "email": "shpotes3@gmail.com",
+   "root": "/home/sagrilaft/Project/audio/xls-r-eus",
+   "host": "ganymede.eafit.edu.co",
+   "username": "sagrilaft",
+   "executable": "/home/sagrilaft/Project/audio/xls-r-et/.venv/bin/python"
+ }
wandb/run-20220205_233515-2f29fa6z/files/wandb-summary.json ADDED
The diff for this file is too large to render. See raw diff
 
wandb/run-20220205_233515-2f29fa6z/logs/debug-internal.log ADDED
The diff for this file is too large to render. See raw diff
 
wandb/run-20220205_233515-2f29fa6z/logs/debug.log ADDED
@@ -0,0 +1,25 @@
+ 2022-02-05 23:35:15,783 INFO MainThread:3431832 [wandb_setup.py:_flush():71] setting env: {'project': 'xls-r-basque'}
+ 2022-02-05 23:35:15,783 INFO MainThread:3431832 [wandb_setup.py:_flush():71] setting login settings: {}
+ 2022-02-05 23:35:15,783 INFO MainThread:3431832 [wandb_init.py:_log_setup():371] Logging user logs to /home/sagrilaft/Project/audio/xls-r-eus/wandb/run-20220205_233515-2f29fa6z/logs/debug.log
+ 2022-02-05 23:35:15,783 INFO MainThread:3431832 [wandb_init.py:_log_setup():372] Logging internal logs to /home/sagrilaft/Project/audio/xls-r-eus/wandb/run-20220205_233515-2f29fa6z/logs/debug-internal.log
+ 2022-02-05 23:35:15,784 INFO MainThread:3431832 [wandb_init.py:init():404] calling init triggers
+ 2022-02-05 23:35:15,784 INFO MainThread:3431832 [wandb_init.py:init():409] wandb.init called with sweep_config: {}
+ config: {}
+ 2022-02-05 23:35:15,784 INFO MainThread:3431832 [wandb_init.py:init():460] starting backend
+ 2022-02-05 23:35:15,784 INFO MainThread:3431832 [backend.py:_multiprocessing_setup():99] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2022-02-05 23:35:15,810 INFO MainThread:3431832 [backend.py:ensure_launched():216] starting backend process...
+ 2022-02-05 23:35:15,832 INFO MainThread:3431832 [backend.py:ensure_launched():221] started backend process with pid: 3433017
+ 2022-02-05 23:35:15,834 INFO MainThread:3431832 [wandb_init.py:init():469] backend started and connected
+ 2022-02-05 23:35:15,839 INFO MainThread:3431832 [wandb_init.py:init():533] updated telemetry
+ 2022-02-05 23:35:15,895 INFO MainThread:3431832 [wandb_init.py:init():563] communicating current version
+ 2022-02-05 23:35:16,539 INFO MainThread:3431832 [wandb_init.py:init():568] got version response upgrade_message: "wandb version 0.12.10 is available!  To upgrade, please run:\n $ pip install wandb --upgrade"
+ 
+ 2022-02-05 23:35:16,539 INFO MainThread:3431832 [wandb_init.py:init():578] communicating run to backend with 30 second timeout
+ 2022-02-05 23:35:16,813 INFO MainThread:3431832 [wandb_init.py:init():606] starting run threads in backend
+ 2022-02-05 23:35:16,859 INFO MainThread:3431832 [wandb_run.py:_console_start():1810] atexit reg
+ 2022-02-05 23:35:16,861 INFO MainThread:3431832 [wandb_run.py:_redirect():1684] redirect: SettingsConsole.REDIRECT
+ 2022-02-05 23:35:16,861 INFO MainThread:3431832 [wandb_run.py:_redirect():1689] Redirecting console.
+ 2022-02-05 23:35:16,864 INFO MainThread:3431832 [wandb_run.py:_redirect():1745] Redirects installed.
+ 2022-02-05 23:35:16,865 INFO MainThread:3431832 [wandb_init.py:init():633] run started, returning control to user process
+ 2022-02-05 23:35:16,882 INFO MainThread:3431832 [wandb_run.py:_config_callback():956] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float32', 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 30, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-xls-r-300m', 'transformers_version': '4.16.0.dev0', 'feat_extract_dropout': 0.0, 'model_type': 'wav2vec2', 'num_feat_extract_layers': 7, 'hidden_size': 1024, 'feat_extract_norm': 'layer', 'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': True, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 24, 'intermediate_size': 4096, 'hidden_act': 'gelu', 'num_attention_heads': 16, 'hidden_dropout': 0.0, 'attention_dropout': 0.0, 'activation_dropout': 0.0, 'feat_proj_dropout': 0.1, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 33, 'do_stable_layer_norm': True, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.1, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.0, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 768, 'proj_codevector_dim': 768, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'mean', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 1024, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': './', 'overwrite_output_dir': True, 'do_train': True, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 72, 'per_device_eval_batch_size': 72, 'per_gpu_train_batch_size': 'None', 'per_gpu_eval_batch_size': 'None', 'gradient_accumulation_steps': 2, 'eval_accumulation_steps': 'None', 'learning_rate': 0.0003, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 100.0, 'max_steps': -1, 'lr_scheduler_type': 'cosine', 'warmup_ratio': 0.0, 'warmup_steps': 500, 'log_level': -1, 'log_level_replica': -1, 'log_on_each_node': True, 'logging_dir': './runs/Feb05_23-23-24_ganymede.eafit.edu.co', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 100, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 1, 'save_on_each_node': False, 'no_cuda': False, 'seed': 42, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'amp', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': 'None', 'local_rank': -1, 'xpu_backend': 'None', 'tpu_num_cores': 'None', 'tpu_metrics_debug': False, 'debug': '[]', 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'past_index': -1, 'run_name': 'cosine+drop_proj+low_specaugment-300M+cv_8_0', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': 'None', 'load_best_model_at_end': False, 'metric_for_best_model': 'None', 'greater_is_better': 'None', 'ignore_data_skip': False, 'sharded_ddp': '[]', 'deepspeed': 'None', 'label_smoothing_factor': 0.0, 'optim': 'adamw_hf', 'adafactor': False, 'group_by_length': True, 'length_column_name': 'input_length', 'report_to': "['wandb']", 'ddp_find_unused_parameters': 'None', 'ddp_bucket_cap_mb': 'None', 'dataloader_pin_memory': True, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': 'None', 'hub_model_id': 'None', 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'gradient_checkpointing': True, 'fp16_backend': 'auto', 'push_to_hub_model_id': 'None', 'push_to_hub_organization': 'None', 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', '_n_gpu': 1, 'mp_parameters': '', 'train_batch_size': 72, 'eval_batch_size': 72}
+ 2022-02-05 23:35:16,885 INFO MainThread:3431832 [wandb_watch.py:watch():43] Watching
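The `config_cb` dump above records the effective Trainer configuration for this run. For reference, a minimal sketch of the equivalent `TrainingArguments` under the transformers 4.16-era API, setting only the values visible in the log and leaving everything else at its defaults:

```python
# Sketch of the TrainingArguments implied by the config_cb dump above.
# Values are copied from the log; all other fields keep their defaults.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./",
    overwrite_output_dir=True,
    do_train=True,
    do_eval=True,
    evaluation_strategy="steps",
    per_device_train_batch_size=72,
    per_device_eval_batch_size=72,
    gradient_accumulation_steps=2,   # effective batch size 144
    learning_rate=3e-4,
    num_train_epochs=100.0,
    lr_scheduler_type="cosine",
    warmup_steps=500,
    logging_steps=100,
    save_strategy="steps",
    save_steps=500,
    save_total_limit=1,
    eval_steps=500,
    seed=42,
    fp16=True,                        # Native AMP, per the model card
    gradient_checkpointing=True,
    group_by_length=True,
    length_column_name="input_length",
    run_name="cosine+drop_proj+low_specaugment-300M+cv_8_0",
    report_to=["wandb"],
    push_to_hub=True,
)
```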
wandb/run-20220205_233515-2f29fa6z/run-2f29fa6z.wandb ADDED
Binary file (3.55 MB). View file