Rolv-Arild committed
Commit
3308cff
1 Parent(s): 1d10b4b

Training in progress, step 400

.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
config.json ADDED
@@ -0,0 +1,282 @@
+ {
+   "_name_or_path": "./",
+   "architectures": [
+     "SpeechEncoderDecoderModel"
+   ],
+   "decoder": {
+     "_name_or_path": "facebook/bart-large",
+     "activation_dropout": 0.1,
+     "activation_function": "gelu",
+     "add_bias_logits": false,
+     "add_cross_attention": true,
+     "add_final_layer_norm": false,
+     "architectures": [
+       "BartModel"
+     ],
+     "attention_dropout": 0.1,
+     "bad_words_ids": null,
+     "bos_token_id": 0,
+     "chunk_size_feed_forward": 0,
+     "classif_dropout": 0.1,
+     "classifier_dropout": 0.0,
+     "cross_attention_hidden_size": null,
+     "d_model": 1024,
+     "decoder_attention_heads": 16,
+     "decoder_ffn_dim": 4096,
+     "decoder_layerdrop": 0.0,
+     "decoder_layers": 12,
+     "decoder_start_token_id": 2,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "dropout": 0.1,
+     "early_stopping": true,
+     "encoder_attention_heads": 16,
+     "encoder_ffn_dim": 4096,
+     "encoder_layerdrop": 0.0,
+     "encoder_layers": 12,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": 2,
+     "gradient_checkpointing": false,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1",
+       "2": "LABEL_2"
+     },
+     "init_std": 0.02,
+     "is_decoder": true,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1,
+       "LABEL_2": 2
+     },
+     "length_penalty": 1.0,
+     "max_length": 20,
+     "max_position_embeddings": 1024,
+     "min_length": 0,
+     "model_type": "bart",
+     "no_repeat_ngram_size": 3,
+     "normalize_before": false,
+     "num_beam_groups": 1,
+     "num_beams": 4,
+     "num_hidden_layers": 12,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 1,
+     "prefix": null,
+     "problem_type": null,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "scale_embedding": false,
+     "sep_token_id": null,
+     "task_specific_params": {
+       "summarization": {
+         "length_penalty": 1.0,
+         "max_length": 128,
+         "min_length": 12,
+         "num_beams": 4
+       },
+       "summarization_cnn": {
+         "length_penalty": 2.0,
+         "max_length": 142,
+         "min_length": 56,
+         "num_beams": 4
+       },
+       "summarization_xsum": {
+         "length_penalty": 1.0,
+         "max_length": 62,
+         "min_length": 11,
+         "num_beams": 6
+       }
+     },
+     "temperature": 1.0,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": null,
+     "torchscript": false,
+     "transformers_version": "4.17.0.dev0",
+     "use_bfloat16": false,
+     "use_cache": true,
+     "vocab_size": 50265
+   },
+   "decoder_start_token_id": 0,
+   "encoder": {
+     "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+     "activation_dropout": 0.0,
+     "adapter_kernel_size": 3,
+     "adapter_stride": 2,
+     "add_adapter": true,
+     "add_cross_attention": false,
+     "apply_spec_augment": true,
+     "architectures": [
+       "Wav2Vec2ForPreTraining"
+     ],
+     "attention_dropout": 0.1,
+     "bad_words_ids": null,
+     "bos_token_id": 1,
+     "chunk_size_feed_forward": 0,
+     "classifier_proj_size": 256,
+     "codevector_dim": 768,
+     "contrastive_logits_temperature": 0.1,
+     "conv_bias": true,
+     "conv_dim": [
+       512,
+       512,
+       512,
+       512,
+       512,
+       512,
+       512
+     ],
+     "conv_kernel": [
+       10,
+       3,
+       3,
+       3,
+       3,
+       2,
+       2
+     ],
+     "conv_stride": [
+       5,
+       2,
+       2,
+       2,
+       2,
+       2,
+       2
+     ],
+     "cross_attention_hidden_size": null,
+     "ctc_loss_reduction": "sum",
+     "ctc_zero_infinity": false,
+     "decoder_start_token_id": null,
+     "diversity_loss_weight": 0.1,
+     "diversity_penalty": 0.0,
+     "do_sample": false,
+     "do_stable_layer_norm": true,
+     "early_stopping": false,
+     "encoder_no_repeat_ngram_size": 0,
+     "eos_token_id": 2,
+     "feat_extract_activation": "gelu",
+     "feat_extract_dropout": 0.0,
+     "feat_extract_norm": "layer",
+     "feat_proj_dropout": 0.0,
+     "feat_quantizer_dropout": 0.0,
+     "final_dropout": 0.0,
+     "finetuning_task": null,
+     "forced_bos_token_id": null,
+     "forced_eos_token_id": null,
+     "gradient_checkpointing": false,
+     "hidden_act": "gelu",
+     "hidden_dropout": 0.1,
+     "hidden_size": 1024,
+     "id2label": {
+       "0": "LABEL_0",
+       "1": "LABEL_1"
+     },
+     "initializer_range": 0.02,
+     "intermediate_size": 4096,
+     "is_decoder": false,
+     "is_encoder_decoder": false,
+     "label2id": {
+       "LABEL_0": 0,
+       "LABEL_1": 1
+     },
+     "layer_norm_eps": 1e-05,
+     "layerdrop": 0.0,
+     "length_penalty": 1.0,
+     "mask_feature_length": 10,
+     "mask_feature_min_masks": 0,
+     "mask_feature_prob": 0.0,
+     "mask_time_length": 10,
+     "mask_time_min_masks": 2,
+     "mask_time_prob": 0.1,
+     "max_length": 20,
+     "min_length": 0,
+     "model_type": "wav2vec2",
+     "no_repeat_ngram_size": 0,
+     "num_adapter_layers": 3,
+     "num_attention_heads": 16,
+     "num_beam_groups": 1,
+     "num_beams": 1,
+     "num_codevector_groups": 2,
+     "num_codevectors_per_group": 320,
+     "num_conv_pos_embedding_groups": 16,
+     "num_conv_pos_embeddings": 128,
+     "num_feat_extract_layers": 7,
+     "num_hidden_layers": 24,
+     "num_negatives": 100,
+     "num_return_sequences": 1,
+     "output_attentions": false,
+     "output_hidden_size": 1024,
+     "output_hidden_states": false,
+     "output_scores": false,
+     "pad_token_id": 0,
+     "prefix": null,
+     "problem_type": null,
+     "proj_codevector_dim": 768,
+     "pruned_heads": {},
+     "remove_invalid_values": false,
+     "repetition_penalty": 1.0,
+     "return_dict": true,
+     "return_dict_in_generate": false,
+     "sep_token_id": null,
+     "task_specific_params": null,
+     "tdnn_dilation": [
+       1,
+       2,
+       3,
+       1,
+       1
+     ],
+     "tdnn_dim": [
+       512,
+       512,
+       512,
+       512,
+       1500
+     ],
+     "tdnn_kernel": [
+       5,
+       3,
+       3,
+       1,
+       1
+     ],
+     "temperature": 1.0,
+     "tie_encoder_decoder": false,
+     "tie_word_embeddings": true,
+     "tokenizer_class": null,
+     "top_k": 50,
+     "top_p": 1.0,
+     "torch_dtype": "float32",
+     "torchscript": false,
+     "transformers_version": "4.17.0.dev0",
+     "use_bfloat16": false,
+     "use_weighted_layer_sum": false,
+     "vocab_size": 32,
+     "xvector_output_dim": 512
+   },
+   "eos_token_id": 2,
+   "is_encoder_decoder": true,
+   "max_length": 200,
+   "model_type": "speech-encoder-decoder",
+   "num_beams": 5,
+   "pad_token_id": 1,
+   "processor_class": "Wav2Vec2Processor",
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": null,
+   "use_cache": false
+ }
create_model.py ADDED
@@ -0,0 +1,30 @@
+ #!/usr/bin/env python3
+ from transformers import SpeechEncoderDecoderModel, AutoFeatureExtractor, AutoTokenizer
+ import torch
+
+
+ encoder_id = "facebook/wav2vec2-xls-r-300m"
+ decoder_id = "facebook/bart-large"
+
+ model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_id, decoder_id, encoder_add_adapter=True)
+ model.config.encoder.feat_proj_dropout = 0.0
+ model.config.encoder.final_dropout = 0.0
+ model.config.encoder.mask_time_prob = 0.1
+ model.config.decoder_start_token_id = model.decoder.config.bos_token_id
+ model.config.pad_token_id = model.decoder.config.pad_token_id
+ model.config.eos_token_id = model.decoder.config.eos_token_id
+ model.config.max_length = 200
+ model.config.num_beams = 5
+ model.config.encoder.layerdrop = 0.0
+ model.config.use_cache = False
+ model.config.processor_class = "Wav2Vec2Processor"
+
+ # check if generation works
+ out = model.generate(torch.ones((1, 2000)))
+
+ model.save_pretrained("./")
+
+ feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
+ feature_extractor.save_pretrained("./")
+ tokenizer = AutoTokenizer.from_pretrained(decoder_id)
+ tokenizer.save_pretrained("./")
merges.txt ADDED
The diff for this file is too large to render. See raw diff
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01ea452512c687c6403be2748f21bf6929c49faa8b618b28e9766e33158e2c18
+ size 2353905713
run.sh ADDED
@@ -0,0 +1,32 @@
+ python run_speech_recognition_seq2seq.py \
+     --dataset_name="NbAiLab/NPSC" \
+     --dataset_config_name="16K_mp3" \
+     --data_cache_dir="/mnt/lv_ai_1_ficino/rolvb/cache" \
+     --model_name_or_path="./" \
+     --output_dir="./" \
+     --preprocessing_num_workers="16" \
+     --length_column_name="input_length" \
+     --overwrite_output_dir \
+     --num_train_epochs="5" \
+     --per_device_train_batch_size="8" \
+     --per_device_eval_batch_size="8" \
+     --gradient_accumulation_steps="8" \
+     --learning_rate="3e-4" \
+     --warmup_steps="400" \
+     --evaluation_strategy="steps" \
+     --text_column_name="text" \
+     --save_steps="400" \
+     --eval_steps="400" \
+     --logging_steps="10" \
+     --save_total_limit="1" \
+     --freeze_feature_encoder \
+     --gradient_checkpointing \
+     --fp16 \
+     --group_by_length \
+     --predict_with_generate \
+     --generation_max_length="40" \
+     --generation_num_beams="1" \
+     --do_train --do_eval \
+     --do_lower_case \
+     --preprocessing_num_workers="8" \
+     --push_to_hub
run_speech_recognition_seq2seq.py ADDED
@@ -0,0 +1,506 @@
+ #!/usr/bin/env python
+ # coding=utf-8
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Fine-tuning the library models for sequence to sequence speech recognition.
+ """
+ # You can also adapt this script on your own sequence to sequence speech
+ # recognition task. Pointers for this are left as comments.
+
+ import logging
+ import os
+ import sys
+ from dataclasses import dataclass, field
+ from typing import Any, Dict, List, Optional, Union
+
+ import datasets
+ import torch
+ from datasets import DatasetDict, load_dataset, load_metric
+
+ import transformers
+ from transformers import (
+     AutoConfig,
+     AutoFeatureExtractor,
+     AutoModelForSpeechSeq2Seq,
+     AutoProcessor,
+     AutoTokenizer,
+     HfArgumentParser,
+     Seq2SeqTrainer,
+     Seq2SeqTrainingArguments,
+     set_seed,
+ )
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
+ from transformers.utils import check_min_version
+ from transformers.utils.versions import require_version
+
+
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
+ check_min_version("4.17.0.dev0")
+
+ require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
+
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class ModelArguments:
+     """
+     Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
+     """
+
+     model_name_or_path: str = field(
+         metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
+     )
+     config_name: Optional[str] = field(
+         default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
+     )
+     tokenizer_name: Optional[str] = field(
+         default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
+     )
+     feature_extractor_name: Optional[str] = field(
+         default=None, metadata={"help": "Feature extractor name or path if not the same as model_name"}
+     )
+     cache_dir: Optional[str] = field(
+         default=None,
+         metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
+     )
+     use_fast_tokenizer: bool = field(
+         default=True,
+         metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
+     )
+     model_revision: str = field(
+         default="main",
+         metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
+     )
+     use_auth_token: bool = field(
+         default=False,
+         metadata={
+             "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
+             "with private models)."
+         },
+     )
+     freeze_feature_encoder: bool = field(
+         default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
+     )
+
+
+ @dataclass
+ class DataTrainingArguments:
+     """
+     Arguments pertaining to what data we are going to input our model for training and eval.
+     """
+
+     dataset_name: str = field(
+         default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
+     )
+     dataset_config_name: Optional[str] = field(
+         default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
+     )
+     text_column: Optional[str] = field(
+         default=None,
+         metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
+     )
+     overwrite_cache: bool = field(
+         default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
+     )
+     preprocessing_num_workers: Optional[int] = field(
+         default=None,
+         metadata={"help": "The number of processes to use for the preprocessing."},
+     )
+     max_train_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
+             "value if set."
+         },
+     )
+     max_eval_samples: Optional[int] = field(
+         default=None,
+         metadata={
+             "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
+             "value if set."
+         },
+     )
+     audio_column_name: str = field(
+         default="audio",
+         metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
+     )
+     text_column_name: str = field(
+         default="text",
+         metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
+     )
+     max_duration_in_seconds: float = field(
+         default=20.0,
+         metadata={
+             "help": "Truncate audio files that are longer than `max_duration_in_seconds` seconds to `max_duration_in_seconds`"
+         },
+     )
+     min_duration_in_seconds: float = field(
+         default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
+     )
+     preprocessing_only: bool = field(
+         default=False,
+         metadata={
+             "help": "Whether to only do data preprocessing and skip training. "
+             "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
+             "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
+             "so that the cached datasets can consequently be loaded in distributed training"
+         },
+     )
+     train_split_name: str = field(
+         default="train",
+         metadata={
+             "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
+         },
+     )
+     eval_split_name: str = field(
+         default="test",
+         metadata={
+             "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"
+         },
+     )
+     do_lower_case: bool = field(
+         default=True,
+         metadata={"help": "Whether the target text should be lower cased."},
+     )
+     data_cache_dir: Optional[str] = field(
+         default=None,
+         metadata={"help": "Cache directory for dataset."}
+     )
+
+
+ @dataclass
+ class DataCollatorSpeechSeq2SeqWithPadding:
+     """
+     Data collator that will dynamically pad the inputs received.
+     Args:
+         processor ([`Wav2Vec2Processor`])
+             The processor used for processing the data.
+         decoder_start_token_id (`int`)
+             The begin-of-sentence token id of the decoder.
+     """
+
+     processor: Any
+     decoder_start_token_id: int
+
+     def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
+         # split inputs and labels since they have to be of different lengths and need
+         # different padding methods
+         input_features = [{"input_values": feature["input_values"]} for feature in features]
+         label_features = [{"input_ids": feature["labels"]} for feature in features]
+
+         batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")
+
+         labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")
+
+         # replace padding with -100 to ignore loss correctly
+         labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
+
+         # if bos token is appended in previous tokenization step,
+         # cut bos token here as it's appended later anyways
+         if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
+             labels = labels[:, 1:]
+
+         batch["labels"] = labels
+
+         return batch
+
+
+ def main():
+     # 1. Parse input arguments
+     # See all possible arguments in src/transformers/training_args.py
+     # or by passing the --help flag to this script.
+     # We now keep distinct sets of args, for a cleaner separation of concerns.
+     parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
+
+     if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+         # If we pass only one argument to the script and it's the path to a json file,
+         # let's parse it to get our arguments.
+         model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+     else:
+         model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+     # 2. Setup logging
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         handlers=[logging.StreamHandler(sys.stdout)],
+     )
+     log_level = training_args.get_process_log_level()
+     logger.setLevel(log_level)
+     datasets.utils.logging.set_verbosity(log_level)
+     transformers.utils.logging.set_verbosity(log_level)
+     transformers.utils.logging.enable_default_handler()
+     transformers.utils.logging.enable_explicit_format()
+
+     logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
+
+     # Log on each process the small summary:
+     logger.warning(
+         f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+         f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
+     )
+     logger.info(f"Training/evaluation parameters {training_args}")
+
+     # Set the verbosity to info of the Transformers logger (on main process only):
+     if is_main_process(training_args.local_rank):
+         transformers.utils.logging.set_verbosity_info()
+     logger.info("Training/evaluation parameters %s", training_args)
+
+     # 3. Detecting last checkpoint and eventually continue from last checkpoint
+     last_checkpoint = None
+     if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+         last_checkpoint = get_last_checkpoint(training_args.output_dir)
+         if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+             raise ValueError(
+                 f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                 "Use --overwrite_output_dir to overcome."
+             )
+         elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+             logger.info(
+                 f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                 "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+             )
+
+     # Set seed before initializing model.
+     set_seed(training_args.seed)
+
+     # 4. Load dataset
+     raw_datasets = DatasetDict()
+
+     if training_args.do_train:
+         raw_datasets["train"] = load_dataset(
+             data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, cache_dir=data_args.data_cache_dir
+         )
+
+     if training_args.do_eval:
+         raw_datasets["eval"] = load_dataset(
+             data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, cache_dir=data_args.data_cache_dir
+         )
+
+     if data_args.audio_column_name not in next(iter(raw_datasets.values())).column_names:
+         raise ValueError(
+             f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
+             "Make sure to set `--audio_column_name` to the correct audio column - one of "
+             f"{', '.join(next(iter(raw_datasets.values())).column_names)}."
+         )
+
+     if data_args.text_column_name not in next(iter(raw_datasets.values())).column_names:
+         raise ValueError(
+             f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
+             "Make sure to set `--text_column_name` to the correct text column - one of "
+             f"{', '.join(next(iter(raw_datasets.values())).column_names)}."
+         )
+
+     # 5. Load pretrained model, tokenizer, and feature extractor
+     #
+     # Distributed training:
+     # The .from_pretrained methods guarantee that only one local process can concurrently
+     config = AutoConfig.from_pretrained(
+         model_args.config_name if model_args.config_name else model_args.model_name_or_path,
+         cache_dir=model_args.cache_dir,
+         revision=model_args.model_revision,
+         use_auth_token=True if model_args.use_auth_token else None,
+     )
+
+     feature_extractor = AutoFeatureExtractor.from_pretrained(
+         model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path,
+         cache_dir=model_args.cache_dir,
+         revision=model_args.model_revision,
+         use_auth_token=True if model_args.use_auth_token else None,
+     )
+     tokenizer = AutoTokenizer.from_pretrained(
+         model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
+         cache_dir=model_args.cache_dir,
+         use_fast=model_args.use_fast_tokenizer,
+         revision=model_args.model_revision,
+         use_auth_token=True if model_args.use_auth_token else None,
+     )
+     model = AutoModelForSpeechSeq2Seq.from_pretrained(
+         model_args.model_name_or_path,
+         config=config,
+         cache_dir=model_args.cache_dir,
+         revision=model_args.model_revision,
+         use_auth_token=True if model_args.use_auth_token else None,
+     )
+
+     if model.config.decoder_start_token_id is None:
+         raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
+
+     if model_args.freeze_feature_encoder:
+         model.freeze_feature_encoder()
+
+     # 6. Resample speech dataset if necessary
+     dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
+     if dataset_sampling_rate != feature_extractor.sampling_rate:
+         raw_datasets = raw_datasets.cast_column(
+             data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
+         )
+
+     # 7. Preprocessing the datasets.
+     # We need to read the audio files as arrays and tokenize the targets.
+     max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
+     min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
+     audio_column_name = data_args.audio_column_name
+     num_workers = data_args.preprocessing_num_workers
+     text_column_name = data_args.text_column_name
+     model_input_name = feature_extractor.model_input_names[0]
+     do_lower_case = data_args.do_lower_case
+
+     if data_args.max_train_samples is not None:
+         raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
+
+     if data_args.max_eval_samples is not None:
+         raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
+
+     def prepare_dataset(batch):
+         # process audio
+         sample = batch[audio_column_name]
+         inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
+         # process audio length
+         batch[model_input_name] = inputs.input_values[0]
+         batch["input_length"] = len(batch["input_values"])
+
+         # process targets
+         input_str = batch[text_column_name].lower() if do_lower_case else batch[text_column_name]
+         batch["labels"] = tokenizer(input_str).input_ids
+         return batch
+
+     with training_args.main_process_first(desc="dataset map pre-processing"):
+         vectorized_datasets = raw_datasets.map(
+             prepare_dataset,
+             remove_columns=next(iter(raw_datasets.values())).column_names,
+             num_proc=data_args.preprocessing_num_workers,
+             desc="preprocess train dataset",
+         )
+
+     # filter data that is shorter than min_input_length or longer than
+     # max_input_length
+     def is_audio_in_length_range(length):
+         return length > min_input_length and length < max_input_length
+
+     vectorized_datasets = vectorized_datasets.filter(
+         is_audio_in_length_range,
+         num_proc=num_workers,
+         input_columns=["input_length"],
+     )
+
+     # for large datasets it is advised to run the preprocessing on a
+     # single machine first with `args.preprocessing_only` since there will most likely
+     # be a timeout when running the script in distributed mode.
+     # In a second step `args.preprocessing_only` can then be set to `False` to load the
+     # cached dataset
+     if data_args.preprocessing_only:
+         cache = {k: v.cache_files for k, v in vectorized_datasets.items()}
+         logger.info(f"Data preprocessing finished. Files cached at {cache}.")
+         return
+
+     # 8. Load Metric
+     metric = load_metric("wer")
+
+     def compute_metrics(pred):
+         pred_ids = pred.predictions
+
+         pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
+
+         pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
+         # we do not want to group tokens when computing the metrics
+         label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)
+
+         wer = metric.compute(predictions=pred_str, references=label_str)
+
+         return {"wer": wer}
+
+     # 9. Create a single speech processor
+     if is_main_process(training_args.local_rank):
+         # save feature extractor, tokenizer and config
+         feature_extractor.save_pretrained(training_args.output_dir)
+         tokenizer.save_pretrained(training_args.output_dir)
+         config.save_pretrained(training_args.output_dir)
+
+     processor = AutoProcessor.from_pretrained(training_args.output_dir)
+
+     # 10. Define data collator
+     data_collator = DataCollatorSpeechSeq2SeqWithPadding(
+         processor=processor, decoder_start_token_id=model.config.decoder_start_token_id
+     )
+
+     # 11. Initialize Trainer
+     trainer = Seq2SeqTrainer(
+         model=model,
+         args=training_args,
+         train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
+         eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
+         tokenizer=feature_extractor,
+         data_collator=data_collator,
+         compute_metrics=compute_metrics if training_args.predict_with_generate else None,
+     )
+
+     # 12. Training
+     if training_args.do_train:
+         checkpoint = None
+         if training_args.resume_from_checkpoint is not None:
+             checkpoint = training_args.resume_from_checkpoint
+         elif last_checkpoint is not None:
+             checkpoint = last_checkpoint
+         train_result = trainer.train(resume_from_checkpoint=checkpoint)
+         trainer.save_model()  # Saves the feature extractor too for easy upload
+
+         metrics = train_result.metrics
+         max_train_samples = (
+             data_args.max_train_samples
+             if data_args.max_train_samples is not None
+             else len(vectorized_datasets["train"])
+         )
+         metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
+         trainer.log_metrics("train", metrics)
+         trainer.save_metrics("train", metrics)
+         trainer.save_state()
+
+     # 13. Evaluation
+     results = {}
+     if training_args.do_eval:
+         logger.info("*** Evaluate ***")
+         metrics = trainer.evaluate(
+             metric_key_prefix="eval", max_length=model.config.max_length, num_beams=model.config.num_beams
+         )
+         max_eval_samples = (
+             data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
+         )
+         metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
+
+         trainer.log_metrics("eval", metrics)
+         trainer.save_metrics("eval", metrics)
+
+     # 14. Write Training Stats
+     kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "speech recognition"}
+     if data_args.dataset_name is not None:
+         kwargs["dataset_tags"] = data_args.dataset_name
+         if data_args.dataset_config_name is not None:
+             kwargs["dataset_args"] = data_args.dataset_config_name
+             kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
+         else:
+             kwargs["dataset"] = data_args.dataset_name
+
+     if training_args.push_to_hub:
+         trainer.push_to_hub(**kwargs)
+     else:
+         trainer.create_model_card(**kwargs)
+
+     return results
+
+
+ if __name__ == "__main__":
+     main()
runs/Feb04_15-13-00_ficino/1643984060.5462978/events.out.tfevents.1643984060.ficino.743775.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef92f0eb59dc9ba98e41e3f2744c4f5cc122622dbb36e9f884e0b3b287beb791
+ size 4957
runs/Feb04_15-13-00_ficino/events.out.tfevents.1643984060.ficino.743775.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8457d5c3699ee30cc165b04d7b23c1a498d87df8631cdd3279613cfea36efebe
+ size 16126
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "trim_offsets": true, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "./", "tokenizer_class": "BartTokenizer"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00cbe42c27e92b2a6094fa4ea0e115b53eee63fd0c175ad7df7ddb17127f0f59
+ size 3119
vocab.json ADDED
The diff for this file is too large to render. See raw diff
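For reference, a minimal sketch (not part of the commit) of running the checkpoint committed here for transcription: it assumes the files above are present in the current directory, uses only the model class and processor files declared in config.json and preprocessor_config.json, and feeds a dummy one-second silent waveform at the 16 kHz rate from preprocessor_config.json as a stand-in for real audio.

#!/usr/bin/env python3
# Usage sketch, assuming it is run from the repo root where config.json,
# pytorch_model.bin, preprocessor_config.json and the tokenizer files live.
import numpy as np
import torch
from transformers import AutoFeatureExtractor, AutoTokenizer, SpeechEncoderDecoderModel

model = SpeechEncoderDecoderModel.from_pretrained("./")
feature_extractor = AutoFeatureExtractor.from_pretrained("./")
tokenizer = AutoTokenizer.from_pretrained("./")
model.eval()

# Dummy input: one second of silence at 16 kHz (replace with a real recording).
waveform = np.zeros(16000, dtype=np.float32)
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    generated_ids = model.generate(inputs.input_values)

print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])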