masapasa committed
Commit 263939c
1 Parent(s): 49c626a

Training in progress, step 5

.gitignore ADDED
@@ -0,0 +1 @@
1
+ checkpoint-*/
.ipynb_checkpoints/debugger_ovh_transformers-checkpoint.ipynb ADDED
@@ -0,0 +1,768 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# HuggingFace challenge - Debugger notebook\n",
8
+ "Run this notebook to verify your libraries versions, check GPU config and run a quick training"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "code",
13
+ "execution_count": 1,
14
+ "metadata": {
15
+ "id": "T2utsYSKszvv"
16
+ },
17
+ "outputs": [],
18
+ "source": [
19
+ "import platform\n",
20
+ "import multiprocessing\n",
21
+ "\n",
22
+ "import torch\n",
23
+ "import transformers\n",
24
+ "import datasets\n",
25
+ "\n",
26
+ "import soundfile"
27
+ ]
28
+ },
29
+ {
30
+ "cell_type": "markdown",
31
+ "metadata": {},
32
+ "source": [
33
+ "## Print main infos"
34
+ ]
35
+ },
36
+ {
37
+ "cell_type": "code",
38
+ "execution_count": 2,
39
+ "metadata": {
40
+ "colab": {
41
+ "base_uri": "https://localhost:8080/"
42
+ },
43
+ "id": "5P6I-W9ts-kR",
44
+ "outputId": "939bd550-1486-46a6-8371-e82ada0f448c"
45
+ },
46
+ "outputs": [
47
+ {
48
+ "name": "stdout",
49
+ "output_type": "stream",
50
+ "text": [
51
+ "Platform: Linux-5.11.0-37-generic-x86_64-with-glibc2.10\n",
52
+ "CPU cores: 60\n",
53
+ "Python version: 3.8.8\n",
54
+ "PyTorch version: 1.10.1+cu102\n",
55
+ "GPU is visible: True\n",
56
+ "Transformers version: 4.16.0.dev0\n",
57
+ "Datasets version: 1.17.1.dev0\n",
58
+ "soundfile version: 0.10.3\n"
59
+ ]
60
+ }
61
+ ],
62
+ "source": [
63
+ "print(f\"Platform: {platform.platform()}\")\n",
64
+ "print(f\"CPU cores: {multiprocessing.cpu_count()}\")\n",
65
+ "\n",
66
+ "print(f\"Python version: {platform.python_version()}\")\n",
67
+ "\n",
68
+ "print(f\"PyTorch version: {torch.__version__}\")\n",
69
+ "print(f\"GPU is visible: {torch.cuda.is_available()}\")\n",
70
+ "\n",
71
+ "print(f\"Transformers version: {transformers.__version__}\")\n",
72
+ "print(f\"Datasets version: {datasets.__version__}\")\n",
73
+ "\n",
74
+ "print(f\"soundfile version: {soundfile.__version__}\")"
75
+ ]
76
+ },
77
+ {
78
+ "cell_type": "markdown",
79
+ "metadata": {},
80
+ "source": [
81
+ "## Check your GPU informations (if any)\n",
82
+ "If you launched an AI Training job with GPU resources, they should be listed below (Tesla V100s 32GB).\n",
83
+ "Driver and CUDA version "
84
+ ]
85
+ },
86
+ {
87
+ "cell_type": "code",
88
+ "execution_count": 3,
89
+ "metadata": {
90
+ "colab": {
91
+ "base_uri": "https://localhost:8080/"
92
+ },
93
+ "id": "YT7fRnKctggU",
94
+ "outputId": "f355a3e0-20da-489f-bd1f-5e508e792a68"
95
+ },
96
+ "outputs": [
97
+ {
98
+ "name": "stdout",
99
+ "output_type": "stream",
100
+ "text": [
101
+ "Wed Jan 12 10:34:59 2022 \n",
102
+ "+-----------------------------------------------------------------------------+\n",
103
+ "| NVIDIA-SMI 470.57.02 Driver Version: 470.57.02 CUDA Version: 11.4 |\n",
104
+ "|-------------------------------+----------------------+----------------------+\n",
105
+ "| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n",
106
+ "| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n",
107
+ "| | | MIG M. |\n",
108
+ "|===============================+======================+======================|\n",
109
+ "| 0 Tesla V100S-PCI... Off | 00000000:00:07.0 Off | 0 |\n",
110
+ "| N/A 35C P0 27W / 250W | 4MiB / 32510MiB | 0% Default |\n",
111
+ "| | | N/A |\n",
112
+ "+-------------------------------+----------------------+----------------------+\n",
113
+ " \n",
114
+ "+-----------------------------------------------------------------------------+\n",
115
+ "| Processes: |\n",
116
+ "| GPU GI CI PID Type Process name GPU Memory |\n",
117
+ "| ID ID Usage |\n",
118
+ "|=============================================================================|\n",
119
+ "| No running processes found |\n",
120
+ "+-----------------------------------------------------------------------------+\n"
121
+ ]
122
+ }
123
+ ],
124
+ "source": [
125
+ "!nvidia-smi"
126
+ ]
127
+ },
128
+ {
129
+ "cell_type": "markdown",
130
+ "metadata": {
131
+ "id": "TorMtpwPv6RQ"
132
+ },
133
+ "source": [
134
+ "## Quick training run with a dummy model and data\n",
135
+ "more information on https://github.com/huggingface/transformers/tree/master/examples/pytorch/speech-recognition"
136
+ ]
137
+ },
138
+ {
139
+ "cell_type": "code",
140
+ "execution_count": 2,
141
+ "metadata": {
142
+ "colab": {
143
+ "base_uri": "https://localhost:8080/"
144
+ },
145
+ "id": "fevoJD15u4Ss",
146
+ "outputId": "5861d34e-745b-45ee-e780-ed363043e655"
147
+ },
148
+ "outputs": [
149
+ {
150
+ "name": "stdout",
151
+ "output_type": "stream",
152
+ "text": [
153
+ "--2022-01-31 16:15:26-- https://raw.githubusercontent.com/huggingface/transformers/master/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py\n",
154
+ "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\n",
155
+ "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\n",
156
+ "HTTP request sent, awaiting response... 200 OK\n",
157
+ "Length: 30360 (30K) [text/plain]\n",
158
+ "Saving to: ‘run_speech_recognition_ctc.py’\n",
159
+ "\n",
160
+ "run_speech_recognit 100%[===================>] 29.65K --.-KB/s in 0.001s \n",
161
+ "\n",
162
+ "2022-01-31 16:15:27 (57.7 MB/s) - ‘run_speech_recognition_ctc.py’ saved [30360/30360]\n",
163
+ "\n"
164
+ ]
165
+ }
166
+ ],
167
+ "source": [
168
+ "!wget -O run_speech_recognition_ctc.py https://raw.githubusercontent.com/huggingface/transformers/master/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py"
169
+ ]
170
+ },
171
+ {
172
+ "cell_type": "code",
173
+ "execution_count": 3,
174
+ "metadata": {
175
+ "colab": {
176
+ "base_uri": "https://localhost:8080/"
177
+ },
178
+ "id": "Mz4bubhxxsad",
179
+ "outputId": "23398525-cc19-43c2-9fec-497e06214f29"
180
+ },
181
+ "outputs": [
182
+ {
183
+ "name": "stdout",
184
+ "output_type": "stream",
185
+ "text": [
186
+ "01/31/2022 16:15:35 - WARNING - __main__ - Process rank: -1, device: cuda:0, n_gpu: 1distributed training: False, 16-bits training: True\n",
187
+ "01/31/2022 16:15:35 - INFO - __main__ - Training/evaluation parameters TrainingArguments(\n",
188
+ "_n_gpu=1,\n",
189
+ "adafactor=False,\n",
190
+ "adam_beta1=0.9,\n",
191
+ "adam_beta2=0.999,\n",
192
+ "adam_epsilon=1e-08,\n",
193
+ "bf16=False,\n",
194
+ "bf16_full_eval=False,\n",
195
+ "dataloader_drop_last=False,\n",
196
+ "dataloader_num_workers=0,\n",
197
+ "dataloader_pin_memory=True,\n",
198
+ "ddp_bucket_cap_mb=None,\n",
199
+ "ddp_find_unused_parameters=None,\n",
200
+ "debug=[],\n",
201
+ "deepspeed=None,\n",
202
+ "disable_tqdm=False,\n",
203
+ "do_eval=True,\n",
204
+ "do_predict=False,\n",
205
+ "do_train=True,\n",
206
+ "eval_accumulation_steps=None,\n",
207
+ "eval_steps=500,\n",
208
+ "evaluation_strategy=IntervalStrategy.STEPS,\n",
209
+ "fp16=True,\n",
210
+ "fp16_backend=auto,\n",
211
+ "fp16_full_eval=False,\n",
212
+ "fp16_opt_level=O1,\n",
213
+ "gradient_accumulation_steps=1,\n",
214
+ "gradient_checkpointing=True,\n",
215
+ "greater_is_better=None,\n",
216
+ "group_by_length=True,\n",
217
+ "half_precision_backend=auto,\n",
218
+ "hub_model_id=None,\n",
219
+ "hub_strategy=HubStrategy.EVERY_SAVE,\n",
220
+ "hub_token=<HUB_TOKEN>,\n",
221
+ "ignore_data_skip=False,\n",
222
+ "label_names=None,\n",
223
+ "label_smoothing_factor=0.0,\n",
224
+ "learning_rate=0.0003,\n",
225
+ "length_column_name=input_length,\n",
226
+ "load_best_model_at_end=False,\n",
227
+ "local_rank=-1,\n",
228
+ "log_level=-1,\n",
229
+ "log_level_replica=-1,\n",
230
+ "log_on_each_node=True,\n",
231
+ "logging_dir=./runs/Jan31_16-15-35_job-6a6be32c-c82d-4385-805b-1f7606124d5b,\n",
232
+ "logging_first_step=False,\n",
233
+ "logging_nan_inf_filter=True,\n",
234
+ "logging_steps=500,\n",
235
+ "logging_strategy=IntervalStrategy.STEPS,\n",
236
+ "lr_scheduler_type=SchedulerType.LINEAR,\n",
237
+ "max_grad_norm=1.0,\n",
238
+ "max_steps=10,\n",
239
+ "metric_for_best_model=None,\n",
240
+ "mp_parameters=,\n",
241
+ "no_cuda=False,\n",
242
+ "num_train_epochs=3.0,\n",
243
+ "optim=OptimizerNames.ADAMW_HF,\n",
244
+ "output_dir=./,\n",
245
+ "overwrite_output_dir=True,\n",
246
+ "past_index=-1,\n",
247
+ "per_device_eval_batch_size=8,\n",
248
+ "per_device_train_batch_size=2,\n",
249
+ "prediction_loss_only=False,\n",
250
+ "push_to_hub=False,\n",
251
+ "push_to_hub_model_id=None,\n",
252
+ "push_to_hub_organization=None,\n",
253
+ "push_to_hub_token=<PUSH_TO_HUB_TOKEN>,\n",
254
+ "remove_unused_columns=True,\n",
255
+ "report_to=[],\n",
256
+ "resume_from_checkpoint=None,\n",
257
+ "run_name=./,\n",
258
+ "save_on_each_node=False,\n",
259
+ "save_steps=5,\n",
260
+ "save_strategy=IntervalStrategy.STEPS,\n",
261
+ "save_total_limit=1,\n",
262
+ "seed=42,\n",
263
+ "sharded_ddp=[],\n",
264
+ "skip_memory_metrics=True,\n",
265
+ "tf32=None,\n",
266
+ "tpu_metrics_debug=False,\n",
267
+ "tpu_num_cores=None,\n",
268
+ "use_legacy_prediction_loop=False,\n",
269
+ "warmup_ratio=0.0,\n",
270
+ "warmup_steps=0,\n",
271
+ "weight_decay=0.0,\n",
272
+ "xpu_backend=None,\n",
273
+ ")\n",
274
+ "Downloading: 23.3kB [00:00, 23.5MB/s] \n",
275
+ "Downloading: 168kB [00:00, 105MB/s] \n",
276
+ "Downloading and preparing dataset common_voice/ab (download: 39.14 MiB, generated: 40.14 MiB, post-processed: Unknown size, total: 79.28 MiB) to /workspace/.cache/huggingface/datasets/common_voice/ab/6.1.0/5693bfc0feeade582a78c2fb250bc88f52bd86f0a7f1bb22bfee67e715de30fd...\n",
277
+ "Downloading: 100%|█████████████████��███████| 41.0M/41.0M [00:03<00:00, 11.4MB/s]\n",
278
+ "Dataset common_voice downloaded and prepared to /workspace/.cache/huggingface/datasets/common_voice/ab/6.1.0/5693bfc0feeade582a78c2fb250bc88f52bd86f0a7f1bb22bfee67e715de30fd. Subsequent calls will reuse this data.\n",
279
+ "01/31/2022 16:15:43 - WARNING - datasets.builder - Reusing dataset common_voice (/workspace/.cache/huggingface/datasets/common_voice/ab/6.1.0/5693bfc0feeade582a78c2fb250bc88f52bd86f0a7f1bb22bfee67e715de30fd)\n",
280
+ "remove special characters from datasets: 22ex [00:00, 6184.63ex/s]\n",
281
+ "remove special characters from datasets: 9ex [00:00, 5925.09ex/s]\n",
282
+ "https://huggingface.co/hf-test/xls-r-dummy/resolve/main/config.json not found in cache or force_download set to True, downloading to /workspace/.cache/huggingface/transformers/tmpj1jsvojt\n",
283
+ "Downloading: 100%|█████████████████████████| 1.95k/1.95k [00:00<00:00, 2.32MB/s]\n",
284
+ "storing https://huggingface.co/hf-test/xls-r-dummy/resolve/main/config.json in cache at /workspace/.cache/huggingface/transformers/8157526a5096028eb61c63d228d882e5437edef5cb8b1a033ae35bf6249d1568.80b921aeb31bf1fa045a15aafa0e6f7e2ac68d338c1d83a3c76c99e260b22a62\n",
285
+ "creating metadata file for /workspace/.cache/huggingface/transformers/8157526a5096028eb61c63d228d882e5437edef5cb8b1a033ae35bf6249d1568.80b921aeb31bf1fa045a15aafa0e6f7e2ac68d338c1d83a3c76c99e260b22a62\n",
286
+ "loading configuration file https://huggingface.co/hf-test/xls-r-dummy/resolve/main/config.json from cache at /workspace/.cache/huggingface/transformers/8157526a5096028eb61c63d228d882e5437edef5cb8b1a033ae35bf6249d1568.80b921aeb31bf1fa045a15aafa0e6f7e2ac68d338c1d83a3c76c99e260b22a62\n",
287
+ "Model config Wav2Vec2Config {\n",
288
+ " \"_name_or_path\": \"hf-test/xls-r-dummy\",\n",
289
+ " \"activation_dropout\": 0.1,\n",
290
+ " \"adapter_kernel_size\": 3,\n",
291
+ " \"adapter_stride\": 2,\n",
292
+ " \"add_adapter\": false,\n",
293
+ " \"apply_spec_augment\": true,\n",
294
+ " \"architectures\": [\n",
295
+ " \"Wav2Vec2Model\"\n",
296
+ " ],\n",
297
+ " \"attention_dropout\": 0.1,\n",
298
+ " \"bos_token_id\": 1,\n",
299
+ " \"classifier_proj_size\": 256,\n",
300
+ " \"codevector_dim\": 256,\n",
301
+ " \"contrastive_logits_temperature\": 0.1,\n",
302
+ " \"conv_bias\": false,\n",
303
+ " \"conv_dim\": [\n",
304
+ " 32,\n",
305
+ " 32,\n",
306
+ " 32\n",
307
+ " ],\n",
308
+ " \"conv_kernel\": [\n",
309
+ " 8,\n",
310
+ " 8,\n",
311
+ " 8\n",
312
+ " ],\n",
313
+ " \"conv_stride\": [\n",
314
+ " 4,\n",
315
+ " 4,\n",
316
+ " 4\n",
317
+ " ],\n",
318
+ " \"ctc_loss_reduction\": \"sum\",\n",
319
+ " \"ctc_zero_infinity\": false,\n",
320
+ " \"diversity_loss_weight\": 0.1,\n",
321
+ " \"do_stable_layer_norm\": true,\n",
322
+ " \"eos_token_id\": 2,\n",
323
+ " \"feat_extract_activation\": \"gelu\",\n",
324
+ " \"feat_extract_dropout\": 0.0,\n",
325
+ " \"feat_extract_norm\": \"layer\",\n",
326
+ " \"feat_proj_dropout\": 0.1,\n",
327
+ " \"feat_quantizer_dropout\": 0.0,\n",
328
+ " \"final_dropout\": 0.1,\n",
329
+ " \"gradient_checkpointing\": false,\n",
330
+ " \"hidden_act\": \"gelu\",\n",
331
+ " \"hidden_dropout\": 0.1,\n",
332
+ " \"hidden_dropout_prob\": 0.1,\n",
333
+ " \"hidden_size\": 16,\n",
334
+ " \"initializer_range\": 0.02,\n",
335
+ " \"intermediate_size\": 20,\n",
336
+ " \"layer_norm_eps\": 1e-05,\n",
337
+ " \"layerdrop\": 0.1,\n",
338
+ " \"mask_feature_length\": 10,\n",
339
+ " \"mask_feature_min_masks\": 0,\n",
340
+ " \"mask_feature_prob\": 0.0,\n",
341
+ " \"mask_time_length\": 10,\n",
342
+ " \"mask_time_min_masks\": 2,\n",
343
+ " \"mask_time_prob\": 0.05,\n",
344
+ " \"model_type\": \"wav2vec2\",\n",
345
+ " \"num_adapter_layers\": 3,\n",
346
+ " \"num_attention_heads\": 2,\n",
347
+ " \"num_codevector_groups\": 2,\n",
348
+ " \"num_codevectors_per_group\": 320,\n",
349
+ " \"num_conv_pos_embedding_groups\": 2,\n",
350
+ " \"num_conv_pos_embeddings\": 16,\n",
351
+ " \"num_feat_extract_layers\": 3,\n",
352
+ " \"num_hidden_layers\": 4,\n",
353
+ " \"num_negatives\": 10,\n",
354
+ " \"output_hidden_size\": 16,\n",
355
+ " \"pad_token_id\": 0,\n",
356
+ " \"proj_codevector_dim\": 256,\n",
357
+ " \"tdnn_dilation\": [\n",
358
+ " 1,\n",
359
+ " 2,\n",
360
+ " 3,\n",
361
+ " 1,\n",
362
+ " 1\n",
363
+ " ],\n",
364
+ " \"tdnn_dim\": [\n",
365
+ " 512,\n",
366
+ " 512,\n",
367
+ " 512,\n",
368
+ " 512,\n",
369
+ " 1500\n",
370
+ " ],\n",
371
+ " \"tdnn_kernel\": [\n",
372
+ " 5,\n",
373
+ " 3,\n",
374
+ " 3,\n",
375
+ " 1,\n",
376
+ " 1\n",
377
+ " ],\n",
378
+ " \"torch_dtype\": \"float32\",\n",
379
+ " \"transformers_version\": \"4.17.0.dev0\",\n",
380
+ " \"use_weighted_layer_sum\": false,\n",
381
+ " \"vocab_size\": 32,\n",
382
+ " \"xvector_output_dim\": 512\n",
383
+ "}\n",
384
+ "\n",
385
+ "100%|████████████████████████████████████████████| 1/1 [00:00<00:00, 478.47ba/s]\n",
386
+ "100%|████████████████████████████████████████████| 1/1 [00:00<00:00, 811.12ba/s]\n",
387
+ "Didn't find file ./tokenizer_config.json. We won't load it.\n",
388
+ "Didn't find file ./added_tokens.json. We won't load it.\n",
389
+ "Didn't find file ./special_tokens_map.json. We won't load it.\n",
390
+ "Didn't find file ./tokenizer.json. We won't load it.\n",
391
+ "loading file ./vocab.json\n",
392
+ "loading file None\n",
393
+ "loading file None\n",
394
+ "loading file None\n",
395
+ "loading file None\n",
396
+ "file ./config.json not found\n",
397
+ "Adding <s> to the vocabulary\n",
398
+ "Adding </s> to the vocabulary\n",
399
+ "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n",
400
+ "loading configuration file https://huggingface.co/hf-test/xls-r-dummy/resolve/main/config.json from cache at /workspace/.cache/huggingface/transformers/8157526a5096028eb61c63d228d882e5437edef5cb8b1a033ae35bf6249d1568.80b921aeb31bf1fa045a15aafa0e6f7e2ac68d338c1d83a3c76c99e260b22a62\n",
401
+ "Model config Wav2Vec2Config {\n",
402
+ " \"_name_or_path\": \"hf-test/xls-r-dummy\",\n",
403
+ " \"activation_dropout\": 0.1,\n",
404
+ " \"adapter_kernel_size\": 3,\n",
405
+ " \"adapter_stride\": 2,\n",
406
+ " \"add_adapter\": false,\n",
407
+ " \"apply_spec_augment\": true,\n",
408
+ " \"architectures\": [\n",
409
+ " \"Wav2Vec2Model\"\n",
410
+ " ],\n",
411
+ " \"attention_dropout\": 0.1,\n",
412
+ " \"bos_token_id\": 1,\n",
413
+ " \"classifier_proj_size\": 256,\n",
414
+ " \"codevector_dim\": 256,\n",
415
+ " \"contrastive_logits_temperature\": 0.1,\n",
416
+ " \"conv_bias\": false,\n",
417
+ " \"conv_dim\": [\n",
418
+ " 32,\n",
419
+ " 32,\n",
420
+ " 32\n",
421
+ " ],\n",
422
+ " \"conv_kernel\": [\n",
423
+ " 8,\n",
424
+ " 8,\n",
425
+ " 8\n",
426
+ " ],\n",
427
+ " \"conv_stride\": [\n",
428
+ " 4,\n",
429
+ " 4,\n",
430
+ " 4\n",
431
+ " ],\n",
432
+ " \"ctc_loss_reduction\": \"sum\",\n",
433
+ " \"ctc_zero_infinity\": false,\n",
434
+ " \"diversity_loss_weight\": 0.1,\n",
435
+ " \"do_stable_layer_norm\": true,\n",
436
+ " \"eos_token_id\": 2,\n",
437
+ " \"feat_extract_activation\": \"gelu\",\n",
438
+ " \"feat_extract_dropout\": 0.0,\n",
439
+ " \"feat_extract_norm\": \"layer\",\n",
440
+ " \"feat_proj_dropout\": 0.1,\n",
441
+ " \"feat_quantizer_dropout\": 0.0,\n",
442
+ " \"final_dropout\": 0.1,\n",
443
+ " \"gradient_checkpointing\": false,\n",
444
+ " \"hidden_act\": \"gelu\",\n",
445
+ " \"hidden_dropout\": 0.1,\n",
446
+ " \"hidden_dropout_prob\": 0.1,\n",
447
+ " \"hidden_size\": 16,\n",
448
+ " \"initializer_range\": 0.02,\n",
449
+ " \"intermediate_size\": 20,\n",
450
+ " \"layer_norm_eps\": 1e-05,\n",
451
+ " \"layerdrop\": 0.1,\n",
452
+ " \"mask_feature_length\": 10,\n",
453
+ " \"mask_feature_min_masks\": 0,\n",
454
+ " \"mask_feature_prob\": 0.0,\n",
455
+ " \"mask_time_length\": 10,\n",
456
+ " \"mask_time_min_masks\": 2,\n",
457
+ " \"mask_time_prob\": 0.05,\n",
458
+ " \"model_type\": \"wav2vec2\",\n",
459
+ " \"num_adapter_layers\": 3,\n",
460
+ " \"num_attention_heads\": 2,\n",
461
+ " \"num_codevector_groups\": 2,\n",
462
+ " \"num_codevectors_per_group\": 320,\n",
463
+ " \"num_conv_pos_embedding_groups\": 2,\n",
464
+ " \"num_conv_pos_embeddings\": 16,\n",
465
+ " \"num_feat_extract_layers\": 3,\n",
466
+ " \"num_hidden_layers\": 4,\n",
467
+ " \"num_negatives\": 10,\n",
468
+ " \"output_hidden_size\": 16,\n",
469
+ " \"pad_token_id\": 0,\n",
470
+ " \"proj_codevector_dim\": 256,\n",
471
+ " \"tdnn_dilation\": [\n",
472
+ " 1,\n",
473
+ " 2,\n",
474
+ " 3,\n",
475
+ " 1,\n",
476
+ " 1\n",
477
+ " ],\n",
478
+ " \"tdnn_dim\": [\n",
479
+ " 512,\n",
480
+ " 512,\n",
481
+ " 512,\n",
482
+ " 512,\n",
483
+ " 1500\n",
484
+ " ],\n",
485
+ " \"tdnn_kernel\": [\n",
486
+ " 5,\n",
487
+ " 3,\n",
488
+ " 3,\n",
489
+ " 1,\n",
490
+ " 1\n",
491
+ " ],\n",
492
+ " \"torch_dtype\": \"float32\",\n",
493
+ " \"transformers_version\": \"4.17.0.dev0\",\n",
494
+ " \"use_weighted_layer_sum\": false,\n",
495
+ " \"vocab_size\": 32,\n",
496
+ " \"xvector_output_dim\": 512\n",
497
+ "}\n",
498
+ "\n",
499
+ "https://huggingface.co/hf-test/xls-r-dummy/resolve/main/preprocessor_config.json not found in cache or force_download set to True, downloading to /workspace/.cache/huggingface/transformers/tmp65bus7d7\n",
500
+ "Downloading: 100%|██████████████████████████████| 243/243 [00:00<00:00, 295kB/s]\n",
501
+ "storing https://huggingface.co/hf-test/xls-r-dummy/resolve/main/preprocessor_config.json in cache at /workspace/.cache/huggingface/transformers/0ba9471c5a13055b5740bbac451b95c783dcaead5aacc5d0175959022489c3aa.bd1cf6fc7017d09efe9b164cbc7b32f9bbc3b3bcc243032c6f8e87573bde4292\n",
502
+ "creating metadata file for /workspace/.cache/huggingface/transformers/0ba9471c5a13055b5740bbac451b95c783dcaead5aacc5d0175959022489c3aa.bd1cf6fc7017d09efe9b164cbc7b32f9bbc3b3bcc243032c6f8e87573bde4292\n",
503
+ "loading feature extractor configuration file https://huggingface.co/hf-test/xls-r-dummy/resolve/main/preprocessor_config.json from cache at /workspace/.cache/huggingface/transformers/0ba9471c5a13055b5740bbac451b95c783dcaead5aacc5d0175959022489c3aa.bd1cf6fc7017d09efe9b164cbc7b32f9bbc3b3bcc243032c6f8e87573bde4292\n",
504
+ "Feature extractor Wav2Vec2FeatureExtractor {\n",
505
+ " \"do_normalize\": true,\n",
506
+ " \"feature_extractor_type\": \"Wav2Vec2FeatureExtractor\",\n",
507
+ " \"feature_size\": 1,\n",
508
+ " \"padding_side\": \"right\",\n",
509
+ " \"padding_value\": 0.0,\n",
510
+ " \"return_attention_mask\": false,\n",
511
+ " \"sampling_rate\": 16000\n",
512
+ "}\n",
513
+ "\n",
514
+ "https://huggingface.co/hf-test/xls-r-dummy/resolve/main/pytorch_model.bin not found in cache or force_download set to True, downloading to /workspace/.cache/huggingface/transformers/tmpknzbltu6\n",
515
+ "Downloading: 100%|████████████████████████████| 134k/134k [00:00<00:00, 512kB/s]\n",
516
+ "storing https://huggingface.co/hf-test/xls-r-dummy/resolve/main/pytorch_model.bin in cache at /workspace/.cache/huggingface/transformers/d374ffdefd19b7dca1d007484e8a16189d261a626cc06a3481bb034d23fe194a.4dc5ab5d8c52b8612a63c422a98f6d3de7e0bbf1469c52e89e028b4ec90e4b43\n",
517
+ "creating metadata file for /workspace/.cache/huggingface/transformers/d374ffdefd19b7dca1d007484e8a16189d261a626cc06a3481bb034d23fe194a.4dc5ab5d8c52b8612a63c422a98f6d3de7e0bbf1469c52e89e028b4ec90e4b43\n",
518
+ "loading weights file https://huggingface.co/hf-test/xls-r-dummy/resolve/main/pytorch_model.bin from cache at /workspace/.cache/huggingface/transformers/d374ffdefd19b7dca1d007484e8a16189d261a626cc06a3481bb034d23fe194a.4dc5ab5d8c52b8612a63c422a98f6d3de7e0bbf1469c52e89e028b4ec90e4b43\n",
519
+ "All model checkpoint weights were used when initializing Wav2Vec2ForCTC.\n",
520
+ "\n",
521
+ "Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at hf-test/xls-r-dummy and are newly initialized: ['lm_head.bias', 'lm_head.weight']\n",
522
+ "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n",
523
+ "preprocess datasets: 22ex [00:00, 70.03ex/s]\n",
524
+ "preprocess datasets: 9ex [00:00, 100.33ex/s]\n",
525
+ "100%|████████████████████████████████████████████| 1/1 [00:00<00:00, 837.02ba/s]\n",
526
+ "100%|███████████████████████████████████████████| 1/1 [00:00<00:00, 1258.42ba/s]\n",
527
+ "Downloading: 4.48kB [00:00, 5.81MB/s] \n",
528
+ "Configuration saved in ./preprocessor_config.json\n",
529
+ "tokenizer config file saved in ./tokenizer_config.json\n",
530
+ "Special tokens file saved in ./special_tokens_map.json\n",
531
+ "added tokens file saved in ./added_tokens.json\n",
532
+ "Configuration saved in ./config.json\n",
533
+ "loading feature extractor configuration file ./preprocessor_config.json\n",
534
+ "loading configuration file ./config.json\n",
535
+ "Model config Wav2Vec2Config {\n",
536
+ " \"_name_or_path\": \"./\",\n",
537
+ " \"activation_dropout\": 0.0,\n",
538
+ " \"adapter_kernel_size\": 3,\n",
539
+ " \"adapter_stride\": 2,\n",
540
+ " \"add_adapter\": false,\n",
541
+ " \"apply_spec_augment\": true,\n",
542
+ " \"architectures\": [\n",
543
+ " \"Wav2Vec2Model\"\n",
544
+ " ],\n",
545
+ " \"attention_dropout\": 0.0,\n",
546
+ " \"bos_token_id\": 1,\n",
547
+ " \"classifier_proj_size\": 256,\n",
548
+ " \"codevector_dim\": 256,\n",
549
+ " \"contrastive_logits_temperature\": 0.1,\n",
550
+ " \"conv_bias\": false,\n",
551
+ " \"conv_dim\": [\n",
552
+ " 32,\n",
553
+ " 32,\n",
554
+ " 32\n",
555
+ " ],\n",
556
+ " \"conv_kernel\": [\n",
557
+ " 8,\n",
558
+ " 8,\n",
559
+ " 8\n",
560
+ " ],\n",
561
+ " \"conv_stride\": [\n",
562
+ " 4,\n",
563
+ " 4,\n",
564
+ " 4\n",
565
+ " ],\n",
566
+ " \"ctc_loss_reduction\": \"mean\",\n",
567
+ " \"ctc_zero_infinity\": false,\n",
568
+ " \"diversity_loss_weight\": 0.1,\n",
569
+ " \"do_stable_layer_norm\": true,\n",
570
+ " \"eos_token_id\": 2,\n",
571
+ " \"feat_extract_activation\": \"gelu\",\n",
572
+ " \"feat_extract_dropout\": 0.0,\n",
573
+ " \"feat_extract_norm\": \"layer\",\n",
574
+ " \"feat_proj_dropout\": 0.0,\n",
575
+ " \"feat_quantizer_dropout\": 0.0,\n",
576
+ " \"final_dropout\": 0.0,\n",
577
+ " \"hidden_act\": \"gelu\",\n",
578
+ " \"hidden_dropout\": 0.0,\n",
579
+ " \"hidden_dropout_prob\": 0.1,\n",
580
+ " \"hidden_size\": 16,\n",
581
+ " \"initializer_range\": 0.02,\n",
582
+ " \"intermediate_size\": 20,\n",
583
+ " \"layer_norm_eps\": 1e-05,\n",
584
+ " \"layerdrop\": 0.0,\n",
585
+ " \"mask_feature_length\": 10,\n",
586
+ " \"mask_feature_min_masks\": 0,\n",
587
+ " \"mask_feature_prob\": 0.0,\n",
588
+ " \"mask_time_length\": 10,\n",
589
+ " \"mask_time_min_masks\": 2,\n",
590
+ " \"mask_time_prob\": 0.05,\n",
591
+ " \"model_type\": \"wav2vec2\",\n",
592
+ " \"num_adapter_layers\": 3,\n",
593
+ " \"num_attention_heads\": 2,\n",
594
+ " \"num_codevector_groups\": 2,\n",
595
+ " \"num_codevectors_per_group\": 320,\n",
596
+ " \"num_conv_pos_embedding_groups\": 2,\n",
597
+ " \"num_conv_pos_embeddings\": 16,\n",
598
+ " \"num_feat_extract_layers\": 3,\n",
599
+ " \"num_hidden_layers\": 4,\n",
600
+ " \"num_negatives\": 10,\n",
601
+ " \"output_hidden_size\": 16,\n",
602
+ " \"pad_token_id\": 45,\n",
603
+ " \"proj_codevector_dim\": 256,\n",
604
+ " \"tdnn_dilation\": [\n",
605
+ " 1,\n",
606
+ " 2,\n",
607
+ " 3,\n",
608
+ " 1,\n",
609
+ " 1\n",
610
+ " ],\n",
611
+ " \"tdnn_dim\": [\n",
612
+ " 512,\n",
613
+ " 512,\n",
614
+ " 512,\n",
615
+ " 512,\n",
616
+ " 1500\n",
617
+ " ],\n",
618
+ " \"tdnn_kernel\": [\n",
619
+ " 5,\n",
620
+ " 3,\n",
621
+ " 3,\n",
622
+ " 1,\n",
623
+ " 1\n",
624
+ " ],\n",
625
+ " \"torch_dtype\": \"float32\",\n",
626
+ " \"transformers_version\": \"4.17.0.dev0\",\n",
627
+ " \"use_weighted_layer_sum\": false,\n",
628
+ " \"vocab_size\": 48,\n",
629
+ " \"xvector_output_dim\": 512\n",
630
+ "}\n",
631
+ "\n",
632
+ "loading feature extractor configuration file ./preprocessor_config.json\n",
633
+ "Feature extractor Wav2Vec2FeatureExtractor {\n",
634
+ " \"do_normalize\": true,\n",
635
+ " \"feature_extractor_type\": \"Wav2Vec2FeatureExtractor\",\n",
636
+ " \"feature_size\": 1,\n",
637
+ " \"padding_side\": \"right\",\n",
638
+ " \"padding_value\": 0.0,\n",
639
+ " \"return_attention_mask\": false,\n",
640
+ " \"sampling_rate\": 16000\n",
641
+ "}\n",
642
+ "\n",
643
+ "Didn't find file ./tokenizer.json. We won't load it.\n",
644
+ "loading file ./vocab.json\n",
645
+ "loading file ./tokenizer_config.json\n",
646
+ "loading file ./added_tokens.json\n",
647
+ "loading file ./special_tokens_map.json\n",
648
+ "loading file None\n",
649
+ "Adding <s> to the vocabulary\n",
650
+ "Adding </s> to the vocabulary\n",
651
+ "max_steps is given, it will override any value given in num_train_epochs\n",
652
+ "Using amp half precision backend\n",
653
+ "The following columns in the training set don't have a corresponding argument in `Wav2Vec2ForCTC.forward` and have been ignored: input_length.\n",
654
+ "/opt/conda/lib/python3.8/site-packages/transformers/optimization.py:306: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use thePyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
655
+ " warnings.warn(\n",
656
+ "***** Running training *****\n",
657
+ " Num examples = 22\n",
658
+ " Num Epochs = 1\n",
659
+ " Instantaneous batch size per device = 2\n",
660
+ " Total train batch size (w. parallel, distributed & accumulation) = 2\n",
661
+ " Gradient Accumulation steps = 1\n",
662
+ " Total optimization steps = 10\n",
663
+ " 50%|██████████████████████ | 5/10 [00:00<00:00, 8.80it/s]Saving model checkpoint to ./checkpoint-5\n",
664
+ "Configuration saved in ./checkpoint-5/config.json\n",
665
+ "Model weights saved in ./checkpoint-5/pytorch_model.bin\n",
666
+ "Configuration saved in ./checkpoint-5/preprocessor_config.json\n",
667
+ " 90%|███████████████████████████████████████▌ | 9/10 [00:00<00:00, 9.41it/s]Saving model checkpoint to ./checkpoint-10\n",
668
+ "Configuration saved in ./checkpoint-10/config.json\n",
669
+ "Model weights saved in ./checkpoint-10/pytorch_model.bin\n",
670
+ "Configuration saved in ./checkpoint-10/preprocessor_config.json\n",
671
+ "Deleting older checkpoint [checkpoint-5] due to args.save_total_limit\n",
672
+ "\n",
673
+ "\n",
674
+ "Training completed. Do not forget to share your model on huggingface.co/models =)\n",
675
+ "\n",
676
+ "\n",
677
+ "{'train_runtime': 1.0733, 'train_samples_per_second': 18.635, 'train_steps_per_second': 9.317, 'train_loss': 132.1487060546875, 'epoch': 0.91}\n",
678
+ "100%|███████████████████████████████████████████| 10/10 [00:01<00:00, 9.32it/s]\n",
679
+ "Saving model checkpoint to ./\n",
680
+ "Configuration saved in ./config.json\n",
681
+ "Model weights saved in ./pytorch_model.bin\n",
682
+ "Configuration saved in ./preprocessor_config.json\n",
683
+ "***** train metrics *****\n",
684
+ " epoch = 0.91\n",
685
+ " train_loss = 132.1487\n",
686
+ " train_runtime = 0:00:01.07\n",
687
+ " train_samples = 22\n",
688
+ " train_samples_per_second = 18.635\n",
689
+ " train_steps_per_second = 9.317\n",
690
+ "01/31/2022 16:15:52 - INFO - __main__ - *** Evaluate ***\n",
691
+ "The following columns in the evaluation set don't have a corresponding argument in `Wav2Vec2ForCTC.forward` and have been ignored: input_length.\n",
692
+ "***** Running Evaluation *****\n",
693
+ " Num examples = 9\n",
694
+ " Batch size = 8\n",
695
+ "100%|█████████████████████████████████████████████| 2/2 [00:00<00:00, 35.19it/s]\n",
696
+ "***** eval metrics *****\n",
697
+ " epoch = 0.91\n",
698
+ " eval_loss = 128.2049\n",
699
+ " eval_runtime = 0:00:00.32\n",
700
+ " eval_samples = 9\n",
701
+ " eval_samples_per_second = 27.51\n",
702
+ " eval_steps_per_second = 6.113\n",
703
+ " eval_wer = 14.1429\n",
704
+ "Dropping the following result as it does not have all the necessary fields:\n",
705
+ "{'dataset': {'name': 'COMMON_VOICE - AB', 'type': 'common_voice', 'args': 'Config: ab, Training split: train+validation, Eval split: test'}}\n"
706
+ ]
707
+ }
708
+ ],
709
+ "source": [
710
+ "!python run_speech_recognition_ctc.py \\\n",
711
+ "\t--dataset_name=\"common_voice\" \\\n",
712
+ "\t--model_name_or_path=\"hf-test/xls-r-dummy\" \\\n",
713
+ "\t--dataset_config_name=\"ab\" \\\n",
714
+ "\t--output_dir=\"./\" \\\n",
715
+ "\t--overwrite_output_dir \\\n",
716
+ "\t--max_steps=\"10\" \\\n",
717
+ "\t--per_device_train_batch_size=\"2\" \\\n",
718
+ "\t--learning_rate=\"3e-4\" \\\n",
719
+ "\t--save_total_limit=\"1\" \\\n",
720
+ "\t--evaluation_strategy=\"steps\" \\\n",
721
+ "\t--text_column_name=\"sentence\" \\\n",
722
+ "\t--length_column_name=\"input_length\" \\\n",
723
+ "\t--save_steps=\"5\" \\\n",
724
+ "\t--layerdrop=\"0.0\" \\\n",
725
+ "\t--freeze_feature_encoder \\\n",
726
+ "\t--gradient_checkpointing \\\n",
727
+ "\t--fp16 \\\n",
728
+ "\t--group_by_length \\\n",
729
+ "\t--do_train --do_eval"
730
+ ]
731
+ },
732
+ {
733
+ "cell_type": "code",
734
+ "execution_count": null,
735
+ "metadata": {},
736
+ "outputs": [],
737
+ "source": []
738
+ }
739
+ ],
740
+ "metadata": {
741
+ "accelerator": "GPU",
742
+ "colab": {
743
+ "authorship_tag": "ABX9TyM3OaMlm9YQtKpl28c8gBBd",
744
+ "include_colab_link": true,
745
+ "name": "DebugOVHTransformers.ipynb",
746
+ "provenance": []
747
+ },
748
+ "kernelspec": {
749
+ "display_name": "Python 3 (ipykernel)",
750
+ "language": "python",
751
+ "name": "python3"
752
+ },
753
+ "language_info": {
754
+ "codemirror_mode": {
755
+ "name": "ipython",
756
+ "version": 3
757
+ },
758
+ "file_extension": ".py",
759
+ "mimetype": "text/x-python",
760
+ "name": "python",
761
+ "nbconvert_exporter": "python",
762
+ "pygments_lexer": "ipython3",
763
+ "version": "3.8.8"
764
+ }
765
+ },
766
+ "nbformat": 4,
767
+ "nbformat_minor": 4
768
+ }
added_tokens.json ADDED
@@ -0,0 +1 @@
1
+ {"<s>": 52, "</s>": 53}
config.json ADDED
@@ -0,0 +1,96 @@
1
+ {
2
+ "_name_or_path": "hf-test/xls-r-dummy",
3
+ "activation_dropout": 0.0,
4
+ "adapter_kernel_size": 3,
5
+ "adapter_stride": 2,
6
+ "add_adapter": false,
7
+ "apply_spec_augment": true,
8
+ "architectures": [
9
+ "Wav2Vec2ForCTC"
10
+ ],
11
+ "attention_dropout": 0.0,
12
+ "bos_token_id": 1,
13
+ "classifier_proj_size": 256,
14
+ "codevector_dim": 256,
15
+ "contrastive_logits_temperature": 0.1,
16
+ "conv_bias": false,
17
+ "conv_dim": [
18
+ 32,
19
+ 32,
20
+ 32
21
+ ],
22
+ "conv_kernel": [
23
+ 8,
24
+ 8,
25
+ 8
26
+ ],
27
+ "conv_stride": [
28
+ 4,
29
+ 4,
30
+ 4
31
+ ],
32
+ "ctc_loss_reduction": "mean",
33
+ "ctc_zero_infinity": false,
34
+ "diversity_loss_weight": 0.1,
35
+ "do_stable_layer_norm": true,
36
+ "eos_token_id": 2,
37
+ "feat_extract_activation": "gelu",
38
+ "feat_extract_dropout": 0.0,
39
+ "feat_extract_norm": "layer",
40
+ "feat_proj_dropout": 0.0,
41
+ "feat_quantizer_dropout": 0.0,
42
+ "final_dropout": 0.0,
43
+ "hidden_act": "gelu",
44
+ "hidden_dropout": 0.0,
45
+ "hidden_dropout_prob": 0.1,
46
+ "hidden_size": 16,
47
+ "initializer_range": 0.02,
48
+ "intermediate_size": 20,
49
+ "layer_norm_eps": 1e-05,
50
+ "layerdrop": 0.0,
51
+ "mask_feature_length": 10,
52
+ "mask_feature_min_masks": 0,
53
+ "mask_feature_prob": 0.0,
54
+ "mask_time_length": 10,
55
+ "mask_time_min_masks": 2,
56
+ "mask_time_prob": 0.05,
57
+ "model_type": "wav2vec2",
58
+ "num_adapter_layers": 3,
59
+ "num_attention_heads": 2,
60
+ "num_codevector_groups": 2,
61
+ "num_codevectors_per_group": 320,
62
+ "num_conv_pos_embedding_groups": 2,
63
+ "num_conv_pos_embeddings": 16,
64
+ "num_feat_extract_layers": 3,
65
+ "num_hidden_layers": 4,
66
+ "num_negatives": 10,
67
+ "output_hidden_size": 16,
68
+ "pad_token_id": 51,
69
+ "proj_codevector_dim": 256,
70
+ "tdnn_dilation": [
71
+ 1,
72
+ 2,
73
+ 3,
74
+ 1,
75
+ 1
76
+ ],
77
+ "tdnn_dim": [
78
+ 512,
79
+ 512,
80
+ 512,
81
+ 512,
82
+ 1500
83
+ ],
84
+ "tdnn_kernel": [
85
+ 5,
86
+ 3,
87
+ 3,
88
+ 1,
89
+ 1
90
+ ],
91
+ "torch_dtype": "float32",
92
+ "transformers_version": "4.17.0.dev0",
93
+ "use_weighted_layer_sum": false,
94
+ "vocab_size": 54,
95
+ "xvector_output_dim": 512
96
+ }
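The config above describes the tiny dummy wav2vec2 setup used for the quick training run (hidden_size 16, 4 hidden layers, vocab_size 54). A minimal sketch, assuming transformers is installed and the file sits in the working directory, of rebuilding the CTC model from it:

from transformers import Wav2Vec2Config, Wav2Vec2ForCTC

config = Wav2Vec2Config.from_pretrained("./")      # reads the config.json above
model = Wav2Vec2ForCTC(config)                     # freshly (randomly) initialised dummy model
print(sum(p.numel() for p in model.parameters()))  # tiny parameter count, for debugging only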
debugger_ovh_transformers.ipynb ADDED
@@ -0,0 +1,494 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "# HuggingFace challenge - Debugger notebook\n",
8
+ "Run this notebook to verify your libraries versions, check GPU config and run a quick training"
9
+ ]
10
+ },
11
+ {
12
+ "cell_type": "code",
13
+ "execution_count": 1,
14
+ "metadata": {
15
+ "id": "T2utsYSKszvv"
16
+ },
17
+ "outputs": [],
18
+ "source": [
19
+ "import platform\n",
20
+ "import multiprocessing\n",
21
+ "\n",
22
+ "import torch\n",
23
+ "import transformers\n",
24
+ "import datasets\n",
25
+ "\n",
26
+ "import soundfile"
27
+ ]
28
+ },
29
+ {
30
+ "cell_type": "markdown",
31
+ "metadata": {},
32
+ "source": [
33
+ "## Print main infos"
34
+ ]
35
+ },
36
+ {
37
+ "cell_type": "markdown",
38
+ "metadata": {},
39
+ "source": [
40
+ "## Check your GPU informations (if any)\n",
41
+ "If you launched an AI Training job with GPU resources, they should be listed below (Tesla V100s 32GB).\n",
42
+ "Driver and CUDA version "
43
+ ]
44
+ },
45
+ {
46
+ "cell_type": "markdown",
47
+ "metadata": {
48
+ "id": "TorMtpwPv6RQ"
49
+ },
50
+ "source": [
51
+ "## Quick training run with a dummy model and data\n",
52
+ "more information on https://github.com/huggingface/transformers/tree/master/examples/pytorch/speech-recognition"
53
+ ]
54
+ },
55
+ {
56
+ "cell_type": "code",
57
+ "execution_count": 2,
58
+ "metadata": {
59
+ "colab": {
60
+ "base_uri": "https://localhost:8080/"
61
+ },
62
+ "id": "fevoJD15u4Ss",
63
+ "outputId": "5861d34e-745b-45ee-e780-ed363043e655"
64
+ },
65
+ "outputs": [
66
+ {
67
+ "name": "stdout",
68
+ "output_type": "stream",
69
+ "text": [
70
+ "--2022-01-31 17:09:10-- https://raw.githubusercontent.com/huggingface/transformers/master/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py\n",
71
+ "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.111.133, 185.199.108.133, 185.199.110.133, ...\n",
72
+ "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.111.133|:443... connected.\n",
73
+ "HTTP request sent, awaiting response... 200 OK\n",
74
+ "Length: 30360 (30K) [text/plain]\n",
75
+ "Saving to: ‘run_speech_recognition_ctc.py’\n",
76
+ "\n",
77
+ "run_speech_recognit 100%[===================>] 29.65K --.-KB/s in 0.001s \n",
78
+ "\n",
79
+ "2022-01-31 17:09:10 (55.6 MB/s) - ‘run_speech_recognition_ctc.py’ saved [30360/30360]\n",
80
+ "\n"
81
+ ]
82
+ }
83
+ ],
84
+ "source": [
85
+ "!wget -O run_speech_recognition_ctc.py https://raw.githubusercontent.com/huggingface/transformers/master/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py"
86
+ ]
87
+ },
88
+ {
89
+ "cell_type": "code",
90
+ "execution_count": null,
91
+ "metadata": {
92
+ "colab": {
93
+ "base_uri": "https://localhost:8080/"
94
+ },
95
+ "id": "Mz4bubhxxsad",
96
+ "outputId": "23398525-cc19-43c2-9fec-497e06214f29"
97
+ },
98
+ "outputs": [
99
+ {
100
+ "name": "stdout",
101
+ "output_type": "stream",
102
+ "text": [
103
+ "01/31/2022 17:10:15 - WARNING - __main__ - Process rank: -1, device: cuda:0, n_gpu: 1distributed training: False, 16-bits training: True\n",
104
+ "01/31/2022 17:10:15 - INFO - __main__ - Training/evaluation parameters TrainingArguments(\n",
105
+ "_n_gpu=1,\n",
106
+ "adafactor=False,\n",
107
+ "adam_beta1=0.9,\n",
108
+ "adam_beta2=0.999,\n",
109
+ "adam_epsilon=1e-08,\n",
110
+ "bf16=False,\n",
111
+ "bf16_full_eval=False,\n",
112
+ "dataloader_drop_last=False,\n",
113
+ "dataloader_num_workers=0,\n",
114
+ "dataloader_pin_memory=True,\n",
115
+ "ddp_bucket_cap_mb=None,\n",
116
+ "ddp_find_unused_parameters=None,\n",
117
+ "debug=[],\n",
118
+ "deepspeed=None,\n",
119
+ "disable_tqdm=False,\n",
120
+ "do_eval=True,\n",
121
+ "do_predict=False,\n",
122
+ "do_train=True,\n",
123
+ "eval_accumulation_steps=None,\n",
124
+ "eval_steps=500,\n",
125
+ "evaluation_strategy=IntervalStrategy.STEPS,\n",
126
+ "fp16=True,\n",
127
+ "fp16_backend=auto,\n",
128
+ "fp16_full_eval=False,\n",
129
+ "fp16_opt_level=O1,\n",
130
+ "gradient_accumulation_steps=1,\n",
131
+ "gradient_checkpointing=True,\n",
132
+ "greater_is_better=None,\n",
133
+ "group_by_length=True,\n",
134
+ "half_precision_backend=auto,\n",
135
+ "hub_model_id=None,\n",
136
+ "hub_strategy=HubStrategy.EVERY_SAVE,\n",
137
+ "hub_token=<HUB_TOKEN>,\n",
138
+ "ignore_data_skip=False,\n",
139
+ "label_names=None,\n",
140
+ "label_smoothing_factor=0.0,\n",
141
+ "learning_rate=0.0003,\n",
142
+ "length_column_name=input_length,\n",
143
+ "load_best_model_at_end=False,\n",
144
+ "local_rank=-1,\n",
145
+ "log_level=-1,\n",
146
+ "log_level_replica=-1,\n",
147
+ "log_on_each_node=True,\n",
148
+ "logging_dir=./runs/Jan31_17-10-15_job-6a6be32c-c82d-4385-805b-1f7606124d5b,\n",
149
+ "logging_first_step=False,\n",
150
+ "logging_nan_inf_filter=True,\n",
151
+ "logging_steps=500,\n",
152
+ "logging_strategy=IntervalStrategy.STEPS,\n",
153
+ "lr_scheduler_type=SchedulerType.LINEAR,\n",
154
+ "max_grad_norm=1.0,\n",
155
+ "max_steps=10,\n",
156
+ "metric_for_best_model=None,\n",
157
+ "mp_parameters=,\n",
158
+ "no_cuda=False,\n",
159
+ "num_train_epochs=3.0,\n",
160
+ "optim=OptimizerNames.ADAMW_HF,\n",
161
+ "output_dir=./,\n",
162
+ "overwrite_output_dir=True,\n",
163
+ "past_index=-1,\n",
164
+ "per_device_eval_batch_size=8,\n",
165
+ "per_device_train_batch_size=2,\n",
166
+ "prediction_loss_only=False,\n",
167
+ "push_to_hub=True,\n",
168
+ "push_to_hub_model_id=None,\n",
169
+ "push_to_hub_organization=None,\n",
170
+ "push_to_hub_token=<PUSH_TO_HUB_TOKEN>,\n",
171
+ "remove_unused_columns=True,\n",
172
+ "report_to=[],\n",
173
+ "resume_from_checkpoint=None,\n",
174
+ "run_name=./,\n",
175
+ "save_on_each_node=False,\n",
176
+ "save_steps=5,\n",
177
+ "save_strategy=IntervalStrategy.STEPS,\n",
178
+ "save_total_limit=1,\n",
179
+ "seed=42,\n",
180
+ "sharded_ddp=[],\n",
181
+ "skip_memory_metrics=True,\n",
182
+ "tf32=None,\n",
183
+ "tpu_metrics_debug=False,\n",
184
+ "tpu_num_cores=None,\n",
185
+ "use_legacy_prediction_loop=False,\n",
186
+ "warmup_ratio=0.0,\n",
187
+ "warmup_steps=0,\n",
188
+ "weight_decay=0.0,\n",
189
+ "xpu_backend=None,\n",
190
+ ")\n",
191
+ "Downloading: 100%|█████████████████████████| 10.1k/10.1k [00:00<00:00, 7.28MB/s]\n",
192
+ "Downloading: 100%|█████████████████████████| 2.98k/2.98k [00:00<00:00, 3.39MB/s]\n",
193
+ "Downloading: 100%|██████████████████████████| 53.1k/53.1k [00:00<00:00, 325kB/s]\n",
194
+ "Downloading and preparing dataset common_voice/ab to /workspace/.cache/huggingface/datasets/mozilla-foundation___common_voice/ab/8.0.0/b8bc4d453193c06a43269b46cd87f075c70f152ac963b7f28f7a2760c45ec3e8...\n",
195
+ "Downloading: 100%|█████████████████████████| 1.72G/1.72G [01:31<00:00, 18.8MB/s]\n",
196
+ "Dataset common_voice downloaded and prepared to /workspace/.cache/huggingface/datasets/mozilla-foundation___common_voice/ab/8.0.0/b8bc4d453193c06a43269b46cd87f075c70f152ac963b7f28f7a2760c45ec3e8. Subsequent calls will reuse this data.\n",
197
+ "01/31/2022 17:13:15 - WARNING - datasets.builder - Reusing dataset common_voice (/workspace/.cache/huggingface/datasets/mozilla-foundation___common_voice/ab/8.0.0/b8bc4d453193c06a43269b46cd87f075c70f152ac963b7f28f7a2760c45ec3e8)\n",
198
+ "remove special characters from datasets: 30002ex [00:05, 5673.36ex/s]\n",
199
+ "remove special characters from datasets: 9184ex [00:01, 5662.70ex/s]\n",
200
+ "loading configuration file https://huggingface.co/hf-test/xls-r-dummy/resolve/main/config.json from cache at /workspace/.cache/huggingface/transformers/8157526a5096028eb61c63d228d882e5437edef5cb8b1a033ae35bf6249d1568.80b921aeb31bf1fa045a15aafa0e6f7e2ac68d338c1d83a3c76c99e260b22a62\n",
201
+ "Model config Wav2Vec2Config {\n",
202
+ " \"_name_or_path\": \"hf-test/xls-r-dummy\",\n",
203
+ " \"activation_dropout\": 0.1,\n",
204
+ " \"adapter_kernel_size\": 3,\n",
205
+ " \"adapter_stride\": 2,\n",
206
+ " \"add_adapter\": false,\n",
207
+ " \"apply_spec_augment\": true,\n",
208
+ " \"architectures\": [\n",
209
+ " \"Wav2Vec2Model\"\n",
210
+ " ],\n",
211
+ " \"attention_dropout\": 0.1,\n",
212
+ " \"bos_token_id\": 1,\n",
213
+ " \"classifier_proj_size\": 256,\n",
214
+ " \"codevector_dim\": 256,\n",
215
+ " \"contrastive_logits_temperature\": 0.1,\n",
216
+ " \"conv_bias\": false,\n",
217
+ " \"conv_dim\": [\n",
218
+ " 32,\n",
219
+ " 32,\n",
220
+ " 32\n",
221
+ " ],\n",
222
+ " \"conv_kernel\": [\n",
223
+ " 8,\n",
224
+ " 8,\n",
225
+ " 8\n",
226
+ " ],\n",
227
+ " \"conv_stride\": [\n",
228
+ " 4,\n",
229
+ " 4,\n",
230
+ " 4\n",
231
+ " ],\n",
232
+ " \"ctc_loss_reduction\": \"sum\",\n",
233
+ " \"ctc_zero_infinity\": false,\n",
234
+ " \"diversity_loss_weight\": 0.1,\n",
235
+ " \"do_stable_layer_norm\": true,\n",
236
+ " \"eos_token_id\": 2,\n",
237
+ " \"feat_extract_activation\": \"gelu\",\n",
238
+ " \"feat_extract_dropout\": 0.0,\n",
239
+ " \"feat_extract_norm\": \"layer\",\n",
240
+ " \"feat_proj_dropout\": 0.1,\n",
241
+ " \"feat_quantizer_dropout\": 0.0,\n",
242
+ " \"final_dropout\": 0.1,\n",
243
+ " \"gradient_checkpointing\": false,\n",
244
+ " \"hidden_act\": \"gelu\",\n",
245
+ " \"hidden_dropout\": 0.1,\n",
246
+ " \"hidden_dropout_prob\": 0.1,\n",
247
+ " \"hidden_size\": 16,\n",
248
+ " \"initializer_range\": 0.02,\n",
249
+ " \"intermediate_size\": 20,\n",
250
+ " \"layer_norm_eps\": 1e-05,\n",
251
+ " \"layerdrop\": 0.1,\n",
252
+ " \"mask_feature_length\": 10,\n",
253
+ " \"mask_feature_min_masks\": 0,\n",
254
+ " \"mask_feature_prob\": 0.0,\n",
255
+ " \"mask_time_length\": 10,\n",
256
+ " \"mask_time_min_masks\": 2,\n",
257
+ " \"mask_time_prob\": 0.05,\n",
258
+ " \"model_type\": \"wav2vec2\",\n",
259
+ " \"num_adapter_layers\": 3,\n",
260
+ " \"num_attention_heads\": 2,\n",
261
+ " \"num_codevector_groups\": 2,\n",
262
+ " \"num_codevectors_per_group\": 320,\n",
263
+ " \"num_conv_pos_embedding_groups\": 2,\n",
264
+ " \"num_conv_pos_embeddings\": 16,\n",
265
+ " \"num_feat_extract_layers\": 3,\n",
266
+ " \"num_hidden_layers\": 4,\n",
267
+ " \"num_negatives\": 10,\n",
268
+ " \"output_hidden_size\": 16,\n",
269
+ " \"pad_token_id\": 0,\n",
270
+ " \"proj_codevector_dim\": 256,\n",
271
+ " \"tdnn_dilation\": [\n",
272
+ " 1,\n",
273
+ " 2,\n",
274
+ " 3,\n",
275
+ " 1,\n",
276
+ " 1\n",
277
+ " ],\n",
278
+ " \"tdnn_dim\": [\n",
279
+ " 512,\n",
280
+ " 512,\n",
281
+ " 512,\n",
282
+ " 512,\n",
283
+ " 1500\n",
284
+ " ],\n",
285
+ " \"tdnn_kernel\": [\n",
286
+ " 5,\n",
287
+ " 3,\n",
288
+ " 3,\n",
289
+ " 1,\n",
290
+ " 1\n",
291
+ " ],\n",
292
+ " \"torch_dtype\": \"float32\",\n",
293
+ " \"transformers_version\": \"4.17.0.dev0\",\n",
294
+ " \"use_weighted_layer_sum\": false,\n",
295
+ " \"vocab_size\": 32,\n",
296
+ " \"xvector_output_dim\": 512\n",
297
+ "}\n",
298
+ "\n",
299
+ "100%|█████████████████████████████████████████████| 1/1 [00:00<00:00, 1.03ba/s]\n",
300
+ "100%|█████████████████████████████████████████████| 1/1 [00:00<00:00, 4.09ba/s]\n",
301
+ "Didn't find file ./tokenizer_config.json. We won't load it.\n",
302
+ "Didn't find file ./added_tokens.json. We won't load it.\n",
303
+ "Didn't find file ./special_tokens_map.json. We won't load it.\n",
304
+ "Didn't find file ./tokenizer.json. We won't load it.\n",
305
+ "loading file ./vocab.json\n",
306
+ "loading file None\n",
307
+ "loading file None\n",
308
+ "loading file None\n",
309
+ "loading file None\n",
310
+ "file ./config.json not found\n",
311
+ "Adding <s> to the vocabulary\n",
312
+ "Adding </s> to the vocabulary\n",
313
+ "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\n",
314
+ "loading configuration file https://huggingface.co/hf-test/xls-r-dummy/resolve/main/config.json from cache at /workspace/.cache/huggingface/transformers/8157526a5096028eb61c63d228d882e5437edef5cb8b1a033ae35bf6249d1568.80b921aeb31bf1fa045a15aafa0e6f7e2ac68d338c1d83a3c76c99e260b22a62\n",
315
+ "Model config Wav2Vec2Config {\n",
316
+ " \"_name_or_path\": \"hf-test/xls-r-dummy\",\n",
317
+ " \"activation_dropout\": 0.1,\n",
318
+ " \"adapter_kernel_size\": 3,\n",
319
+ " \"adapter_stride\": 2,\n",
320
+ " \"add_adapter\": false,\n",
321
+ " \"apply_spec_augment\": true,\n",
322
+ " \"architectures\": [\n",
323
+ " \"Wav2Vec2Model\"\n",
324
+ " ],\n",
325
+ " \"attention_dropout\": 0.1,\n",
326
+ " \"bos_token_id\": 1,\n",
327
+ " \"classifier_proj_size\": 256,\n",
328
+ " \"codevector_dim\": 256,\n",
329
+ " \"contrastive_logits_temperature\": 0.1,\n",
330
+ " \"conv_bias\": false,\n",
331
+ " \"conv_dim\": [\n",
332
+ " 32,\n",
333
+ " 32,\n",
334
+ " 32\n",
335
+ " ],\n",
336
+ " \"conv_kernel\": [\n",
337
+ " 8,\n",
338
+ " 8,\n",
339
+ " 8\n",
340
+ " ],\n",
341
+ " \"conv_stride\": [\n",
342
+ " 4,\n",
343
+ " 4,\n",
344
+ " 4\n",
345
+ " ],\n",
346
+ " \"ctc_loss_reduction\": \"sum\",\n",
347
+ " \"ctc_zero_infinity\": false,\n",
348
+ " \"diversity_loss_weight\": 0.1,\n",
349
+ " \"do_stable_layer_norm\": true,\n",
350
+ " \"eos_token_id\": 2,\n",
351
+ " \"feat_extract_activation\": \"gelu\",\n",
352
+ " \"feat_extract_dropout\": 0.0,\n",
353
+ " \"feat_extract_norm\": \"layer\",\n",
354
+ " \"feat_proj_dropout\": 0.1,\n",
355
+ " \"feat_quantizer_dropout\": 0.0,\n",
356
+ " \"final_dropout\": 0.1,\n",
357
+ " \"gradient_checkpointing\": false,\n",
358
+ " \"hidden_act\": \"gelu\",\n",
359
+ " \"hidden_dropout\": 0.1,\n",
360
+ " \"hidden_dropout_prob\": 0.1,\n",
361
+ " \"hidden_size\": 16,\n",
362
+ " \"initializer_range\": 0.02,\n",
363
+ " \"intermediate_size\": 20,\n",
364
+ " \"layer_norm_eps\": 1e-05,\n",
365
+ " \"layerdrop\": 0.1,\n",
366
+ " \"mask_feature_length\": 10,\n",
367
+ " \"mask_feature_min_masks\": 0,\n",
368
+ " \"mask_feature_prob\": 0.0,\n",
369
+ " \"mask_time_length\": 10,\n",
370
+ " \"mask_time_min_masks\": 2,\n",
371
+ " \"mask_time_prob\": 0.05,\n",
372
+ " \"model_type\": \"wav2vec2\",\n",
373
+ " \"num_adapter_layers\": 3,\n",
374
+ " \"num_attention_heads\": 2,\n",
375
+ " \"num_codevector_groups\": 2,\n",
376
+ " \"num_codevectors_per_group\": 320,\n",
377
+ " \"num_conv_pos_embedding_groups\": 2,\n",
378
+ " \"num_conv_pos_embeddings\": 16,\n",
379
+ " \"num_feat_extract_layers\": 3,\n",
380
+ " \"num_hidden_layers\": 4,\n",
381
+ " \"num_negatives\": 10,\n",
382
+ " \"output_hidden_size\": 16,\n",
383
+ " \"pad_token_id\": 0,\n",
384
+ " \"proj_codevector_dim\": 256,\n",
385
+ " \"tdnn_dilation\": [\n",
386
+ " 1,\n",
387
+ " 2,\n",
388
+ " 3,\n",
389
+ " 1,\n",
390
+ " 1\n",
391
+ " ],\n",
392
+ " \"tdnn_dim\": [\n",
393
+ " 512,\n",
394
+ " 512,\n",
395
+ " 512,\n",
396
+ " 512,\n",
397
+ " 1500\n",
398
+ " ],\n",
399
+ " \"tdnn_kernel\": [\n",
400
+ " 5,\n",
401
+ " 3,\n",
402
+ " 3,\n",
403
+ " 1,\n",
404
+ " 1\n",
405
+ " ],\n",
406
+ " \"torch_dtype\": \"float32\",\n",
407
+ " \"transformers_version\": \"4.17.0.dev0\",\n",
408
+ " \"use_weighted_layer_sum\": false,\n",
409
+ " \"vocab_size\": 32,\n",
410
+ " \"xvector_output_dim\": 512\n",
411
+ "}\n",
412
+ "\n",
413
+ "loading feature extractor configuration file https://huggingface.co/hf-test/xls-r-dummy/resolve/main/preprocessor_config.json from cache at /workspace/.cache/huggingface/transformers/0ba9471c5a13055b5740bbac451b95c783dcaead5aacc5d0175959022489c3aa.bd1cf6fc7017d09efe9b164cbc7b32f9bbc3b3bcc243032c6f8e87573bde4292\n",
414
+ "Feature extractor Wav2Vec2FeatureExtractor {\n",
415
+ " \"do_normalize\": true,\n",
416
+ " \"feature_extractor_type\": \"Wav2Vec2FeatureExtractor\",\n",
417
+ " \"feature_size\": 1,\n",
418
+ " \"padding_side\": \"right\",\n",
419
+ " \"padding_value\": 0.0,\n",
420
+ " \"return_attention_mask\": false,\n",
421
+ " \"sampling_rate\": 16000\n",
422
+ "}\n",
423
+ "\n",
424
+ "loading weights file https://huggingface.co/hf-test/xls-r-dummy/resolve/main/pytorch_model.bin from cache at /workspace/.cache/huggingface/transformers/d374ffdefd19b7dca1d007484e8a16189d261a626cc06a3481bb034d23fe194a.4dc5ab5d8c52b8612a63c422a98f6d3de7e0bbf1469c52e89e028b4ec90e4b43\n",
425
+ "All model checkpoint weights were used when initializing Wav2Vec2ForCTC.\n",
426
+ "\n",
427
+ "Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at hf-test/xls-r-dummy and are newly initialized: ['lm_head.bias', 'lm_head.weight']\n",
428
+ "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n",
429
+ "preprocess datasets: 26930ex [03:18, 192.81ex/s]"
430
+ ]
431
+ }
432
+ ],
433
+ "source": [
434
+ "!python run_speech_recognition_ctc.py \\\n",
435
+ "\t--dataset_name=\"mozilla-foundation/common_voice_8_0\" \\\n",
436
+ "\t--model_name_or_path=\"hf-test/xls-r-dummy\" \\\n",
437
+ "\t--dataset_config_name=\"ab\" \\\n",
438
+ "\t--output_dir=\"./\" \\\n",
439
+ "\t--overwrite_output_dir \\\n",
440
+ "\t--max_steps=\"10\" \\\n",
441
+ "\t--per_device_train_batch_size=\"2\" \\\n",
442
+ "\t--learning_rate=\"3e-4\" \\\n",
443
+ "\t--save_total_limit=\"1\" \\\n",
444
+ "\t--evaluation_strategy=\"steps\" \\\n",
445
+ "\t--text_column_name=\"sentence\" \\\n",
446
+ "\t--length_column_name=\"input_length\" \\\n",
447
+ "\t--save_steps=\"5\" \\\n",
448
+ "\t--layerdrop=\"0.0\" \\\n",
449
+ "\t--freeze_feature_encoder \\\n",
450
+ "\t--gradient_checkpointing \\\n",
451
+ "\t--fp16 \\\n",
452
+ "\t--group_by_length \\\n",
453
+ "\t--push_to_hub \\\n",
454
+ "\t--use_auth_token \\\n",
455
+ "\t--do_train --do_eva"
456
+ ]
457
+ },
458
+ {
459
+ "cell_type": "code",
460
+ "execution_count": null,
461
+ "metadata": {},
462
+ "outputs": [],
463
+ "source": []
464
+ }
465
+ ],
466
+ "metadata": {
467
+ "accelerator": "GPU",
468
+ "colab": {
469
+ "authorship_tag": "ABX9TyM3OaMlm9YQtKpl28c8gBBd",
470
+ "include_colab_link": true,
471
+ "name": "DebugOVHTransformers.ipynb",
472
+ "provenance": []
473
+ },
474
+ "kernelspec": {
475
+ "display_name": "Python 3 (ipykernel)",
476
+ "language": "python",
477
+ "name": "python3"
478
+ },
479
+ "language_info": {
480
+ "codemirror_mode": {
481
+ "name": "ipython",
482
+ "version": 3
483
+ },
484
+ "file_extension": ".py",
485
+ "mimetype": "text/x-python",
486
+ "name": "python",
487
+ "nbconvert_exporter": "python",
488
+ "pygments_lexer": "ipython3",
489
+ "version": "3.8.8"
490
+ }
491
+ },
492
+ "nbformat": 4,
493
+ "nbformat_minor": 4
494
+ }
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
1
+ {
2
+ "do_normalize": true,
3
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
4
+ "feature_size": 1,
5
+ "padding_side": "right",
6
+ "padding_value": 0.0,
7
+ "return_attention_mask": false,
8
+ "sampling_rate": 16000
9
+ }
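This preprocessor config defines the Wav2Vec2 feature extractor: 16 kHz audio, zero padding value, no attention mask returned. A minimal sketch of applying it to a dummy waveform, assuming it is loaded from the repo root:

import numpy as np
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./")
waveform = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
print(inputs.input_values.shape)  # torch.Size([1, 16000])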
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3b0f9917eaf9f86f97f7c5c4881ac1458f56f6a944aaedfa540387983cb6110
3
+ size 143974
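The file above is a Git LFS pointer; the actual ~144 kB weight file (the step-5 checkpoint of the dummy model) is stored via LFS. A sketch of reloading it after cloning the repo with git-lfs, assuming the tokenizer and preprocessor files are also present locally:

from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model = Wav2Vec2ForCTC.from_pretrained("./")         # resolves config.json + pytorch_model.bin
processor = Wav2Vec2Processor.from_pretrained("./")  # feature extractor + CTC tokenizer
model.eval()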
run_speech_recognition_ctc.py ADDED
@@ -0,0 +1,737 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ Fine-tuning a 🤗 Transformers CTC model for automatic speech recognition"""
17
+
18
+ import functools
19
+ import json
20
+ import logging
21
+ import os
22
+ import re
23
+ import sys
24
+ import warnings
25
+ from dataclasses import dataclass, field
26
+ from typing import Dict, List, Optional, Union
27
+
28
+ import datasets
29
+ import numpy as np
30
+ import torch
31
+ from datasets import DatasetDict, load_dataset, load_metric
32
+
33
+ import transformers
34
+ from transformers import (
35
+ AutoConfig,
36
+ AutoFeatureExtractor,
37
+ AutoModelForCTC,
38
+ AutoProcessor,
39
+ AutoTokenizer,
40
+ HfArgumentParser,
41
+ Trainer,
42
+ TrainingArguments,
43
+ Wav2Vec2Processor,
44
+ set_seed,
45
+ )
46
+ from transformers.trainer_utils import get_last_checkpoint, is_main_process
47
+ from transformers.utils import check_min_version
48
+ from transformers.utils.versions import require_version
49
+
50
+
51
+ # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
52
+ check_min_version("4.17.0.dev0")
53
+
54
+ require_version("datasets>=1.13.3", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
55
+
56
+
57
+ logger = logging.getLogger(__name__)
58
+
59
+
60
+ def list_field(default=None, metadata=None):
61
+ return field(default_factory=lambda: default, metadata=metadata)
62
+
63
+
64
+ @dataclass
65
+ class ModelArguments:
66
+ """
67
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
68
+ """
69
+
70
+ model_name_or_path: str = field(
71
+ metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
72
+ )
73
+ tokenizer_name_or_path: Optional[str] = field(
74
+ default=None,
75
+ metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"},
76
+ )
77
+ cache_dir: Optional[str] = field(
78
+ default=None,
79
+ metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
80
+ )
81
+ freeze_feature_encoder: bool = field(
82
+ default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
83
+ )
84
+ attention_dropout: float = field(
85
+ default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."}
86
+ )
87
+ activation_dropout: float = field(
88
+ default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
89
+ )
90
+ feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."})
91
+ hidden_dropout: float = field(
92
+ default=0.0,
93
+ metadata={
94
+ "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
95
+ },
96
+ )
97
+ final_dropout: float = field(
98
+ default=0.0,
99
+ metadata={"help": "The dropout probability for the final projection layer."},
100
+ )
101
+ mask_time_prob: float = field(
102
+ default=0.05,
103
+ metadata={
104
+ "help": "Probability of each feature vector along the time axis to be chosen as the start of the vector "
105
+ "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
106
+ "vectors will be masked along the time axis."
107
+ },
108
+ )
109
+ mask_time_length: int = field(
110
+ default=10,
111
+ metadata={"help": "Length of vector span to mask along the time axis."},
112
+ )
113
+ mask_feature_prob: float = field(
114
+ default=0.0,
115
+ metadata={
116
+ "help": "Probability of each feature vector along the feature axis to be chosen as the start of the vector "
116
+ "span to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature bins will be masked along the feature axis."
118
+ },
119
+ )
120
+ mask_feature_length: int = field(
121
+ default=10,
122
+ metadata={"help": "Length of vector span to mask along the feature axis."},
123
+ )
124
+ layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."})
125
+ ctc_loss_reduction: Optional[str] = field(
126
+ default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."}
127
+ )
128
+
129
+
130
+ @dataclass
131
+ class DataTrainingArguments:
132
+ """
133
+ Arguments pertaining to what data we are going to input our model for training and eval.
134
+
135
+ Using `HfArgumentParser` we can turn this class
136
+ into argparse arguments to be able to specify them on
137
+ the command line.
138
+ """
139
+
140
+ dataset_name: str = field(
141
+ metadata={"help": "The name of the dataset to use (via the datasets library)."}
142
+ )
143
+ dataset_config_name: str = field(
144
+ default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
145
+ )
146
+ train_split_name: str = field(
147
+ default="train+validation",
148
+ metadata={
149
+ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train+validation'"
150
+ },
151
+ )
152
+ eval_split_name: str = field(
153
+ default="test",
154
+ metadata={
155
+ "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"
156
+ },
157
+ )
158
+ audio_column_name: str = field(
159
+ default="audio",
160
+ metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
161
+ )
162
+ text_column_name: str = field(
163
+ default="text",
164
+ metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
165
+ )
166
+ overwrite_cache: bool = field(
167
+ default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
168
+ )
169
+ preprocessing_num_workers: Optional[int] = field(
170
+ default=None,
171
+ metadata={"help": "The number of processes to use for the preprocessing."},
172
+ )
173
+ max_train_samples: Optional[int] = field(
174
+ default=None,
175
+ metadata={
176
+ "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
177
+ "value if set."
178
+ },
179
+ )
180
+ max_eval_samples: Optional[int] = field(
181
+ default=None,
182
+ metadata={
183
+ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
184
+ "value if set."
185
+ },
186
+ )
187
+ chars_to_ignore: Optional[List[str]] = list_field(
188
+ default=None,
189
+ metadata={"help": "A list of characters to remove from the transcripts."},
190
+ )
191
+ eval_metrics: List[str] = list_field(
192
+ default=["wer"],
193
+ metadata={"help": "A list of metrics the model should be evaluated on. E.g. `'wer cer'`"},
194
+ )
195
+ max_duration_in_seconds: float = field(
196
+ default=20.0,
197
+ metadata={
198
+ "help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"
199
+ },
200
+ )
201
+ min_duration_in_seconds: float = field(
202
+ default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
203
+ )
204
+ preprocessing_only: bool = field(
205
+ default=False,
206
+ metadata={
207
+ "help": "Whether to only do data preprocessing and skip training. "
208
+ "This is especially useful when data preprocessing errors out in distributed training due to timeout. "
209
+ "In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
210
+ "so that the cached datasets can consequently be loaded in distributed training"
211
+ },
212
+ )
213
+ use_auth_token: bool = field(
214
+ default=False,
215
+ metadata={
216
+ "help": "If :obj:`True`, will use the token generated when running "
217
+ ":obj:`transformers-cli login` as HTTP bearer authorization for remote files."
218
+ },
219
+ )
220
+ unk_token: str = field(
221
+ default="[UNK]",
222
+ metadata={"help": "The unk token for the tokenizer"},
223
+ )
224
+ pad_token: str = field(
225
+ default="[PAD]",
226
+ metadata={"help": "The padding token for the tokenizer"},
227
+ )
228
+ word_delimiter_token: str = field(
229
+ default="|",
230
+ metadata={"help": "The word delimiter token for the tokenizer"},
231
+ )
232
+ phoneme_language: Optional[str] = field(
233
+ default=None,
234
+ metadata={
235
+ "help": "The target language that should be"
236
+ " passed to the tokenizer for tokenization. Note that"
237
+ " this is only relevant if the model classifies the"
238
+ " input audio into a sequence of phonemes."
239
+ },
240
+ )
241
+
242
+
243
+ @dataclass
244
+ class DataCollatorCTCWithPadding:
245
+ """
246
+ Data collator that will dynamically pad the inputs received.
247
+ Args:
248
+ processor (:class:`~transformers.AutoProcessor`)
249
+ The processor used for processing the data.
250
+ padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
251
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
252
+ among:
253
+ * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
254
+ sequence is provided).
255
+ * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
256
+ maximum acceptable input length for the model if that argument is not provided.
257
+ * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
258
+ different lengths).
259
+ max_length (:obj:`int`, `optional`):
260
+ Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
261
+ max_length_labels (:obj:`int`, `optional`):
262
+ Maximum length of the ``labels`` returned list and optionally padding length (see above).
263
+ pad_to_multiple_of (:obj:`int`, `optional`):
264
+ If set will pad the sequence to a multiple of the provided value.
265
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
266
+ 7.5 (Volta).
267
+ """
268
+
269
+ processor: AutoProcessor
270
+ padding: Union[bool, str] = "longest"
271
+ pad_to_multiple_of: Optional[int] = None
272
+ pad_to_multiple_of_labels: Optional[int] = None
273
+
274
+ def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
275
+ # split inputs and labels since they have to be of different lengths and need
276
+ # different padding methods
277
+ input_features = [{"input_values": feature["input_values"]} for feature in features]
278
+ label_features = [{"input_ids": feature["labels"]} for feature in features]
279
+
280
+ batch = self.processor.pad(
281
+ input_features,
282
+ padding=self.padding,
283
+ pad_to_multiple_of=self.pad_to_multiple_of,
284
+ return_tensors="pt",
285
+ )
286
+
287
+ with self.processor.as_target_processor():
288
+ labels_batch = self.processor.pad(
289
+ label_features,
290
+ padding=self.padding,
291
+ pad_to_multiple_of=self.pad_to_multiple_of_labels,
292
+ return_tensors="pt",
293
+ )
294
+
295
+ # replace padding with -100 to ignore loss correctly
296
+ labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
297
+
298
+ batch["labels"] = labels
299
+
300
+ return batch
301
+
302
+
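A toy illustration (not part of the file) of what the collator defined above returns; the processor path and the two fake examples are assumptions, and the class itself is assumed to be in scope.

from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("./")  # feature extractor + tokenizer saved earlier
collator = DataCollatorCTCWithPadding(processor=processor)

features = [
    {"input_values": [0.1] * 1600, "labels": [9, 10, 9]},
    {"input_values": [0.1] * 3200, "labels": [11, 12]},
]
batch = collator(features)
print(batch["input_values"].shape)  # both rows padded to the longest input (3200 samples)
print(batch["labels"])              # shorter label row padded with -100, ignored by the CTC loss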
303
+ def create_vocabulary_from_data(
304
+ datasets: DatasetDict,
305
+ word_delimiter_token: Optional[str] = None,
306
+ unk_token: Optional[str] = None,
307
+ pad_token: Optional[str] = None,
308
+ ):
309
+ # Given training and test labels create vocabulary
310
+ def extract_all_chars(batch):
311
+ all_text = " ".join(batch["target_text"])
312
+ vocab = list(set(all_text))
313
+ return {"vocab": [vocab], "all_text": [all_text]}
314
+
315
+ vocabs = datasets.map(
316
+ extract_all_chars,
317
+ batched=True,
318
+ batch_size=-1,
319
+ keep_in_memory=True,
320
+ remove_columns=datasets["train"].column_names,
321
+ )
322
+
323
+ # take union of all unique characters in each dataset
324
+ vocab_set = functools.reduce(
325
+ lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values()
326
+ )
327
+
328
+ vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))}
329
+
330
+ # replace white space with delimiter token
331
+ if word_delimiter_token is not None:
332
+ vocab_dict[word_delimiter_token] = vocab_dict[" "]
333
+ del vocab_dict[" "]
334
+
335
+ # add unk and pad token
336
+ if unk_token is not None:
337
+ vocab_dict[unk_token] = len(vocab_dict)
338
+
339
+ if pad_token is not None:
340
+ vocab_dict[pad_token] = len(vocab_dict)
341
+
342
+ return vocab_dict
343
+
344
+
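A toy run of the vocabulary builder above; the two-sentence DatasetDict is made up and only meant to show how whitespace becomes the word-delimiter token and how the unk/pad tokens are appended.

from datasets import Dataset, DatasetDict

toy = DatasetDict(
    {
        "train": Dataset.from_dict({"target_text": ["аб ба "]}),
        "eval": Dataset.from_dict({"target_text": ["ба аб "]}),
    }
)
vocab = create_vocabulary_from_data(
    toy, word_delimiter_token="|", unk_token="[UNK]", pad_token="[PAD]"
)
print(vocab)  # {'а': 1, 'б': 2, '|': 0, '[UNK]': 3, '[PAD]': 4}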
345
+ def main():
346
+ # See all possible arguments in src/transformers/training_args.py
347
+ # or by passing the --help flag to this script.
348
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
349
+
350
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
351
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
352
+ # If we pass only one argument to the script and it's the path to a json file,
353
+ # let's parse it to get our arguments.
354
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
355
+ else:
356
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
357
+
358
+ # Detecting last checkpoint.
359
+ last_checkpoint = None
360
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
361
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
362
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
363
+ raise ValueError(
364
+ f"Output directory ({training_args.output_dir}) already exists and is not empty. "
365
+ "Use --overwrite_output_dir to overcome."
366
+ )
367
+ elif last_checkpoint is not None:
368
+ logger.info(
369
+ f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
370
+ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
371
+ )
372
+
373
+ # Setup logging
374
+ logging.basicConfig(
375
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
376
+ datefmt="%m/%d/%Y %H:%M:%S",
377
+ handlers=[logging.StreamHandler(sys.stdout)],
378
+ )
379
+ logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
380
+
381
+ # Log on each process the small summary:
382
+ logger.warning(
383
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
384
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
385
+ )
386
+ # Set the verbosity to info of the Transformers logger (on main process only):
387
+ if is_main_process(training_args.local_rank):
388
+ transformers.utils.logging.set_verbosity_info()
389
+ logger.info("Training/evaluation parameters %s", training_args)
390
+
391
+ # Set seed before initializing model.
392
+ set_seed(training_args.seed)
393
+
394
+ # 1. First, let's load the dataset
395
+ raw_datasets = DatasetDict()
396
+
397
+ if training_args.do_train:
398
+ raw_datasets["train"] = load_dataset(
399
+ data_args.dataset_name,
400
+ data_args.dataset_config_name,
401
+ split=data_args.train_split_name,
402
+ use_auth_token=data_args.use_auth_token,
403
+ )
404
+
405
+ if data_args.audio_column_name not in raw_datasets["train"].column_names:
406
+ raise ValueError(
407
+ f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
408
+ "Make sure to set `--audio_column_name` to the correct audio column - one of "
409
+ f"{', '.join(raw_datasets['train'].column_names)}."
410
+ )
411
+
412
+ if data_args.text_column_name not in raw_datasets["train"].column_names:
413
+ raise ValueError(
414
+ f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
415
+ "Make sure to set `--text_column_name` to the correct text column - one of "
416
+ f"{', '.join(raw_datasets['train'].column_names)}."
417
+ )
418
+
419
+ if data_args.max_train_samples is not None:
420
+ raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
421
+
422
+ if training_args.do_eval:
423
+ raw_datasets["eval"] = load_dataset(
424
+ data_args.dataset_name,
425
+ data_args.dataset_config_name,
426
+ split=data_args.eval_split_name,
427
+ use_auth_token=data_args.use_auth_token,
428
+ )
429
+
430
+ if data_args.max_eval_samples is not None:
431
+ raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
432
+
433
+ # 2. We remove some special characters from the datasets
434
+ # that make training complicated and do not help in transcribing the speech
435
+ # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
436
+ # that could be easily picked up by the model
437
+ chars_to_ignore_regex = (
438
+ f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
439
+ )
440
+ text_column_name = data_args.text_column_name
441
+
442
+ def remove_special_characters(batch):
443
+ if chars_to_ignore_regex is not None:
444
+ batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
445
+ else:
446
+ batch["target_text"] = batch[text_column_name].lower() + " "
447
+ return batch
448
+
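A one-off check of what this mapping does. The sample row and the --chars_to_ignore , . ? setting are assumptions; the notebook's --text_column_name="sentence" is used as the column name.

import re

chars_to_ignore_regex = "[,.?]"            # f'[{"".join(chars_to_ignore)}]' for , . ?
batch = {"sentence": "Ауаҩы, дшоуп?"}      # fabricated example row
target_text = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
print(repr(target_text))                   # 'ауаҩы дшоуп '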
449
+ with training_args.main_process_first(desc="dataset map special characters removal"):
450
+ raw_datasets = raw_datasets.map(
451
+ remove_special_characters,
452
+ remove_columns=[text_column_name],
453
+ desc="remove special characters from datasets",
454
+ )
455
+
456
+ # save special tokens for tokenizer
457
+ word_delimiter_token = data_args.word_delimiter_token
458
+ unk_token = data_args.unk_token
459
+ pad_token = data_args.pad_token
460
+
461
+ # 3. Next, let's load the config as we might need it to create
462
+ # the tokenizer
463
+ # load config
464
+ config = AutoConfig.from_pretrained(
465
+ model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
466
+ )
467
+
468
+ # 4. Next, if no tokenizer file is defined,
469
+ # we create the vocabulary of the model by extracting all unique characters from
470
+ # the training and evaluation datasets
471
+ # We need to make sure that only first rank saves vocabulary
472
+ # make sure all processes wait until vocab is created
473
+ tokenizer_name_or_path = model_args.tokenizer_name_or_path
474
+ tokenizer_kwargs = {}
475
+ if tokenizer_name_or_path is None:
476
+ # save vocab in training output dir
477
+ tokenizer_name_or_path = training_args.output_dir
478
+
479
+ vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json")
480
+
481
+ with training_args.main_process_first():
482
+ if training_args.overwrite_output_dir and os.path.isfile(vocab_file):
483
+ os.remove(vocab_file)
484
+
485
+ with training_args.main_process_first(desc="dataset map vocabulary creation"):
486
+ if not os.path.isfile(vocab_file):
487
+ os.makedirs(tokenizer_name_or_path, exist_ok=True)
488
+ vocab_dict = create_vocabulary_from_data(
489
+ raw_datasets,
490
+ word_delimiter_token=word_delimiter_token,
491
+ unk_token=unk_token,
492
+ pad_token=pad_token,
493
+ )
494
+
495
+ # save vocab dict to be loaded into tokenizer
496
+ with open(vocab_file, "w") as file:
497
+ json.dump(vocab_dict, file)
498
+
499
+ # if tokenizer has just been created
500
+ # it is defined by `tokenizer_class` if present in config else by `model_type`
501
+ tokenizer_kwargs = {
502
+ "config": config if config.tokenizer_class is not None else None,
503
+ "tokenizer_type": config.model_type if config.tokenizer_class is None else None,
504
+ "unk_token": unk_token,
505
+ "pad_token": pad_token,
506
+ "word_delimiter_token": word_delimiter_token,
507
+ }
508
+
509
+ # 5. Now we can instantiate the feature extractor, tokenizer and model
510
+ # Note for distributed training, the .from_pretrained methods guarantee that only
511
+ # one local process can concurrently download model & vocab.
512
+
513
+ # load feature_extractor and tokenizer
514
+ tokenizer = AutoTokenizer.from_pretrained(
515
+ tokenizer_name_or_path,
516
+ use_auth_token=data_args.use_auth_token,
517
+ **tokenizer_kwargs,
518
+ )
519
+ feature_extractor = AutoFeatureExtractor.from_pretrained(
520
+ model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token
521
+ )
522
+
523
+ # adapt config
524
+ config.update(
525
+ {
526
+ "feat_proj_dropout": model_args.feat_proj_dropout,
527
+ "attention_dropout": model_args.attention_dropout,
528
+ "hidden_dropout": model_args.hidden_dropout,
529
+ "final_dropout": model_args.final_dropout,
530
+ "mask_time_prob": model_args.mask_time_prob,
531
+ "mask_time_length": model_args.mask_time_length,
532
+ "mask_feature_prob": model_args.mask_feature_prob,
533
+ "mask_feature_length": model_args.mask_feature_length,
534
+ "gradient_checkpointing": training_args.gradient_checkpointing,
535
+ "layerdrop": model_args.layerdrop,
536
+ "ctc_loss_reduction": model_args.ctc_loss_reduction,
537
+ "pad_token_id": tokenizer.pad_token_id,
538
+ "vocab_size": len(tokenizer),
539
+ "activation_dropout": model_args.activation_dropout,
540
+ }
541
+ )
542
+
543
+ # create model
544
+ model = AutoModelForCTC.from_pretrained(
545
+ model_args.model_name_or_path,
546
+ cache_dir=model_args.cache_dir,
547
+ config=config,
548
+ use_auth_token=data_args.use_auth_token,
549
+ )
550
+
551
+ # freeze encoder
552
+ if model_args.freeze_feature_encoder:
553
+ model.freeze_feature_encoder()
554
+
555
+ # 6. Now we preprocess the datasets including loading the audio, resampling and normalization
556
+ # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
557
+ # so that we just need to set the correct target sampling rate and normalize the input
558
+ # via the `feature_extractor`
559
+
560
+ # make sure that dataset decodes audio with correct sampling rate
561
+ dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
562
+ if dataset_sampling_rate != feature_extractor.sampling_rate:
563
+ raw_datasets = raw_datasets.cast_column(
564
+ data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
565
+ )
566
+
567
+ # derive max & min input length for sample rate & max duration
568
+ max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
569
+ min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
570
+ audio_column_name = data_args.audio_column_name
571
+ num_workers = data_args.preprocessing_num_workers
572
+
573
+ # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification
574
+ phoneme_language = data_args.phoneme_language
575
+
576
+ # Preprocessing the datasets.
577
+ # We need to read the audio files as arrays and tokenize the targets.
578
+ def prepare_dataset(batch):
579
+ # load audio
580
+ sample = batch[audio_column_name]
581
+
582
+ inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
583
+ batch["input_values"] = inputs.input_values[0]
584
+ batch["input_length"] = len(batch["input_values"])
585
+
586
+ # encode targets
587
+ additional_kwargs = {}
588
+ if phoneme_language is not None:
589
+ additional_kwargs["phonemizer_lang"] = phoneme_language
590
+
591
+ batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids
592
+ return batch
593
+
594
+ with training_args.main_process_first(desc="dataset map preprocessing"):
595
+ vectorized_datasets = raw_datasets.map(
596
+ prepare_dataset,
597
+ remove_columns=next(iter(raw_datasets.values())).column_names,
598
+ num_proc=num_workers,
599
+ desc="preprocess datasets",
600
+ )
601
+
602
+ def is_audio_in_length_range(length):
603
+ return length > min_input_length and length < max_input_length
604
+
605
+ # filter data that is shorter than min_input_length or longer than max_input_length
606
+ vectorized_datasets = vectorized_datasets.filter(
607
+ is_audio_in_length_range,
608
+ num_proc=num_workers,
609
+ input_columns=["input_length"],
610
+ )
611
+
612
+ # 7. Next, we can prepare the training.
613
+ # Let's use word error rate (WER) as our evaluation metric,
614
+ # instantiate a data collator and the trainer
615
+
616
+ # Define evaluation metrics during training, *i.e.* word error rate, character error rate
617
+ eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}
618
+
619
+ # for large datasets it is advised to run the preprocessing on a
620
+ # single machine first with ``args.preprocessing_only`` since there will most likely
621
+ # be a timeout when running the script in distributed mode.
622
+ # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
623
+ # cached dataset
624
+ if data_args.preprocessing_only:
625
+ logger.info(f"Data preprocessing finished. Files cached at {vectorized_datasets.cache_files}")
626
+ return
627
+
628
+ def compute_metrics(pred):
629
+ pred_logits = pred.predictions
630
+ pred_ids = np.argmax(pred_logits, axis=-1)
631
+
632
+ pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
633
+
634
+ pred_str = tokenizer.batch_decode(pred_ids)
635
+ # we do not want to group tokens when computing the metrics
636
+ label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False)
637
+
638
+ metrics = {k: v.compute(predictions=pred_str, references=label_str) for k, v in eval_metrics.items()}
639
+
640
+ return metrics
641
+
642
+ # Now save everything to be able to create a single processor later
643
+ if is_main_process(training_args.local_rank):
644
+ # save feature extractor, tokenizer and config
645
+ feature_extractor.save_pretrained(training_args.output_dir)
646
+ tokenizer.save_pretrained(training_args.output_dir)
647
+ config.save_pretrained(training_args.output_dir)
648
+
649
+ try:
650
+ processor = AutoProcessor.from_pretrained(training_args.output_dir)
651
+ except (OSError, KeyError):
652
+ warnings.warn(
653
+ "Loading a processor from a feature extractor config that does not"
654
+ " include a `processor_class` attribute is deprecated and will be removed in v5. Please add the following "
655
+ " attribute to your `preprocessor_config.json` file to suppress this warning: "
656
+ " `'processor_class': 'Wav2Vec2Processor'`",
657
+ FutureWarning,
658
+ )
659
+ processor = Wav2Vec2Processor.from_pretrained(training_args.output_dir)
660
+
661
+ # Instantiate custom data collator
662
+ data_collator = DataCollatorCTCWithPadding(processor=processor)
663
+
664
+ # Initialize Trainer
665
+ trainer = Trainer(
666
+ model=model,
667
+ data_collator=data_collator,
668
+ args=training_args,
669
+ compute_metrics=compute_metrics,
670
+ train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
671
+ eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
672
+ tokenizer=feature_extractor,
673
+ )
674
+
675
+ # 8. Finally, we can start training
676
+
677
+ # Training
678
+ if training_args.do_train:
679
+
680
+ # use last checkpoint if one exists
681
+ if last_checkpoint is not None:
682
+ checkpoint = last_checkpoint
683
+ elif os.path.isdir(model_args.model_name_or_path):
684
+ checkpoint = model_args.model_name_or_path
685
+ else:
686
+ checkpoint = None
687
+
688
+ train_result = trainer.train(resume_from_checkpoint=checkpoint)
689
+ trainer.save_model()
690
+
691
+ metrics = train_result.metrics
692
+ max_train_samples = (
693
+ data_args.max_train_samples
694
+ if data_args.max_train_samples is not None
695
+ else len(vectorized_datasets["train"])
696
+ )
697
+ metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
698
+
699
+ trainer.log_metrics("train", metrics)
700
+ trainer.save_metrics("train", metrics)
701
+ trainer.save_state()
702
+
703
+ # Evaluation
704
+ results = {}
705
+ if training_args.do_eval:
706
+ logger.info("*** Evaluate ***")
707
+ metrics = trainer.evaluate()
708
+ max_eval_samples = (
709
+ data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
710
+ )
711
+ metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
712
+
713
+ trainer.log_metrics("eval", metrics)
714
+ trainer.save_metrics("eval", metrics)
715
+
716
+ # Write model card and (optionally) push to hub
717
+ config_name = data_args.dataset_config_name if data_args.dataset_config_name is not None else "na"
718
+ kwargs = {
719
+ "finetuned_from": model_args.model_name_or_path,
720
+ "tasks": "speech-recognition",
721
+ "tags": ["automatic-speech-recognition", data_args.dataset_name],
722
+ "dataset_args": f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split: {data_args.eval_split_name}",
723
+ "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}",
724
+ }
725
+ if "common_voice" in data_args.dataset_name:
726
+ kwargs["language"] = config_name
727
+
728
+ if training_args.push_to_hub:
729
+ trainer.push_to_hub(**kwargs)
730
+ else:
731
+ trainer.create_model_card(**kwargs)
732
+
733
+ return results
734
+
735
+
736
+ if __name__ == "__main__":
737
+ main()
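The script's compute_metrics defaults to word error rate (--eval_metrics wer). For reference, a minimal WER computation with the same datasets API, on made-up strings (needs the jiwer package installed):

from datasets import load_metric

wer = load_metric("wer")
score = wer.compute(predictions=["hello world"], references=["hello word"])
print(score)  # 0.5: one of the two reference words differs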
special_tokens_map.json ADDED
@@ -0,0 +1 @@
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
1
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e6594028978f7210b64a22c765c8c7f64d4f9b7c9b7b32cac109333397a0579
3
+ size 2991
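training_args.bin holds the torch-pickled TrainingArguments that the Trainer saves next to the checkpoint. A quick way to inspect it, assuming this repo is the working directory and a compatible transformers version is installed:

import torch

args = torch.load("training_args.bin")
print(args.max_steps, args.per_device_train_batch_size, args.save_steps)  # 10 2 5, per the notebook cell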
vocab.json ADDED
@@ -0,0 +1 @@
1
+ {"!": 1, ",": 2, "-": 3, ".": 4, ":": 5, ";": 6, "?": 7, "c": 8, "а": 9, "б": 10, "в": 11, "г": 12, "д": 13, "е": 14, "ж": 15, "з": 16, "и": 17, "к": 18, "л": 19, "м": 20, "н": 21, "о": 22, "п": 23, "р": 24, "с": 25, "т": 26, "у": 27, "ф": 28, "х": 29, "ц": 30, "ч": 31, "ш": 32, "ы": 33, "ь": 34, "џ": 35, "қ": 36, "ҟ": 37, "ҩ": 38, "ҭ": 39, "ҳ": 40, "ҵ": 41, "ҷ": 42, "ҽ": 43, "ҿ": 44, "ә": 45, "ӡ": 46, "ӷ": 47, "ԥ": 48, "–": 49, "|": 0, "[UNK]": 50, "[PAD]": 51}
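Taken together, vocab.json, special_tokens_map.json and tokenizer_config.json define a character-level Wav2Vec2CTCTokenizer for the "ab" dataset config used in the notebook. A small sketch of loading it, assuming the repo root is the working directory:

from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer.from_pretrained("./")
ids = tokenizer("аба").input_ids
print(ids)                    # [9, 10, 9]; 'а' is 9 and 'б' is 10 in vocab.json
print(tokenizer.decode(ids))  # 'аба'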