diff --git "a/log/train_log" "b/log/train_log" new file mode 100644--- /dev/null +++ "b/log/train_log" @@ -0,0 +1,12239 @@ +# Running on vgni001 +# Started at Mon 28 Nov 15:12:15 CET 2022 +# python3 src/run_speech_recognition_ctc.py --report_to=none --run_name=experiments/results/bs_exp/linear/wav2vec2-large-960h-lv60-self/uwb_atcc/0.0ld_0.0ad_0.0attd_0.05fpd_0.075mtp_12mtl_0.0mfp_12mfl_1acc/ --preprocessing_num_workers=5 --model_name_or_path=facebook/wav2vec2-large-960h-lv60-self --dataset_name=experiments/data/uwb_atcc/train --min_duration_in_seconds=0.2 --max_duration_in_seconds=20 --eval_dataset_name=experiments/data/uwb_atcc/test --train_split_name=train --output_dir=experiments/results/bs_exp/linear/wav2vec2-large-960h-lv60-self/uwb_atcc/0.0ld_0.0ad_0.0attd_0.05fpd_0.075mtp_12mtl_0.0mfp_12mfl_1acc/ --num_train_epochs=50 --per_device_train_batch_size=24 --per_device_eval_batch_size=12 --gradient_accumulation_steps=1 --learning_rate=1e-4 --weight_decay=0.001 --warmup_steps=1000 --evaluation_strategy=steps --text_column_name=text --audio_column_name=audio --length_column_name=input_length '--chars_to_ignore=, ? . ! \; \: " “ % ‘ ” � — ’ … –' --save_steps=1000 --eval_steps=500 --logging_steps=1000 --layerdrop=0.0 --activation_dropout=0.0 --attention_dropout=0.0 --save_total_limit=1 --feat_proj_dropout=0.05 --mask_time_prob=0.075 --mask_time_length=12 --mask_feature_prob=0.0 --mask_feature_length=12 --gradient_checkpointing --freeze_feature_encoder --fp16 --group_by_length --do_train --do_eval --max_steps 10000 --overwrite_output_dir --freeze_feature_encoder +11/28/2022 15:12:25 - WARNING - __main__ - Process rank: -1, device: cuda:0, n_gpu: 1distributed training: False, 16-bits training: True +11/28/2022 15:12:25 - INFO - __main__ - Training/evaluation parameters TrainingArguments( +_n_gpu=1, +adafactor=False, +adam_beta1=0.9, +adam_beta2=0.999, +adam_epsilon=1e-08, +auto_find_batch_size=False, +bf16=False, +bf16_full_eval=False, +data_seed=None, +dataloader_drop_last=False, +dataloader_num_workers=0, +dataloader_pin_memory=True, +ddp_bucket_cap_mb=None, +ddp_find_unused_parameters=None, +ddp_timeout=1800, +debug=[], +deepspeed=None, +disable_tqdm=False, +do_eval=True, +do_predict=False, +do_train=True, +eval_accumulation_steps=None, +eval_delay=0, +eval_steps=500, +evaluation_strategy=steps, +fp16=True, +fp16_backend=auto, +fp16_full_eval=False, +fp16_opt_level=O1, +fsdp=[], +fsdp_min_num_params=0, +fsdp_transformer_layer_cls_to_wrap=None, +full_determinism=False, +gradient_accumulation_steps=1, +gradient_checkpointing=True, +greater_is_better=None, +group_by_length=True, +half_precision_backend=auto, +hub_model_id=None, +hub_private_repo=False, +hub_strategy=every_save, +hub_token=, +ignore_data_skip=False, +include_inputs_for_metrics=False, +jit_mode_eval=False, +label_names=None, +label_smoothing_factor=0.0, +learning_rate=0.0001, +length_column_name=input_length, +load_best_model_at_end=False, +local_rank=-1, +log_level=passive, +log_level_replica=passive, +log_on_each_node=True, +logging_dir=experiments/results/bs_exp/linear/wav2vec2-large-960h-lv60-self/uwb_atcc/0.0ld_0.0ad_0.0attd_0.05fpd_0.075mtp_12mtl_0.0mfp_12mfl_1acc/runs/Nov28_15-12-25_vgni001, +logging_first_step=False, +logging_nan_inf_filter=True, +logging_steps=1000, +logging_strategy=steps, +lr_scheduler_type=linear, +max_grad_norm=1.0, +max_steps=10000, +metric_for_best_model=None, +mp_parameters=, +no_cuda=False, +num_train_epochs=50.0, +optim=adamw_hf, 
+output_dir=experiments/results/bs_exp/linear/wav2vec2-large-960h-lv60-self/uwb_atcc/0.0ld_0.0ad_0.0attd_0.05fpd_0.075mtp_12mtl_0.0mfp_12mfl_1acc/,
+overwrite_output_dir=True,
+past_index=-1,
+per_device_eval_batch_size=12,
+per_device_train_batch_size=24,
+prediction_loss_only=False,
+push_to_hub=False,
+push_to_hub_model_id=None,
+push_to_hub_organization=None,
+push_to_hub_token=,
+ray_scope=last,
+remove_unused_columns=True,
+report_to=[],
+resume_from_checkpoint=None,
+run_name=experiments/results/bs_exp/linear/wav2vec2-large-960h-lv60-self/uwb_atcc/0.0ld_0.0ad_0.0attd_0.05fpd_0.075mtp_12mtl_0.0mfp_12mfl_1acc/,
+save_on_each_node=False,
+save_steps=1000,
+save_strategy=steps,
+save_total_limit=1,
+seed=42,
+sharded_ddp=[],
+skip_memory_metrics=True,
+tf32=None,
+torchdynamo=None,
+tpu_metrics_debug=False,
+tpu_num_cores=None,
+use_ipex=False,
+use_legacy_prediction_loop=False,
+use_mps_device=False,
+warmup_ratio=0.0,
+warmup_steps=1000,
+weight_decay=0.001,
+xpu_backend=None,
+)
+11/28/2022 15:12:25 - WARNING - datasets.builder - Using custom data configuration train-755c6b8b7f9b046c
+Downloading and preparing dataset atc_data_loader/train to /remote/idiap.svm/temp.speech01/jzuluaga/experiments/journal/asr/github/w2v2-air-traffic/.cache/experiments/results/bs_exp/linear/wav2vec2-large-960h-lv60-self/uwb_atcc/0.0ld_0.0ad_0.0attd_0.05fpd_0.075mtp_12mtl_0.0mfp_12mfl_1acc//train/atc_data_loader/train-755c6b8b7f9b046c/0.0.0/f2633cc53c6abe32cddd4152eebde1a4e3c9953e1446e190b8d9a13330cddaa4...
+ Generating train split: 11093 examples [00:28, 3872.86 examples/s]
+Dataset atc_data_loader downloaded and prepared to /remote/idiap.svm/temp.speech01/jzuluaga/experiments/journal/asr/github/w2v2-air-traffic/.cache/experiments/results/bs_exp/linear/wav2vec2-large-960h-lv60-self/uwb_atcc/0.0ld_0.0ad_0.0attd_0.05fpd_0.075mtp_12mtl_0.0mfp_12mfl_1acc//train/atc_data_loader/train-755c6b8b7f9b046c/0.0.0/f2633cc53c6abe32cddd4152eebde1a4e3c9953e1446e190b8d9a13330cddaa4. Subsequent calls will reuse this data.
+11/28/2022 15:12:55 - WARNING - datasets.builder - Using custom data configuration test-085e5dd7a4b8bb1c
+Downloading and preparing dataset atc_data_loader/test to /remote/idiap.svm/temp.speech01/jzuluaga/experiments/journal/asr/github/w2v2-air-traffic/.cache/experiments/results/bs_exp/linear/wav2vec2-large-960h-lv60-self/uwb_atcc/0.0ld_0.0ad_0.0attd_0.05fpd_0.075mtp_12mtl_0.0mfp_12mfl_1acc//test/atc_data_loader/test-085e5dd7a4b8bb1c/0.0.0/f2633cc53c6abe32cddd4152eebde1a4e3c9953e1446e190b8d9a13330cddaa4...
+ Generating test split: 2506 examples [00:06, 1342.98 examples/s]
+loading configuration file config.json from cache at /idiap/temp/jzuluaga/cache/huggingface/models--facebook--wav2vec2-large-960h-lv60-self/snapshots/54074b1c16f4de6a5ad59affb4caa8f2ea03a119/config.json
+Model config Wav2Vec2Config {
+  "_name_or_path": "facebook/wav2vec2-large-960h-lv60-self",
+  "activation_dropout": 0.1,
+  "adapter_kernel_size": 3,
+  "adapter_stride": 2,
+  "add_adapter": false,
+  "apply_spec_augment": true,
+  "architectures": [
+    "Wav2Vec2ForCTC"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 1,
+  "classifier_proj_size": 256,
+  "codevector_dim": 256,
+  "contrastive_logits_temperature": 0.1,
+  "conv_bias": true,
+  "conv_dim": [
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
+  ],
+  "conv_kernel": [
+    10,
+    3,
+    3,
+    3,
+    3,
+    2,
+    2
+  ],
+  "conv_stride": [
+    5,
+    2,
+    2,
+    2,
+    2,
+    2,
+    2
+  ],
+  "ctc_loss_reduction": "sum",
+  "ctc_zero_infinity": false,
+  "diversity_loss_weight": 0.1,
+  "do_stable_layer_norm": true,
+  "eos_token_id": 2,
+  "feat_extract_activation": "gelu",
+  "feat_extract_dropout": 0.0,
+  "feat_extract_norm": "layer",
+  "feat_proj_dropout": 0.1,
+  "feat_quantizer_dropout": 0.0,
+  "final_dropout": 0.1,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout": 0.1,
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "layer_norm_eps": 1e-05,
+  "layerdrop": 0.1,
+  "mask_feature_length": 10,
+  "mask_feature_min_masks": 0,
+  "mask_feature_prob": 0.0,
+  "mask_time_length": 10,
+  "mask_time_min_masks": 2,
+  "mask_time_prob": 0.05,
+  "model_type": "wav2vec2",
+  "num_adapter_layers": 3,
+  "num_attention_heads": 16,
+  "num_codevector_groups": 2,
+  "num_codevectors_per_group": 320,
+  "num_conv_pos_embedding_groups": 16,
+  "num_conv_pos_embeddings": 128,
+  "num_feat_extract_layers": 7,
+  "num_hidden_layers": 24,
+  "num_negatives": 100,
+  "output_hidden_size": 1024,
+  "pad_token_id": 0,
+  "proj_codevector_dim": 256,
+  "tdnn_dilation": [
+    1,
+    2,
+    3,
+    1,
+    1
+  ],
+  "tdnn_dim": [
+    512,
+    512,
+    512,
+    512,
+    1500
+  ],
+  "tdnn_kernel": [
+    5,
+    3,
+    3,
+    1,
+    1
+  ],
+  "transformers_version": "4.24.0",
"use_weighted_layer_sum": false, + "vocab_size": 32, + "xvector_output_dim": 512 +} + +Dataset atc_data_loader downloaded and prepared to /remote/idiap.svm/temp.speech01/jzuluaga/experiments/journal/asr/github/w2v2-air-traffic/.cache/experiments/results/bs_exp/linear/wav2vec2-large-960h-lv60-self/uwb_atcc/0.0ld_0.0ad_0.0attd_0.05fpd_0.075mtp_12mtl_0.0mfp_12mfl_1acc//test/atc_data_loader/test-085e5dd7a4b8bb1c/0.0.0/f2633cc53c6abe32cddd4152eebde1a4e3c9953e1446e190b8d9a13330cddaa4. Subsequent calls will reuse this data. + 0%| | 0/1 [00:00 to the vocabulary +Adding to the vocabulary +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +loading configuration file preprocessor_config.json from cache at /idiap/temp/jzuluaga/cache/huggingface/models--facebook--wav2vec2-large-960h-lv60-self/snapshots/54074b1c16f4de6a5ad59affb4caa8f2ea03a119/preprocessor_config.json +loading configuration file config.json from cache at /idiap/temp/jzuluaga/cache/huggingface/models--facebook--wav2vec2-large-960h-lv60-self/snapshots/54074b1c16f4de6a5ad59affb4caa8f2ea03a119/config.json +Model config Wav2Vec2Config { + "_name_or_path": "facebook/wav2vec2-large-960h-lv60-self", + "activation_dropout": 0.1, + "adapter_kernel_size": 3, + "adapter_stride": 2, + "add_adapter": false, + "apply_spec_augment": true, + "architectures": [ + "Wav2Vec2ForCTC" + ], + "attention_dropout": 0.1, + "bos_token_id": 1, + "classifier_proj_size": 256, + "codevector_dim": 256, + "contrastive_logits_temperature": 0.1, + "conv_bias": true, + "conv_dim": [ + 512, + 512, + 512, + 512, + 512, + 512, + 512 + ], + "conv_kernel": [ + 10, + 3, + 3, + 3, + 3, + 2, + 2 + ], + "conv_stride": [ + 5, + 2, + 2, + 2, + 2, + 2, + 2 + ], + "ctc_loss_reduction": "sum", + "ctc_zero_infinity": false, + "diversity_loss_weight": 0.1, + "do_stable_layer_norm": true, + "eos_token_id": 2, + "feat_extract_activation": "gelu", + "feat_extract_dropout": 0.0, + "feat_extract_norm": "layer", + "feat_proj_dropout": 0.1, + "feat_quantizer_dropout": 0.0, + "final_dropout": 0.1, + "gradient_checkpointing": false, + "hidden_act": "gelu", + "hidden_dropout": 0.1, + "hidden_dropout_prob": 0.1, + "hidden_size": 1024, + "initializer_range": 0.02, + "intermediate_size": 4096, + "layer_norm_eps": 1e-05, + "layerdrop": 0.1, + "mask_feature_length": 10, + "mask_feature_min_masks": 0, + "mask_feature_prob": 0.0, + "mask_time_length": 10, + "mask_time_min_masks": 2, + "mask_time_prob": 0.05, + "model_type": "wav2vec2", + "num_adapter_layers": 3, + "num_attention_heads": 16, + "num_codevector_groups": 2, + "num_codevectors_per_group": 320, + "num_conv_pos_embedding_groups": 16, + "num_conv_pos_embeddings": 128, + "num_feat_extract_layers": 7, + "num_hidden_layers": 24, + "num_negatives": 100, + "output_hidden_size": 1024, + "pad_token_id": 0, + "proj_codevector_dim": 256, + "tdnn_dilation": [ + 1, + 2, + 3, + 1, + 1 + ], + "tdnn_dim": [ + 512, + 512, + 512, + 512, + 1500 + ], + "tdnn_kernel": [ + 5, + 3, + 3, + 1, + 1 + ], + "transformers_version": "4.24.0", + "use_weighted_layer_sum": false, + "vocab_size": 32, + "xvector_output_dim": 512 +} + +Feature extractor Wav2Vec2FeatureExtractor { + "do_normalize": true, + "feature_extractor_type": "Wav2Vec2FeatureExtractor", + "feature_size": 1, + "padding_side": "right", + "padding_value": 0.0, + "return_attention_mask": true, + "sampling_rate": 16000 +} + +loading weights file pytorch_model.bin from cache at 
+All model checkpoint weights were used when initializing Wav2Vec2ForCTC.
+
+Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/wav2vec2-large-960h-lv60-self and are newly initialized: ['wav2vec2.masked_spec_embed']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+Some weights of Wav2Vec2ForCTC were not initialized from the model checkpoint at facebook/wav2vec2-large-960h-lv60-self and are newly initialized because the shapes did not match:
+- lm_head.weight: found shape torch.Size([32, 1024]) in the checkpoint and torch.Size([31, 1024]) in the model instantiated
+- lm_head.bias: found shape torch.Size([32]) in the checkpoint and torch.Size([31]) in the model instantiated
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+ preprocess datasets #0: 0%| | 0/2259 [00:00
+Adding <s> to the vocabulary
+Adding </s> to the vocabulary
+max_steps is given, it will override any value given in num_train_epochs
+Using cuda_amp half precision backend
+The following columns in the training set don't have a corresponding argument in `Wav2Vec2ForCTC.forward` and have been ignored: input_length. If input_length are not expected by `Wav2Vec2ForCTC.forward`, you can safely ignore this message.
+/idiap/user/jzuluaga/miniconda3/envs/w2v2/lib/python3.10/site-packages/transformers/optimization.py:306: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning
+  warnings.warn(
+***** Running training *****
+  Num examples = 11291
+  Num Epochs = 22
+  Instantaneous batch size per device = 24
+  Total train batch size (w. parallel, distributed & accumulation) = 24
+  Gradient Accumulation steps = 1
+  Total optimization steps = 10000
+  Number of trainable parameters = 311260319
+ 0%| | 0/10000 [00:00