# python3 -m espnet2.bin.asr_train --use_preprocessor true --bpemodel data/fr_token_list/bpe_unigram350/bpe.model --token_type bpe --token_list data/fr_token_list/bpe_unigram350/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_fr/wav.scp,speech,sound --valid_data_path_and_name_and_type dump/raw/dev_fr/text,text,text --valid_shape_file exp/asr_stats_raw_fr_bpe350_sp/valid/speech_shape --valid_shape_file exp/asr_stats_raw_fr_bpe350_sp/valid/text_shape.bpe --resume true --init_param --ignore_init_mismatch false --fold_length 80000 --fold_length 150 --output_dir exp/asr_oxford_French_config_raw_fr_bpe350_sp --config conf/tuning/oxford_French_config.yaml --frontend_conf fs=16k --train_data_path_and_name_and_type dump/raw/train_fr_sp/wav.scp,speech,sound --train_data_path_and_name_and_type dump/raw/train_fr_sp/text,text,text --train_shape_file exp/asr_stats_raw_fr_bpe350_sp/train/speech_shape --train_shape_file exp/asr_stats_raw_fr_bpe350_sp/train/text_shape.bpe --ngpu 3 --multiprocessing_distributed True
# Started at Fri Jun 10 14:04:08 EDT 2022
# /usr/bin/python3 /project/ocean/junweih/espnet/espnet2/bin/asr_train.py [... same arguments as above ...]
[islpc50:0/3] 2022-06-10 14:04:26,617 (distributed_c10d:217) INFO: Added key: store_based_barrier_key:1 to store for rank: 0
[islpc50:0/3] 2022-06-10 14:04:26,618 (distributed_c10d:251) INFO: Rank 0: Completed store-based barrier for key:store_based_barrier_key:1 with 3 nodes.
[islpc50:0/3] 2022-06-10 14:04:26,681 (asr:411) INFO: Vocabulary size: 350
[islpc50:0/3] 2022-06-10 14:04:27,470 (filelock:274) INFO: Lock 139713832726480 acquired on ./hub/s3prl_cache/1c76d6e88090f01736036b28dc995fef583f47f42662d55286332557f957609f.lock
[islpc50:0/3] 2022-06-10 14:04:27,471 (filelock:318) INFO: Lock 139713832726480 released on ./hub/s3prl_cache/1c76d6e88090f01736036b28dc995fef583f47f42662d55286332557f957609f.lock
[Featurizer] - The selected feature last_hidden_state's downsample rate is 320
[islpc50:0/3] 2022-06-10 14:04:39,975 (s3prl:159) INFO: Pretrained S3PRL frontend model parameters reloaded!
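The reported downsample rate of 320 follows from the strides of the seven Conv1d layers in the wav2vec 2.0 feature extractor printed in the model structure below; a quick illustrative check (not part of the log):

import math

# Strides of the seven Conv1d layers in ConvFeatureExtractionModel (see the
# model structure dump below): 5, 2, 2, 2, 2, 2, 2.
strides = [5, 2, 2, 2, 2, 2, 2]

# Their product is the reduction from raw samples to last_hidden_state
# frames: 320 samples per frame, i.e. 20 ms at the 16 kHz set by
# --frontend_conf fs=16k.
assert math.prod(strides) == 320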
[islpc50:0/3] 2022-06-10 14:04:43,809 (abs_task:1157) INFO: pytorch.version=1.10.1+cu111, cuda.available=True, cudnn.version=8005, cudnn.benchmark=False, cudnn.deterministic=True
[islpc50:0/3] 2022-06-10 14:04:43,815 (abs_task:1158) INFO: Model structure:
ESPnetASRModel(
  (frontend): S3prlFrontend(
    (upstream): UpstreamExpert(
      (model): Wav2Vec2Model(
        (feature_extractor): ConvFeatureExtractionModel(
          (conv_layers): ModuleList(
            (0): Sequential(
              (0): Conv1d(1, 512, kernel_size=(10,), stride=(5,))
              (1): Dropout(p=0.0, inplace=False)
              (2): Sequential(
                (0): TransposeLast()
                (1): Fp32LayerNorm((512,), eps=1e-05, elementwise_affine=True)
                (2): TransposeLast()
              )
              (3): GELU()
            )
            (1)-(4): [four blocks identical to (0), each with Conv1d(512, 512, kernel_size=(3,), stride=(2,))]
            (5)-(6): [two blocks identical to (0), each with Conv1d(512, 512, kernel_size=(2,), stride=(2,))]
          )
        )
        (post_extract_proj): Linear(in_features=512, out_features=1024, bias=True)
        (dropout_input): Dropout(p=0.1, inplace=False)
        (dropout_features): Dropout(p=0.1, inplace=False)
        (quantizer): GumbelVectorQuantizer(
          (weight_proj): Linear(in_features=512, out_features=640, bias=True)
        )
        (project_q): Linear(in_features=768, out_features=768, bias=True)
        (encoder): TransformerEncoder(
          (pos_conv): Sequential(
            (0): Conv1d(1024, 1024, kernel_size=(128,), stride=(1,), padding=(64,), groups=16)
            (1): SamePad()
            (2): GELU()
          )
          (layers): ModuleList(
            (0): AdapterTransformerSentenceEncoderLayer(
              (self_attn): MultiheadAttention(
                (dropout_module): FairseqDropout()
                (k_proj): Linear(in_features=1024, out_features=1024, bias=True)
                (v_proj): Linear(in_features=1024, out_features=1024, bias=True)
                (q_proj): Linear(in_features=1024, out_features=1024, bias=True)
                (out_proj): Linear(in_features=1024, out_features=1024, bias=True)
              )
              (dropout1): Dropout(p=0.0, inplace=False)
              (dropout2): Dropout(p=0.0, inplace=False)
              (dropout3): Dropout(p=0.0, inplace=False)
              (self_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fc1): Linear(in_features=1024, out_features=4096, bias=True)
              (fc2): Linear(in_features=4096, out_features=1024, bias=True)
              (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (adapter1): Adapter(
                (down_projection): Linear(in_features=1024, out_features=192, bias=True)
                (up_projection): Linear(in_features=192, out_features=1024, bias=True)
              )
              (adapter2): Adapter(
                (down_projection): Linear(in_features=1024, out_features=192, bias=True)
                (up_projection): Linear(in_features=192, out_features=1024, bias=True)
              )
            )
            (1)-(2): [two AdapterTransformerSentenceEncoderLayer blocks identical to (0)]
            (3): TransformerSentenceEncoderLayer(
              (self_attn): MultiheadAttention(
                (dropout_module): FairseqDropout()
                (k_proj): Linear(in_features=1024, out_features=1024, bias=True)
                (v_proj): Linear(in_features=1024, out_features=1024, bias=True)
                (q_proj): Linear(in_features=1024, out_features=1024, bias=True)
                (out_proj): Linear(in_features=1024, out_features=1024, bias=True)
              )
              (dropout1): Dropout(p=0.0, inplace=False)
              (dropout2): Dropout(p=0.0, inplace=False)
              (dropout3): Dropout(p=0.0, inplace=False)
              (self_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fc1): Linear(in_features=1024, out_features=4096, bias=True)
              (fc2): Linear(in_features=4096, out_features=1024, bias=True)
              (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
            )
            (4)-(23): [twenty TransformerSentenceEncoderLayer blocks identical to (3)]
          )
          (layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
        )
        (layer_norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
        (final_proj): Linear(in_features=1024, out_features=768, bias=True)
      )
    )
    (featurizer): Featurizer()
  )
  (normalize): UtteranceMVN(norm_means=True, norm_vars=False)
  (encoder): RNNEncoder(
    (enc): ModuleList(
      (0): RNNP(
        (birnn0): LSTM(1024, 320, batch_first=True, bidirectional=True)
        (bt0): Linear(in_features=640, out_features=320, bias=True)
        (birnn1): LSTM(320, 320, batch_first=True, bidirectional=True)
        (bt1): Linear(in_features=640, out_features=320, bias=True)
        (birnn2): LSTM(320, 320, batch_first=True, bidirectional=True)
        (bt2): Linear(in_features=640, out_features=320, bias=True)
        (birnn3): LSTM(320, 320, batch_first=True, bidirectional=True)
        (bt3): Linear(in_features=640, out_features=320, bias=True)
      )
    )
  )
  (criterion_att): LabelSmoothingLoss(
    (criterion): KLDivLoss()
  )
  (ctc): CTC(
    (ctc_lo): Linear(in_features=320, out_features=350, bias=True)
    (ctc_loss): CTCLoss()
  )
)

Model summary:
    Class Name: ESPnetASRModel
    Total Number of model parameters: 329.07 M
    Number of trainable parameters: 11.68 M (3.5%)
    Size: 46.7 MB
    Type: torch.float32
[islpc50:0/3] 2022-06-10 14:04:43,815 (abs_task:1161) INFO: Optimizer:
Adam (
Parameter Group 0
    amsgrad: False
    betas: (0.9, 0.999)
    eps: 1e-08
    initial_lr: 0.0002
    lr: 5e-09
    weight_decay: 0
)
[islpc50:0/3] 2022-06-10 14:04:43,815 (abs_task:1162) INFO: Scheduler: WarmupLR(warmup_steps=40000)
[islpc50:0/3] 2022-06-10 14:04:43,831 (abs_task:1171) INFO: Saving the configuration in exp/asr_oxford_French_config_raw_fr_bpe350_sp/config.yaml
[islpc50:0/3] 2022-06-10 14:04:47,904 (abs_task:1525) INFO: [train] dataset:
ESPnetDataset(
  speech: {"path": "dump/raw/train_fr_sp/wav.scp", "type": "sound"}
  text: {"path": "dump/raw/train_fr_sp/text", "type": "text"}
  preprocess: )
[islpc50:0/3] 2022-06-10 14:04:47,904 (abs_task:1526) INFO: [train] Batch sampler: FoldedBatchSampler(N-batch=784, batch_size=32, shape_files=['exp/asr_stats_raw_fr_bpe350_sp/train/speech_shape', 'exp/asr_stats_raw_fr_bpe350_sp/train/text_shape.bpe'], sort_in_batch=descending, sort_batch=descending)
[islpc50:0/3] 2022-06-10 14:04:47,904 (abs_task:1527) INFO: [train] mini-batch sizes summary: N-batch=784, mean=19.9, min=5, max=32
[islpc50:0/3] 2022-06-10 14:04:48,032 (abs_task:1525) INFO: [valid] dataset:
ESPnetDataset(
  speech: {"path": "dump/raw/dev_fr/wav.scp", "type": "sound"}
  text: {"path": "dump/raw/dev_fr/text", "type": "text"}
  preprocess: )
[islpc50:0/3] 2022-06-10 14:04:48,032 (abs_task:1526) INFO: [valid] Batch sampler: FoldedBatchSampler(N-batch=784, batch_size=32, shape_files=['exp/asr_stats_raw_fr_bpe350_sp/valid/speech_shape', 'exp/asr_stats_raw_fr_bpe350_sp/valid/text_shape.bpe'], sort_in_batch=descending, sort_batch=descending)
[islpc50:0/3] 2022-06-10 14:04:48,032 (abs_task:1527) INFO: [valid] mini-batch sizes summary: N-batch=784, mean=19.9, min=5, max=32
[islpc50:0/3] 2022-06-10 14:04:48,280 (abs_task:1525) INFO: [plot_att] dataset:
ESPnetDataset(
  speech: {"path": "dump/raw/dev_fr/wav.scp", "type": "sound"}
  text: {"path": "dump/raw/dev_fr/text", "type": "text"}
  preprocess: )
[islpc50:0/3] 2022-06-10 14:04:48,281 (abs_task:1526) INFO: [plot_att] Batch sampler: UnsortedBatchSampler(N-batch=15621, batch_size=1, key_file=exp/asr_stats_raw_fr_bpe350_sp/valid/speech_shape,
[islpc50:0/3] 2022-06-10 14:04:48,281 (abs_task:1527) INFO: [plot_att] mini-batch sizes summary: N-batch=3, mean=1.0, min=1, max=1
islpc50:3919100:3919100 [0] NCCL INFO Bootstrap : Using bond0:128.2.205.9<0>
islpc50:3919100:3919100 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
islpc50:3919100:3919100 [0] NCCL INFO NET/IB : No device found.
islpc50:3919100:3919100 [0] NCCL INFO NET/Socket : Using [0]bond0:128.2.205.9<0>
islpc50:3919100:3919100 [0] NCCL INFO Using network Socket
NCCL version 2.10.3+cuda11.1
islpc50:3919102:3919102 [2] NCCL INFO Bootstrap : Using bond0:128.2.205.9<0>
islpc50:3919101:3919101 [1] NCCL INFO Bootstrap : Using bond0:128.2.205.9<0>
islpc50:3919101:3919101 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
islpc50:3919102:3919102 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation
islpc50:3919101:3919101 [1] NCCL INFO NET/IB : No device found.
islpc50:3919102:3919102 [2] NCCL INFO NET/IB : No device found.
islpc50:3919101:3919101 [1] NCCL INFO NET/Socket : Using [0]bond0:128.2.205.9<0>
islpc50:3919102:3919102 [2] NCCL INFO NET/Socket : Using [0]bond0:128.2.205.9<0>
islpc50:3919101:3919101 [1] NCCL INFO Using network Socket
islpc50:3919102:3919102 [2] NCCL INFO Using network Socket
islpc50:3919100:3919370 [0] NCCL INFO Could not enable P2P between dev 1(=1a000) and dev 0(=19000)
islpc50:3919100:3919370 [0] NCCL INFO Could not enable P2P between dev 0(=19000) and dev 1(=1a000)
islpc50:3919100:3919370 [0] NCCL INFO Could not enable P2P between dev 1(=1a000) and dev 0(=19000)
islpc50:3919100:3919370 [0] NCCL INFO Could not enable P2P between dev 0(=19000) and dev 1(=1a000)
islpc50:3919102:3919372 [2] NCCL INFO Could not enable P2P between dev 1(=1a000) and dev 0(=19000)
islpc50:3919102:3919372 [2] NCCL INFO Could not enable P2P between dev 0(=19000) and dev 1(=1a000)
islpc50:3919102:3919372 [2] NCCL INFO Could not enable P2P between dev 1(=1a000) and dev 0(=19000)
islpc50:3919102:3919372 [2] NCCL INFO Could not enable P2P between dev 0(=19000) and dev 1(=1a000)
islpc50:3919101:3919371 [1] NCCL INFO Could not enable P2P between dev 1(=1a000) and dev 0(=19000)
islpc50:3919101:3919371 [1] NCCL INFO Could not enable P2P between dev 0(=19000) and dev 1(=1a000)
islpc50:3919101:3919371 [1] NCCL INFO Could not enable P2P between dev 1(=1a000) and dev 0(=19000)
islpc50:3919101:3919371 [1] NCCL INFO Could not enable P2P between dev 0(=19000) and dev 1(=1a000)
islpc50:3919101:3919371 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0
islpc50:3919102:3919372 [2] NCCL INFO Trees [0] -1/-1/-1->2->1 [1] -1/-1/-1->2->1
islpc50:3919100:3919370 [0] NCCL INFO Channel 00/02 : 0 1 2
islpc50:3919101:3919371 [1] NCCL INFO Setting affinity for GPU 1 to ffffff
islpc50:3919102:3919372 [2] NCCL INFO Setting affinity for GPU 2 to ffffff
islpc50:3919100:3919370 [0] NCCL INFO Channel 01/02 : 0 1 2
islpc50:3919100:3919370 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1
islpc50:3919100:3919370 [0] NCCL INFO Setting affinity for GPU 0 to ffffff
islpc50:3919101:3919371 [1] NCCL INFO Could not enable P2P between dev 1(=1a000) and dev 0(=19000)
islpc50:3919101:3919371 [1] NCCL INFO Could not enable P2P between dev 1(=1a000) and dev 0(=19000)
islpc50:3919101:3919371 [1] NCCL INFO Channel 00 : 1[1a000] -> 2[67000] via direct shared memory
islpc50:3919102:3919372 [2] NCCL INFO Channel 00 : 2[67000] -> 0[19000] via direct shared memory
islpc50:3919100:3919370 [0] NCCL INFO Could not enable P2P between dev 0(=19000) and dev 1(=1a000)
islpc50:3919101:3919371 [1] NCCL INFO Channel 01 : 1[1a000] -> 2[67000] via direct shared memory
islpc50:3919102:3919372 [2] NCCL INFO Channel 01 : 2[67000] -> 0[19000] via direct shared memory
islpc50:3919100:3919370 [0] NCCL INFO Channel 00 : 0[19000] -> 1[1a000] via direct shared memory
islpc50:3919100:3919370 [0] NCCL INFO Could not enable P2P between dev 0(=19000) and dev 1(=1a000)
islpc50:3919100:3919370 [0] NCCL INFO Channel 01 : 0[19000] -> 1[1a000] via direct shared memory
islpc50:3919102:3919372 [2] NCCL INFO Connected all rings
islpc50:3919101:3919371 [1] NCCL INFO Connected all rings
islpc50:3919100:3919370 [0] NCCL INFO Connected all rings
islpc50:3919100:3919370 [0] NCCL INFO Could not enable P2P between dev 0(=19000) and dev 1(=1a000)
islpc50:3919102:3919372 [2] NCCL INFO Channel 00 : 2[67000] -> 1[1a000] via direct shared memory
islpc50:3919102:3919372 [2] NCCL INFO Channel 01 : 2[67000] -> 1[1a000] via direct shared memory
islpc50:3919100:3919370 [0] NCCL INFO Could not enable P2P between dev 0(=19000) and dev 1(=1a000)
islpc50:3919101:3919371 [1] NCCL INFO Could not enable P2P between dev 1(=1a000) and dev 0(=19000)
islpc50:3919101:3919371 [1] NCCL INFO Channel 00 : 1[1a000] -> 0[19000] via direct shared memory
islpc50:3919101:3919371 [1] NCCL INFO Could not enable P2P between dev 1(=1a000) and dev 0(=19000)
islpc50:3919101:3919371 [1] NCCL INFO Channel 01 : 1[1a000] -> 0[19000] via direct shared memory
islpc50:3919100:3919370 [0] NCCL INFO Connected all trees
islpc50:3919100:3919370 [0] NCCL INFO threadThresholds 8/8/64 | 24/8/64 | 8/8/512
islpc50:3919100:3919370 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer
islpc50:3919102:3919372 [2] NCCL INFO Connected all trees
islpc50:3919102:3919372 [2] NCCL INFO threadThresholds 8/8/64 | 24/8/64 | 8/8/512
islpc50:3919102:3919372 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer
islpc50:3919101:3919371 [1] NCCL INFO Connected all trees
islpc50:3919101:3919371 [1] NCCL INFO threadThresholds 8/8/64 | 24/8/64 | 8/8/512
islpc50:3919101:3919371 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer
islpc50:3919100:3919370 [0] NCCL INFO comm 0x7f1078002fb0 rank 0 nranks 3 cudaDev 0 busId 19000 - Init COMPLETE
islpc50:3919101:3919371 [1] NCCL INFO comm 0x7ef9f0002fb0 rank 1 nranks 3 cudaDev 1 busId 1a000 - Init COMPLETE
islpc50:3919102:3919372 [2] NCCL INFO comm 0x7f89bc002fb0 rank 2 nranks 3 cudaDev 2 busId 67000 - Init COMPLETE
islpc50:3919100:3919100 [0] NCCL INFO Launch mode Parallel
[s3prl.upstream.experts] Warning: can not import s3prl.upstream.byol_a.expert: No module named 'easydict'. Pass.
[s3prl.hub] Warning: can not import s3prl.upstream.byol_a.hubconf: No module named 'easydict'. Please see upstream/byol_a/README.md
[s3prl.downstream.experts] Warning: can not import s3prl.downstream.quesst14_dtw.expert: No module named 'dtw'. Pass.
[s3prl.downstream.experts] Warning: can not import s3prl.downstream.separation_stft.expert: No module named 'asteroid'. Pass.
[s3prl.downstream.experts] Warning: can not import s3prl.downstream.enhancement_stft.expert: No module named 'asteroid'. Pass.
[s3prl.downstream.experts] Warning: can not import s3prl.downstream.speech_commands.expert: No module named 'catalyst'. Pass.
[s3prl.downstream.experts] Warning: can not import s3prl.downstream.a2a-vc-vctk.expert: No module named 'resemblyzer'. Pass.
[s3prl.downstream.experts] Warning: can not import s3prl.downstream.voxceleb2_ge2e.expert: No module named 'sox'. Pass.
[s3prl.downstream.experts] Warning: can not import s3prl.downstream.sv_voxceleb1.expert: No module named 'sox'. Pass.
Using cache found in ./hub/s3prl_cache/1c76d6e88090f01736036b28dc995fef583f47f42662d55286332557f957609f for https://dl.fbaipublicfiles.com/fairseq/wav2vec/wav2vec_vox_new.pt
>> inserted adapters to the following layers: 0, 1, 2
 * original model weights: 317,390,592
 * new model weights - all: 319,757,184
 * new model weights - trainable: 2,366,592 ( 0.75% of original model)
[... the same s3prl import warnings, cache message, and adapter summary are printed again by a second spawned process ...]
[islpc50:0/3] 2022-06-10 14:04:48,878 (trainer:280) INFO: 1/30epoch started
[... and once more by the third spawned process ...]
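As a quick consistency check (illustrative, not part of the log), the trainable-parameter count in the adapter summary above follows from the Adapter shapes printed in the model structure, i.e. Linear 1024 -> 192 and 192 -> 1024 with biases, two adapters in each of layers 0, 1, and 2:

# Parameters added by the adapters reported above.
d_model, bottleneck = 1024, 192
down = d_model * bottleneck + bottleneck   # down_projection weight + bias
up = bottleneck * d_model + d_model        # up_projection weight + bias
added = 3 * 2 * (down + up)                # 3 layers x 2 adapters per layer

assert added == 2_366_592                        # "new model weights - trainable"
assert added == 319_757_184 - 317_390_592        # "all" minus "original"
print(f"{added / 317_390_592:.2%} of original")  # ~0.75%, as logged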
ERROR:root:Error happened with path=dump/raw/train_fr_sp/wav.scp, type=sound, id=3b573fa64a6fdd7f7c0557e4240f45b31cd77c9b2ecff3dec7469e414d80934145ab28b283b7b202ef96e0db82ecc7858fc79596ce2b0a5ca821d3b83849fcf3-common_voice_fr_19634323
ERROR:root:Error happened with path=dump/raw/train_fr_sp/wav.scp, type=sound, id=9160bae81efd8c83d08dea4c3d45008136c431e1bb1fdd42250391bda50c4632fd8bd3fc90f0fbb98d32994f2867dec8a1a7f5307fd461557d90bea7bd87cfc9-common_voice_fr_19815208
ERROR:root:Error happened with path=dump/raw/train_fr_sp/wav.scp, type=sound, id=3e87e5286f7c1a68a4e66e6a09343b84e605717cf57dad8a8e2883d611c46effc157b2a839cdd59b89056c1e641d6e5632663df272bd9d37faf3261bcc3235b9-common_voice_fr_19864394
ERROR:root:Error happened with path=dump/raw/train_fr_sp/wav.scp, type=sound, id=95f596b33d5005378f7f5b0d34f424e169e5e2bffc9feb1b326a4e93708a4b1fd53bd38e19f5993e60c509488884f9a97736a52b6b0bc570bed8a478df197433-common_voice_fr_19739200
ERROR:root:Error happened with path=dump/raw/train_fr_sp/wav.scp, type=sound, id=4bf705c059f39f3bbfa076b055138c5ca7091905da9123d433cfb2e39873205ecee6307b04060c2d5ddd5325f22aa5a71ba2392f58a3c2adc878b37d94f4b9d4-common_voice_fr_17757621
ERROR:root:Error happened with path=dump/raw/train_fr_sp/wav.scp, type=sound, id=408b4b5644b104d13576a770a413ddea2b7e0442db5a9e40717036a693d4a9c2932a4781d3bbe12aaf1152dd78e18a2e6b663b9bfbaa930ceabb2f8634023e9a-common_voice_fr_17893517
ERROR:root:Error happened with path=dump/raw/train_fr_sp/wav.scp, type=sound, id=9a01866b8e5a7dba7c36cb6e03bc5262558591cf15f5b4737e898e134059a4512347c535c669418faf48a8b505d63850e1d59458ee236e95f25457cc86d302a6-common_voice_fr_17326088
ERROR:root:Error happened with path=dump/raw/train_fr_sp/wav.scp, type=sound, id=55e2fff7c485fe3a2e007082efb181f8eeb10c8ffe2e8d14adb75b330a609f12a67731f84c2440d4153166239ae7a8d1a811100254fcd8ce2e340be0f0d5a7a2-common_voice_fr_18049999
Process SpawnProcess-2:
Traceback (most recent call last):
  File "/usr/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
    self.run()
  File "/usr/lib/python3.8/multiprocessing/process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "/project/ocean/junweih/espnet/espnet2/tasks/abs_task.py", line 1315, in main_worker
    cls.trainer.run(
  File "/project/ocean/junweih/espnet/espnet2/train/trainer.py", line 286, in run
    all_steps_are_invalid = cls.train_one_epoch(
  File "/project/ocean/junweih/espnet/espnet2/train/trainer.py", line 505, in train_one_epoch
    for iiter, (utt_id, batch) in enumerate(
  File "/project/ocean/junweih/espnet/espnet2/train/reporter.py", line 275, in measure_iter_time
    retval = next(iterator)
  File "/project/ocean/junweih/espnet/tools/python_user_base/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 521, in __next__
    data = self._next_data()
  File "/project/ocean/junweih/espnet/tools/python_user_base/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1203, in _next_data
    return self._process_data(data)
  File "/project/ocean/junweih/espnet/tools/python_user_base/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1229, in _process_data
    data.reraise()
"/project/ocean/junweih/espnet/tools/python_user_base/lib/python3.8/site-packages/torch/_utils.py", line 434, in reraise raise exception KeyError: Caught KeyError in DataLoader worker process 0. Original Traceback (most recent call last): File "/project/ocean/junweih/espnet/tools/python_user_base/lib/python3.8/site-packages/torch/utils/data/_utils/worker.py", line 287, in _worker_loop data = fetcher.fetch(index) File "/project/ocean/junweih/espnet/tools/python_user_base/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 49, in fetch data = [self.dataset[idx] for idx in possibly_batched_index] File "/project/ocean/junweih/espnet/tools/python_user_base/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 49, in data = [self.dataset[idx] for idx in possibly_batched_index] File "/project/ocean/junweih/espnet/espnet2/train/dataset.py", line 402, in __getitem__ value = loader[uid] File "/project/ocean/junweih/espnet/espnet2/train/dataset.py", line 53, in __getitem__ retval = self.loader[key] File "/project/ocean/junweih/espnet/espnet2/fileio/sound_scp.py", line 42, in __getitem__ wav = self.data[key] KeyError: '3e87e5286f7c1a68a4e66e6a09343b84e605717cf57dad8a8e2883d611c46effc157b2a839cdd59b89056c1e641d6e5632663df272bd9d37faf3261bcc3235b9-common_voice_fr_19864394' Process SpawnProcess-3: Traceback (most recent call last): File "/usr/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap self.run() File "/usr/lib/python3.8/multiprocessing/process.py", line 108, in run self._target(*self._args, **self._kwargs) File "/project/ocean/junweih/espnet/espnet2/tasks/abs_task.py", line 1315, in main_worker cls.trainer.run( File "/project/ocean/junweih/espnet/espnet2/train/trainer.py", line 286, in run all_steps_are_invalid = cls.train_one_epoch( File "/project/ocean/junweih/espnet/espnet2/train/trainer.py", line 505, in train_one_epoch for iiter, (utt_id, batch) in enumerate( File "/project/ocean/junweih/espnet/espnet2/train/reporter.py", line 275, in measure_iter_time retval = next(iterator) File "/project/ocean/junweih/espnet/tools/python_user_base/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 521, in __next__ data = self._next_data() File "/project/ocean/junweih/espnet/tools/python_user_base/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1203, in _next_data return self._process_data(data) File "/project/ocean/junweih/espnet/tools/python_user_base/lib/python3.8/site-packages/torch/utils/data/dataloader.py", line 1229, in _process_data data.reraise() File "/project/ocean/junweih/espnet/tools/python_user_base/lib/python3.8/site-packages/torch/_utils.py", line 434, in reraise raise exception KeyError: Caught KeyError in DataLoader worker process 0. 
KeyError: '3b573fa64a6fdd7f7c0557e4240f45b31cd77c9b2ecff3dec7469e414d80934145ab28b283b7b202ef96e0db82ecc7858fc79596ce2b0a5ca821d3b83849fcf3-common_voice_fr_19634323'
Process SpawnProcess-1:
[... traceback identical to SpawnProcess-2 above ...]
KeyError: '408b4b5644b104d13576a770a413ddea2b7e0442db5a9e40717036a693d4a9c2932a4781d3bbe12aaf1152dd78e18a2e6b663b9bfbaa930ceabb2f8634023e9a-common_voice_fr_17893517'
Traceback (most recent call last):
  File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "/project/ocean/junweih/espnet/espnet2/bin/asr_train.py", line 23, in <module>
    main()
  File "/project/ocean/junweih/espnet/espnet2/bin/asr_train.py", line 19, in main
    ASRTask.main(cmd=cmd)
  File "/project/ocean/junweih/espnet/espnet2/tasks/abs_task.py", line 1069, in main
    while not ProcessContext(processes, error_queues).join():
  File "/project/ocean/junweih/espnet/tools/python_user_base/lib/python3.8/site-packages/torch/multiprocessing/spawn.py", line 139, in join
    raise ProcessExitedException(
torch.multiprocessing.spawn.ProcessExitedException: process 1 terminated with exit code 1
/usr/lib/python3.8/multiprocessing/resource_tracker.py:216: UserWarning: resource_tracker: There appear to be 3 leaked semaphore objects to clean up at shutdown
  warnings.warn('resource_tracker: There appear to be %d '
# Accounting: time=54 threads=1
# Ended (code 1) at Fri Jun 10 14:05:02 EDT 2022, elapsed time 54 seconds
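The run dies before the first optimizer step: every rank's DataLoader raises a KeyError inside espnet2/fileio/sound_scp.py, which means the batch sampler's shape file (exp/asr_stats_raw_fr_bpe350_sp/train/speech_shape) lists utterance IDs that dump/raw/train_fr_sp/wav.scp cannot resolve; one common cause is a stale stats directory computed from an older version of the dump. A minimal cross-check sketch (a debugging aid, not part of ESPnet; it assumes the usual one-key-per-line scp/shape format and uses the paths from the log above):

# List utterance IDs the sampler will request but wav.scp cannot resolve,
# i.e. exactly the keys raised in the KeyErrors above.
def read_keys(path):
    with open(path, encoding="utf-8") as f:
        return {line.split(maxsplit=1)[0] for line in f if line.strip()}

shape_keys = read_keys("exp/asr_stats_raw_fr_bpe350_sp/train/speech_shape")
wav_keys = read_keys("dump/raw/train_fr_sp/wav.scp")

missing = sorted(shape_keys - wav_keys)
print(f"{len(missing)} shape-file keys missing from wav.scp")
for uid in missing[:10]:
    print(uid)

If any keys print, regenerating the stats stage (or filtering the shape files down to the keys present in wav.scp) should let training proceed.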