diff --git "a/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/train.2.log" "b/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/train.2.log" new file mode 100644--- /dev/null +++ "b/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/train.2.log" @@ -0,0 +1,4622 @@ +# Running on gpua006.delta.ncsa.illinois.edu +# Started at Tue Feb 13 14:05:16 CST 2024 +# SLURMD_NODENAME=gpua006 +# SLURM_CLUSTER_NAME=delta +# SLURM_CONF=/var/spool/slurmd/conf-cache/slurm.conf +# SLURM_CPUS_ON_NODE=64 +# SLURM_CPUS_PER_TASK=64 +# SLURM_EXPORT_ENV=PATH +# SLURM_GET_USER_ENV=1 +# SLURM_GPUS_ON_NODE=4 +# SLURM_GTIDS=0 +# SLURM_JOBID=2984114 +# SLURM_JOB_ACCOUNT=bbjs-delta-gpu +# SLURM_JOB_CPUS_PER_NODE='64(x16)' +# SLURM_JOB_END_TIME=1708027501 +# SLURM_JOB_GID=202 +# SLURM_JOB_GPUS=0,1,2,3 +# SLURM_JOB_ID=2984114 +# SLURM_JOB_NAME=exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/train.log +# SLURM_JOB_NODELIST='gpua[006-007,012,016,033,038-040,049,054-055,057,079-080,085,089]' +# SLURM_JOB_NUM_NODES=16 +# SLURM_JOB_PARTITION=gpuA100x4 +# SLURM_JOB_QOS=bbjs-delta-gpu +# SLURM_JOB_RESERVATION=bbjs +# SLURM_JOB_START_TIME=1707854701 +# SLURM_JOB_UID=68077 +# SLURM_JOB_USER=peng6 +# SLURM_LOCALID=0 +# SLURM_MEM_PER_NODE=240000 +# SLURM_MPI_TYPE=pmi2 +# SLURM_NNODES=16 +# SLURM_NODEID=0 +# SLURM_NODELIST='gpua[006-007,012,016,033,038-040,049,054-055,057,079-080,085,089]' +# SLURM_NODE_ALIASES='(null)' +# SLURM_OPEN_MODE=a +# SLURM_PRIO_PROCESS=0 +# SLURM_PROCID=0 +# SLURM_SUBMIT_DIR=/scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1 +# SLURM_SUBMIT_HOST=dt-login03.delta.ncsa.illinois.edu +# SLURM_TASKS_PER_NODE='1(x16)' +# SLURM_TASK_PID=855140 +# SLURM_TOPOLOGY_ADDR=ss00.ss05.gpua006 +# SLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.node +# SLURM_WORKING_CLUSTER=delta:dt-sched:6817:9984:109 +# srun --export=ALL python3 -m espnet2.bin.s2t_train --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text 
--valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +GpuFreq=control_disabled +GpuFreq=control_disabled +GpuFreq=control_disabled +GpuFreq=control_disabled +GpuFreq=control_disabled +GpuFreq=control_disabled +GpuFreq=control_disabled +GpuFreq=control_disabled +GpuFreq=control_disabled +GpuFreq=control_disabled +GpuFreq=control_disabled +GpuFreq=control_disabled +GpuFreq=control_disabled +GpuFreq=control_disabled +GpuFreq=control_disabled +GpuFreq=control_disabled +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark 
--valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type 
dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz 
--train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method 
file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file 
exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true 
--bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file 
exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir 
exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe 
--valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +/scratch/bbjs/peng6/espnet-owsm-ctc-2/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc-2/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file 
exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_6c365915-ffe7-49c8-afc4-247fd8766192 +[gpua006:0/64] 2024-02-13 14:08:06,199 (distributed_c10d:319) INFO: Added key: store_based_barrier_key:1 to store for rank: 0 +[gpua006:0/64] 2024-02-13 14:08:10,906 (distributed_c10d:353) INFO: Rank 0: Completed store-based barrier for key:store_based_barrier_key:1 with 64 nodes. +[gpua006:0/64] 2024-02-13 14:08:10,935 (s2t:420) INFO: Vocabulary size: 50002 +[gpua006:0/64] 2024-02-13 14:08:23,602 (abs_task:1270) INFO: pytorch.version=1.13.1, cuda.available=True, cudnn.version=8500, cudnn.benchmark=False, cudnn.deterministic=True +[gpua006:0/64] 2024-02-13 14:08:23,613 (abs_task:1271) INFO: Model structure: +ESPnetS2TCTCModel( + (frontend): DefaultFrontend( + (stft): Stft(n_fft=512, win_length=400, hop_length=160, center=True, normalized=False, onesided=True) + (frontend): Frontend() + (logmel): LogMel(sr=16000, n_fft=512, n_mels=80, fmin=0, fmax=8000.0, htk=False) + ) + (specaug): SpecAug( + (freq_mask): MaskAlongAxis(mask_width_range=[0, 27], num_mask=2, axis=freq) + (time_mask): MaskAlongAxisVariableMaxWidth(mask_width_ratio_range=[0.0, 0.05], num_mask=10, axis=time) + ) + (normalize): GlobalMVN(stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz, norm_means=True, norm_vars=True) + (encoder): EBranchformerCTCEncoder( + (embed): Conv2dSubsampling8( + (conv): Sequential( + (0): Conv2d(1, 1024, kernel_size=(3, 3), stride=(2, 2)) + (1): ReLU() + (2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(2, 2)) + (3): ReLU() + (4): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(2, 2)) + (5): ReLU() + ) + (out): Linear(in_features=9216, out_features=1024, bias=True) + (pos_enc): PositionalEncoding( + (dropout): Dropout(p=0.1, inplace=False) + ) + ) + (encoders): MultiSequential( + (0): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): 
GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (1): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (2): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): 
Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (3): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, 
inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (4): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (5): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + 
(w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (6): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (7): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): 
Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (8): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, 
bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (9): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (10): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + 
(dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (11): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (12): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): 
Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (13): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): 
Linear(in_features=2048, out_features=1024, bias=True) + ) + (14): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (15): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + 
(feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (16): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (17): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, 
inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (18): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (19): 
EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (20): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), 
eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (21): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (22): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, 
out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (23): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (24): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): 
Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (25): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), 
eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (26): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + ) + (after_norm): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (conditioning_layer): Linear(in_features=50002, out_features=1024, bias=True) + ) + (prompt_encoder): TransformerEncoder( + (encoders): MultiSequential( + (0): EncoderLayer( + (self_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=512, out_features=512, bias=True) + (linear_k): Linear(in_features=512, out_features=512, bias=True) + (linear_v): Linear(in_features=512, out_features=512, bias=True) + (linear_out): Linear(in_features=512, out_features=512, bias=True) + (dropout): Identity() + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=512, out_features=2048, bias=True) + (w_2): Linear(in_features=2048, out_features=512, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): ReLU() + ) + (norm1): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (norm2): LayerNorm((512,), 
eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + ) + (1): EncoderLayer( + (self_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=512, out_features=512, bias=True) + (linear_k): Linear(in_features=512, out_features=512, bias=True) + (linear_v): Linear(in_features=512, out_features=512, bias=True) + (linear_out): Linear(in_features=512, out_features=512, bias=True) + (dropout): Identity() + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=512, out_features=2048, bias=True) + (w_2): Linear(in_features=2048, out_features=512, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): ReLU() + ) + (norm1): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (norm2): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + ) + (2): EncoderLayer( + (self_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=512, out_features=512, bias=True) + (linear_k): Linear(in_features=512, out_features=512, bias=True) + (linear_v): Linear(in_features=512, out_features=512, bias=True) + (linear_out): Linear(in_features=512, out_features=512, bias=True) + (dropout): Identity() + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=512, out_features=2048, bias=True) + (w_2): Linear(in_features=2048, out_features=512, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): ReLU() + ) + (norm1): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (norm2): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + ) + (3): EncoderLayer( + (self_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=512, out_features=512, bias=True) + (linear_k): Linear(in_features=512, out_features=512, bias=True) + (linear_v): Linear(in_features=512, out_features=512, bias=True) + (linear_out): Linear(in_features=512, out_features=512, bias=True) + (dropout): Identity() + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=512, out_features=2048, bias=True) + (w_2): Linear(in_features=2048, out_features=512, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): ReLU() + ) + (norm1): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (norm2): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + ) + ) + (after_norm): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + ) + (embed): Embedding(50002, 512) + (pos_enc): PositionalEncoding( + (dropout): Dropout(p=0.0, inplace=False) + ) + (embed_proj): Linear(in_features=512, out_features=1024, bias=True) + (prompt_proj): Linear(in_features=512, out_features=1024, bias=True) + (ctc): CTC( + (ctc_lo): Linear(in_features=1024, out_features=50002, bias=True) + (ctc_loss): CTCLoss() + ) +) + +Model summary: + Class Name: ESPnetS2TCTCModel + Total Number of model parameters: 1.01 B + Number of trainable parameters: 1.01 B (100.0%) + Size: 4.02 GB + Type: torch.float32 +[gpua006:0/64] 2024-02-13 14:08:23,613 (abs_task:1274) INFO: Optimizer: +AdamW ( +Parameter Group 0 + amsgrad: False + betas: [0.9, 0.98] + capturable: False + eps: 1e-06 + foreach: None + initial_lr: 0.0002 + lr: 1.6666666666666667e-09 + maximize: False + weight_decay: 0.0 +) +[gpua006:0/64] 2024-02-13 14:08:23,613 (abs_task:1275) INFO: Scheduler: PiecewiseLinearWarmupLR(warmup_steps_list=[0, 30000, 60000], warmup_lr_list=[0.0, 5e-05, 0.0002]) +[gpua006:0/64] 2024-02-13 14:08:23,625 (abs_task:1284) INFO: Saving the 
configuration in exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/config.yaml +[gpua006:0/64] 2024-02-13 14:08:29,475 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 14:08:30,471 (abs_task:1660) INFO: [valid] dataset: +ESPnetDataset( + speech: {"path": "dump/raw/dev_v3/wav.scp", "type": "kaldi_ark"} + text_prev: {"path": "dump/raw/dev_v3/text.prev", "type": "text"} + text_ctc: {"path": "dump/raw/dev_v3/text.ctc", "type": "text"} + text: {"path": "dump/raw/dev_v3/text", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 14:08:30,471 (abs_task:1661) INFO: [valid] Batch sampler: UnsortedBatchSampler(N-batch=4671, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/valid/speech_shape, +[gpua006:0/64] 2024-02-13 14:08:30,484 (abs_task:1662) INFO: [valid] mini-batch sizes summary: N-batch=4671, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 14:09:00,984 (trainer:167) INFO: The training was resumed using exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/checkpoint.pth +gpua006:855218:855218 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.6<0> +gpua006:855218:855218 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua006:855218:855218 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua006:855218:855218 [0] NCCL INFO cudaDriverVersion 12020 +NCCL version 2.14.3+cuda11.7 +[gpua006:0/64] 2024-02-13 14:09:11,133 (trainer:301) INFO: 38/45epoch started +[gpua006:0/64] 2024-02-13 14:09:11,173 (multiple_iter_factory:32) INFO: Building 0th iter-factory... +[gpua006:0/64] 2024-02-13 14:09:29,469 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 14:09:32,864 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.7", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.7", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.7", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.7", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 14:09:32,864 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.7, +[gpua006:0/64] 2024-02-13 14:09:32,867 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +gpua016:758973:758973 [0] NCCL INFO cudaDriverVersion 12020 +gpua016:758973:758973 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.16<0> +gpua016:758973:758973 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua016:758973:758973 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua016:758973:759044 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua016:758973:759044 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua016:758973:759044 [0] NCCL INFO Using network AWS Libfabric +gpua016:758973:759044 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua016:758973:759044 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
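The scheduler reported above, PiecewiseLinearWarmupLR(warmup_steps_list=[0, 30000, 60000], warmup_lr_list=[0.0, 5e-05, 0.0002]), ramps the learning rate linearly between the listed breakpoints; the lr printed in the AdamW dump, 1.6666666666666667e-09, is exactly 5e-05 / 30000, i.e. the schedule evaluated one step into the first segment. A minimal sketch of that interpolation (a standalone re-implementation for illustration, not ESPnet's actual class):

# Hedged sketch: plain piecewise-linear interpolation over the logged breakpoints.
# ESPnet's PiecewiseLinearWarmupLR may differ in edge-case handling.
def warmup_lr(step,
              warmup_steps_list=(0, 30000, 60000),
              warmup_lr_list=(0.0, 5e-05, 0.0002)):
    if step >= warmup_steps_list[-1]:
        return warmup_lr_list[-1]
    for (s0, s1), (lr0, lr1) in zip(
            zip(warmup_steps_list, warmup_steps_list[1:]),
            zip(warmup_lr_list, warmup_lr_list[1:])):
        if s0 <= step < s1:
            return lr0 + (lr1 - lr0) * (step - s0) / (s1 - s0)
    return warmup_lr_list[0]

print(warmup_lr(1))      # ~1.67e-09, matching the lr in the optimizer dump
print(warmup_lr(30000))  # 5e-05
print(warmup_lr(60000))  # 0.0002 (the configured peak, i.e. initial_lr)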
+gpua016:758973:759044 [0] NCCL INFO Trees [0] 13/-1/-1->12->8 [1] 13/4/-1->12->28 +gpua016:758973:759044 [0] NCCL INFO Channel 00/0 : 11[c7000] -> 12[7000] [receive] via NET/AWS Libfabric/1 +gpua016:758973:759044 [0] NCCL INFO Channel 01/0 : 11[c7000] -> 12[7000] [receive] via NET/AWS Libfabric/1 +gpua016:758973:759044 [0] NCCL INFO Channel 00/0 : 12[7000] -> 13[46000] via P2P/IPC/read +gpua016:758973:759044 [0] NCCL INFO Channel 01/0 : 12[7000] -> 13[46000] via P2P/IPC/read +gpua016:758973:759044 [0] NCCL INFO Connected all rings +gpua016:758973:759044 [0] NCCL INFO Channel 00/0 : 8[7000] -> 12[7000] [receive] via NET/AWS Libfabric/1 +gpua016:758973:759044 [0] NCCL INFO Channel 01/0 : 4[7000] -> 12[7000] [receive] via NET/AWS Libfabric/1 +gpua016:758973:759044 [0] NCCL INFO Channel 01/0 : 12[7000] -> 28[7000] [send] via NET/AWS Libfabric/1 +gpua016:758973:759044 [0] NCCL INFO Channel 01/0 : 28[7000] -> 12[7000] [receive] via NET/AWS Libfabric/1 +gpua016:758973:759044 [0] NCCL INFO Channel 01/0 : 12[7000] -> 4[7000] [send] via NET/AWS Libfabric/1 +gpua016:758973:759044 [0] NCCL INFO Channel 00/0 : 12[7000] -> 8[7000] [send] via NET/AWS Libfabric/1 +gpua016:758973:759044 [0] NCCL INFO Connected all trees +gpua016:758973:759044 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua016:758973:759044 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua016:758973:759044 [0] NCCL INFO comm 0x5594dd9db9e0 rank 12 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua016:758976:758976 [3] NCCL INFO cudaDriverVersion 12020 +gpua016:758976:758976 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.16<0> +gpua016:758976:758976 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua016:758976:758976 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua016:758976:759045 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua016:758976:759045 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua016:758976:759045 [3] NCCL INFO Using network AWS Libfabric +gpua016:758976:759045 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua016:758976:759045 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua016:758976:759045 [3] NCCL INFO Trees [0] -1/-1/-1->15->14 [1] -1/-1/-1->15->14 +gpua016:758976:759045 [3] NCCL INFO Channel 00/0 : 15[c7000] -> 16[7000] [send] via NET/AWS Libfabric/1 +gpua016:758976:759045 [3] NCCL INFO Channel 01/0 : 15[c7000] -> 16[7000] [send] via NET/AWS Libfabric/1 +gpua016:758976:759045 [3] NCCL INFO Connected all rings +gpua016:758976:759045 [3] NCCL INFO Channel 00/0 : 15[c7000] -> 14[85000] via P2P/IPC/read +gpua016:758976:759045 [3] NCCL INFO Channel 01/0 : 15[c7000] -> 14[85000] via P2P/IPC/read +gpua016:758976:759045 [3] NCCL INFO Connected all trees +gpua016:758976:759045 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua016:758976:759045 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua016:758976:759045 [3] NCCL INFO comm 0x55d8cff3f7a0 rank 15 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua016:758974:758974 [1] NCCL INFO cudaDriverVersion 12020 +gpua016:758974:758974 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.16<0> +gpua016:758974:758974 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua016:758974:758974 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). 
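The model summary above (1.01 B trainable parameters, torch.float32) is consistent with a back-of-the-envelope size check at four bytes per float32 parameter; the small gap to the reported 4.02 GB is just rounding of the parameter count to two significant digits:

# Rough size check for the "Model summary" above.
n_params = 1.01e9          # "Total Number of model parameters: 1.01 B"
bytes_per_param = 4        # torch.float32
print(f"{n_params * bytes_per_param / 1e9:.2f} GB")  # ~4.04 GB, close to the logged 4.02 GB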
+gpua016:758974:759043 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua016:758974:759043 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua016:758974:759043 [1] NCCL INFO Using network AWS Libfabric +gpua016:758974:759043 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua016:758974:759043 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua016:758974:759043 [1] NCCL INFO Trees [0] 14/-1/-1->13->12 [1] 14/20/-1->13->12 +gpua016:758974:759043 [1] NCCL INFO Channel 00/0 : 13[46000] -> 14[85000] via P2P/IPC/read +gpua016:758974:759043 [1] NCCL INFO Channel 01/0 : 13[46000] -> 14[85000] via P2P/IPC/read +gpua016:758974:759043 [1] NCCL INFO Connected all rings +gpua016:758974:759043 [1] NCCL INFO Channel 01/0 : 13[46000] -> 20[7000] [send] via NET/AWS Libfabric/1 +gpua016:758974:759043 [1] NCCL INFO Channel 01/0 : 20[7000] -> 13[46000] [receive] via NET/AWS Libfabric/1 +gpua016:758974:759043 [1] NCCL INFO Channel 00/0 : 13[46000] -> 12[7000] via P2P/IPC/read +gpua016:758974:759043 [1] NCCL INFO Channel 01/0 : 13[46000] -> 12[7000] via P2P/IPC/read +gpua016:758974:759043 [1] NCCL INFO Connected all trees +gpua016:758974:759043 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua016:758974:759043 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua016:758974:759043 [1] NCCL INFO comm 0x5612d4de9cc0 rank 13 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua016:758975:758975 [2] NCCL INFO cudaDriverVersion 12020 +gpua016:758975:758975 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.16<0> +gpua016:758975:758975 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua016:758975:758975 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua016:758975:759046 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua016:758975:759046 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua016:758975:759046 [2] NCCL INFO Using network AWS Libfabric +gpua016:758975:759046 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua016:758975:759046 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua016:758975:759046 [2] NCCL INFO Trees [0] 15/-1/-1->14->13 [1] 15/-1/-1->14->13 +gpua016:758975:759046 [2] NCCL INFO Channel 00/0 : 14[85000] -> 15[c7000] via P2P/IPC/read +gpua016:758975:759046 [2] NCCL INFO Channel 01/0 : 14[85000] -> 15[c7000] via P2P/IPC/read +gpua016:758975:759046 [2] NCCL INFO Connected all rings +gpua016:758975:759046 [2] NCCL INFO Channel 00/0 : 14[85000] -> 13[46000] via P2P/IPC/read +gpua016:758975:759046 [2] NCCL INFO Channel 01/0 : 14[85000] -> 13[46000] via P2P/IPC/read +gpua016:758975:759046 [2] NCCL INFO Connected all trees +gpua016:758975:759046 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua016:758975:759046 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua016:758975:759046 [2] NCCL INFO comm 0x55997e4e8f80 rank 14 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua055:296307:296307 [3] NCCL INFO cudaDriverVersion 12020 +gpua055:296307:296307 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.55<0> +gpua055:296307:296307 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua055:296307:296307 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). 
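Each EBranchformerEncoderLayer in the module dump above ends with a depthwise_conv_fusion Conv1d(2048, 2048, kernel_size=31, groups=2048) and a merge_proj Linear(2048, 1024), which combine the self-attention branch and the ConvolutionalGatingMLP branch back into the 1024-dim stream. A simplified shape-level sketch of that merge follows; the exact residual connections and dropout placement are ESPnet's, which this illustrative reading does not claim to reproduce:

import torch
import torch.nn as nn

# Shapes taken from the module dump above; the wiring is an assumed, simplified
# reading of how the two 1024-dim branches are concatenated and merged.
size = 1024
depthwise_conv_fusion = nn.Conv1d(2 * size, 2 * size, kernel_size=31,
                                  stride=1, padding=15, groups=2 * size)
merge_proj = nn.Linear(2 * size, size)

x_attn = torch.randn(2, 50, size)    # (batch, time, dim) from the attn branch
x_cgmlp = torch.randn(2, 50, size)   # (batch, time, dim) from the cgmlp branch

x_concat = torch.cat([x_attn, x_cgmlp], dim=-1)           # (2, 50, 2048)
x_conv = depthwise_conv_fusion(x_concat.transpose(1, 2))  # depthwise conv over time
x_out = merge_proj(x_concat + x_conv.transpose(1, 2))     # (2, 50, 1024)
print(x_out.shape)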
+gpua055:296307:296369 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua055:296307:296369 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua055:296307:296369 [3] NCCL INFO Using network AWS Libfabric +gpua055:296307:296369 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua055:296307:296369 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua055:296307:296369 [3] NCCL INFO Trees [0] -1/-1/-1->43->42 [1] -1/-1/-1->43->42 +gpua055:296307:296369 [3] NCCL INFO Channel 00/0 : 43[c7000] -> 44[7000] [send] via NET/AWS Libfabric/1 +gpua055:296307:296369 [3] NCCL INFO Channel 01/0 : 43[c7000] -> 44[7000] [send] via NET/AWS Libfabric/1 +gpua055:296307:296369 [3] NCCL INFO Connected all rings +gpua055:296307:296369 [3] NCCL INFO Channel 00/0 : 43[c7000] -> 42[85000] via P2P/IPC/read +gpua055:296307:296369 [3] NCCL INFO Channel 01/0 : 43[c7000] -> 42[85000] via P2P/IPC/read +gpua055:296307:296369 [3] NCCL INFO Connected all trees +gpua055:296307:296369 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua055:296307:296369 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua055:296307:296369 [3] NCCL INFO comm 0x5598ea6186a0 rank 43 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua055:296306:296306 [2] NCCL INFO cudaDriverVersion 12020 +gpua055:296306:296306 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.55<0> +gpua055:296306:296306 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua055:296306:296306 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua055:296306:296370 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua055:296306:296370 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua055:296306:296370 [2] NCCL INFO Using network AWS Libfabric +gpua055:296306:296370 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua055:296306:296370 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua055:296306:296370 [2] NCCL INFO Trees [0] 43/-1/-1->42->41 [1] 43/-1/-1->42->41 +gpua055:296306:296370 [2] NCCL INFO Channel 00/0 : 42[85000] -> 43[c7000] via P2P/IPC/read +gpua055:296306:296370 [2] NCCL INFO Channel 01/0 : 42[85000] -> 43[c7000] via P2P/IPC/read +gpua055:296306:296370 [2] NCCL INFO Connected all rings +gpua055:296306:296370 [2] NCCL INFO Channel 00/0 : 42[85000] -> 41[46000] via P2P/IPC/read +gpua055:296306:296370 [2] NCCL INFO Channel 01/0 : 42[85000] -> 41[46000] via P2P/IPC/read +gpua055:296306:296370 [2] NCCL INFO Connected all trees +gpua055:296306:296370 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua055:296306:296370 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua055:296306:296370 [2] NCCL INFO comm 0x5637692d0b20 rank 42 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua055:296305:296305 [1] NCCL INFO cudaDriverVersion 12020 +gpua055:296305:296305 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.55<0> +gpua055:296305:296305 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua055:296305:296305 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua055:296305:296368 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua055:296305:296368 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua055:296305:296368 [1] NCCL INFO Using network AWS Libfabric +gpua055:296305:296368 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua055:296305:296368 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua055:296305:296368 [1] NCCL INFO Trees [0] 42/36/-1->41->40 [1] 42/-1/-1->41->40 +gpua055:296305:296368 [1] NCCL INFO Channel 00/0 : 41[46000] -> 42[85000] via P2P/IPC/read +gpua055:296305:296368 [1] NCCL INFO Channel 01/0 : 41[46000] -> 42[85000] via P2P/IPC/read +gpua055:296305:296368 [1] NCCL INFO Connected all rings +gpua055:296305:296368 [1] NCCL INFO Channel 00/0 : 36[7000] -> 41[46000] [receive] via NET/AWS Libfabric/1 +gpua055:296305:296368 [1] NCCL INFO Channel 00/0 : 41[46000] -> 36[7000] [send] via NET/AWS Libfabric/1 +gpua055:296305:296368 [1] NCCL INFO Channel 00/0 : 41[46000] -> 40[7000] via P2P/IPC/read +gpua055:296305:296368 [1] NCCL INFO Channel 01/0 : 41[46000] -> 40[7000] via P2P/IPC/read +gpua055:296305:296368 [1] NCCL INFO Connected all trees +gpua055:296305:296368 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua055:296305:296368 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua055:296305:296368 [1] NCCL INFO comm 0x5573284d5a70 rank 41 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua055:296304:296304 [0] NCCL INFO cudaDriverVersion 12020 +gpua055:296304:296304 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.55<0> +gpua055:296304:296304 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua055:296304:296304 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua055:296304:296371 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua055:296304:296371 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua055:296304:296371 [0] NCCL INFO Using network AWS Libfabric +gpua055:296304:296371 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua055:296304:296371 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua055:296304:296371 [0] NCCL INFO Trees [0] 41/44/-1->40->49 [1] 41/-1/-1->40->37 +gpua055:296304:296371 [0] NCCL INFO Channel 00/0 : 39[c7000] -> 40[7000] [receive] via NET/AWS Libfabric/1 +gpua055:296304:296371 [0] NCCL INFO Channel 01/0 : 39[c7000] -> 40[7000] [receive] via NET/AWS Libfabric/1 +gpua055:296304:296371 [0] NCCL INFO Channel 00/0 : 40[7000] -> 41[46000] via P2P/IPC/read +gpua055:296304:296371 [0] NCCL INFO Channel 01/0 : 40[7000] -> 41[46000] via P2P/IPC/read +gpua055:296304:296371 [0] NCCL INFO Connected all rings +gpua055:296304:296371 [0] NCCL INFO Channel 01/0 : 37[46000] -> 40[7000] [receive] via NET/AWS Libfabric/1 +gpua055:296304:296371 [0] NCCL INFO Channel 00/0 : 40[7000] -> 44[7000] [send] via NET/AWS Libfabric/1 +gpua055:296304:296371 [0] NCCL INFO Channel 00/0 : 40[7000] -> 49[46000] [send] via NET/AWS Libfabric/1 +gpua055:296304:296371 [0] NCCL INFO Channel 00/0 : 49[46000] -> 40[7000] [receive] via NET/AWS Libfabric/1 +gpua055:296304:296371 [0] NCCL INFO Channel 00/0 : 44[7000] -> 40[7000] [receive] via NET/AWS Libfabric/1 +gpua055:296304:296371 [0] NCCL INFO Channel 01/0 : 40[7000] -> 37[46000] [send] via NET/AWS Libfabric/1 +gpua055:296304:296371 [0] NCCL INFO Connected all trees +gpua055:296304:296371 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua055:296304:296371 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua055:296304:296371 [0] NCCL INFO comm 0x55a8fae9f100 rank 40 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua079:3871266:3871266 [3] NCCL INFO cudaDriverVersion 12020 +gpua079:3871266:3871266 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.79<0> +gpua079:3871266:3871266 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. 
+gpua079:3871266:3871266 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua079:3871266:3871335 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua079:3871266:3871335 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua079:3871266:3871335 [3] NCCL INFO Using network AWS Libfabric +gpua079:3871266:3871335 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua079:3871266:3871335 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua079:3871266:3871335 [3] NCCL INFO Trees [0] -1/-1/-1->51->50 [1] -1/-1/-1->51->50 +gpua079:3871266:3871335 [3] NCCL INFO Channel 00/0 : 51[c7000] -> 52[7000] [send] via NET/AWS Libfabric/1 +gpua079:3871266:3871335 [3] NCCL INFO Channel 01/0 : 51[c7000] -> 52[7000] [send] via NET/AWS Libfabric/1 +gpua079:3871266:3871335 [3] NCCL INFO Connected all rings +gpua079:3871266:3871335 [3] NCCL INFO Channel 00/0 : 51[c7000] -> 50[85000] via P2P/IPC/read +gpua079:3871266:3871335 [3] NCCL INFO Channel 01/0 : 51[c7000] -> 50[85000] via P2P/IPC/read +gpua079:3871266:3871335 [3] NCCL INFO Connected all trees +gpua079:3871266:3871335 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua079:3871266:3871335 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua079:3871266:3871335 [3] NCCL INFO comm 0x55b33cb2b910 rank 51 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua079:3871264:3871264 [1] NCCL INFO cudaDriverVersion 12020 +gpua079:3871264:3871264 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.79<0> +gpua079:3871264:3871264 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua079:3871264:3871264 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua079:3871264:3871334 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua079:3871264:3871334 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua079:3871264:3871334 [1] NCCL INFO Using network AWS Libfabric +gpua079:3871264:3871334 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua079:3871264:3871334 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua079:3871264:3871334 [1] NCCL INFO Trees [0] 50/40/-1->49->48 [1] 50/-1/-1->49->48 +gpua079:3871264:3871334 [1] NCCL INFO Channel 00/0 : 49[46000] -> 50[85000] via P2P/IPC/read +gpua079:3871264:3871334 [1] NCCL INFO Channel 01/0 : 49[46000] -> 50[85000] via P2P/IPC/read +gpua079:3871264:3871334 [1] NCCL INFO Connected all rings +gpua079:3871264:3871334 [1] NCCL INFO Channel 00/0 : 40[7000] -> 49[46000] [receive] via NET/AWS Libfabric/1 +gpua079:3871264:3871334 [1] NCCL INFO Channel 00/0 : 49[46000] -> 40[7000] [send] via NET/AWS Libfabric/1 +gpua079:3871264:3871334 [1] NCCL INFO Channel 00/0 : 49[46000] -> 48[7000] via P2P/IPC/read +gpua079:3871264:3871334 [1] NCCL INFO Channel 01/0 : 49[46000] -> 48[7000] via P2P/IPC/read +gpua079:3871264:3871334 [1] NCCL INFO Connected all trees +gpua079:3871264:3871334 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua079:3871264:3871334 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua079:3871264:3871334 [1] NCCL INFO comm 0x55b11a8bfb60 rank 49 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua079:3871265:3871265 [2] NCCL INFO cudaDriverVersion 12020 +gpua079:3871265:3871265 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.79<0> +gpua079:3871265:3871265 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua079:3871265:3871265 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). 
+gpua079:3871265:3871336 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua079:3871265:3871336 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua079:3871265:3871336 [2] NCCL INFO Using network AWS Libfabric +gpua079:3871265:3871336 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua079:3871265:3871336 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua079:3871265:3871336 [2] NCCL INFO Trees [0] 51/-1/-1->50->49 [1] 51/-1/-1->50->49 +gpua079:3871265:3871336 [2] NCCL INFO Channel 00/0 : 50[85000] -> 51[c7000] via P2P/IPC/read +gpua079:3871265:3871336 [2] NCCL INFO Channel 01/0 : 50[85000] -> 51[c7000] via P2P/IPC/read +gpua079:3871265:3871336 [2] NCCL INFO Connected all rings +gpua079:3871265:3871336 [2] NCCL INFO Channel 00/0 : 50[85000] -> 49[46000] via P2P/IPC/read +gpua079:3871265:3871336 [2] NCCL INFO Channel 01/0 : 50[85000] -> 49[46000] via P2P/IPC/read +gpua079:3871265:3871336 [2] NCCL INFO Connected all trees +gpua079:3871265:3871336 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua079:3871265:3871336 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua079:3871265:3871336 [2] NCCL INFO comm 0x55803f8d3b80 rank 50 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua079:3871263:3871263 [0] NCCL INFO cudaDriverVersion 12020 +gpua079:3871263:3871263 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.79<0> +gpua079:3871263:3871263 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua079:3871263:3871263 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua079:3871263:3871337 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua079:3871263:3871337 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua079:3871263:3871337 [0] NCCL INFO Using network AWS Libfabric +gpua079:3871263:3871337 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua079:3871263:3871337 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua079:3871263:3871337 [0] NCCL INFO Trees [0] 49/56/-1->48->32 [1] 49/-1/-1->48->52 +gpua079:3871263:3871337 [0] NCCL INFO Channel 00/0 : 47[c7000] -> 48[7000] [receive] via NET/AWS Libfabric/1 +gpua079:3871263:3871337 [0] NCCL INFO Channel 01/0 : 47[c7000] -> 48[7000] [receive] via NET/AWS Libfabric/1 +gpua079:3871263:3871337 [0] NCCL INFO Channel 00/0 : 48[7000] -> 49[46000] via P2P/IPC/read +gpua079:3871263:3871337 [0] NCCL INFO Channel 01/0 : 48[7000] -> 49[46000] via P2P/IPC/read +gpua079:3871263:3871337 [0] NCCL INFO Connected all rings +gpua079:3871263:3871337 [0] NCCL INFO Channel 01/0 : 48[7000] -> 52[7000] [send] via NET/AWS Libfabric/1 +gpua079:3871263:3871337 [0] NCCL INFO Channel 00/0 : 48[7000] -> 56[7000] [send] via NET/AWS Libfabric/1 +gpua079:3871263:3871337 [0] NCCL INFO Channel 00/0 : 32[7000] -> 48[7000] [receive] via NET/AWS Libfabric/1 +gpua079:3871263:3871337 [0] NCCL INFO Channel 00/0 : 48[7000] -> 32[7000] [send] via NET/AWS Libfabric/1 +gpua079:3871263:3871337 [0] NCCL INFO Channel 00/0 : 56[7000] -> 48[7000] [receive] via NET/AWS Libfabric/1 +gpua079:3871263:3871337 [0] NCCL INFO Channel 01/0 : 52[7000] -> 48[7000] [receive] via NET/AWS Libfabric/1 +gpua079:3871263:3871337 [0] NCCL INFO Connected all trees +gpua079:3871263:3871337 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua079:3871263:3871337 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua079:3871263:3871337 [0] NCCL INFO comm 0x55b0de6a4860 rank 48 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua054:596841:596841 [2] NCCL INFO cudaDriverVersion 12020 +gpua054:596841:596841 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.54<0> +gpua054:596841:596841 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua054:596841:596841 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua054:596841:596912 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua054:596841:596912 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua054:596841:596912 [2] NCCL INFO Using network AWS Libfabric +gpua054:596841:596912 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua054:596841:596912 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua054:596841:596912 [2] NCCL INFO Trees [0] 39/-1/-1->38->37 [1] 39/-1/-1->38->37 +gpua054:596841:596912 [2] NCCL INFO Channel 00/0 : 38[85000] -> 39[c7000] via P2P/IPC/read +gpua054:596841:596912 [2] NCCL INFO Channel 01/0 : 38[85000] -> 39[c7000] via P2P/IPC/read +gpua054:596841:596912 [2] NCCL INFO Connected all rings +gpua054:596841:596912 [2] NCCL INFO Channel 00/0 : 38[85000] -> 37[46000] via P2P/IPC/read +gpua054:596841:596912 [2] NCCL INFO Channel 01/0 : 38[85000] -> 37[46000] via P2P/IPC/read +gpua054:596841:596912 [2] NCCL INFO Connected all trees +gpua054:596841:596912 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua054:596841:596912 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua054:596841:596912 [2] NCCL INFO comm 0x55695e76eca0 rank 38 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua054:596840:596840 [1] NCCL INFO cudaDriverVersion 12020 +gpua054:596840:596840 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.54<0> +gpua054:596840:596840 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua054:596840:596840 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). 
+gpua054:596840:596911 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua054:596840:596911 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua054:596840:596911 [1] NCCL INFO Using network AWS Libfabric +gpua054:596840:596911 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua054:596840:596911 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua054:596840:596911 [1] NCCL INFO Trees [0] 38/-1/-1->37->36 [1] 38/40/-1->37->36 +gpua054:596840:596911 [1] NCCL INFO Channel 00/0 : 37[46000] -> 38[85000] via P2P/IPC/read +gpua054:596840:596911 [1] NCCL INFO Channel 01/0 : 37[46000] -> 38[85000] via P2P/IPC/read +gpua054:596840:596911 [1] NCCL INFO Connected all rings +gpua054:596840:596911 [1] NCCL INFO Channel 01/0 : 37[46000] -> 40[7000] [send] via NET/AWS Libfabric/1 +gpua054:596840:596911 [1] NCCL INFO Channel 01/0 : 40[7000] -> 37[46000] [receive] via NET/AWS Libfabric/1 +gpua054:596840:596911 [1] NCCL INFO Channel 00/0 : 37[46000] -> 36[7000] via P2P/IPC/read +gpua054:596840:596911 [1] NCCL INFO Channel 01/0 : 37[46000] -> 36[7000] via P2P/IPC/read +gpua054:596840:596911 [1] NCCL INFO Connected all trees +gpua054:596840:596911 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua054:596840:596911 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua054:596840:596911 [1] NCCL INFO comm 0x5642f16ef260 rank 37 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua054:596842:596842 [3] NCCL INFO cudaDriverVersion 12020 +gpua054:596842:596842 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.54<0> +gpua054:596842:596842 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua054:596842:596842 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua054:596842:596914 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua054:596842:596914 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua054:596842:596914 [3] NCCL INFO Using network AWS Libfabric +gpua054:596842:596914 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua054:596842:596914 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua054:596842:596914 [3] NCCL INFO Trees [0] -1/-1/-1->39->38 [1] -1/-1/-1->39->38 +gpua054:596842:596914 [3] NCCL INFO Channel 00/0 : 39[c7000] -> 40[7000] [send] via NET/AWS Libfabric/1 +gpua054:596842:596914 [3] NCCL INFO Channel 01/0 : 39[c7000] -> 40[7000] [send] via NET/AWS Libfabric/1 +gpua054:596842:596914 [3] NCCL INFO Connected all rings +gpua054:596842:596914 [3] NCCL INFO Channel 00/0 : 39[c7000] -> 38[85000] via P2P/IPC/read +gpua054:596842:596914 [3] NCCL INFO Channel 01/0 : 39[c7000] -> 38[85000] via P2P/IPC/read +gpua054:596842:596914 [3] NCCL INFO Connected all trees +gpua054:596842:596914 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua054:596842:596914 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua054:596842:596914 [3] NCCL INFO comm 0x563e90153360 rank 39 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua054:596839:596839 [0] NCCL INFO cudaDriverVersion 12020 +gpua054:596839:596839 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.54<0> +gpua054:596839:596839 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua054:596839:596839 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). 
+gpua054:596839:596913 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua054:596839:596913 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua054:596839:596913 [0] NCCL INFO Using network AWS Libfabric +gpua054:596839:596913 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua054:596839:596913 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua054:596839:596913 [0] NCCL INFO Trees [0] 37/-1/-1->36->41 [1] 37/32/-1->36->44 +gpua054:596839:596913 [0] NCCL INFO Channel 00/0 : 35[c7000] -> 36[7000] [receive] via NET/AWS Libfabric/1 +gpua054:596839:596913 [0] NCCL INFO Channel 01/0 : 35[c7000] -> 36[7000] [receive] via NET/AWS Libfabric/1 +gpua054:596839:596913 [0] NCCL INFO Channel 00/0 : 36[7000] -> 37[46000] via P2P/IPC/read +gpua054:596839:596913 [0] NCCL INFO Channel 01/0 : 36[7000] -> 37[46000] via P2P/IPC/read +gpua054:596839:596913 [0] NCCL INFO Connected all rings +gpua054:596839:596913 [0] NCCL INFO Channel 01/0 : 32[7000] -> 36[7000] [receive] via NET/AWS Libfabric/1 +gpua054:596839:596913 [0] NCCL INFO Channel 00/0 : 36[7000] -> 41[46000] [send] via NET/AWS Libfabric/1 +gpua054:596839:596913 [0] NCCL INFO Channel 01/0 : 36[7000] -> 44[7000] [send] via NET/AWS Libfabric/1 +gpua054:596839:596913 [0] NCCL INFO Channel 01/0 : 44[7000] -> 36[7000] [receive] via NET/AWS Libfabric/1 +gpua054:596839:596913 [0] NCCL INFO Channel 00/0 : 41[46000] -> 36[7000] [receive] via NET/AWS Libfabric/1 +gpua054:596839:596913 [0] NCCL INFO Channel 01/0 : 36[7000] -> 32[7000] [send] via NET/AWS Libfabric/1 +gpua054:596839:596913 [0] NCCL INFO Connected all trees +gpua054:596839:596913 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua054:596839:596913 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua054:596839:596913 [0] NCCL INFO comm 0x563ba508aef0 rank 36 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua038:354889:354889 [2] NCCL INFO cudaDriverVersion 12020 +gpua038:354889:354889 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.38<0> +gpua038:354889:354889 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua038:354889:354889 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua038:354889:354958 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua038:354889:354958 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua038:354889:354958 [2] NCCL INFO Using network AWS Libfabric +gpua038:354889:354958 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua038:354889:354958 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua038:354889:354958 [2] NCCL INFO Trees [0] 23/-1/-1->22->21 [1] 23/-1/-1->22->21 +gpua038:354889:354958 [2] NCCL INFO Channel 00/0 : 22[85000] -> 23[c7000] via P2P/IPC/read +gpua038:354889:354958 [2] NCCL INFO Channel 01/0 : 22[85000] -> 23[c7000] via P2P/IPC/read +gpua038:354889:354958 [2] NCCL INFO Connected all rings +gpua038:354889:354958 [2] NCCL INFO Channel 00/0 : 22[85000] -> 21[46000] via P2P/IPC/read +gpua038:354889:354958 [2] NCCL INFO Channel 01/0 : 22[85000] -> 21[46000] via P2P/IPC/read +gpua038:354889:354958 [2] NCCL INFO Connected all trees +gpua038:354889:354958 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua038:354889:354958 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua038:354889:354958 [2] NCCL INFO comm 0x55fece3bf940 rank 22 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua006:855219:855219 [1] NCCL INFO cudaDriverVersion 12020 +gpua006:855219:855219 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.6<0> +gpua006:855219:855219 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua006:855219:855219 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua006:855219:855290 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua006:855219:855290 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua006:855219:855290 [1] NCCL INFO Using network AWS Libfabric +gpua006:855219:855290 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua006:855219:855290 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua006:855219:855290 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 +gpua006:855219:855290 [1] NCCL INFO Channel 00/0 : 1[46000] -> 2[85000] via P2P/IPC/read +gpua006:855219:855290 [1] NCCL INFO Channel 01/0 : 1[46000] -> 2[85000] via P2P/IPC/read +gpua006:855219:855290 [1] NCCL INFO Connected all rings +gpua049:31610:31610 [2] NCCL INFO cudaDriverVersion 12020 +gpua049:31610:31610 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.49<0> +gpua049:31610:31610 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua049:31610:31610 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua049:31610:31670 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua049:31610:31670 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua049:31610:31670 [2] NCCL INFO Using network AWS Libfabric +gpua049:31610:31670 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua049:31610:31670 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua049:31610:31670 [2] NCCL INFO Trees [0] 35/-1/-1->34->33 [1] 35/-1/-1->34->33 +gpua049:31610:31670 [2] NCCL INFO Channel 00/0 : 34[85000] -> 35[c7000] via P2P/IPC/read +gpua049:31610:31670 [2] NCCL INFO Channel 01/0 : 34[85000] -> 35[c7000] via P2P/IPC/read +gpua049:31610:31670 [2] NCCL INFO Connected all rings +gpua006:855219:855290 [1] NCCL INFO Channel 00/0 : 1[46000] -> 0[7000] via P2P/IPC/read +gpua006:855219:855290 [1] NCCL INFO Channel 01/0 : 1[46000] -> 0[7000] via P2P/IPC/read +gpua006:855219:855290 [1] NCCL INFO Connected all trees +gpua006:855219:855290 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua006:855219:855290 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua006:855219:855290 [1] NCCL INFO comm 0x561103936b10 rank 1 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua049:31610:31670 [2] NCCL INFO Channel 00/0 : 34[85000] -> 33[46000] via P2P/IPC/read +gpua049:31610:31670 [2] NCCL INFO Channel 01/0 : 34[85000] -> 33[46000] via P2P/IPC/read +gpua049:31610:31670 [2] NCCL INFO Connected all trees +gpua049:31610:31670 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua049:31610:31670 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua049:31610:31670 [2] NCCL INFO comm 0x5647cc7164a0 rank 34 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua006:855221:855221 [3] NCCL INFO cudaDriverVersion 12020 +gpua006:855221:855221 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.6<0> +gpua006:855221:855221 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua006:855221:855221 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua006:855221:855289 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua006:855221:855289 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua006:855221:855289 [3] NCCL INFO Using network AWS Libfabric +gpua006:855221:855289 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua006:855221:855289 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua006:855221:855289 [3] NCCL INFO Trees [0] -1/-1/-1->3->2 [1] -1/-1/-1->3->2 +gpua006:855221:855289 [3] NCCL INFO Channel 00/0 : 3[c7000] -> 4[7000] [send] via NET/AWS Libfabric/1 +gpua006:855221:855289 [3] NCCL INFO Channel 01/0 : 3[c7000] -> 4[7000] [send] via NET/AWS Libfabric/1 +gpua007:1756577:1756577 [1] NCCL INFO cudaDriverVersion 12020 +gpua007:1756577:1756577 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.7<0> +gpua007:1756577:1756577 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua007:1756577:1756577 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua007:1756577:1756639 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua007:1756577:1756639 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua007:1756577:1756639 [1] NCCL INFO Using network AWS Libfabric +gpua007:1756577:1756639 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua007:1756577:1756639 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua007:1756577:1756639 [1] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/8/-1->5->4 +gpua007:1756577:1756639 [1] NCCL INFO Channel 00/0 : 5[46000] -> 6[85000] via P2P/IPC/read +gpua007:1756577:1756639 [1] NCCL INFO Channel 01/0 : 5[46000] -> 6[85000] via P2P/IPC/read +gpua006:855221:855289 [3] NCCL INFO Connected all rings +gpua006:855221:855289 [3] NCCL INFO Channel 00/0 : 3[c7000] -> 2[85000] via P2P/IPC/read +gpua006:855221:855289 [3] NCCL INFO Channel 01/0 : 3[c7000] -> 2[85000] via P2P/IPC/read +gpua006:855221:855289 [3] NCCL INFO Connected all trees +gpua006:855221:855289 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua006:855221:855289 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua006:855221:855289 [3] NCCL INFO comm 0x55b1d054a800 rank 3 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua007:1756577:1756639 [1] NCCL INFO Connected all rings +gpua007:1756577:1756639 [1] NCCL INFO Channel 01/0 : 5[46000] -> 8[7000] [send] via NET/AWS Libfabric/1 +gpua007:1756577:1756639 [1] NCCL INFO Channel 01/0 : 8[7000] -> 5[46000] [receive] via NET/AWS Libfabric/1 +gpua007:1756577:1756639 [1] NCCL INFO Channel 00/0 : 5[46000] -> 4[7000] via P2P/IPC/read +gpua007:1756577:1756639 [1] NCCL INFO Channel 01/0 : 5[46000] -> 4[7000] via P2P/IPC/read +gpua007:1756577:1756639 [1] NCCL INFO Connected all trees +gpua007:1756577:1756639 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua007:1756577:1756639 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua007:1756577:1756639 [1] NCCL INFO comm 0x555b786333f0 rank 5 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua006:855220:855220 [2] NCCL INFO cudaDriverVersion 12020 +gpua006:855220:855220 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.6<0> +gpua006:855220:855220 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua006:855220:855220 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua006:855220:855288 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua006:855220:855288 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua006:855220:855288 [2] NCCL INFO Using network AWS Libfabric +gpua006:855220:855288 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua006:855220:855288 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua006:855220:855288 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 +gpua006:855220:855288 [2] NCCL INFO Channel 00/0 : 2[85000] -> 3[c7000] via P2P/IPC/read +gpua006:855220:855288 [2] NCCL INFO Channel 01/0 : 2[85000] -> 3[c7000] via P2P/IPC/read +gpua006:855220:855288 [2] NCCL INFO Connected all rings +gpua007:1756578:1756578 [2] NCCL INFO cudaDriverVersion 12020 +gpua007:1756578:1756578 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.7<0> +gpua007:1756578:1756578 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua007:1756578:1756578 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua007:1756578:1756641 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua007:1756578:1756641 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua007:1756578:1756641 [2] NCCL INFO Using network AWS Libfabric +gpua007:1756578:1756641 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua007:1756578:1756641 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua007:1756578:1756641 [2] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 +gpua007:1756578:1756641 [2] NCCL INFO Channel 00/0 : 6[85000] -> 7[c7000] via P2P/IPC/read +gpua007:1756578:1756641 [2] NCCL INFO Channel 01/0 : 6[85000] -> 7[c7000] via P2P/IPC/read +gpua039:3838097:3838097 [3] NCCL INFO cudaDriverVersion 12020 +gpua039:3838097:3838097 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.39<0> +gpua039:3838097:3838097 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua039:3838097:3838097 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua039:3838097:3838172 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua039:3838097:3838172 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua039:3838097:3838172 [3] NCCL INFO Using network AWS Libfabric +gpua039:3838097:3838172 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua039:3838097:3838172 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua039:3838097:3838172 [3] NCCL INFO Trees [0] -1/-1/-1->27->26 [1] -1/-1/-1->27->26 +gpua039:3838097:3838172 [3] NCCL INFO Channel 00/0 : 27[c7000] -> 28[7000] [send] via NET/AWS Libfabric/1 +gpua039:3838097:3838172 [3] NCCL INFO Channel 01/0 : 27[c7000] -> 28[7000] [send] via NET/AWS Libfabric/1 +gpua006:855220:855288 [2] NCCL INFO Channel 00/0 : 2[85000] -> 1[46000] via P2P/IPC/read +gpua006:855220:855288 [2] NCCL INFO Channel 01/0 : 2[85000] -> 1[46000] via P2P/IPC/read +gpua006:855220:855288 [2] NCCL INFO Connected all trees +gpua006:855220:855288 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua006:855220:855288 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua006:855220:855288 [2] NCCL INFO comm 0x560ee9e2da80 rank 2 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua006:855218:855287 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua006:855218:855287 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua006:855218:855287 [0] NCCL INFO Using network AWS Libfabric +gpua006:855218:855287 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua006:855218:855287 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua007:1756578:1756641 [2] NCCL INFO Connected all rings +gpua007:1756578:1756641 [2] NCCL INFO Channel 00/0 : 6[85000] -> 5[46000] via P2P/IPC/read +gpua007:1756578:1756641 [2] NCCL INFO Channel 01/0 : 6[85000] -> 5[46000] via P2P/IPC/read +gpua007:1756578:1756641 [2] NCCL INFO Connected all trees +gpua007:1756578:1756641 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua007:1756578:1756641 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua007:1756578:1756641 [2] NCCL INFO comm 0x5627ade13200 rank 6 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua039:3838097:3838172 [3] NCCL INFO Connected all rings +gpua039:3838097:3838172 [3] NCCL INFO Channel 00/0 : 27[c7000] -> 26[85000] via P2P/IPC/read +gpua039:3838097:3838172 [3] NCCL INFO Channel 01/0 : 27[c7000] -> 26[85000] via P2P/IPC/read +gpua039:3838097:3838172 [3] NCCL INFO Connected all trees +gpua039:3838097:3838172 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua039:3838097:3838172 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua039:3838097:3838172 [3] NCCL INFO comm 0x562cf313e4d0 rank 27 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua049:31609:31609 [1] NCCL INFO cudaDriverVersion 12020 +gpua049:31609:31609 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.49<0> +gpua049:31609:31609 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua049:31609:31609 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua049:31609:31672 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua049:31609:31672 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua049:31609:31672 [1] NCCL INFO Using network AWS Libfabric +gpua049:31609:31672 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua049:31609:31672 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua049:31609:31672 [1] NCCL INFO Trees [0] 34/16/-1->33->32 [1] 34/-1/-1->33->32 +gpua049:31609:31672 [1] NCCL INFO Channel 00/0 : 33[46000] -> 34[85000] via P2P/IPC/read +gpua049:31609:31672 [1] NCCL INFO Channel 01/0 : 33[46000] -> 34[85000] via P2P/IPC/read +gpua049:31609:31672 [1] NCCL INFO Connected all rings +gpua006:855218:855287 [0] NCCL INFO Channel 00/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 +gpua006:855218:855287 [0] NCCL INFO Channel 01/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 +gpua006:855218:855287 [0] NCCL INFO Trees [0] 1/32/-1->0->-1 [1] 1/-1/-1->0->4 +gpua006:855218:855287 [0] NCCL INFO Channel 00/0 : 63[c7000] -> 0[7000] [receive] via NET/AWS Libfabric/1 +gpua006:855218:855287 [0] NCCL INFO Channel 01/0 : 63[c7000] -> 0[7000] [receive] via NET/AWS Libfabric/1 +gpua006:855218:855287 [0] NCCL INFO Channel 00/0 : 0[7000] -> 1[46000] via P2P/IPC/read +gpua006:855218:855287 [0] NCCL INFO Channel 01/0 : 0[7000] -> 1[46000] via P2P/IPC/read +gpua006:855218:855287 [0] NCCL INFO Connected all rings +gpua006:855218:855287 [0] NCCL INFO Channel 01/0 : 0[7000] -> 4[7000] [send] via NET/AWS Libfabric/1 +gpua006:855218:855287 [0] NCCL INFO Channel 00/0 : 32[7000] -> 0[7000] [receive] via NET/AWS Libfabric/1 +gpua007:1756576:1756576 [0] NCCL INFO cudaDriverVersion 12020 +gpua007:1756576:1756576 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.7<0> +gpua007:1756576:1756576 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua007:1756576:1756576 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). 
+gpua007:1756576:1756640 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua007:1756576:1756640 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua007:1756576:1756640 [0] NCCL INFO Using network AWS Libfabric +gpua007:1756576:1756640 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua007:1756576:1756640 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua007:1756576:1756640 [0] NCCL INFO Trees [0] 5/-1/-1->4->9 [1] 5/0/-1->4->12 +gpua007:1756576:1756640 [0] NCCL INFO Channel 00/0 : 3[c7000] -> 4[7000] [receive] via NET/AWS Libfabric/1 +gpua007:1756576:1756640 [0] NCCL INFO Channel 01/0 : 3[c7000] -> 4[7000] [receive] via NET/AWS Libfabric/1 +gpua049:31609:31672 [1] NCCL INFO Channel 00/0 : 16[7000] -> 33[46000] [receive] via NET/AWS Libfabric/1 +gpua049:31609:31672 [1] NCCL INFO Channel 00/0 : 33[46000] -> 16[7000] [send] via NET/AWS Libfabric/1 +gpua049:31609:31672 [1] NCCL INFO Channel 00/0 : 33[46000] -> 32[7000] via P2P/IPC/read +gpua049:31609:31672 [1] NCCL INFO Channel 01/0 : 33[46000] -> 32[7000] via P2P/IPC/read +gpua049:31609:31672 [1] NCCL INFO Connected all trees +gpua049:31609:31672 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua049:31609:31672 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua049:31609:31672 [1] NCCL INFO comm 0x55acbe8e4e40 rank 33 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua038:354887:354887 [0] NCCL INFO cudaDriverVersion 12020 +gpua038:354887:354887 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.38<0> +gpua038:354887:354887 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua038:354887:354887 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua038:354887:354957 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua038:354887:354957 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua038:354887:354957 [0] NCCL INFO Using network AWS Libfabric +gpua038:354887:354957 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua038:354887:354957 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua038:354887:354957 [0] NCCL INFO Trees [0] 21/-1/-1->20->25 [1] 21/16/-1->20->13 +gpua038:354887:354957 [0] NCCL INFO Channel 00/0 : 19[c7000] -> 20[7000] [receive] via NET/AWS Libfabric/1 +gpua038:354887:354957 [0] NCCL INFO Channel 01/0 : 19[c7000] -> 20[7000] [receive] via NET/AWS Libfabric/1 +gpua080:3446599:3446599 [0] NCCL INFO cudaDriverVersion 12020 +gpua080:3446599:3446599 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.80<0> +gpua080:3446599:3446599 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua080:3446599:3446599 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua080:3446599:3446679 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua080:3446599:3446679 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua080:3446599:3446679 [0] NCCL INFO Using network AWS Libfabric +gpua080:3446599:3446679 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua080:3446599:3446679 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua080:3446599:3446679 [0] NCCL INFO Trees [0] 53/-1/-1->52->57 [1] 53/48/-1->52->45 +gpua080:3446599:3446679 [0] NCCL INFO Channel 00/0 : 51[c7000] -> 52[7000] [receive] via NET/AWS Libfabric/1 +gpua006:855218:855287 [0] NCCL INFO Channel 00/0 : 0[7000] -> 32[7000] [send] via NET/AWS Libfabric/1 +gpua006:855218:855287 [0] NCCL INFO Channel 01/0 : 4[7000] -> 0[7000] [receive] via NET/AWS Libfabric/1 +gpua006:855218:855287 [0] NCCL INFO Connected all trees +gpua006:855218:855287 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua006:855218:855287 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua006:855218:855287 [0] NCCL INFO comm 0x55b42e53c890 rank 0 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua007:1756576:1756640 [0] NCCL INFO Channel 00/0 : 4[7000] -> 5[46000] via P2P/IPC/read +gpua007:1756576:1756640 [0] NCCL INFO Channel 01/0 : 4[7000] -> 5[46000] via P2P/IPC/read +gpua007:1756576:1756640 [0] NCCL INFO Connected all rings +gpua007:1756576:1756640 [0] NCCL INFO Channel 01/0 : 0[7000] -> 4[7000] [receive] via NET/AWS Libfabric/1 +gpua007:1756576:1756640 [0] NCCL INFO Channel 00/0 : 4[7000] -> 9[46000] [send] via NET/AWS Libfabric/1 +gpua007:1756576:1756640 [0] NCCL INFO Channel 01/0 : 4[7000] -> 12[7000] [send] via NET/AWS Libfabric/1 +gpua007:1756576:1756640 [0] NCCL INFO Channel 01/0 : 12[7000] -> 4[7000] [receive] via NET/AWS Libfabric/1 +gpua007:1756576:1756640 [0] NCCL INFO Channel 00/0 : 9[46000] -> 4[7000] [receive] via NET/AWS Libfabric/1 +gpua007:1756576:1756640 [0] NCCL INFO Channel 01/0 : 4[7000] -> 0[7000] [send] via NET/AWS Libfabric/1 +gpua007:1756576:1756640 [0] NCCL INFO Connected all trees +gpua007:1756576:1756640 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua049:31608:31608 [0] NCCL INFO cudaDriverVersion 12020 +gpua049:31608:31608 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.49<0> +gpua049:31608:31608 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua049:31608:31608 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua049:31608:31669 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua049:31608:31669 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua049:31608:31669 [0] NCCL INFO Using network AWS Libfabric +gpua049:31608:31669 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua049:31608:31669 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua049:31608:31669 [0] NCCL INFO Trees [0] 33/48/-1->32->0 [1] 33/-1/-1->32->36 +gpua049:31608:31669 [0] NCCL INFO Channel 00/0 : 31[c7000] -> 32[7000] [receive] via NET/AWS Libfabric/1 +gpua049:31608:31669 [0] NCCL INFO Channel 01/0 : 31[c7000] -> 32[7000] [receive] via NET/AWS Libfabric/1 +gpua038:354887:354957 [0] NCCL INFO Channel 00/0 : 20[7000] -> 21[46000] via P2P/IPC/read +gpua038:354887:354957 [0] NCCL INFO Channel 01/0 : 20[7000] -> 21[46000] via P2P/IPC/read +gpua038:354887:354957 [0] NCCL INFO Connected all rings +gpua038:354887:354957 [0] NCCL INFO Channel 01/0 : 16[7000] -> 20[7000] [receive] via NET/AWS Libfabric/1 +gpua038:354887:354957 [0] NCCL INFO Channel 00/0 : 20[7000] -> 25[46000] [send] via NET/AWS Libfabric/1 +gpua038:354887:354957 [0] NCCL INFO Channel 01/0 : 13[46000] -> 20[7000] [receive] via NET/AWS Libfabric/1 +gpua038:354887:354957 [0] NCCL INFO Channel 01/0 : 20[7000] -> 13[46000] [send] via NET/AWS Libfabric/1 +gpua038:354887:354957 [0] NCCL INFO Channel 00/0 : 25[46000] -> 20[7000] [receive] via NET/AWS Libfabric/1 +gpua038:354887:354957 [0] NCCL INFO Channel 01/0 : 20[7000] -> 16[7000] [send] via NET/AWS Libfabric/1 +gpua038:354887:354957 [0] NCCL INFO Connected all trees +gpua038:354887:354957 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua040:4035264:4035264 [3] NCCL INFO cudaDriverVersion 12020 +gpua040:4035264:4035264 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.40<0> +gpua040:4035264:4035264 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua040:4035264:4035264 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua040:4035264:4035325 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua040:4035264:4035325 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua040:4035264:4035325 [3] NCCL INFO Using network AWS Libfabric +gpua040:4035264:4035325 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua040:4035264:4035325 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua040:4035264:4035325 [3] NCCL INFO Trees [0] -1/-1/-1->31->30 [1] -1/-1/-1->31->30 +gpua040:4035264:4035325 [3] NCCL INFO Channel 00/0 : 31[c7000] -> 32[7000] [send] via NET/AWS Libfabric/1 +gpua040:4035264:4035325 [3] NCCL INFO Channel 01/0 : 31[c7000] -> 32[7000] [send] via NET/AWS Libfabric/1 +gpua080:3446599:3446679 [0] NCCL INFO Channel 01/0 : 51[c7000] -> 52[7000] [receive] via NET/AWS Libfabric/1 +gpua080:3446599:3446679 [0] NCCL INFO Channel 00/0 : 52[7000] -> 53[46000] via P2P/IPC/read +gpua080:3446599:3446679 [0] NCCL INFO Channel 01/0 : 52[7000] -> 53[46000] via P2P/IPC/read +gpua080:3446599:3446679 [0] NCCL INFO Connected all rings +gpua080:3446599:3446679 [0] NCCL INFO Channel 01/0 : 48[7000] -> 52[7000] [receive] via NET/AWS Libfabric/1 +gpua080:3446599:3446679 [0] NCCL INFO Channel 00/0 : 52[7000] -> 57[46000] [send] via NET/AWS Libfabric/1 +gpua080:3446599:3446679 [0] NCCL INFO Channel 01/0 : 45[46000] -> 52[7000] [receive] via NET/AWS Libfabric/1 +gpua080:3446599:3446679 [0] NCCL INFO Channel 01/0 : 52[7000] -> 45[46000] [send] via NET/AWS Libfabric/1 +gpua080:3446599:3446679 [0] NCCL INFO Channel 00/0 : 57[46000] -> 52[7000] [receive] via NET/AWS Libfabric/1 +gpua080:3446599:3446679 [0] NCCL INFO Channel 01/0 : 52[7000] -> 48[7000] [send] via NET/AWS Libfabric/1 +gpua007:1756576:1756640 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua007:1756576:1756640 [0] NCCL INFO comm 0x5594e86269d0 rank 4 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua033:1821569:1821569 [1] NCCL INFO cudaDriverVersion 12020 +gpua033:1821569:1821569 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.33<0> +gpua033:1821569:1821569 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua033:1821569:1821569 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua033:1821569:1821651 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua033:1821569:1821651 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua033:1821569:1821651 [1] NCCL INFO Using network AWS Libfabric +gpua033:1821569:1821651 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua033:1821569:1821651 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua033:1821569:1821651 [1] NCCL INFO Trees [0] 18/8/-1->17->16 [1] 18/-1/-1->17->16 +gpua033:1821569:1821651 [1] NCCL INFO Channel 00/0 : 17[46000] -> 18[85000] via P2P/IPC/read +gpua033:1821569:1821651 [1] NCCL INFO Channel 01/0 : 17[46000] -> 18[85000] via P2P/IPC/read +gpua049:31608:31669 [0] NCCL INFO Channel 00/0 : 32[7000] -> 33[46000] via P2P/IPC/read +gpua049:31608:31669 [0] NCCL INFO Channel 01/0 : 32[7000] -> 33[46000] via P2P/IPC/read +gpua049:31608:31669 [0] NCCL INFO Connected all rings +gpua049:31608:31669 [0] NCCL INFO Channel 01/0 : 32[7000] -> 36[7000] [send] via NET/AWS Libfabric/1 +gpua049:31608:31669 [0] NCCL INFO Channel 00/0 : 32[7000] -> 48[7000] [send] via NET/AWS Libfabric/1 +gpua049:31608:31669 [0] NCCL INFO Channel 00/0 : 0[7000] -> 32[7000] [receive] via NET/AWS Libfabric/1 +gpua049:31608:31669 [0] NCCL INFO Channel 00/0 : 32[7000] -> 0[7000] [send] via NET/AWS Libfabric/1 +gpua049:31608:31669 [0] NCCL INFO Channel 00/0 : 48[7000] -> 32[7000] [receive] via NET/AWS Libfabric/1 +gpua049:31608:31669 [0] NCCL INFO Channel 01/0 : 36[7000] -> 32[7000] [receive] via NET/AWS Libfabric/1 +gpua049:31608:31669 [0] NCCL INFO Connected all trees +gpua049:31608:31669 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua038:354887:354957 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua038:354887:354957 [0] NCCL INFO comm 0x563fe6acd530 rank 20 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua040:4035264:4035325 [3] NCCL INFO Connected all rings +gpua040:4035264:4035325 [3] NCCL INFO Channel 00/0 : 31[c7000] -> 30[85000] via P2P/IPC/read +gpua040:4035264:4035325 [3] NCCL INFO Channel 01/0 : 31[c7000] -> 30[85000] via P2P/IPC/read +gpua040:4035264:4035325 [3] NCCL INFO Connected all trees +gpua040:4035264:4035325 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua040:4035264:4035325 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua040:4035264:4035325 [3] NCCL INFO comm 0x560a47c70a80 rank 31 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua040:4035262:4035262 [1] NCCL INFO cudaDriverVersion 12020 +gpua040:4035262:4035262 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.40<0> +gpua040:4035262:4035262 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua040:4035262:4035262 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua040:4035262:4035323 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua089:1026829:1026829 [1] NCCL INFO cudaDriverVersion 12020 +gpua089:1026829:1026829 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.89<0> +gpua089:1026829:1026829 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua089:1026829:1026829 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua089:1026829:1026913 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua089:1026829:1026913 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua089:1026829:1026913 [1] NCCL INFO Using network AWS Libfabric +gpua089:1026829:1026913 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua089:1026829:1026913 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua089:1026829:1026913 [1] NCCL INFO Trees [0] 62/-1/-1->61->60 [1] 62/-1/-1->61->60 +gpua089:1026829:1026913 [1] NCCL INFO Channel 00/0 : 61[46000] -> 62[85000] via P2P/IPC/read +gpua089:1026829:1026913 [1] NCCL INFO Channel 01/0 : 61[46000] -> 62[85000] via P2P/IPC/read +gpua080:3446599:3446679 [0] NCCL INFO Connected all trees +gpua080:3446599:3446679 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua080:3446599:3446679 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua080:3446599:3446679 [0] NCCL INFO comm 0x55b5a9881140 rank 52 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua033:1821569:1821651 [1] NCCL INFO Connected all rings +gpua033:1821569:1821651 [1] NCCL INFO Channel 00/0 : 8[7000] -> 17[46000] [receive] via NET/AWS Libfabric/1 +gpua033:1821569:1821651 [1] NCCL INFO Channel 00/0 : 17[46000] -> 8[7000] [send] via NET/AWS Libfabric/1 +gpua033:1821569:1821651 [1] NCCL INFO Channel 00/0 : 17[46000] -> 16[7000] via P2P/IPC/read +gpua033:1821569:1821651 [1] NCCL INFO Channel 01/0 : 17[46000] -> 16[7000] via P2P/IPC/read +gpua033:1821569:1821651 [1] NCCL INFO Connected all trees +gpua033:1821569:1821651 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua033:1821569:1821651 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua033:1821569:1821651 [1] NCCL INFO comm 0x55c015319e80 rank 17 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua049:31608:31669 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua049:31608:31669 [0] NCCL INFO comm 0x5605626cb320 rank 32 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua040:4035262:4035323 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua040:4035262:4035323 [1] NCCL INFO Using network AWS Libfabric +gpua040:4035262:4035323 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua040:4035262:4035323 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua040:4035262:4035323 [1] NCCL INFO Trees [0] 30/-1/-1->29->28 [1] 30/44/-1->29->28 +gpua040:4035262:4035323 [1] NCCL INFO Channel 00/0 : 29[46000] -> 30[85000] via P2P/IPC/read +gpua040:4035262:4035323 [1] NCCL INFO Channel 01/0 : 29[46000] -> 30[85000] via P2P/IPC/read +gpua040:4035262:4035323 [1] NCCL INFO Connected all rings +gpua040:4035262:4035323 [1] NCCL INFO Channel 01/0 : 29[46000] -> 44[7000] [send] via NET/AWS Libfabric/1 +gpua040:4035262:4035323 [1] NCCL INFO Channel 01/0 : 44[7000] -> 29[46000] [receive] via NET/AWS Libfabric/1 +gpua040:4035262:4035323 [1] NCCL INFO Channel 00/0 : 29[46000] -> 28[7000] via P2P/IPC/read +gpua089:1026829:1026913 [1] NCCL INFO Connected all rings +gpua089:1026829:1026913 [1] NCCL INFO Channel 00/0 : 61[46000] -> 60[7000] via P2P/IPC/read +gpua089:1026829:1026913 [1] NCCL INFO Channel 01/0 : 61[46000] -> 60[7000] via P2P/IPC/read +gpua089:1026829:1026913 [1] NCCL INFO Connected all trees +gpua089:1026829:1026913 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua089:1026829:1026913 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua089:1026829:1026913 [1] NCCL INFO comm 0x559cc36d42a0 rank 61 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua080:3446601:3446601 [2] NCCL INFO cudaDriverVersion 12020 +gpua080:3446601:3446601 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.80<0> +gpua080:3446601:3446601 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua080:3446601:3446601 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). 
+gpua080:3446601:3446678 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua080:3446601:3446678 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua080:3446601:3446678 [2] NCCL INFO Using network AWS Libfabric +gpua080:3446601:3446678 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua080:3446601:3446678 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua080:3446601:3446678 [2] NCCL INFO Trees [0] 55/-1/-1->54->53 [1] 55/-1/-1->54->53 +gpua080:3446601:3446678 [2] NCCL INFO Channel 00/0 : 54[85000] -> 55[c7000] via P2P/IPC/read +gpua080:3446601:3446678 [2] NCCL INFO Channel 01/0 : 54[85000] -> 55[c7000] via P2P/IPC/read +gpua012:2904258:2904258 [0] NCCL INFO cudaDriverVersion 12020 +gpua012:2904258:2904258 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.12<0> +gpua012:2904258:2904258 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua012:2904258:2904258 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua012:2904258:2904332 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua012:2904258:2904332 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua012:2904258:2904332 [0] NCCL INFO Using network AWS Libfabric +gpua012:2904258:2904332 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua012:2904258:2904332 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua012:2904258:2904332 [0] NCCL INFO Trees [0] 9/12/-1->8->17 [1] 9/-1/-1->8->5 +gpua012:2904258:2904332 [0] NCCL INFO Channel 00/0 : 7[c7000] -> 8[7000] [receive] via NET/AWS Libfabric/1 +gpua012:2904258:2904332 [0] NCCL INFO Channel 01/0 : 7[c7000] -> 8[7000] [receive] via NET/AWS Libfabric/1 +gpua033:1821570:1821570 [2] NCCL INFO cudaDriverVersion 12020 +gpua033:1821570:1821570 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.33<0> +gpua033:1821570:1821570 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua033:1821570:1821570 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua033:1821570:1821652 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua033:1821570:1821652 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua033:1821570:1821652 [2] NCCL INFO Using network AWS Libfabric +gpua033:1821570:1821652 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua033:1821570:1821652 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua033:1821570:1821652 [2] NCCL INFO Trees [0] 19/-1/-1->18->17 [1] 19/-1/-1->18->17 +gpua033:1821570:1821652 [2] NCCL INFO Channel 00/0 : 18[85000] -> 19[c7000] via P2P/IPC/read +gpua033:1821570:1821652 [2] NCCL INFO Channel 01/0 : 18[85000] -> 19[c7000] via P2P/IPC/read +gpua049:31611:31611 [3] NCCL INFO cudaDriverVersion 12020 +gpua049:31611:31611 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.49<0> +gpua049:31611:31611 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua049:31611:31611 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua049:31611:31671 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua049:31611:31671 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua049:31611:31671 [3] NCCL INFO Using network AWS Libfabric +gpua049:31611:31671 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua049:31611:31671 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua049:31611:31671 [3] NCCL INFO Trees [0] -1/-1/-1->35->34 [1] -1/-1/-1->35->34 +gpua049:31611:31671 [3] NCCL INFO Channel 00/0 : 35[c7000] -> 36[7000] [send] via NET/AWS Libfabric/1 +gpua049:31611:31671 [3] NCCL INFO Channel 01/0 : 35[c7000] -> 36[7000] [send] via NET/AWS Libfabric/1 +gpua049:31611:31671 [3] NCCL INFO Connected all rings +gpua040:4035262:4035323 [1] NCCL INFO Channel 01/0 : 29[46000] -> 28[7000] via P2P/IPC/read +gpua040:4035262:4035323 [1] NCCL INFO Connected all trees +gpua040:4035262:4035323 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua040:4035262:4035323 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua040:4035262:4035323 [1] NCCL INFO comm 0x55ea1a0f5d50 rank 29 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua085:4194190:4194190 [0] NCCL INFO cudaDriverVersion 12020 +gpua085:4194190:4194190 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.85<0> +gpua085:4194190:4194190 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua085:4194190:4194190 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua085:4194190:4194266 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua085:4194190:4194266 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua085:4194190:4194266 [0] NCCL INFO Using network AWS Libfabric +gpua085:4194190:4194266 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua085:4194190:4194266 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua085:4194190:4194266 [0] NCCL INFO Trees [0] 57/60/-1->56->48 [1] 57/-1/-1->56->53 +gpua085:4194190:4194266 [0] NCCL INFO Channel 00/0 : 55[c7000] -> 56[7000] [receive] via NET/AWS Libfabric/1 +gpua080:3446601:3446678 [2] NCCL INFO Connected all rings +gpua080:3446601:3446678 [2] NCCL INFO Channel 00/0 : 54[85000] -> 53[46000] via P2P/IPC/read +gpua080:3446601:3446678 [2] NCCL INFO Channel 01/0 : 54[85000] -> 53[46000] via P2P/IPC/read +gpua080:3446601:3446678 [2] NCCL INFO Connected all trees +gpua080:3446601:3446678 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua080:3446601:3446678 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua080:3446601:3446678 [2] NCCL INFO comm 0x55e58701dc70 rank 54 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua012:2904258:2904332 [0] NCCL INFO Channel 00/0 : 8[7000] -> 9[46000] via P2P/IPC/read +gpua012:2904258:2904332 [0] NCCL INFO Channel 01/0 : 8[7000] -> 9[46000] via P2P/IPC/read +gpua012:2904258:2904332 [0] NCCL INFO Connected all rings +gpua012:2904258:2904332 [0] NCCL INFO Channel 01/0 : 5[46000] -> 8[7000] [receive] via NET/AWS Libfabric/1 +gpua012:2904258:2904332 [0] NCCL INFO Channel 00/0 : 8[7000] -> 12[7000] [send] via NET/AWS Libfabric/1 +gpua012:2904258:2904332 [0] NCCL INFO Channel 00/0 : 8[7000] -> 17[46000] [send] via NET/AWS Libfabric/1 +gpua012:2904258:2904332 [0] NCCL INFO Channel 00/0 : 17[46000] -> 8[7000] [receive] via NET/AWS Libfabric/1 +gpua012:2904258:2904332 [0] NCCL INFO Channel 00/0 : 12[7000] -> 8[7000] [receive] via NET/AWS Libfabric/1 +gpua012:2904258:2904332 [0] NCCL INFO Channel 01/0 : 8[7000] -> 5[46000] [send] via NET/AWS Libfabric/1 +gpua012:2904258:2904332 [0] NCCL INFO Connected all trees +gpua012:2904258:2904332 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua033:1821570:1821652 [2] NCCL INFO Connected all rings +gpua033:1821570:1821652 [2] NCCL INFO Channel 00/0 : 18[85000] -> 17[46000] via P2P/IPC/read +gpua033:1821570:1821652 [2] NCCL INFO Channel 
01/0 : 18[85000] -> 17[46000] via P2P/IPC/read +gpua033:1821570:1821652 [2] NCCL INFO Connected all trees +gpua033:1821570:1821652 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua033:1821570:1821652 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua033:1821570:1821652 [2] NCCL INFO comm 0x55c455cb2470 rank 18 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua039:3838096:3838096 [2] NCCL INFO cudaDriverVersion 12020 +gpua039:3838096:3838096 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.39<0> +gpua039:3838096:3838096 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua039:3838096:3838096 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua039:3838096:3838171 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua039:3838096:3838171 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua039:3838096:3838171 [2] NCCL INFO Using network AWS Libfabric +gpua039:3838096:3838171 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua039:3838096:3838171 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua039:3838096:3838171 [2] NCCL INFO Trees [0] 27/-1/-1->26->25 [1] 27/-1/-1->26->25 +gpua039:3838096:3838171 [2] NCCL INFO Channel 00/0 : 26[85000] -> 27[c7000] via P2P/IPC/read +gpua039:3838096:3838171 [2] NCCL INFO Channel 01/0 : 26[85000] -> 27[c7000] via P2P/IPC/read +gpua049:31611:31671 [3] NCCL INFO Channel 00/0 : 35[c7000] -> 34[85000] via P2P/IPC/read +gpua049:31611:31671 [3] NCCL INFO Channel 01/0 : 35[c7000] -> 34[85000] via P2P/IPC/read +gpua049:31611:31671 [3] NCCL INFO Connected all trees +gpua049:31611:31671 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua049:31611:31671 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua049:31611:31671 [3] NCCL INFO comm 0x5560419e0020 rank 35 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua040:4035263:4035263 [2] NCCL INFO cudaDriverVersion 12020 +gpua040:4035263:4035263 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.40<0> +gpua040:4035263:4035263 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua040:4035263:4035263 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua040:4035263:4035326 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua040:4035263:4035326 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua040:4035263:4035326 [2] NCCL INFO Using network AWS Libfabric +gpua040:4035263:4035326 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua040:4035263:4035326 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua040:4035263:4035326 [2] NCCL INFO Trees [0] 31/-1/-1->30->29 [1] 31/-1/-1->30->29 +gpua040:4035263:4035326 [2] NCCL INFO Channel 00/0 : 30[85000] -> 31[c7000] via P2P/IPC/read +gpua040:4035263:4035326 [2] NCCL INFO Channel 01/0 : 30[85000] -> 31[c7000] via P2P/IPC/read +gpua085:4194190:4194266 [0] NCCL INFO Channel 01/0 : 55[c7000] -> 56[7000] [receive] via NET/AWS Libfabric/1 +gpua085:4194190:4194266 [0] NCCL INFO Channel 00/0 : 56[7000] -> 57[46000] via P2P/IPC/read +gpua085:4194190:4194266 [0] NCCL INFO Channel 01/0 : 56[7000] -> 57[46000] via P2P/IPC/read +gpua085:4194190:4194266 [0] NCCL INFO Connected all rings +gpua085:4194190:4194266 [0] NCCL INFO Channel 01/0 : 53[46000] -> 56[7000] [receive] via NET/AWS Libfabric/1 +gpua085:4194190:4194266 [0] NCCL INFO Channel 00/0 : 56[7000] -> 60[7000] [send] via NET/AWS Libfabric/1 +gpua085:4194190:4194266 [0] NCCL INFO Channel 00/0 : 48[7000] -> 56[7000] [receive] via NET/AWS Libfabric/1 +gpua085:4194190:4194266 [0] NCCL INFO Channel 00/0 : 56[7000] -> 48[7000] [send] via NET/AWS Libfabric/1 +gpua085:4194190:4194266 [0] NCCL INFO Channel 00/0 : 60[7000] -> 56[7000] [receive] via NET/AWS Libfabric/1 +gpua085:4194190:4194266 [0] NCCL INFO Channel 01/0 : 56[7000] -> 53[46000] [send] via NET/AWS Libfabric/1 +gpua080:3446600:3446600 [1] NCCL INFO cudaDriverVersion 12020 +gpua080:3446600:3446600 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.80<0> +gpua080:3446600:3446600 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua080:3446600:3446600 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua080:3446600:3446680 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua080:3446600:3446680 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua080:3446600:3446680 [1] NCCL INFO Using network AWS Libfabric +gpua080:3446600:3446680 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua080:3446600:3446680 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua080:3446600:3446680 [1] NCCL INFO Trees [0] 54/-1/-1->53->52 [1] 54/56/-1->53->52 +gpua080:3446600:3446680 [1] NCCL INFO Channel 00/0 : 53[46000] -> 54[85000] via P2P/IPC/read +gpua080:3446600:3446680 [1] NCCL INFO Channel 01/0 : 53[46000] -> 54[85000] via P2P/IPC/read +gpua012:2904258:2904332 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua012:2904258:2904332 [0] NCCL INFO comm 0x55d1f10c6df0 rank 8 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua033:1821571:1821571 [3] NCCL INFO cudaDriverVersion 12020 +gpua033:1821571:1821571 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.33<0> +gpua033:1821571:1821571 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua033:1821571:1821571 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua033:1821571:1821649 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua033:1821571:1821649 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua033:1821571:1821649 [3] NCCL INFO Using network AWS Libfabric +gpua033:1821571:1821649 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua033:1821571:1821649 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua033:1821571:1821649 [3] NCCL INFO Trees [0] -1/-1/-1->19->18 [1] -1/-1/-1->19->18 +gpua033:1821571:1821649 [3] NCCL INFO Channel 00/0 : 19[c7000] -> 20[7000] [send] via NET/AWS Libfabric/1 +gpua033:1821571:1821649 [3] NCCL INFO Channel 01/0 : 19[c7000] -> 20[7000] [send] via NET/AWS Libfabric/1 +gpua039:3838096:3838171 [2] NCCL INFO Connected all rings +gpua039:3838096:3838171 [2] NCCL INFO Channel 00/0 : 26[85000] -> 25[46000] via P2P/IPC/read +gpua039:3838096:3838171 [2] NCCL INFO Channel 01/0 : 26[85000] -> 25[46000] via P2P/IPC/read +gpua039:3838096:3838171 [2] NCCL INFO Connected all trees +gpua039:3838096:3838171 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua039:3838096:3838171 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua039:3838096:3838171 [2] NCCL INFO comm 0x564c5f983ba0 rank 26 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua040:4035263:4035326 [2] NCCL INFO Connected all rings +gpua040:4035263:4035326 [2] NCCL INFO Channel 00/0 : 30[85000] -> 29[46000] via P2P/IPC/read +gpua040:4035263:4035326 [2] NCCL INFO Channel 01/0 : 30[85000] -> 29[46000] via P2P/IPC/read +gpua040:4035263:4035326 [2] NCCL INFO Connected all trees +gpua040:4035263:4035326 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua040:4035263:4035326 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua040:4035263:4035326 [2] NCCL INFO comm 0x5557bc28e410 rank 30 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua085:4194190:4194266 [0] NCCL INFO Connected all trees +gpua085:4194190:4194266 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua085:4194190:4194266 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua085:4194190:4194266 [0] NCCL INFO comm 0x55a8634e8730 rank 56 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua080:3446600:3446680 [1] NCCL INFO Connected all rings +gpua080:3446600:3446680 [1] NCCL INFO Channel 01/0 : 53[46000] -> 56[7000] [send] via NET/AWS Libfabric/1 +gpua080:3446600:3446680 [1] NCCL INFO Channel 01/0 : 56[7000] -> 53[46000] [receive] via NET/AWS Libfabric/1 +gpua080:3446600:3446680 [1] NCCL INFO Channel 00/0 : 53[46000] -> 52[7000] via P2P/IPC/read +gpua080:3446600:3446680 [1] NCCL INFO Channel 01/0 : 53[46000] -> 52[7000] via P2P/IPC/read +gpua080:3446600:3446680 [1] NCCL INFO Connected all trees +gpua080:3446600:3446680 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua080:3446600:3446680 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua080:3446600:3446680 [1] NCCL INFO comm 0x562309404c20 rank 53 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua012:2904261:2904261 [3] NCCL INFO cudaDriverVersion 12020 +gpua012:2904261:2904261 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.12<0> +gpua012:2904261:2904261 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua012:2904261:2904261 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua012:2904261:2904333 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua012:2904261:2904333 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua012:2904261:2904333 [3] NCCL INFO Using network AWS Libfabric +gpua012:2904261:2904333 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua012:2904261:2904333 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua012:2904261:2904333 [3] NCCL INFO Trees [0] -1/-1/-1->11->10 [1] -1/-1/-1->11->10 +gpua012:2904261:2904333 [3] NCCL INFO Channel 00/0 : 11[c7000] -> 12[7000] [send] via NET/AWS Libfabric/1 +gpua012:2904261:2904333 [3] NCCL INFO Channel 01/0 : 11[c7000] -> 12[7000] [send] via NET/AWS Libfabric/1 +gpua033:1821571:1821649 [3] NCCL INFO Connected all rings +gpua033:1821571:1821649 [3] NCCL INFO Channel 00/0 : 19[c7000] -> 18[85000] via P2P/IPC/read +gpua033:1821571:1821649 [3] NCCL INFO Channel 01/0 : 19[c7000] -> 18[85000] via P2P/IPC/read +gpua033:1821571:1821649 [3] NCCL INFO Connected all trees +gpua033:1821571:1821649 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua033:1821571:1821649 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua033:1821571:1821649 [3] NCCL INFO comm 0x555dde498d50 rank 19 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua038:354890:354890 [3] NCCL INFO cudaDriverVersion 12020 +gpua038:354890:354890 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.38<0> +gpua038:354890:354890 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua038:354890:354890 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua038:354890:354960 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua038:354890:354960 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua038:354890:354960 [3] NCCL INFO Using network AWS Libfabric +gpua038:354890:354960 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua038:354890:354960 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua038:354890:354960 [3] NCCL INFO Trees [0] -1/-1/-1->23->22 [1] -1/-1/-1->23->22 +gpua038:354890:354960 [3] NCCL INFO Channel 00/0 : 23[c7000] -> 24[7000] [send] via NET/AWS Libfabric/1 +gpua038:354890:354960 [3] NCCL INFO Channel 01/0 : 23[c7000] -> 24[7000] [send] via NET/AWS Libfabric/1 +gpua040:4035261:4035261 [0] NCCL INFO cudaDriverVersion 12020 +gpua040:4035261:4035261 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.40<0> +gpua040:4035261:4035261 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua040:4035261:4035261 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua040:4035261:4035324 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua040:4035261:4035324 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua040:4035261:4035324 [0] NCCL INFO Using network AWS Libfabric +gpua040:4035261:4035324 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua040:4035261:4035324 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua040:4035261:4035324 [0] NCCL INFO Trees [0] 29/-1/-1->28->24 [1] 29/12/-1->28->60 +gpua040:4035261:4035324 [0] NCCL INFO Channel 00/0 : 27[c7000] -> 28[7000] [receive] via NET/AWS Libfabric/1 +gpua085:4194192:4194192 [2] NCCL INFO cudaDriverVersion 12020 +gpua085:4194192:4194192 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.85<0> +gpua085:4194192:4194192 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua085:4194192:4194192 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua085:4194192:4194268 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua085:4194192:4194268 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua085:4194192:4194268 [2] NCCL INFO Using network AWS Libfabric +gpua085:4194192:4194268 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua085:4194192:4194268 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua085:4194192:4194268 [2] NCCL INFO Trees [0] 59/-1/-1->58->57 [1] 59/-1/-1->58->57 +gpua085:4194192:4194268 [2] NCCL INFO Channel 00/0 : 58[85000] -> 59[c7000] via P2P/IPC/read +gpua085:4194192:4194268 [2] NCCL INFO Channel 01/0 : 58[85000] -> 59[c7000] via P2P/IPC/read +gpua089:1026831:1026831 [3] NCCL INFO cudaDriverVersion 12020 +gpua089:1026831:1026831 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.89<0> +gpua089:1026831:1026831 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua089:1026831:1026831 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua089:1026831:1026911 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua089:1026831:1026911 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua089:1026831:1026911 [3] NCCL INFO Using network AWS Libfabric +gpua089:1026831:1026911 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua089:1026831:1026911 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua089:1026831:1026911 [3] NCCL INFO Trees [0] -1/-1/-1->63->62 [1] -1/-1/-1->63->62 +gpua089:1026831:1026911 [3] NCCL INFO Channel 00/0 : 63[c7000] -> 0[7000] [send] via NET/AWS Libfabric/1 +gpua089:1026831:1026911 [3] NCCL INFO Channel 01/0 : 63[c7000] -> 0[7000] [send] via NET/AWS Libfabric/1 +gpua080:3446602:3446602 [3] NCCL INFO cudaDriverVersion 12020 +gpua080:3446602:3446602 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.80<0> +gpua080:3446602:3446602 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua080:3446602:3446602 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua080:3446602:3446677 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua080:3446602:3446677 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua080:3446602:3446677 [3] NCCL INFO Using network AWS Libfabric +gpua080:3446602:3446677 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua080:3446602:3446677 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua080:3446602:3446677 [3] NCCL INFO Trees [0] -1/-1/-1->55->54 [1] -1/-1/-1->55->54 +gpua080:3446602:3446677 [3] NCCL INFO Channel 00/0 : 55[c7000] -> 56[7000] [send] via NET/AWS Libfabric/1 +gpua080:3446602:3446677 [3] NCCL INFO Channel 01/0 : 55[c7000] -> 56[7000] [send] via NET/AWS Libfabric/1 +gpua012:2904261:2904333 [3] NCCL INFO Connected all rings +gpua012:2904261:2904333 [3] NCCL INFO Channel 00/0 : 11[c7000] -> 10[85000] via P2P/IPC/read +gpua012:2904261:2904333 [3] NCCL INFO Channel 01/0 : 11[c7000] -> 10[85000] via P2P/IPC/read +gpua012:2904261:2904333 [3] NCCL INFO Connected all trees +gpua012:2904261:2904333 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua012:2904261:2904333 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua012:2904261:2904333 [3] NCCL INFO comm 0x55818f3bcef0 rank 11 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua038:354890:354960 [3] NCCL INFO Connected all rings +gpua038:354890:354960 [3] NCCL INFO Channel 00/0 : 23[c7000] -> 22[85000] via P2P/IPC/read +gpua038:354890:354960 [3] NCCL INFO Channel 01/0 : 23[c7000] -> 22[85000] via P2P/IPC/read +gpua038:354890:354960 [3] NCCL INFO Connected all trees +gpua038:354890:354960 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua038:354890:354960 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua038:354890:354960 [3] NCCL INFO comm 0x55f32bd2bbe0 rank 23 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua040:4035261:4035324 [0] NCCL INFO Channel 01/0 : 27[c7000] -> 28[7000] [receive] via NET/AWS Libfabric/1 +gpua040:4035261:4035324 [0] NCCL INFO Channel 00/0 : 28[7000] -> 29[46000] via P2P/IPC/read +gpua040:4035261:4035324 [0] NCCL INFO Channel 01/0 : 28[7000] -> 29[46000] via P2P/IPC/read +gpua040:4035261:4035324 [0] NCCL INFO Connected all rings +gpua040:4035261:4035324 [0] NCCL INFO Channel 00/0 : 24[7000] -> 28[7000] [receive] via NET/AWS Libfabric/1 +gpua040:4035261:4035324 [0] NCCL INFO Channel 01/0 : 12[7000] -> 28[7000] [receive] via NET/AWS Libfabric/1 +gpua040:4035261:4035324 [0] NCCL INFO Channel 01/0 : 60[7000] -> 28[7000] [receive] via NET/AWS Libfabric/1 +gpua040:4035261:4035324 [0] NCCL INFO Channel 01/0 : 28[7000] -> 60[7000] [send] via NET/AWS Libfabric/1 +gpua040:4035261:4035324 [0] NCCL INFO Channel 01/0 : 28[7000] -> 12[7000] [send] via NET/AWS Libfabric/1 +gpua040:4035261:4035324 [0] NCCL INFO Channel 00/0 : 28[7000] -> 24[7000] [send] via NET/AWS Libfabric/1 +gpua085:4194192:4194268 [2] NCCL INFO Connected all rings +gpua085:4194192:4194268 [2] NCCL INFO Channel 00/0 : 58[85000] -> 57[46000] via P2P/IPC/read +gpua085:4194192:4194268 [2] NCCL INFO Channel 01/0 : 58[85000] -> 57[46000] via P2P/IPC/read +gpua085:4194192:4194268 [2] NCCL INFO Connected all trees +gpua085:4194192:4194268 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua085:4194192:4194268 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua085:4194192:4194268 [2] NCCL INFO comm 0x5644deb13770 rank 58 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua089:1026831:1026911 [3] NCCL INFO Connected all rings +gpua089:1026831:1026911 [3] NCCL INFO Channel 00/0 : 63[c7000] -> 62[85000] via P2P/IPC/read +gpua089:1026831:1026911 [3] NCCL INFO Channel 01/0 : 63[c7000] -> 62[85000] via P2P/IPC/read +gpua089:1026831:1026911 [3] NCCL INFO Connected all trees +gpua089:1026831:1026911 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua089:1026831:1026911 [3] NCCL INFO 2 coll channels, 2 p2p 
channels, 2 p2p channels per peer +gpua089:1026831:1026911 [3] NCCL INFO comm 0x56351004f370 rank 63 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua080:3446602:3446677 [3] NCCL INFO Connected all rings +gpua080:3446602:3446677 [3] NCCL INFO Channel 00/0 : 55[c7000] -> 54[85000] via P2P/IPC/read +gpua080:3446602:3446677 [3] NCCL INFO Channel 01/0 : 55[c7000] -> 54[85000] via P2P/IPC/read +gpua080:3446602:3446677 [3] NCCL INFO Connected all trees +gpua080:3446602:3446677 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua080:3446602:3446677 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua080:3446602:3446677 [3] NCCL INFO comm 0x557e693ad210 rank 55 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua012:2904259:2904259 [1] NCCL INFO cudaDriverVersion 12020 +gpua012:2904259:2904259 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.12<0> +gpua012:2904259:2904259 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua012:2904259:2904259 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua012:2904259:2904334 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua012:2904259:2904334 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua012:2904259:2904334 [1] NCCL INFO Using network AWS Libfabric +gpua012:2904259:2904334 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua012:2904259:2904334 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua012:2904259:2904334 [1] NCCL INFO Trees [0] 10/4/-1->9->8 [1] 10/-1/-1->9->8 +gpua012:2904259:2904334 [1] NCCL INFO Channel 00/0 : 9[46000] -> 10[85000] via P2P/IPC/read +gpua012:2904259:2904334 [1] NCCL INFO Channel 01/0 : 9[46000] -> 10[85000] via P2P/IPC/read +gpua040:4035261:4035324 [0] NCCL INFO Connected all trees +gpua040:4035261:4035324 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua040:4035261:4035324 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua040:4035261:4035324 [0] NCCL INFO comm 0x558f07c52120 rank 28 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua085:4194191:4194191 [1] NCCL INFO cudaDriverVersion 12020 +gpua085:4194191:4194191 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.85<0> +gpua085:4194191:4194191 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua085:4194191:4194191 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua085:4194191:4194267 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua085:4194191:4194267 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua085:4194191:4194267 [1] NCCL INFO Using network AWS Libfabric +gpua085:4194191:4194267 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua085:4194191:4194267 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua085:4194191:4194267 [1] NCCL INFO Trees [0] 58/52/-1->57->56 [1] 58/-1/-1->57->56 +gpua085:4194191:4194267 [1] NCCL INFO Channel 00/0 : 57[46000] -> 58[85000] via P2P/IPC/read +gpua085:4194191:4194267 [1] NCCL INFO Channel 01/0 : 57[46000] -> 58[85000] via P2P/IPC/read +gpua089:1026830:1026830 [2] NCCL INFO cudaDriverVersion 12020 +gpua089:1026830:1026830 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.89<0> +gpua089:1026830:1026830 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua089:1026830:1026830 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). 
+gpua089:1026830:1026912 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua089:1026830:1026912 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua089:1026830:1026912 [2] NCCL INFO Using network AWS Libfabric +gpua089:1026830:1026912 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua089:1026830:1026912 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua089:1026830:1026912 [2] NCCL INFO Trees [0] 63/-1/-1->62->61 [1] 63/-1/-1->62->61 +gpua089:1026830:1026912 [2] NCCL INFO Channel 00/0 : 62[85000] -> 63[c7000] via P2P/IPC/read +gpua089:1026830:1026912 [2] NCCL INFO Channel 01/0 : 62[85000] -> 63[c7000] via P2P/IPC/read +gpua012:2904259:2904334 [1] NCCL INFO Connected all rings +gpua012:2904259:2904334 [1] NCCL INFO Channel 00/0 : 4[7000] -> 9[46000] [receive] via NET/AWS Libfabric/1 +gpua012:2904259:2904334 [1] NCCL INFO Channel 00/0 : 9[46000] -> 4[7000] [send] via NET/AWS Libfabric/1 +gpua012:2904259:2904334 [1] NCCL INFO Channel 00/0 : 9[46000] -> 8[7000] via P2P/IPC/read +gpua012:2904259:2904334 [1] NCCL INFO Channel 01/0 : 9[46000] -> 8[7000] via P2P/IPC/read +gpua012:2904259:2904334 [1] NCCL INFO Connected all trees +gpua012:2904259:2904334 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua012:2904259:2904334 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua012:2904259:2904334 [1] NCCL INFO comm 0x560862e56060 rank 9 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua057:4061620:4061620 [3] NCCL INFO cudaDriverVersion 12020 +gpua057:4061620:4061620 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.57<0> +gpua057:4061620:4061620 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua057:4061620:4061620 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua057:4061620:4061692 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua057:4061620:4061692 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua057:4061620:4061692 [3] NCCL INFO Using network AWS Libfabric +gpua057:4061620:4061692 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua057:4061620:4061692 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua057:4061620:4061692 [3] NCCL INFO Trees [0] -1/-1/-1->47->46 [1] -1/-1/-1->47->46 +gpua057:4061620:4061692 [3] NCCL INFO Channel 00/0 : 47[c7000] -> 48[7000] [send] via NET/AWS Libfabric/1 +gpua057:4061620:4061692 [3] NCCL INFO Channel 01/0 : 47[c7000] -> 48[7000] [send] via NET/AWS Libfabric/1 +gpua085:4194191:4194267 [1] NCCL INFO Connected all rings +gpua085:4194191:4194267 [1] NCCL INFO Channel 00/0 : 52[7000] -> 57[46000] [receive] via NET/AWS Libfabric/1 +gpua085:4194191:4194267 [1] NCCL INFO Channel 00/0 : 57[46000] -> 52[7000] [send] via NET/AWS Libfabric/1 +gpua085:4194191:4194267 [1] NCCL INFO Channel 00/0 : 57[46000] -> 56[7000] via P2P/IPC/read +gpua085:4194191:4194267 [1] NCCL INFO Channel 01/0 : 57[46000] -> 56[7000] via P2P/IPC/read +gpua085:4194191:4194267 [1] NCCL INFO Connected all trees +gpua085:4194191:4194267 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua085:4194191:4194267 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua085:4194191:4194267 [1] NCCL INFO comm 0x5579bbe53e70 rank 57 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua089:1026830:1026912 [2] NCCL INFO Connected all rings +gpua089:1026830:1026912 [2] NCCL INFO Channel 00/0 : 62[85000] -> 61[46000] via P2P/IPC/read +gpua089:1026830:1026912 [2] NCCL INFO Channel 01/0 : 62[85000] -> 61[46000] via P2P/IPC/read +gpua089:1026830:1026912 [2] NCCL INFO Connected all trees +gpua089:1026830:1026912 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua089:1026830:1026912 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua089:1026830:1026912 [2] NCCL INFO comm 0x556ab847afe0 rank 62 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua007:1756579:1756579 [3] NCCL INFO cudaDriverVersion 12020 +gpua007:1756579:1756579 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.7<0> +gpua007:1756579:1756579 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua007:1756579:1756579 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua007:1756579:1756642 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua007:1756579:1756642 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua007:1756579:1756642 [3] NCCL INFO Using network AWS Libfabric +gpua007:1756579:1756642 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua007:1756579:1756642 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua007:1756579:1756642 [3] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 +gpua007:1756579:1756642 [3] NCCL INFO Channel 00/0 : 7[c7000] -> 8[7000] [send] via NET/AWS Libfabric/1 +gpua007:1756579:1756642 [3] NCCL INFO Channel 01/0 : 7[c7000] -> 8[7000] [send] via NET/AWS Libfabric/1 +gpua057:4061620:4061692 [3] NCCL INFO Connected all rings +gpua057:4061620:4061692 [3] NCCL INFO Channel 00/0 : 47[c7000] -> 46[85000] via P2P/IPC/read +gpua057:4061620:4061692 [3] NCCL INFO Channel 01/0 : 47[c7000] -> 46[85000] via P2P/IPC/read +gpua057:4061620:4061692 [3] NCCL INFO Connected all trees +gpua057:4061620:4061692 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua057:4061620:4061692 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua057:4061620:4061692 [3] NCCL INFO comm 0x55ad840259e0 rank 47 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua057:4061617:4061617 [0] NCCL INFO cudaDriverVersion 12020 +gpua057:4061617:4061617 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.57<0> +gpua057:4061617:4061617 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua057:4061617:4061617 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua057:4061617:4061689 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua085:4194193:4194193 [3] NCCL INFO cudaDriverVersion 12020 +gpua085:4194193:4194193 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.85<0> +gpua085:4194193:4194193 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua085:4194193:4194193 [3] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua085:4194193:4194265 [3] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua085:4194193:4194265 [3] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua085:4194193:4194265 [3] NCCL INFO Using network AWS Libfabric +gpua085:4194193:4194265 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua085:4194193:4194265 [3] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua085:4194193:4194265 [3] NCCL INFO Trees [0] -1/-1/-1->59->58 [1] -1/-1/-1->59->58 +gpua085:4194193:4194265 [3] NCCL INFO Channel 00/0 : 59[c7000] -> 60[7000] [send] via NET/AWS Libfabric/1 +gpua085:4194193:4194265 [3] NCCL INFO Channel 01/0 : 59[c7000] -> 60[7000] [send] via NET/AWS Libfabric/1 +gpua089:1026828:1026828 [0] NCCL INFO cudaDriverVersion 12020 +gpua089:1026828:1026828 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.89<0> +gpua089:1026828:1026828 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua089:1026828:1026828 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua089:1026828:1026914 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua089:1026828:1026914 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua089:1026828:1026914 [0] NCCL INFO Using network AWS Libfabric +gpua089:1026828:1026914 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua089:1026828:1026914 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua089:1026828:1026914 [0] NCCL INFO Trees [0] 61/-1/-1->60->56 [1] 61/28/-1->60->-1 +gpua089:1026828:1026914 [0] NCCL INFO Channel 00/0 : 59[c7000] -> 60[7000] [receive] via NET/AWS Libfabric/1 +gpua007:1756579:1756642 [3] NCCL INFO Connected all rings +gpua007:1756579:1756642 [3] NCCL INFO Channel 00/0 : 7[c7000] -> 6[85000] via P2P/IPC/read +gpua007:1756579:1756642 [3] NCCL INFO Channel 01/0 : 7[c7000] -> 6[85000] via P2P/IPC/read +gpua007:1756579:1756642 [3] NCCL INFO Connected all trees +gpua007:1756579:1756642 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua007:1756579:1756642 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua007:1756579:1756642 [3] NCCL INFO comm 0x56414da7e710 rank 7 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua057:4061617:4061689 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua057:4061617:4061689 [0] NCCL INFO Using network AWS Libfabric +gpua057:4061617:4061689 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua057:4061617:4061689 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua057:4061617:4061689 [0] NCCL INFO Trees [0] 45/-1/-1->44->40 [1] 45/36/-1->44->29 +gpua057:4061617:4061689 [0] NCCL INFO Channel 00/0 : 43[c7000] -> 44[7000] [receive] via NET/AWS Libfabric/1 +gpua057:4061617:4061689 [0] NCCL INFO Channel 01/0 : 43[c7000] -> 44[7000] [receive] via NET/AWS Libfabric/1 +gpua057:4061617:4061689 [0] NCCL INFO Channel 00/0 : 44[7000] -> 45[46000] via P2P/IPC/read +gpua057:4061617:4061689 [0] NCCL INFO Channel 01/0 : 44[7000] -> 45[46000] via P2P/IPC/read +gpua057:4061617:4061689 [0] NCCL INFO Connected all rings +gpua057:4061617:4061689 [0] NCCL INFO Channel 00/0 : 40[7000] -> 44[7000] [receive] via NET/AWS Libfabric/1 +gpua085:4194193:4194265 [3] NCCL INFO Connected all rings +gpua085:4194193:4194265 [3] NCCL INFO Channel 00/0 : 59[c7000] -> 58[85000] via P2P/IPC/read +gpua085:4194193:4194265 [3] NCCL INFO Channel 01/0 : 59[c7000] -> 58[85000] via P2P/IPC/read +gpua085:4194193:4194265 [3] NCCL INFO Connected all trees +gpua085:4194193:4194265 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua085:4194193:4194265 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua085:4194193:4194265 [3] NCCL INFO comm 0x563bb0002d90 rank 59 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua089:1026828:1026914 [0] NCCL INFO Channel 01/0 : 59[c7000] -> 60[7000] [receive] via NET/AWS Libfabric/1 +gpua089:1026828:1026914 [0] NCCL INFO Channel 00/0 : 60[7000] -> 61[46000] via P2P/IPC/read +gpua089:1026828:1026914 [0] NCCL INFO Channel 01/0 : 60[7000] -> 61[46000] via P2P/IPC/read +gpua089:1026828:1026914 [0] NCCL INFO Connected all rings +gpua089:1026828:1026914 [0] NCCL INFO Channel 00/0 : 56[7000] -> 60[7000] [receive] via NET/AWS Libfabric/1 +gpua089:1026828:1026914 [0] NCCL INFO Channel 01/0 : 28[7000] -> 60[7000] [receive] via NET/AWS Libfabric/1 +gpua089:1026828:1026914 [0] NCCL INFO Channel 01/0 : 60[7000] -> 28[7000] [send] via NET/AWS Libfabric/1 +gpua089:1026828:1026914 [0] NCCL INFO Channel 00/0 : 60[7000] -> 56[7000] [send] via NET/AWS Libfabric/1 +gpua089:1026828:1026914 [0] NCCL INFO Connected all trees +gpua089:1026828:1026914 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua089:1026828:1026914 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua057:4061617:4061689 [0] NCCL INFO Channel 01/0 : 36[7000] -> 44[7000] [receive] via NET/AWS Libfabric/1 +gpua057:4061617:4061689 [0] NCCL INFO 
Channel 01/0 : 29[46000] -> 44[7000] [receive] via NET/AWS Libfabric/1 +gpua057:4061617:4061689 [0] NCCL INFO Channel 01/0 : 44[7000] -> 29[46000] [send] via NET/AWS Libfabric/1 +gpua057:4061617:4061689 [0] NCCL INFO Channel 01/0 : 44[7000] -> 36[7000] [send] via NET/AWS Libfabric/1 +gpua057:4061617:4061689 [0] NCCL INFO Channel 00/0 : 44[7000] -> 40[7000] [send] via NET/AWS Libfabric/1 +gpua057:4061617:4061689 [0] NCCL INFO Connected all trees +gpua057:4061617:4061689 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua057:4061617:4061689 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua057:4061617:4061689 [0] NCCL INFO comm 0x564d4d7d8e60 rank 44 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua089:1026828:1026914 [0] NCCL INFO comm 0x55e91cc24140 rank 60 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua057:4061618:4061618 [1] NCCL INFO cudaDriverVersion 12020 +gpua057:4061618:4061618 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.57<0> +gpua057:4061618:4061618 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua057:4061618:4061618 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua057:4061618:4061691 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua057:4061618:4061691 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua057:4061618:4061691 [1] NCCL INFO Using network AWS Libfabric +gpua057:4061618:4061691 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua057:4061618:4061691 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua057:4061618:4061691 [1] NCCL INFO Trees [0] 46/-1/-1->45->44 [1] 46/52/-1->45->44 +gpua057:4061618:4061691 [1] NCCL INFO Channel 00/0 : 45[46000] -> 46[85000] via P2P/IPC/read +gpua057:4061618:4061691 [1] NCCL INFO Channel 01/0 : 45[46000] -> 46[85000] via P2P/IPC/read +gpua033:1821568:1821568 [0] NCCL INFO cudaDriverVersion 12020 +gpua033:1821568:1821568 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.33<0> +gpua033:1821568:1821568 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua033:1821568:1821568 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua033:1821568:1821650 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua033:1821568:1821650 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua033:1821568:1821650 [0] NCCL INFO Using network AWS Libfabric +gpua033:1821568:1821650 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua033:1821568:1821650 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua033:1821568:1821650 [0] NCCL INFO Trees [0] 17/24/-1->16->33 [1] 17/-1/-1->16->20 +gpua033:1821568:1821650 [0] NCCL INFO Channel 00/0 : 15[c7000] -> 16[7000] [receive] via NET/AWS Libfabric/1 +gpua057:4061618:4061691 [1] NCCL INFO Connected all rings +gpua057:4061618:4061691 [1] NCCL INFO Channel 01/0 : 45[46000] -> 52[7000] [send] via NET/AWS Libfabric/1 +gpua057:4061618:4061691 [1] NCCL INFO Channel 01/0 : 52[7000] -> 45[46000] [receive] via NET/AWS Libfabric/1 +gpua057:4061618:4061691 [1] NCCL INFO Channel 00/0 : 45[46000] -> 44[7000] via P2P/IPC/read +gpua057:4061618:4061691 [1] NCCL INFO Channel 01/0 : 45[46000] -> 44[7000] via P2P/IPC/read +gpua057:4061618:4061691 [1] NCCL INFO Connected all trees +gpua057:4061618:4061691 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua057:4061618:4061691 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua057:4061618:4061691 [1] NCCL INFO comm 0x564f27ef3460 rank 45 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua033:1821568:1821650 [0] NCCL INFO Channel 01/0 : 15[c7000] -> 16[7000] [receive] via NET/AWS Libfabric/1 +gpua033:1821568:1821650 [0] NCCL INFO Channel 00/0 : 16[7000] -> 17[46000] via P2P/IPC/read +gpua033:1821568:1821650 [0] NCCL INFO Channel 01/0 : 16[7000] -> 17[46000] via P2P/IPC/read +gpua033:1821568:1821650 [0] NCCL INFO Connected all rings +gpua033:1821568:1821650 [0] NCCL INFO Channel 01/0 : 16[7000] -> 20[7000] [send] via NET/AWS Libfabric/1 +gpua033:1821568:1821650 [0] NCCL INFO Channel 00/0 : 16[7000] -> 24[7000] [send] via NET/AWS Libfabric/1 +gpua033:1821568:1821650 [0] NCCL INFO Channel 00/0 : 16[7000] -> 33[46000] [send] via NET/AWS Libfabric/1 +gpua033:1821568:1821650 [0] NCCL INFO Channel 00/0 : 33[46000] -> 16[7000] [receive] via NET/AWS Libfabric/1 +gpua033:1821568:1821650 [0] NCCL INFO Channel 00/0 : 24[7000] -> 16[7000] [receive] via NET/AWS Libfabric/1 +gpua033:1821568:1821650 [0] NCCL INFO Channel 01/0 : 20[7000] -> 16[7000] [receive] via NET/AWS Libfabric/1 +gpua057:4061619:4061619 [2] NCCL INFO cudaDriverVersion 12020 +gpua057:4061619:4061619 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.57<0> +gpua057:4061619:4061619 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua057:4061619:4061619 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua057:4061619:4061690 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua057:4061619:4061690 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua057:4061619:4061690 [2] NCCL INFO Using network AWS Libfabric +gpua057:4061619:4061690 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua057:4061619:4061690 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua057:4061619:4061690 [2] NCCL INFO Trees [0] 47/-1/-1->46->45 [1] 47/-1/-1->46->45 +gpua057:4061619:4061690 [2] NCCL INFO Channel 00/0 : 46[85000] -> 47[c7000] via P2P/IPC/read +gpua057:4061619:4061690 [2] NCCL INFO Channel 01/0 : 46[85000] -> 47[c7000] via P2P/IPC/read +gpua033:1821568:1821650 [0] NCCL INFO Connected all trees +gpua033:1821568:1821650 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua033:1821568:1821650 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua033:1821568:1821650 [0] NCCL INFO comm 0x55d53db55020 rank 16 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua057:4061619:4061690 [2] NCCL INFO Connected all rings +gpua057:4061619:4061690 [2] NCCL INFO Channel 00/0 : 46[85000] -> 45[46000] via P2P/IPC/read +gpua057:4061619:4061690 [2] NCCL INFO Channel 01/0 : 46[85000] -> 45[46000] via P2P/IPC/read +gpua057:4061619:4061690 [2] NCCL INFO Connected all trees +gpua057:4061619:4061690 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua057:4061619:4061690 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua057:4061619:4061690 [2] NCCL INFO comm 0x55dee3d4c4f0 rank 46 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua039:3838094:3838094 [0] NCCL INFO cudaDriverVersion 12020 +gpua039:3838094:3838094 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.39<0> +gpua039:3838094:3838094 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua039:3838094:3838094 [0] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua039:3838094:3838170 [0] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua039:3838094:3838170 [0] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua039:3838094:3838170 [0] NCCL INFO Using network AWS Libfabric +gpua039:3838094:3838170 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua039:3838094:3838170 [0] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua039:3838094:3838170 [0] NCCL INFO Trees [0] 25/28/-1->24->16 [1] 25/-1/-1->24->21 +gpua039:3838094:3838170 [0] NCCL INFO Channel 00/0 : 23[c7000] -> 24[7000] [receive] via NET/AWS Libfabric/1 +gpua039:3838094:3838170 [0] NCCL INFO Channel 01/0 : 23[c7000] -> 24[7000] [receive] via NET/AWS Libfabric/1 +gpua039:3838094:3838170 [0] NCCL INFO Channel 00/0 : 24[7000] -> 25[46000] via P2P/IPC/read +gpua039:3838094:3838170 [0] NCCL INFO Channel 01/0 : 24[7000] -> 25[46000] via P2P/IPC/read +gpua039:3838094:3838170 [0] NCCL INFO Connected all rings +gpua039:3838094:3838170 [0] NCCL INFO Channel 01/0 : 21[46000] -> 24[7000] [receive] via NET/AWS Libfabric/1 +gpua039:3838094:3838170 [0] NCCL INFO Channel 00/0 : 24[7000] -> 28[7000] [send] via NET/AWS Libfabric/1 +gpua039:3838094:3838170 [0] NCCL INFO Channel 00/0 : 16[7000] -> 24[7000] [receive] via NET/AWS Libfabric/1 +gpua039:3838094:3838170 [0] NCCL INFO Channel 00/0 : 24[7000] -> 16[7000] [send] via NET/AWS Libfabric/1 +gpua039:3838094:3838170 [0] NCCL INFO Channel 00/0 : 28[7000] -> 24[7000] [receive] via NET/AWS Libfabric/1 +gpua039:3838094:3838170 [0] NCCL INFO Channel 01/0 : 24[7000] -> 21[46000] [send] via NET/AWS Libfabric/1 +gpua039:3838094:3838170 [0] NCCL INFO Connected all trees +gpua039:3838094:3838170 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua039:3838094:3838170 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua039:3838094:3838170 [0] NCCL INFO comm 0x56492097fd60 rank 24 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua039:3838095:3838095 [1] NCCL INFO cudaDriverVersion 12020 +gpua039:3838095:3838095 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.39<0> +gpua039:3838095:3838095 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua039:3838095:3838095 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua039:3838095:3838173 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua039:3838095:3838173 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua039:3838095:3838173 [1] NCCL INFO Using network AWS Libfabric +gpua039:3838095:3838173 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua039:3838095:3838173 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua039:3838095:3838173 [1] NCCL INFO Trees [0] 26/20/-1->25->24 [1] 26/-1/-1->25->24 +gpua039:3838095:3838173 [1] NCCL INFO Channel 00/0 : 25[46000] -> 26[85000] via P2P/IPC/read +gpua039:3838095:3838173 [1] NCCL INFO Channel 01/0 : 25[46000] -> 26[85000] via P2P/IPC/read +gpua039:3838095:3838173 [1] NCCL INFO Connected all rings +gpua039:3838095:3838173 [1] NCCL INFO Channel 00/0 : 20[7000] -> 25[46000] [receive] via NET/AWS Libfabric/1 +gpua039:3838095:3838173 [1] NCCL INFO Channel 00/0 : 25[46000] -> 20[7000] [send] via NET/AWS Libfabric/1 +gpua039:3838095:3838173 [1] NCCL INFO Channel 00/0 : 25[46000] -> 24[7000] via P2P/IPC/read +gpua039:3838095:3838173 [1] NCCL INFO Channel 01/0 : 25[46000] -> 24[7000] via P2P/IPC/read +gpua039:3838095:3838173 [1] NCCL INFO Connected all trees +gpua039:3838095:3838173 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua039:3838095:3838173 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua039:3838095:3838173 [1] NCCL INFO comm 0x5601d5adfb70 rank 25 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua012:2904260:2904260 [2] NCCL INFO cudaDriverVersion 12020 +gpua012:2904260:2904260 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.12<0> +gpua012:2904260:2904260 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua012:2904260:2904260 [2] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua012:2904260:2904331 [2] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua012:2904260:2904331 [2] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua012:2904260:2904331 [2] NCCL INFO Using network AWS Libfabric +gpua012:2904260:2904331 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua012:2904260:2904331 [2] NCCL INFO NCCL_CROSS_NIC set by environment to 1. +gpua012:2904260:2904331 [2] NCCL INFO Trees [0] 11/-1/-1->10->9 [1] 11/-1/-1->10->9 +gpua012:2904260:2904331 [2] NCCL INFO Channel 00/0 : 10[85000] -> 11[c7000] via P2P/IPC/read +gpua012:2904260:2904331 [2] NCCL INFO Channel 01/0 : 10[85000] -> 11[c7000] via P2P/IPC/read +gpua012:2904260:2904331 [2] NCCL INFO Connected all rings +gpua012:2904260:2904331 [2] NCCL INFO Channel 00/0 : 10[85000] -> 9[46000] via P2P/IPC/read +gpua012:2904260:2904331 [2] NCCL INFO Channel 01/0 : 10[85000] -> 9[46000] via P2P/IPC/read +gpua012:2904260:2904331 [2] NCCL INFO Connected all trees +gpua012:2904260:2904331 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua012:2904260:2904331 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua012:2904260:2904331 [2] NCCL INFO comm 0x56373d046b20 rank 10 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua038:354888:354888 [1] NCCL INFO cudaDriverVersion 12020 +gpua038:354888:354888 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.38<0> +gpua038:354888:354888 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin_v6 symbol. +gpua038:354888:354888 [1] NCCL INFO NET/Plugin: Failed to find ncclCollNetPlugin symbol (v4 or v5). +gpua038:354888:354959 [1] NCCL INFO NET/OFI Using aws-ofi-nccl 1.6.0 +gpua038:354888:354959 [1] NCCL INFO NET/OFI Selected Provider is cxi (found 2 nics) +gpua038:354888:354959 [1] NCCL INFO Using network AWS Libfabric +gpua038:354888:354959 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua038:354888:354959 [1] NCCL INFO NCCL_CROSS_NIC set by environment to 1. 
+gpua038:354888:354959 [1] NCCL INFO Trees [0] 22/-1/-1->21->20 [1] 22/24/-1->21->20 +gpua038:354888:354959 [1] NCCL INFO Channel 00/0 : 21[46000] -> 22[85000] via P2P/IPC/read +gpua038:354888:354959 [1] NCCL INFO Channel 01/0 : 21[46000] -> 22[85000] via P2P/IPC/read +gpua038:354888:354959 [1] NCCL INFO Connected all rings +gpua038:354888:354959 [1] NCCL INFO Channel 01/0 : 21[46000] -> 24[7000] [send] via NET/AWS Libfabric/1 +gpua038:354888:354959 [1] NCCL INFO Channel 01/0 : 24[7000] -> 21[46000] [receive] via NET/AWS Libfabric/1 +gpua038:354888:354959 [1] NCCL INFO Channel 00/0 : 21[46000] -> 20[7000] via P2P/IPC/read +gpua038:354888:354959 [1] NCCL INFO Channel 01/0 : 21[46000] -> 20[7000] via P2P/IPC/read +gpua038:354888:354959 [1] NCCL INFO Connected all trees +gpua038:354888:354959 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua038:354888:354959 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua038:354888:354959 [1] NCCL INFO comm 0x560d6376d6d0 rank 21 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +[gpua006:0/64] 2024-02-13 14:17:17,915 (distributed:1027) INFO: Reducer buckets have been rebuilt in this iteration. +[gpua006:0/64] 2024-02-13 14:18:55,459 (trainer:756) INFO: 38epoch:train:1-100batch: iter_time=4.185, forward_time=0.216, loss_ctc=80.995, loss_interctc_layer6=87.982, loss_interctc_layer12=72.685, loss_interctc_layer15=66.510, loss_interctc_layer21=84.157, loss=78.466, backward_time=0.221, grad_norm=83.737, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.141, optim0_lr0=6.576e-05, train_time=5.842 +[gpua006:0/64] 2024-02-13 14:20:30,733 (trainer:756) INFO: 38epoch:train:101-200batch: iter_time=8.921e-05, forward_time=0.143, loss_ctc=66.308, loss_interctc_layer6=80.921, loss_interctc_layer12=66.840, loss_interctc_layer15=61.164, loss_interctc_layer21=68.556, loss=68.758, backward_time=0.208, grad_norm=69.361, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.576e-05, train_time=0.953 +[gpua006:0/64] 2024-02-13 14:22:14,193 (trainer:756) INFO: 38epoch:train:201-300batch: iter_time=8.800e-05, forward_time=0.141, loss_ctc=76.155, loss_interctc_layer6=91.193, loss_interctc_layer12=75.516, loss_interctc_layer15=69.233, loss_interctc_layer21=78.904, loss=78.200, backward_time=0.207, grad_norm=83.094, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.575e-05, train_time=1.034 +[gpua006:0/64] 2024-02-13 14:25:27,323 (trainer:756) INFO: 38epoch:train:301-400batch: iter_time=8.982e-05, forward_time=0.140, loss_ctc=65.964, loss_interctc_layer6=77.284, loss_interctc_layer12=64.072, loss_interctc_layer15=58.839, loss_interctc_layer21=68.343, loss=66.901, backward_time=0.204, grad_norm=79.045, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.575e-05, train_time=1.931 +[gpua006:0/64] 2024-02-13 14:28:50,894 (trainer:756) INFO: 38epoch:train:401-500batch: iter_time=9.595e-05, forward_time=0.141, loss_ctc=67.562, loss_interctc_layer6=82.252, loss_interctc_layer12=68.105, loss_interctc_layer15=62.434, loss_interctc_layer21=69.488, loss=69.968, backward_time=0.204, grad_norm=69.930, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.574e-05, train_time=2.035 +[gpua006:0/64] 2024-02-13 14:31:10,788 (trainer:756) INFO: 38epoch:train:501-600batch: iter_time=8.739e-05, forward_time=0.215, loss_ctc=71.747, loss_interctc_layer6=81.853, loss_interctc_layer12=67.743, loss_interctc_layer15=62.071, loss_interctc_layer21=74.394, loss=71.562, backward_time=0.239, 
grad_norm=67.580, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.142, optim0_lr0=6.573e-05, train_time=1.398 +[gpua006:0/64] 2024-02-13 14:33:10,505 (trainer:756) INFO: 38epoch:train:601-700batch: iter_time=8.867e-05, forward_time=0.199, loss_ctc=67.901, loss_interctc_layer6=77.430, loss_interctc_layer12=64.086, loss_interctc_layer15=59.027, loss_interctc_layer21=70.328, loss=67.754, backward_time=0.222, grad_norm=84.731, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.573e-05, train_time=1.197 +[gpua006:0/64] 2024-02-13 14:35:34,248 (trainer:756) INFO: 38epoch:train:701-800batch: iter_time=9.168e-05, forward_time=0.142, loss_ctc=78.619, loss_interctc_layer6=84.029, loss_interctc_layer12=69.479, loss_interctc_layer15=63.607, loss_interctc_layer21=81.544, loss=75.456, backward_time=0.205, grad_norm=71.715, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.572e-05, train_time=1.437 +[gpua006:0/64] 2024-02-13 14:38:00,220 (trainer:756) INFO: 38epoch:train:801-900batch: iter_time=8.931e-05, forward_time=0.141, loss_ctc=67.715, loss_interctc_layer6=80.850, loss_interctc_layer12=66.751, loss_interctc_layer15=60.982, loss_interctc_layer21=70.079, loss=69.275, backward_time=0.205, grad_norm=65.814, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.572e-05, train_time=1.459 +[gpua006:0/64] 2024-02-13 14:40:29,098 (trainer:756) INFO: 38epoch:train:901-1000batch: iter_time=9.331e-05, forward_time=0.141, loss_ctc=76.898, loss_interctc_layer6=81.623, loss_interctc_layer12=67.876, loss_interctc_layer15=62.377, loss_interctc_layer21=80.003, loss=73.755, backward_time=0.206, grad_norm=80.210, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.571e-05, train_time=1.489 +[gpua006:0/64] 2024-02-13 14:43:00,174 (trainer:756) INFO: 38epoch:train:1001-1100batch: iter_time=8.918e-05, forward_time=0.141, loss_ctc=79.577, loss_interctc_layer6=85.826, loss_interctc_layer12=71.449, loss_interctc_layer15=65.617, loss_interctc_layer21=82.432, loss=76.980, backward_time=0.206, grad_norm=80.700, clip=100.000, loss_scale=2.130e+31, optim_step_time=0.138, optim0_lr0=6.571e-05, train_time=1.511 +[gpua006:0/64] 2024-02-13 14:45:09,293 (trainer:756) INFO: 38epoch:train:1101-1200batch: iter_time=9.628e-05, forward_time=0.142, loss_ctc=80.419, loss_interctc_layer6=87.804, loss_interctc_layer12=73.047, loss_interctc_layer15=67.267, loss_interctc_layer21=83.421, loss=78.392, backward_time=0.205, grad_norm=77.794, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.570e-05, train_time=1.291 +[gpua006:0/64] 2024-02-13 14:46:28,568 (multiple_iter_factory:32) INFO: Building 1th iter-factory... 
+[gpua006:0/64] 2024-02-13 14:46:47,165 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 14:46:50,690 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.3", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.3", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.3", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.3", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 14:46:50,690 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.3, +[gpua006:0/64] 2024-02-13 14:46:50,693 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 14:56:04,170 (trainer:756) INFO: 38epoch:train:1201-1300batch: iter_time=2.949, forward_time=0.142, loss_ctc=85.181, loss_interctc_layer6=99.687, loss_interctc_layer12=82.635, loss_interctc_layer15=75.749, loss_interctc_layer21=88.117, loss=86.274, backward_time=0.206, grad_norm=84.764, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.569e-05, train_time=6.549 +[gpua006:0/64] 2024-02-13 14:57:38,280 (trainer:756) INFO: 38epoch:train:1301-1400batch: iter_time=8.849e-05, forward_time=0.144, loss_ctc=67.933, loss_interctc_layer6=78.284, loss_interctc_layer12=64.853, loss_interctc_layer15=59.409, loss_interctc_layer21=70.374, loss=68.171, backward_time=0.208, grad_norm=111.541, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.569e-05, train_time=0.941 +[gpua006:0/64] 2024-02-13 15:00:07,706 (trainer:756) INFO: 38epoch:train:1401-1500batch: iter_time=8.781e-05, forward_time=0.143, loss_ctc=71.158, loss_interctc_layer6=81.952, loss_interctc_layer12=67.516, loss_interctc_layer15=61.677, loss_interctc_layer21=73.840, loss=71.228, backward_time=0.206, grad_norm=66.129, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.568e-05, train_time=1.494 +[gpua006:0/64] 2024-02-13 15:02:02,378 (trainer:756) INFO: 38epoch:train:1501-1600batch: iter_time=9.524e-05, forward_time=0.144, loss_ctc=73.772, loss_interctc_layer6=88.074, loss_interctc_layer12=72.744, loss_interctc_layer15=66.695, loss_interctc_layer21=76.356, loss=75.528, backward_time=0.208, grad_norm=79.691, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.568e-05, train_time=1.146 +[gpua006:0/64] 2024-02-13 15:05:23,742 (trainer:756) INFO: 38epoch:train:1601-1700batch: iter_time=1.678e-04, forward_time=0.230, loss_ctc=60.064, loss_interctc_layer6=76.293, loss_interctc_layer12=62.900, loss_interctc_layer15=57.490, loss_interctc_layer21=62.002, loss=63.750, backward_time=0.273, grad_norm=62.882, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.142, optim0_lr0=6.567e-05, train_time=2.013 +[gpua006:0/64] 2024-02-13 15:08:14,554 (trainer:756) INFO: 38epoch:train:1701-1800batch: iter_time=8.684e-05, forward_time=0.163, loss_ctc=72.177, loss_interctc_layer6=79.876, loss_interctc_layer12=65.869, loss_interctc_layer15=60.210, loss_interctc_layer21=74.648, loss=70.556, backward_time=0.224, grad_norm=65.945, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.566e-05, train_time=1.709 +[gpua006:0/64] 2024-02-13 15:10:12,049 (trainer:756) INFO: 38epoch:train:1801-1900batch: 
iter_time=8.748e-05, forward_time=0.163, loss_ctc=71.267, loss_interctc_layer6=80.840, loss_interctc_layer12=66.997, loss_interctc_layer15=61.669, loss_interctc_layer21=73.761, loss=70.907, backward_time=0.208, grad_norm=80.853, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.566e-05, train_time=1.175 +[gpua006:0/64] 2024-02-13 15:13:03,224 (trainer:756) INFO: 38epoch:train:1901-2000batch: iter_time=9.320e-05, forward_time=0.143, loss_ctc=69.174, loss_interctc_layer6=76.666, loss_interctc_layer12=63.384, loss_interctc_layer15=58.172, loss_interctc_layer21=71.719, loss=67.823, backward_time=0.206, grad_norm=60.462, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.565e-05, train_time=1.712 +[gpua006:0/64] 2024-02-13 15:15:12,802 (trainer:756) INFO: 38epoch:train:2001-2100batch: iter_time=9.095e-05, forward_time=0.143, loss_ctc=68.502, loss_interctc_layer6=81.650, loss_interctc_layer12=67.399, loss_interctc_layer15=61.694, loss_interctc_layer21=70.892, loss=70.027, backward_time=0.207, grad_norm=77.239, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.565e-05, train_time=1.296 +[gpua006:0/64] 2024-02-13 15:17:36,770 (trainer:756) INFO: 38epoch:train:2101-2200batch: iter_time=9.456e-05, forward_time=0.144, loss_ctc=77.639, loss_interctc_layer6=82.679, loss_interctc_layer12=68.473, loss_interctc_layer15=62.714, loss_interctc_layer21=80.800, loss=74.461, backward_time=0.207, grad_norm=84.149, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.564e-05, train_time=1.439 +[gpua006:0/64] 2024-02-13 15:20:19,113 (trainer:756) INFO: 38epoch:train:2201-2300batch: iter_time=9.222e-05, forward_time=0.143, loss_ctc=78.760, loss_interctc_layer6=83.163, loss_interctc_layer12=68.860, loss_interctc_layer15=63.183, loss_interctc_layer21=81.708, loss=75.135, backward_time=0.206, grad_norm=82.230, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.563e-05, train_time=1.623 +[gpua006:0/64] 2024-02-13 15:22:43,641 (trainer:756) INFO: 38epoch:train:2301-2400batch: iter_time=9.295e-05, forward_time=0.142, loss_ctc=67.204, loss_interctc_layer6=79.208, loss_interctc_layer12=65.449, loss_interctc_layer15=60.000, loss_interctc_layer21=69.656, loss=68.303, backward_time=0.205, grad_norm=101.734, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.563e-05, train_time=1.445 +[gpua006:0/64] 2024-02-13 15:24:40,355 (trainer:756) INFO: 38epoch:train:2401-2500batch: iter_time=9.149e-05, forward_time=0.144, loss_ctc=89.242, loss_interctc_layer6=98.590, loss_interctc_layer12=82.046, loss_interctc_layer15=75.772, loss_interctc_layer21=92.264, loss=87.583, backward_time=0.208, grad_norm=92.121, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.562e-05, train_time=1.167 +[gpua006:0/64] 2024-02-13 15:25:00,384 (multiple_iter_factory:32) INFO: Building 2th iter-factory... 
+[gpua006:0/64] 2024-02-13 15:25:19,285 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 15:25:22,746 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.4", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.4", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.4", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.4", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 15:25:22,746 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.4, +[gpua006:0/64] 2024-02-13 15:25:22,750 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 15:38:48,006 (trainer:756) INFO: 38epoch:train:2501-2600batch: iter_time=3.355, forward_time=0.211, loss_ctc=86.437, loss_interctc_layer6=86.865, loss_interctc_layer12=71.421, loss_interctc_layer15=65.117, loss_interctc_layer21=90.049, loss=79.978, backward_time=0.221, grad_norm=80.027, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.562e-05, train_time=8.476 +[gpua006:0/64] 2024-02-13 15:41:27,610 (trainer:756) INFO: 38epoch:train:2601-2700batch: iter_time=8.701e-05, forward_time=0.143, loss_ctc=69.693, loss_interctc_layer6=80.508, loss_interctc_layer12=66.409, loss_interctc_layer15=60.642, loss_interctc_layer21=72.195, loss=69.889, backward_time=0.208, grad_norm=76.862, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.561e-05, train_time=1.596 +[gpua006:0/64] 2024-02-13 15:43:49,602 (trainer:756) INFO: 38epoch:train:2701-2800batch: iter_time=8.759e-05, forward_time=0.165, loss_ctc=83.981, loss_interctc_layer6=90.820, loss_interctc_layer12=74.858, loss_interctc_layer15=68.394, loss_interctc_layer21=87.168, loss=81.044, backward_time=0.219, grad_norm=77.172, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.561e-05, train_time=1.420 +[gpua006:0/64] 2024-02-13 15:46:21,079 (trainer:756) INFO: 38epoch:train:2801-2900batch: iter_time=8.784e-05, forward_time=0.143, loss_ctc=70.295, loss_interctc_layer6=76.645, loss_interctc_layer12=63.334, loss_interctc_layer15=58.116, loss_interctc_layer21=72.889, loss=68.256, backward_time=0.206, grad_norm=112.494, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.560e-05, train_time=1.515 +[gpua006:0/64] 2024-02-13 15:49:25,132 (trainer:756) INFO: 38epoch:train:2901-3000batch: iter_time=9.181e-05, forward_time=0.143, loss_ctc=67.974, loss_interctc_layer6=81.414, loss_interctc_layer12=67.352, loss_interctc_layer15=61.636, loss_interctc_layer21=69.889, loss=69.653, backward_time=0.205, grad_norm=80.273, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.559e-05, train_time=1.840 +[gpua006:0/64] 2024-02-13 15:51:50,626 (trainer:756) INFO: 38epoch:train:3001-3100batch: iter_time=9.635e-05, forward_time=0.142, loss_ctc=71.021, loss_interctc_layer6=80.643, loss_interctc_layer12=66.464, loss_interctc_layer15=60.719, loss_interctc_layer21=73.563, loss=70.482, backward_time=0.206, grad_norm=83.304, clip=100.000, loss_scale=4.259e+31, optim_step_time=0.138, optim0_lr0=6.559e-05, train_time=1.455 +[gpua006:0/64] 2024-02-13 15:53:58,210 (trainer:756) INFO: 38epoch:train:3101-3200batch: 
iter_time=8.759e-05, forward_time=0.145, loss_ctc=70.667, loss_interctc_layer6=75.957, loss_interctc_layer12=63.107, loss_interctc_layer15=57.989, loss_interctc_layer21=73.181, loss=68.180, backward_time=0.208, grad_norm=77.552, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.558e-05, train_time=1.276 +[gpua006:0/64] 2024-02-13 15:56:27,231 (trainer:756) INFO: 38epoch:train:3201-3300batch: iter_time=8.682e-05, forward_time=0.143, loss_ctc=83.554, loss_interctc_layer6=82.956, loss_interctc_layer12=68.491, loss_interctc_layer15=62.654, loss_interctc_layer21=86.816, loss=76.894, backward_time=0.207, grad_norm=77.480, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.558e-05, train_time=1.490 +[gpua006:0/64] 2024-02-13 15:58:29,369 (trainer:756) INFO: 38epoch:train:3301-3400batch: iter_time=9.191e-05, forward_time=0.143, loss_ctc=70.410, loss_interctc_layer6=79.092, loss_interctc_layer12=65.200, loss_interctc_layer15=59.504, loss_interctc_layer21=72.963, loss=69.434, backward_time=0.208, grad_norm=68.225, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.557e-05, train_time=1.221 +[gpua006:0/64] 2024-02-13 16:00:35,474 (trainer:756) INFO: 38epoch:train:3401-3500batch: iter_time=9.954e-05, forward_time=0.144, loss_ctc=81.554, loss_interctc_layer6=81.207, loss_interctc_layer12=67.433, loss_interctc_layer15=61.892, loss_interctc_layer21=85.018, loss=75.421, backward_time=0.208, grad_norm=74.612, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.556e-05, train_time=1.261 +[gpua006:0/64] 2024-02-13 16:02:52,486 (trainer:756) INFO: 38epoch:train:3501-3600batch: iter_time=9.987e-05, forward_time=0.144, loss_ctc=84.759, loss_interctc_layer6=85.136, loss_interctc_layer12=70.637, loss_interctc_layer15=64.777, loss_interctc_layer21=87.906, loss=78.643, backward_time=0.208, grad_norm=83.004, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.556e-05, train_time=1.370 +[gpua006:0/64] 2024-02-13 16:05:10,811 (trainer:756) INFO: 38epoch:train:3601-3700batch: iter_time=9.463e-05, forward_time=0.186, loss_ctc=84.998, loss_interctc_layer6=86.644, loss_interctc_layer12=71.737, loss_interctc_layer15=65.907, loss_interctc_layer21=88.261, loss=79.510, backward_time=0.262, grad_norm=86.107, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.143, optim0_lr0=6.555e-05, train_time=1.381 +[gpua006:0/64] 2024-02-13 16:06:46,151 (multiple_iter_factory:32) INFO: Building 3th iter-factory... 
+[gpua006:0/64] 2024-02-13 16:07:04,819 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 16:07:08,230 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.10", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.10", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.10", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.10", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 16:07:08,230 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.10, +[gpua006:0/64] 2024-02-13 16:07:08,302 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 16:12:28,741 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-13 16:12:37,229 (trainer:756) INFO: 38epoch:train:3701-3800batch: iter_time=2.978, forward_time=0.165, loss_ctc=88.409, loss_interctc_layer6=98.397, loss_interctc_layer12=81.278, loss_interctc_layer15=74.520, loss_interctc_layer21=91.463, loss=86.813, backward_time=0.211, grad_norm=85.846, clip=100.000, loss_scale=7.744e+31, optim_step_time=0.138, optim0_lr0=6.555e-05, train_time=4.466 +[gpua006:0/64] 2024-02-13 16:14:09,364 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-13 16:14:13,114 (trainer:756) INFO: 38epoch:train:3801-3900batch: iter_time=8.175e-05, forward_time=0.144, loss_ctc=71.162, loss_interctc_layer6=78.050, loss_interctc_layer12=64.355, loss_interctc_layer15=58.802, loss_interctc_layer21=73.852, loss=69.244, backward_time=0.209, grad_norm=65.863, clip=100.000, loss_scale=3.975e+31, optim_step_time=0.138, optim0_lr0=6.554e-05, train_time=0.959 +[gpua006:0/64] 2024-02-13 16:15:45,631 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
+[gpua006:0/64] 2024-02-13 16:16:48,328 (trainer:756) INFO: 38epoch:train:3901-4000batch: iter_time=8.222e-05, forward_time=0.143, loss_ctc=81.156, loss_interctc_layer6=82.833, loss_interctc_layer12=68.141, loss_interctc_layer15=62.173, loss_interctc_layer21=84.149, loss=75.690, backward_time=0.209, grad_norm=69.469, clip=100.000, loss_scale=1.711e+31, optim_step_time=0.138, optim0_lr0=6.553e-05, train_time=1.552 +[gpua006:0/64] 2024-02-13 16:18:29,530 (trainer:756) INFO: 38epoch:train:4001-4100batch: iter_time=8.128e-05, forward_time=0.142, loss_ctc=77.428, loss_interctc_layer6=87.635, loss_interctc_layer12=72.588, loss_interctc_layer15=66.348, loss_interctc_layer21=80.403, loss=76.880, backward_time=0.209, grad_norm=87.612, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.553e-05, train_time=1.012 +[gpua006:0/64] 2024-02-13 16:20:48,359 (trainer:756) INFO: 38epoch:train:4101-4200batch: iter_time=1.027e-04, forward_time=0.143, loss_ctc=62.087, loss_interctc_layer6=76.217, loss_interctc_layer12=62.966, loss_interctc_layer15=57.495, loss_interctc_layer21=63.980, loss=64.549, backward_time=0.207, grad_norm=89.806, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.552e-05, train_time=1.388 +[gpua006:0/64] 2024-02-13 16:23:01,785 (trainer:756) INFO: 38epoch:train:4201-4300batch: iter_time=8.267e-05, forward_time=0.142, loss_ctc=72.219, loss_interctc_layer6=79.921, loss_interctc_layer12=65.936, loss_interctc_layer15=60.280, loss_interctc_layer21=74.626, loss=70.596, backward_time=0.207, grad_norm=78.061, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.552e-05, train_time=1.334 +[gpua006:0/64] 2024-02-13 16:24:51,902 (trainer:756) INFO: 38epoch:train:4301-4400batch: iter_time=8.983e-05, forward_time=0.142, loss_ctc=71.458, loss_interctc_layer6=80.091, loss_interctc_layer12=66.407, loss_interctc_layer15=60.960, loss_interctc_layer21=73.879, loss=70.559, backward_time=0.208, grad_norm=67.381, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.551e-05, train_time=1.101 +[gpua006:0/64] 2024-02-13 16:27:03,276 (trainer:756) INFO: 38epoch:train:4401-4500batch: iter_time=8.567e-05, forward_time=0.142, loss_ctc=74.635, loss_interctc_layer6=75.449, loss_interctc_layer12=62.309, loss_interctc_layer15=57.098, loss_interctc_layer21=77.516, loss=69.401, backward_time=0.205, grad_norm=86.914, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.551e-05, train_time=1.314 +[gpua006:0/64] 2024-02-13 16:28:51,722 (trainer:756) INFO: 38epoch:train:4501-4600batch: iter_time=8.627e-05, forward_time=0.141, loss_ctc=72.233, loss_interctc_layer6=81.199, loss_interctc_layer12=66.979, loss_interctc_layer15=61.287, loss_interctc_layer21=74.741, loss=71.288, backward_time=0.206, grad_norm=71.175, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.550e-05, train_time=1.084 +[gpua006:0/64] 2024-02-13 16:30:39,914 (trainer:756) INFO: 38epoch:train:4601-4700batch: iter_time=8.654e-05, forward_time=0.144, loss_ctc=83.833, loss_interctc_layer6=82.070, loss_interctc_layer12=67.808, loss_interctc_layer15=62.056, loss_interctc_layer21=87.311, loss=76.616, backward_time=0.207, grad_norm=73.164, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.549e-05, train_time=1.082 +[gpua006:0/64] 2024-02-13 16:32:43,674 (trainer:756) INFO: 38epoch:train:4701-4800batch: iter_time=1.361e-04, forward_time=0.245, loss_ctc=83.098, loss_interctc_layer6=83.173, loss_interctc_layer12=68.782, 
loss_interctc_layer15=62.956, loss_interctc_layer21=85.972, loss=76.796, backward_time=0.231, grad_norm=93.670, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.141, optim0_lr0=6.549e-05, train_time=1.237 +[gpua006:0/64] 2024-02-13 16:35:09,311 (trainer:756) INFO: 38epoch:train:4801-4900batch: iter_time=8.435e-05, forward_time=0.163, loss_ctc=71.556, loss_interctc_layer6=78.436, loss_interctc_layer12=64.970, loss_interctc_layer15=59.574, loss_interctc_layer21=74.128, loss=69.733, backward_time=0.216, grad_norm=70.160, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.548e-05, train_time=1.455 +[gpua006:0/64] 2024-02-13 16:37:25,194 (trainer:756) INFO: 38epoch:train:4901-5000batch: iter_time=8.295e-05, forward_time=0.143, loss_ctc=92.358, loss_interctc_layer6=97.541, loss_interctc_layer12=81.101, loss_interctc_layer15=74.589, loss_interctc_layer21=95.486, loss=88.215, backward_time=0.206, grad_norm=92.672, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.548e-05, train_time=1.360 +[gpua006:0/64] 2024-02-13 16:37:45,266 (multiple_iter_factory:32) INFO: Building 4th iter-factory... +[gpua006:0/64] 2024-02-13 16:38:04,121 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 16:38:07,481 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.11", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.11", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.11", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.11", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 16:38:07,481 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.11, +[gpua006:0/64] 2024-02-13 16:38:07,525 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 16:44:31,747 (trainer:756) INFO: 38epoch:train:5001-5100batch: iter_time=3.125, forward_time=0.144, loss_ctc=79.462, loss_interctc_layer6=86.770, loss_interctc_layer12=71.373, loss_interctc_layer15=65.110, loss_interctc_layer21=82.821, loss=77.107, backward_time=0.208, grad_norm=80.082, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.547e-05, train_time=4.265 +[gpua006:0/64] 2024-02-13 16:46:11,996 (trainer:756) INFO: 38epoch:train:5101-5200batch: iter_time=8.204e-05, forward_time=0.143, loss_ctc=64.749, loss_interctc_layer6=79.444, loss_interctc_layer12=65.208, loss_interctc_layer15=59.425, loss_interctc_layer21=67.056, loss=67.176, backward_time=0.209, grad_norm=59.583, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.546e-05, train_time=1.002 +[gpua006:0/64] 2024-02-13 16:48:32,093 (trainer:756) INFO: 38epoch:train:5201-5300batch: iter_time=8.957e-05, forward_time=0.144, loss_ctc=75.865, loss_interctc_layer6=91.027, loss_interctc_layer12=75.062, loss_interctc_layer15=68.584, loss_interctc_layer21=78.761, loss=77.860, backward_time=0.207, grad_norm=75.067, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.546e-05, train_time=1.401 +[gpua006:0/64] 2024-02-13 16:50:15,874 (trainer:756) INFO: 38epoch:train:5301-5400batch: iter_time=8.772e-05, forward_time=0.141, loss_ctc=64.773, loss_interctc_layer6=75.934, 
loss_interctc_layer12=62.663, loss_interctc_layer15=57.331, loss_interctc_layer21=67.133, loss=65.567, backward_time=0.206, grad_norm=68.873, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.545e-05, train_time=1.038 +[gpua006:0/64] 2024-02-13 16:52:10,193 (trainer:756) INFO: 38epoch:train:5401-5500batch: iter_time=9.227e-05, forward_time=0.162, loss_ctc=65.891, loss_interctc_layer6=80.623, loss_interctc_layer12=66.645, loss_interctc_layer15=60.994, loss_interctc_layer21=67.758, loss=68.382, backward_time=0.207, grad_norm=129.107, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.545e-05, train_time=1.143 +[gpua006:0/64] 2024-02-13 16:54:07,366 (trainer:756) INFO: 38epoch:train:5501-5600batch: iter_time=8.921e-05, forward_time=0.142, loss_ctc=69.879, loss_interctc_layer6=79.771, loss_interctc_layer12=65.763, loss_interctc_layer15=59.971, loss_interctc_layer21=72.419, loss=69.561, backward_time=0.207, grad_norm=73.994, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.544e-05, train_time=1.172 +[gpua006:0/64] 2024-02-13 16:56:19,514 (trainer:756) INFO: 38epoch:train:5601-5700batch: iter_time=8.894e-05, forward_time=0.142, loss_ctc=65.467, loss_interctc_layer6=74.652, loss_interctc_layer12=61.795, loss_interctc_layer15=56.793, loss_interctc_layer21=67.732, loss=65.288, backward_time=0.206, grad_norm=67.789, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.544e-05, train_time=1.321 +[gpua006:0/64] 2024-02-13 16:58:48,790 (trainer:756) INFO: 38epoch:train:5701-5800batch: iter_time=9.234e-05, forward_time=0.147, loss_ctc=77.375, loss_interctc_layer6=82.572, loss_interctc_layer12=68.038, loss_interctc_layer15=62.281, loss_interctc_layer21=80.391, loss=74.132, backward_time=0.208, grad_norm=149.535, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.543e-05, train_time=1.493 +[gpua006:0/64] 2024-02-13 17:00:48,597 (trainer:756) INFO: 38epoch:train:5801-5900batch: iter_time=2.165e-04, forward_time=0.216, loss_ctc=67.222, loss_interctc_layer6=79.898, loss_interctc_layer12=65.870, loss_interctc_layer15=60.097, loss_interctc_layer21=69.631, loss=68.544, backward_time=0.226, grad_norm=70.211, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.144, optim0_lr0=6.542e-05, train_time=1.198 +[gpua006:0/64] 2024-02-13 17:03:02,178 (trainer:756) INFO: 38epoch:train:5901-6000batch: iter_time=9.405e-05, forward_time=0.155, loss_ctc=76.679, loss_interctc_layer6=80.870, loss_interctc_layer12=67.236, loss_interctc_layer15=61.666, loss_interctc_layer21=79.416, loss=73.173, backward_time=0.215, grad_norm=82.973, clip=100.000, loss_scale=1.328e+31, optim_step_time=0.139, optim0_lr0=6.542e-05, train_time=1.334 +[gpua006:0/64] 2024-02-13 17:04:58,551 (trainer:756) INFO: 38epoch:train:6001-6100batch: iter_time=8.659e-05, forward_time=0.145, loss_ctc=77.602, loss_interctc_layer6=84.473, loss_interctc_layer12=69.927, loss_interctc_layer15=63.954, loss_interctc_layer21=80.430, loss=75.277, backward_time=0.208, grad_norm=69.862, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.541e-05, train_time=1.165 +[gpua006:0/64] 2024-02-13 17:06:48,649 (trainer:756) INFO: 38epoch:train:6101-6200batch: iter_time=8.638e-05, forward_time=0.143, loss_ctc=78.593, loss_interctc_layer6=85.792, loss_interctc_layer12=70.951, loss_interctc_layer15=65.047, loss_interctc_layer21=81.495, loss=76.376, backward_time=0.207, grad_norm=74.132, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, 
optim0_lr0=6.541e-05, train_time=1.101 +[gpua006:0/64] 2024-02-13 17:08:12,113 (multiple_iter_factory:32) INFO: Building 5th iter-factory... +[gpua006:0/64] 2024-02-13 17:08:31,118 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 17:08:34,533 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.6", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.6", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.6", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.6", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 17:08:34,533 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.6, +[gpua006:0/64] 2024-02-13 17:08:34,623 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 17:16:56,294 (trainer:756) INFO: 38epoch:train:6201-6300batch: iter_time=2.765, forward_time=0.144, loss_ctc=87.147, loss_interctc_layer6=97.865, loss_interctc_layer12=80.852, loss_interctc_layer15=74.163, loss_interctc_layer21=90.380, loss=86.081, backward_time=0.208, grad_norm=89.001, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.540e-05, train_time=6.076 +[gpua006:0/64] 2024-02-13 17:18:46,638 (trainer:756) INFO: 38epoch:train:6301-6400batch: iter_time=8.626e-05, forward_time=0.142, loss_ctc=70.242, loss_interctc_layer6=77.646, loss_interctc_layer12=63.901, loss_interctc_layer15=58.280, loss_interctc_layer21=72.986, loss=68.611, backward_time=0.208, grad_norm=73.276, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.539e-05, train_time=1.103 +[gpua006:0/64] 2024-02-13 17:21:00,204 (trainer:756) INFO: 38epoch:train:6401-6500batch: iter_time=8.991e-05, forward_time=0.145, loss_ctc=80.143, loss_interctc_layer6=81.667, loss_interctc_layer12=67.149, loss_interctc_layer15=61.270, loss_interctc_layer21=83.183, loss=74.683, backward_time=0.209, grad_norm=66.395, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.539e-05, train_time=1.335 +[gpua006:0/64] 2024-02-13 17:22:58,367 (trainer:756) INFO: 38epoch:train:6501-6600batch: iter_time=9.737e-05, forward_time=0.143, loss_ctc=77.449, loss_interctc_layer6=87.328, loss_interctc_layer12=72.103, loss_interctc_layer15=66.019, loss_interctc_layer21=80.298, loss=76.639, backward_time=0.208, grad_norm=73.742, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.538e-05, train_time=1.181 +[gpua006:0/64] 2024-02-13 17:25:00,364 (trainer:756) INFO: 38epoch:train:6601-6700batch: iter_time=9.595e-05, forward_time=0.143, loss_ctc=60.759, loss_interctc_layer6=74.975, loss_interctc_layer12=61.720, loss_interctc_layer15=56.378, loss_interctc_layer21=62.635, loss=63.293, backward_time=0.208, grad_norm=93.854, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.538e-05, train_time=1.220 +[gpua006:0/64] 2024-02-13 17:27:19,415 (trainer:756) INFO: 38epoch:train:6701-6800batch: iter_time=9.080e-05, forward_time=0.198, loss_ctc=71.843, loss_interctc_layer6=79.379, loss_interctc_layer12=65.372, loss_interctc_layer15=59.746, loss_interctc_layer21=74.268, loss=70.122, backward_time=0.221, grad_norm=60.144, clip=100.000, loss_scale=2.028e+31, 
optim_step_time=0.140, optim0_lr0=6.537e-05, train_time=1.390 +[gpua006:0/64] 2024-02-13 17:29:47,003 (trainer:756) INFO: 38epoch:train:6801-6900batch: iter_time=9.785e-05, forward_time=0.218, loss_ctc=70.988, loss_interctc_layer6=80.081, loss_interctc_layer12=66.325, loss_interctc_layer15=61.069, loss_interctc_layer21=73.488, loss=70.390, backward_time=0.229, grad_norm=102.350, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.141, optim0_lr0=6.537e-05, train_time=1.476 +[gpua006:0/64] 2024-02-13 17:32:07,374 (trainer:756) INFO: 38epoch:train:6901-7000batch: iter_time=9.242e-05, forward_time=0.143, loss_ctc=74.677, loss_interctc_layer6=75.821, loss_interctc_layer12=62.626, loss_interctc_layer15=57.400, loss_interctc_layer21=77.587, loss=69.622, backward_time=0.208, grad_norm=69.277, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.536e-05, train_time=1.403 +[gpua006:0/64] 2024-02-13 17:34:29,309 (trainer:756) INFO: 38epoch:train:7001-7100batch: iter_time=9.110e-05, forward_time=0.145, loss_ctc=72.199, loss_interctc_layer6=81.121, loss_interctc_layer12=66.873, loss_interctc_layer15=61.187, loss_interctc_layer21=74.694, loss=71.215, backward_time=0.206, grad_norm=73.875, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.535e-05, train_time=1.420 +[gpua006:0/64] 2024-02-13 17:36:50,412 (trainer:756) INFO: 38epoch:train:7101-7200batch: iter_time=9.200e-05, forward_time=0.143, loss_ctc=83.779, loss_interctc_layer6=81.888, loss_interctc_layer12=67.605, loss_interctc_layer15=61.976, loss_interctc_layer21=87.143, loss=76.478, backward_time=0.208, grad_norm=77.086, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.535e-05, train_time=1.411 +[gpua006:0/64] 2024-02-13 17:39:15,103 (trainer:756) INFO: 38epoch:train:7201-7300batch: iter_time=9.274e-05, forward_time=0.144, loss_ctc=83.270, loss_interctc_layer6=83.181, loss_interctc_layer12=68.820, loss_interctc_layer15=62.965, loss_interctc_layer21=86.310, loss=76.910, backward_time=0.208, grad_norm=95.514, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.534e-05, train_time=1.447 +[gpua006:0/64] 2024-02-13 17:41:45,238 (trainer:756) INFO: 38epoch:train:7301-7400batch: iter_time=9.560e-05, forward_time=0.143, loss_ctc=70.919, loss_interctc_layer6=78.201, loss_interctc_layer12=64.559, loss_interctc_layer15=59.118, loss_interctc_layer21=73.539, loss=69.267, backward_time=0.207, grad_norm=72.881, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.534e-05, train_time=1.501 +[gpua006:0/64] 2024-02-13 17:44:10,120 (trainer:756) INFO: 38epoch:train:7401-7500batch: iter_time=9.900e-05, forward_time=0.144, loss_ctc=93.594, loss_interctc_layer6=98.009, loss_interctc_layer12=81.497, loss_interctc_layer15=74.975, loss_interctc_layer21=96.711, loss=88.957, backward_time=0.206, grad_norm=85.829, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.533e-05, train_time=1.449 +[gpua006:0/64] 2024-02-13 17:44:30,157 (multiple_iter_factory:32) INFO: Building 6th iter-factory... 
+[gpua006:0/64] 2024-02-13 17:44:48,853 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 17:44:52,278 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.8", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.8", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.8", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.8", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 17:44:52,278 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.8, +[gpua006:0/64] 2024-02-13 17:44:52,344 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 17:54:41,809 (trainer:756) INFO: 38epoch:train:7501-7600batch: iter_time=2.619, forward_time=0.225, loss_ctc=84.736, loss_interctc_layer6=85.525, loss_interctc_layer12=70.229, loss_interctc_layer15=64.099, loss_interctc_layer21=87.844, loss=78.486, backward_time=0.244, grad_norm=80.525, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.141, optim0_lr0=6.532e-05, train_time=6.317 +[gpua006:0/64] 2024-02-13 17:56:38,187 (trainer:756) INFO: 38epoch:train:7601-7700batch: iter_time=9.287e-05, forward_time=0.144, loss_ctc=68.010, loss_interctc_layer6=79.294, loss_interctc_layer12=65.088, loss_interctc_layer15=59.418, loss_interctc_layer21=70.378, loss=68.438, backward_time=0.209, grad_norm=60.928, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.532e-05, train_time=1.162 +[gpua006:0/64] 2024-02-13 17:58:46,927 (trainer:756) INFO: 38epoch:train:7701-7800batch: iter_time=8.400e-05, forward_time=0.145, loss_ctc=82.695, loss_interctc_layer6=90.265, loss_interctc_layer12=74.361, loss_interctc_layer15=67.942, loss_interctc_layer21=85.725, loss=80.198, backward_time=0.207, grad_norm=128.155, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.531e-05, train_time=1.289 +[gpua006:0/64] 2024-02-13 18:01:37,940 (trainer:756) INFO: 38epoch:train:7801-7900batch: iter_time=8.950e-05, forward_time=0.142, loss_ctc=69.115, loss_interctc_layer6=75.553, loss_interctc_layer12=62.466, loss_interctc_layer15=57.185, loss_interctc_layer21=71.679, loss=67.199, backward_time=0.206, grad_norm=77.301, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.531e-05, train_time=1.710 +[gpua006:0/64] 2024-02-13 18:04:04,042 (trainer:756) INFO: 38epoch:train:7901-8000batch: iter_time=8.801e-05, forward_time=0.143, loss_ctc=66.825, loss_interctc_layer6=80.233, loss_interctc_layer12=66.190, loss_interctc_layer15=60.562, loss_interctc_layer21=68.749, loss=68.512, backward_time=0.206, grad_norm=102.749, clip=100.000, loss_scale=2.657e+31, optim_step_time=0.138, optim0_lr0=6.530e-05, train_time=1.461 +[gpua006:0/64] 2024-02-13 18:06:14,061 (trainer:756) INFO: 38epoch:train:8001-8100batch: iter_time=8.639e-05, forward_time=0.142, loss_ctc=70.473, loss_interctc_layer6=79.956, loss_interctc_layer12=65.697, loss_interctc_layer15=59.957, loss_interctc_layer21=73.019, loss=69.820, backward_time=0.206, grad_norm=62.867, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.530e-05, train_time=1.300 +[gpua006:0/64] 2024-02-13 18:08:08,088 (trainer:687) WARNING: The grad norm is nan. 
Skipping updating the model. +[gpua006:0/64] 2024-02-13 18:08:33,985 (trainer:756) INFO: 38epoch:train:8101-8200batch: iter_time=9.382e-05, forward_time=0.142, loss_ctc=69.000, loss_interctc_layer6=74.461, loss_interctc_layer12=61.667, loss_interctc_layer15=56.569, loss_interctc_layer21=71.367, loss=66.613, backward_time=0.206, grad_norm=61.602, clip=100.000, loss_scale=3.626e+31, optim_step_time=0.138, optim0_lr0=6.529e-05, train_time=1.399 +[gpua006:0/64] 2024-02-13 18:10:27,225 (trainer:756) INFO: 38epoch:train:8201-8300batch: iter_time=9.125e-05, forward_time=0.142, loss_ctc=82.906, loss_interctc_layer6=82.682, loss_interctc_layer12=68.159, loss_interctc_layer15=62.323, loss_interctc_layer21=86.036, loss=76.421, backward_time=0.206, grad_norm=87.475, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.528e-05, train_time=1.132 +[gpua006:0/64] 2024-02-13 18:13:01,587 (trainer:756) INFO: 38epoch:train:8301-8400batch: iter_time=9.162e-05, forward_time=0.144, loss_ctc=71.092, loss_interctc_layer6=79.435, loss_interctc_layer12=65.380, loss_interctc_layer15=59.695, loss_interctc_layer21=73.714, loss=69.863, backward_time=0.206, grad_norm=81.481, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.528e-05, train_time=1.543 +[gpua006:0/64] 2024-02-13 18:15:29,717 (trainer:756) INFO: 38epoch:train:8401-8500batch: iter_time=3.354e-04, forward_time=0.206, loss_ctc=80.807, loss_interctc_layer6=80.625, loss_interctc_layer12=66.948, loss_interctc_layer15=61.444, loss_interctc_layer21=83.958, loss=74.757, backward_time=0.248, grad_norm=77.630, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.141, optim0_lr0=6.527e-05, train_time=1.481 +[gpua006:0/64] 2024-02-13 18:17:15,746 (trainer:756) INFO: 38epoch:train:8501-8600batch: iter_time=9.226e-05, forward_time=0.175, loss_ctc=83.499, loss_interctc_layer6=84.808, loss_interctc_layer12=70.153, loss_interctc_layer15=64.339, loss_interctc_layer21=86.466, loss=77.853, backward_time=0.221, grad_norm=116.225, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.527e-05, train_time=1.060 +[gpua006:0/64] 2024-02-13 18:19:10,332 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-13 18:19:26,328 (trainer:756) INFO: 38epoch:train:8601-8700batch: iter_time=9.162e-05, forward_time=0.143, loss_ctc=83.913, loss_interctc_layer6=85.742, loss_interctc_layer12=70.927, loss_interctc_layer15=64.961, loss_interctc_layer21=87.089, loss=78.527, backward_time=0.207, grad_norm=87.020, clip=100.000, loss_scale=1.936e+31, optim_step_time=0.138, optim0_lr0=6.526e-05, train_time=1.304 +[gpua006:0/64] 2024-02-13 18:20:50,554 (multiple_iter_factory:32) INFO: Building 7th iter-factory... 
+[gpua006:0/64] 2024-02-13 18:21:09,531 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 18:21:12,932 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.9", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.9", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.9", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.9", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 18:21:12,932 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.9, +[gpua006:0/64] 2024-02-13 18:21:12,937 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 18:27:15,642 (trainer:756) INFO: 38epoch:train:8701-8800batch: iter_time=3.314, forward_time=0.145, loss_ctc=84.092, loss_interctc_layer6=97.446, loss_interctc_layer12=80.450, loss_interctc_layer15=73.796, loss_interctc_layer21=87.166, loss=84.590, backward_time=0.208, grad_norm=88.194, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.526e-05, train_time=4.695 +[gpua006:0/64] 2024-02-13 18:28:54,292 (trainer:756) INFO: 38epoch:train:8801-8900batch: iter_time=1.173e-04, forward_time=0.142, loss_ctc=67.686, loss_interctc_layer6=77.254, loss_interctc_layer12=63.520, loss_interctc_layer15=58.044, loss_interctc_layer21=70.282, loss=67.357, backward_time=0.207, grad_norm=69.040, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.525e-05, train_time=0.986 +[gpua006:0/64] 2024-02-13 18:30:39,440 (trainer:756) INFO: 38epoch:train:8901-9000batch: iter_time=9.299e-05, forward_time=0.142, loss_ctc=71.318, loss_interctc_layer6=81.698, loss_interctc_layer12=67.116, loss_interctc_layer15=61.244, loss_interctc_layer21=74.047, loss=71.085, backward_time=0.209, grad_norm=65.147, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.524e-05, train_time=1.051 +[gpua006:0/64] 2024-02-13 18:32:42,434 (trainer:756) INFO: 38epoch:train:9001-9100batch: iter_time=8.281e-05, forward_time=0.144, loss_ctc=72.807, loss_interctc_layer6=86.348, loss_interctc_layer12=71.275, loss_interctc_layer15=65.203, loss_interctc_layer21=75.479, loss=74.222, backward_time=0.208, grad_norm=80.376, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.524e-05, train_time=1.230 +[gpua006:0/64] 2024-02-13 18:34:44,250 (trainer:756) INFO: 38epoch:train:9101-9200batch: iter_time=8.107e-05, forward_time=0.142, loss_ctc=58.820, loss_interctc_layer6=74.880, loss_interctc_layer12=61.560, loss_interctc_layer15=56.158, loss_interctc_layer21=60.671, loss=62.418, backward_time=0.207, grad_norm=75.119, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.523e-05, train_time=1.218 +[gpua006:0/64] 2024-02-13 18:36:59,172 (trainer:756) INFO: 38epoch:train:9201-9300batch: iter_time=8.608e-05, forward_time=0.142, loss_ctc=71.324, loss_interctc_layer6=79.349, loss_interctc_layer12=65.257, loss_interctc_layer15=59.683, loss_interctc_layer21=73.867, loss=69.896, backward_time=0.207, grad_norm=92.759, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.523e-05, train_time=1.349 +[gpua006:0/64] 2024-02-13 18:39:38,011 (trainer:756) INFO: 38epoch:train:9301-9400batch: 
iter_time=8.671e-05, forward_time=0.142, loss_ctc=69.780, loss_interctc_layer6=79.962, loss_interctc_layer12=66.221, loss_interctc_layer15=60.730, loss_interctc_layer21=72.148, loss=69.768, backward_time=0.209, grad_norm=72.300, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.522e-05, train_time=1.588 +[gpua006:0/64] 2024-02-13 18:41:47,099 (trainer:756) INFO: 38epoch:train:9401-9500batch: iter_time=2.006e-04, forward_time=0.158, loss_ctc=68.367, loss_interctc_layer6=75.494, loss_interctc_layer12=62.325, loss_interctc_layer15=57.075, loss_interctc_layer21=70.945, loss=66.841, backward_time=0.207, grad_norm=55.655, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.521e-05, train_time=1.291 +[gpua006:0/64] 2024-02-13 18:44:11,732 (trainer:756) INFO: 38epoch:train:9501-9600batch: iter_time=8.506e-05, forward_time=0.241, loss_ctc=67.712, loss_interctc_layer6=81.327, loss_interctc_layer12=67.076, loss_interctc_layer15=61.379, loss_interctc_layer21=70.191, loss=69.537, backward_time=0.232, grad_norm=70.710, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.147, optim0_lr0=6.521e-05, train_time=1.446 +[gpua006:0/64] 2024-02-13 18:46:00,470 (trainer:756) INFO: 38epoch:train:9601-9700batch: iter_time=8.628e-05, forward_time=0.143, loss_ctc=77.031, loss_interctc_layer6=81.928, loss_interctc_layer12=67.634, loss_interctc_layer15=61.831, loss_interctc_layer21=80.200, loss=73.725, backward_time=0.209, grad_norm=117.189, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.520e-05, train_time=1.087 +[gpua006:0/64] 2024-02-13 18:47:51,859 (trainer:756) INFO: 38epoch:train:9701-9800batch: iter_time=8.603e-05, forward_time=0.144, loss_ctc=78.622, loss_interctc_layer6=82.821, loss_interctc_layer12=68.565, loss_interctc_layer15=62.672, loss_interctc_layer21=81.576, loss=74.851, backward_time=0.209, grad_norm=119.950, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.520e-05, train_time=1.113 +[gpua006:0/64] 2024-02-13 18:50:18,549 (trainer:756) INFO: 38epoch:train:9801-9900batch: iter_time=8.560e-05, forward_time=0.143, loss_ctc=65.972, loss_interctc_layer6=77.540, loss_interctc_layer12=63.991, loss_interctc_layer15=58.532, loss_interctc_layer21=68.446, loss=66.896, backward_time=0.207, grad_norm=80.260, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.519e-05, train_time=1.467 +[gpua006:0/64] 2024-02-13 18:52:19,987 (trainer:756) INFO: 38epoch:train:9901-10000batch: iter_time=8.572e-05, forward_time=0.144, loss_ctc=88.189, loss_interctc_layer6=96.927, loss_interctc_layer12=80.445, loss_interctc_layer15=74.025, loss_interctc_layer21=91.361, loss=86.189, backward_time=0.207, grad_norm=80.317, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.519e-05, train_time=1.214 +[gpua006:0/64] 2024-02-13 18:52:40,016 (multiple_iter_factory:32) INFO: Building 8th iter-factory... 
+[gpua006:0/64] 2024-02-13 18:52:58,925 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 18:53:02,320 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.2", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.2", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.2", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.2", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 18:53:02,320 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.2, +[gpua006:0/64] 2024-02-13 18:53:02,393 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 18:58:27,426 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-13 18:59:47,246 (trainer:756) INFO: 38epoch:train:10001-10100batch: iter_time=3.178, forward_time=0.168, loss_ctc=84.353, loss_interctc_layer6=86.097, loss_interctc_layer12=70.638, loss_interctc_layer15=64.554, loss_interctc_layer21=87.806, loss=78.690, backward_time=0.212, grad_norm=81.570, clip=100.000, loss_scale=6.607e+30, optim_step_time=0.139, optim0_lr0=6.518e-05, train_time=4.472 +[gpua006:0/64] 2024-02-13 19:01:42,169 (trainer:756) INFO: 38epoch:train:10101-10200batch: iter_time=9.040e-05, forward_time=0.204, loss_ctc=68.166, loss_interctc_layer6=79.149, loss_interctc_layer12=64.894, loss_interctc_layer15=59.139, loss_interctc_layer21=70.624, loss=68.395, backward_time=0.258, grad_norm=76.326, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.141, optim0_lr0=6.517e-05, train_time=1.149 +[gpua006:0/64] 2024-02-13 19:03:52,728 (trainer:756) INFO: 38epoch:train:10201-10300batch: iter_time=9.763e-05, forward_time=0.144, loss_ctc=82.750, loss_interctc_layer6=89.955, loss_interctc_layer12=74.101, loss_interctc_layer15=67.686, loss_interctc_layer21=85.962, loss=80.091, backward_time=0.208, grad_norm=69.929, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.517e-05, train_time=1.305 +[gpua006:0/64] 2024-02-13 19:05:47,373 (trainer:756) INFO: 38epoch:train:10301-10400batch: iter_time=1.022e-04, forward_time=0.143, loss_ctc=69.021, loss_interctc_layer6=76.097, loss_interctc_layer12=62.859, loss_interctc_layer15=57.519, loss_interctc_layer21=71.682, loss=67.436, backward_time=0.208, grad_norm=82.556, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.516e-05, train_time=1.145 +[gpua006:0/64] 2024-02-13 19:08:05,563 (trainer:756) INFO: 38epoch:train:10401-10500batch: iter_time=9.505e-05, forward_time=0.145, loss_ctc=66.488, loss_interctc_layer6=80.545, loss_interctc_layer12=66.603, loss_interctc_layer15=60.816, loss_interctc_layer21=68.444, loss=68.579, backward_time=0.207, grad_norm=91.485, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.516e-05, train_time=1.383 +[gpua006:0/64] 2024-02-13 19:10:23,669 (trainer:756) INFO: 38epoch:train:10501-10600batch: iter_time=1.016e-04, forward_time=0.142, loss_ctc=68.684, loss_interctc_layer6=79.013, loss_interctc_layer12=64.985, loss_interctc_layer15=59.344, loss_interctc_layer21=71.020, loss=68.609, backward_time=0.206, grad_norm=178.181, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, 
optim0_lr0=6.515e-05, train_time=1.381 +[gpua006:0/64] 2024-02-13 19:12:35,321 (trainer:756) INFO: 38epoch:train:10601-10700batch: iter_time=1.033e-04, forward_time=0.143, loss_ctc=69.383, loss_interctc_layer6=74.060, loss_interctc_layer12=61.487, loss_interctc_layer15=56.321, loss_interctc_layer21=71.866, loss=66.623, backward_time=0.207, grad_norm=72.070, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.515e-05, train_time=1.316 +[gpua006:0/64] 2024-02-13 19:14:36,005 (trainer:756) INFO: 38epoch:train:10701-10800batch: iter_time=9.615e-05, forward_time=0.144, loss_ctc=84.125, loss_interctc_layer6=83.222, loss_interctc_layer12=68.556, loss_interctc_layer15=62.757, loss_interctc_layer21=87.348, loss=77.202, backward_time=0.207, grad_norm=87.556, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=6.514e-05, train_time=1.207 +[gpua006:0/64] 2024-02-13 19:16:56,066 (trainer:756) INFO: 38epoch:train:10801-10900batch: iter_time=9.380e-05, forward_time=0.143, loss_ctc=70.496, loss_interctc_layer6=79.242, loss_interctc_layer12=65.242, loss_interctc_layer15=59.616, loss_interctc_layer21=72.999, loss=69.519, backward_time=0.206, grad_norm=91.536, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.513e-05, train_time=1.400 +[gpua006:0/64] 2024-02-13 19:18:57,109 (trainer:756) INFO: 38epoch:train:10901-11000batch: iter_time=9.800e-05, forward_time=0.145, loss_ctc=80.760, loss_interctc_layer6=80.220, loss_interctc_layer12=66.597, loss_interctc_layer15=61.086, loss_interctc_layer21=83.982, loss=74.529, backward_time=0.207, grad_norm=117.757, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.513e-05, train_time=1.210 +[gpua006:0/64] 2024-02-13 19:20:56,407 (trainer:756) INFO: 38epoch:train:11001-11100batch: iter_time=9.554e-05, forward_time=0.164, loss_ctc=83.596, loss_interctc_layer6=84.533, loss_interctc_layer12=69.875, loss_interctc_layer15=64.062, loss_interctc_layer21=86.600, loss=77.733, backward_time=0.223, grad_norm=81.546, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.512e-05, train_time=1.193 +[gpua006:0/64] 2024-02-13 19:23:29,252 (trainer:756) INFO: 38epoch:train:11101-11200batch: iter_time=8.956e-05, forward_time=0.197, loss_ctc=84.029, loss_interctc_layer6=85.765, loss_interctc_layer12=71.004, loss_interctc_layer15=65.084, loss_interctc_layer21=87.258, loss=78.628, backward_time=0.262, grad_norm=96.274, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.143, optim0_lr0=6.512e-05, train_time=1.528 +[gpua006:0/64] 2024-02-13 19:24:49,469 (multiple_iter_factory:32) INFO: Building 9th iter-factory... 
+[gpua006:0/64] 2024-02-13 19:25:08,170 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 19:25:11,573 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.0", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.0", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.0", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.0", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 19:25:11,573 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.0, +[gpua006:0/64] 2024-02-13 19:25:11,580 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 19:33:06,679 (trainer:756) INFO: 38epoch:train:11201-11300batch: iter_time=2.948, forward_time=0.145, loss_ctc=87.842, loss_interctc_layer6=97.462, loss_interctc_layer12=80.534, loss_interctc_layer15=73.799, loss_interctc_layer21=91.144, loss=86.156, backward_time=0.208, grad_norm=241.227, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.511e-05, train_time=5.774 +[gpua006:0/64] 2024-02-13 19:35:45,687 (trainer:756) INFO: 38epoch:train:11301-11400batch: iter_time=8.795e-05, forward_time=0.145, loss_ctc=69.995, loss_interctc_layer6=77.028, loss_interctc_layer12=63.216, loss_interctc_layer15=57.732, loss_interctc_layer21=72.636, loss=68.121, backward_time=0.206, grad_norm=72.966, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.511e-05, train_time=1.590 +[gpua006:0/64] 2024-02-13 19:38:12,172 (trainer:756) INFO: 38epoch:train:11401-11500batch: iter_time=8.922e-05, forward_time=0.142, loss_ctc=79.745, loss_interctc_layer6=81.617, loss_interctc_layer12=67.012, loss_interctc_layer15=61.056, loss_interctc_layer21=82.711, loss=74.428, backward_time=0.206, grad_norm=88.807, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.510e-05, train_time=1.465 +[gpua006:0/64] 2024-02-13 19:40:30,688 (trainer:756) INFO: 38epoch:train:11501-11600batch: iter_time=2.571e-04, forward_time=0.143, loss_ctc=76.738, loss_interctc_layer6=86.698, loss_interctc_layer12=71.445, loss_interctc_layer15=65.303, loss_interctc_layer21=79.635, loss=75.964, backward_time=0.207, grad_norm=78.118, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.509e-05, train_time=1.385 +[gpua006:0/64] 2024-02-13 19:43:41,461 (trainer:756) INFO: 38epoch:train:11601-11700batch: iter_time=9.733e-05, forward_time=0.142, loss_ctc=62.375, loss_interctc_layer6=75.598, loss_interctc_layer12=62.334, loss_interctc_layer15=56.843, loss_interctc_layer21=64.334, loss=64.297, backward_time=0.205, grad_norm=74.840, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.509e-05, train_time=1.908 +[gpua006:0/64] 2024-02-13 19:47:05,268 (trainer:756) INFO: 38epoch:train:11701-11800batch: iter_time=9.154e-05, forward_time=0.144, loss_ctc=72.409, loss_interctc_layer6=78.864, loss_interctc_layer12=64.975, loss_interctc_layer15=59.303, loss_interctc_layer21=74.960, loss=70.102, backward_time=0.204, grad_norm=84.670, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.508e-05, train_time=2.038 +[gpua006:0/64] 2024-02-13 19:49:58,114 (trainer:756) INFO: 
38epoch:train:11801-11900batch: iter_time=9.230e-05, forward_time=0.155, loss_ctc=71.730, loss_interctc_layer6=79.639, loss_interctc_layer12=66.096, loss_interctc_layer15=60.618, loss_interctc_layer21=74.308, loss=70.478, backward_time=0.205, grad_norm=117.466, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.508e-05, train_time=1.728 +[gpua006:0/64] 2024-02-13 19:52:56,517 (trainer:756) INFO: 38epoch:train:11901-12000batch: iter_time=9.443e-05, forward_time=0.179, loss_ctc=74.356, loss_interctc_layer6=75.927, loss_interctc_layer12=62.668, loss_interctc_layer15=57.432, loss_interctc_layer21=76.867, loss=69.450, backward_time=0.223, grad_norm=100.237, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.142, optim0_lr0=6.507e-05, train_time=1.784 +[gpua006:0/64] 2024-02-13 19:55:46,322 (trainer:756) INFO: 38epoch:train:12001-12100batch: iter_time=9.061e-05, forward_time=0.174, loss_ctc=71.328, loss_interctc_layer6=80.685, loss_interctc_layer12=66.454, loss_interctc_layer15=60.715, loss_interctc_layer21=73.953, loss=70.627, backward_time=0.214, grad_norm=76.445, clip=100.000, loss_scale=8.569e+30, optim_step_time=0.138, optim0_lr0=6.507e-05, train_time=1.698 +[gpua006:0/64] 2024-02-13 19:58:15,881 (trainer:756) INFO: 38epoch:train:12101-12200batch: iter_time=9.635e-05, forward_time=0.143, loss_ctc=82.656, loss_interctc_layer6=81.587, loss_interctc_layer12=67.443, loss_interctc_layer15=61.585, loss_interctc_layer21=85.883, loss=75.831, backward_time=0.205, grad_norm=90.912, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.506e-05, train_time=1.495 +[gpua006:0/64] 2024-02-13 20:01:16,228 (trainer:756) INFO: 38epoch:train:12201-12300batch: iter_time=9.418e-05, forward_time=0.143, loss_ctc=82.305, loss_interctc_layer6=83.118, loss_interctc_layer12=68.644, loss_interctc_layer15=62.693, loss_interctc_layer21=85.435, loss=76.439, backward_time=0.205, grad_norm=81.815, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.505e-05, train_time=1.803 +[gpua006:0/64] 2024-02-13 20:04:05,293 (trainer:756) INFO: 38epoch:train:12301-12400batch: iter_time=9.375e-05, forward_time=0.144, loss_ctc=71.987, loss_interctc_layer6=77.944, loss_interctc_layer12=64.260, loss_interctc_layer15=58.818, loss_interctc_layer21=74.536, loss=69.509, backward_time=0.208, grad_norm=83.064, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.505e-05, train_time=1.691 +[gpua006:0/64] 2024-02-13 20:07:21,344 (trainer:756) INFO: 38epoch:train:12401-12500batch: iter_time=9.910e-05, forward_time=0.145, loss_ctc=91.146, loss_interctc_layer6=97.135, loss_interctc_layer12=80.552, loss_interctc_layer15=73.970, loss_interctc_layer21=94.135, loss=87.387, backward_time=0.208, grad_norm=82.004, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.504e-05, train_time=1.960 +[gpua006:0/64] 2024-02-13 20:07:41,374 (multiple_iter_factory:32) INFO: Building 10th iter-factory... 
+[gpua006:0/64] 2024-02-13 20:08:00,436 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 20:08:03,860 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.1", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.1", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.1", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.1", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 20:08:03,860 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.1, +[gpua006:0/64] 2024-02-13 20:08:03,866 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 20:14:33,936 (trainer:756) INFO: 38epoch:train:12501-12600batch: iter_time=2.976, forward_time=0.145, loss_ctc=79.789, loss_interctc_layer6=85.740, loss_interctc_layer12=70.495, loss_interctc_layer15=64.251, loss_interctc_layer21=83.113, loss=76.678, backward_time=0.207, grad_norm=81.442, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.504e-05, train_time=4.326 +[gpua006:0/64] 2024-02-13 20:17:01,739 (trainer:756) INFO: 38epoch:train:12601-12700batch: iter_time=8.764e-05, forward_time=0.143, loss_ctc=64.045, loss_interctc_layer6=79.337, loss_interctc_layer12=64.944, loss_interctc_layer15=59.101, loss_interctc_layer21=66.275, loss=66.740, backward_time=0.213, grad_norm=67.279, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.503e-05, train_time=1.478 +[gpua006:0/64] 2024-02-13 20:18:50,324 (trainer:756) INFO: 38epoch:train:12701-12800batch: iter_time=8.712e-05, forward_time=0.149, loss_ctc=74.607, loss_interctc_layer6=89.549, loss_interctc_layer12=73.539, loss_interctc_layer15=67.088, loss_interctc_layer21=77.554, loss=76.467, backward_time=0.214, grad_norm=95.083, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.503e-05, train_time=1.086 +[gpua006:0/64] 2024-02-13 20:20:56,989 (trainer:756) INFO: 38epoch:train:12801-12900batch: iter_time=8.528e-05, forward_time=0.182, loss_ctc=64.828, loss_interctc_layer6=75.851, loss_interctc_layer12=62.649, loss_interctc_layer15=57.217, loss_interctc_layer21=67.382, loss=65.585, backward_time=0.254, grad_norm=69.232, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.142, optim0_lr0=6.502e-05, train_time=1.266 +[gpua006:0/64] 2024-02-13 20:22:55,909 (trainer:756) INFO: 38epoch:train:12901-13000batch: iter_time=8.510e-05, forward_time=0.164, loss_ctc=64.759, loss_interctc_layer6=79.833, loss_interctc_layer12=65.925, loss_interctc_layer15=60.239, loss_interctc_layer21=66.617, loss=67.475, backward_time=0.215, grad_norm=68.512, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.501e-05, train_time=1.189 +[gpua006:0/64] 2024-02-13 20:24:50,111 (trainer:756) INFO: 38epoch:train:13001-13100batch: iter_time=8.450e-05, forward_time=0.141, loss_ctc=68.402, loss_interctc_layer6=79.522, loss_interctc_layer12=65.397, loss_interctc_layer15=59.663, loss_interctc_layer21=70.936, loss=68.784, backward_time=0.206, grad_norm=113.729, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.501e-05, train_time=1.142 +[gpua006:0/64] 2024-02-13 20:27:24,545 (trainer:756) INFO: 
38epoch:train:13101-13200batch: iter_time=8.410e-05, forward_time=0.141, loss_ctc=65.132, loss_interctc_layer6=74.219, loss_interctc_layer12=61.513, loss_interctc_layer15=56.379, loss_interctc_layer21=67.404, loss=64.929, backward_time=0.205, grad_norm=69.839, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.500e-05, train_time=1.543 +[gpua006:0/64] 2024-02-13 20:29:31,271 (trainer:756) INFO: 38epoch:train:13201-13300batch: iter_time=8.993e-05, forward_time=0.142, loss_ctc=76.799, loss_interctc_layer6=82.121, loss_interctc_layer12=67.629, loss_interctc_layer15=61.860, loss_interctc_layer21=79.722, loss=73.626, backward_time=0.207, grad_norm=86.473, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.500e-05, train_time=1.267 +[gpua006:0/64] 2024-02-13 20:31:42,908 (trainer:756) INFO: 38epoch:train:13301-13400batch: iter_time=8.719e-05, forward_time=0.143, loss_ctc=66.044, loss_interctc_layer6=79.055, loss_interctc_layer12=65.069, loss_interctc_layer15=59.404, loss_interctc_layer21=68.329, loss=67.580, backward_time=0.206, grad_norm=66.984, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.499e-05, train_time=1.317 +[gpua006:0/64] 2024-02-13 20:33:55,083 (trainer:756) INFO: 38epoch:train:13401-13500batch: iter_time=8.808e-05, forward_time=0.143, loss_ctc=76.112, loss_interctc_layer6=79.842, loss_interctc_layer12=66.222, loss_interctc_layer15=60.758, loss_interctc_layer21=79.321, loss=72.451, backward_time=0.207, grad_norm=83.876, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.498e-05, train_time=1.322 +[gpua006:0/64] 2024-02-13 20:35:57,587 (trainer:756) INFO: 38epoch:train:13501-13600batch: iter_time=1.586e-04, forward_time=0.143, loss_ctc=78.377, loss_interctc_layer6=84.592, loss_interctc_layer12=69.903, loss_interctc_layer15=64.064, loss_interctc_layer21=81.235, loss=75.634, backward_time=0.208, grad_norm=94.626, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.498e-05, train_time=1.225 +[gpua006:0/64] 2024-02-13 20:37:57,429 (trainer:756) INFO: 38epoch:train:13601-13700batch: iter_time=8.900e-05, forward_time=0.142, loss_ctc=79.084, loss_interctc_layer6=85.344, loss_interctc_layer12=70.554, loss_interctc_layer15=64.712, loss_interctc_layer21=82.221, loss=76.383, backward_time=0.206, grad_norm=83.325, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.497e-05, train_time=1.198 +[gpua006:0/64] 2024-02-13 20:39:29,233 (multiple_iter_factory:32) INFO: Building 11th iter-factory... 
+[gpua006:0/64] 2024-02-13 20:39:48,163 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 20:39:51,542 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.5", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.5", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.5", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.5", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 20:39:51,542 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.5, +[gpua006:0/64] 2024-02-13 20:39:51,573 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 20:45:26,366 (trainer:756) INFO: 38epoch:train:13701-13800batch: iter_time=2.964, forward_time=0.187, loss_ctc=82.373, loss_interctc_layer6=96.287, loss_interctc_layer12=79.532, loss_interctc_layer15=72.790, loss_interctc_layer21=85.533, loss=83.303, backward_time=0.217, grad_norm=109.262, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.497e-05, train_time=4.489 +[gpua006:0/64] 2024-02-13 20:47:35,243 (trainer:756) INFO: 38epoch:train:13801-13900batch: iter_time=9.379e-05, forward_time=0.143, loss_ctc=67.268, loss_interctc_layer6=76.728, loss_interctc_layer12=63.062, loss_interctc_layer15=57.550, loss_interctc_layer21=69.875, loss=66.896, backward_time=0.207, grad_norm=92.630, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.496e-05, train_time=1.289 +[gpua006:0/64] 2024-02-13 20:49:17,212 (trainer:756) INFO: 38epoch:train:13901-14000batch: iter_time=9.016e-05, forward_time=0.143, loss_ctc=70.319, loss_interctc_layer6=80.811, loss_interctc_layer12=66.274, loss_interctc_layer15=60.346, loss_interctc_layer21=73.034, loss=70.157, backward_time=0.207, grad_norm=74.992, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.496e-05, train_time=1.019 +[gpua006:0/64] 2024-02-13 20:51:21,618 (trainer:756) INFO: 38epoch:train:14001-14100batch: iter_time=0.010, forward_time=0.143, loss_ctc=73.126, loss_interctc_layer6=87.011, loss_interctc_layer12=71.839, loss_interctc_layer15=65.604, loss_interctc_layer21=75.889, loss=74.694, backward_time=0.208, grad_norm=74.784, clip=100.000, loss_scale=1.714e+31, optim_step_time=0.138, optim0_lr0=6.495e-05, train_time=1.244 +[gpua006:0/64] 2024-02-13 20:53:45,073 (trainer:756) INFO: 38epoch:train:14101-14200batch: iter_time=9.893e-05, forward_time=0.143, loss_ctc=58.678, loss_interctc_layer6=74.907, loss_interctc_layer12=61.668, loss_interctc_layer15=56.194, loss_interctc_layer21=60.438, loss=62.377, backward_time=0.206, grad_norm=68.745, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.494e-05, train_time=1.434 +[gpua006:0/64] 2024-02-13 20:57:34,415 (trainer:756) INFO: 38epoch:train:14201-14300batch: iter_time=9.707e-05, forward_time=0.142, loss_ctc=71.528, loss_interctc_layer6=79.302, loss_interctc_layer12=65.290, loss_interctc_layer15=59.565, loss_interctc_layer21=74.103, loss=69.958, backward_time=0.206, grad_norm=65.956, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.494e-05, train_time=2.293 +[gpua006:0/64] 2024-02-13 21:00:17,221 (trainer:756) INFO: 
38epoch:train:14301-14400batch: iter_time=9.328e-05, forward_time=0.143, loss_ctc=68.610, loss_interctc_layer6=78.870, loss_interctc_layer12=65.237, loss_interctc_layer15=59.757, loss_interctc_layer21=70.894, loss=68.674, backward_time=0.206, grad_norm=83.995, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.493e-05, train_time=1.628 +[gpua006:0/64] 2024-02-13 21:02:56,688 (trainer:756) INFO: 38epoch:train:14401-14500batch: iter_time=1.011e-04, forward_time=0.191, loss_ctc=68.573, loss_interctc_layer6=75.866, loss_interctc_layer12=62.694, loss_interctc_layer15=57.352, loss_interctc_layer21=71.091, loss=67.115, backward_time=0.219, grad_norm=62.935, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.142, optim0_lr0=6.493e-05, train_time=1.594 +[gpua006:0/64] 2024-02-13 21:05:11,729 (trainer:756) INFO: 38epoch:train:14501-14600batch: iter_time=9.087e-05, forward_time=0.192, loss_ctc=66.210, loss_interctc_layer6=79.968, loss_interctc_layer12=65.858, loss_interctc_layer15=60.139, loss_interctc_layer21=68.564, loss=68.148, backward_time=0.240, grad_norm=92.908, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.492e-05, train_time=1.350 +[gpua006:0/64] 2024-02-13 21:07:27,028 (trainer:756) INFO: 38epoch:train:14601-14700batch: iter_time=9.398e-05, forward_time=0.143, loss_ctc=76.565, loss_interctc_layer6=81.311, loss_interctc_layer12=66.963, loss_interctc_layer15=61.314, loss_interctc_layer21=79.744, loss=73.179, backward_time=0.207, grad_norm=83.616, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.492e-05, train_time=1.353 +[gpua006:0/64] 2024-02-13 21:09:12,540 (trainer:756) INFO: 38epoch:train:14701-14800batch: iter_time=8.907e-05, forward_time=0.143, loss_ctc=78.460, loss_interctc_layer6=82.556, loss_interctc_layer12=68.160, loss_interctc_layer15=62.319, loss_interctc_layer21=81.521, loss=74.603, backward_time=0.209, grad_norm=69.660, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.491e-05, train_time=1.055 +[gpua006:0/64] 2024-02-13 21:11:12,623 (trainer:756) INFO: 38epoch:train:14801-14900batch: iter_time=9.699e-05, forward_time=0.143, loss_ctc=65.964, loss_interctc_layer6=77.419, loss_interctc_layer12=63.950, loss_interctc_layer15=58.530, loss_interctc_layer21=68.349, loss=66.843, backward_time=0.208, grad_norm=74.346, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.491e-05, train_time=1.201 +[gpua006:0/64] 2024-02-13 21:13:10,248 (trainer:756) INFO: 38epoch:train:14901-15000batch: iter_time=9.188e-05, forward_time=0.144, loss_ctc=88.049, loss_interctc_layer6=97.013, loss_interctc_layer12=80.396, loss_interctc_layer15=73.918, loss_interctc_layer21=91.072, loss=86.090, backward_time=0.208, grad_norm=126.377, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.490e-05, train_time=1.176 +[gpua006:0/64] 2024-02-13 21:44:47,633 (trainer:355) INFO: 38epoch results: [train] iter_time=0.249, forward_time=0.154, loss_ctc=74.496, loss_interctc_layer6=82.398, loss_interctc_layer12=68.045, loss_interctc_layer15=62.278, loss_interctc_layer21=77.215, loss=72.886, backward_time=0.212, grad_norm=83.722, clip=100.000, loss_scale=2.059e+31, optim_step_time=0.138, optim0_lr0=6.533e-05, train_time=1.696, time=7 hours, 4 minutes and 23.65 seconds, total_count=570000, gpu_max_cached_mem_GB=33.436, [valid] loss_ctc=40.081, cer_ctc=0.188, loss_interctc_layer6=45.755, cer_interctc_layer6=0.205, loss_interctc_layer12=33.268, cer_interctc_layer12=0.140, 
loss_interctc_layer15=28.920, cer_interctc_layer15=0.116, loss_interctc_layer21=42.515, cer_interctc_layer21=0.200, loss=38.108, time=31 minutes and 12.78 seconds, total_count=177498, gpu_max_cached_mem_GB=33.436 +[gpua006:0/64] 2024-02-13 21:45:16,460 (trainer:410) INFO: The best model has been updated: valid.cer_ctc, valid.loss_ctc, valid.total_count +[gpua006:0/64] 2024-02-13 21:45:16,575 (trainer:289) INFO: 39/45epoch started. Estimated time to finish: 2 days, 5 hours and 12 minutes +[gpua006:0/64] 2024-02-13 21:45:16,593 (multiple_iter_factory:32) INFO: Building 0th iter-factory... +[gpua006:0/64] 2024-02-13 21:45:34,356 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 21:45:37,759 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.4", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.4", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.4", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.4", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 21:45:37,759 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.4, +[gpua006:0/64] 2024-02-13 21:45:37,762 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 21:54:11,714 (trainer:756) INFO: 39epoch:train:1-100batch: iter_time=2.455, forward_time=0.183, loss_ctc=77.133, loss_interctc_layer6=86.203, loss_interctc_layer12=71.530, loss_interctc_layer15=65.792, loss_interctc_layer21=79.694, loss=76.070, backward_time=0.217, grad_norm=91.999, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.489e-05, train_time=5.351 +[gpua006:0/64] 2024-02-13 21:55:45,748 (trainer:756) INFO: 39epoch:train:101-200batch: iter_time=8.262e-05, forward_time=0.142, loss_ctc=85.311, loss_interctc_layer6=84.372, loss_interctc_layer12=69.999, loss_interctc_layer15=64.116, loss_interctc_layer21=88.669, loss=78.493, backward_time=0.207, grad_norm=87.100, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.489e-05, train_time=0.940 +[gpua006:0/64] 2024-02-13 21:57:25,108 (trainer:756) INFO: 39epoch:train:201-300batch: iter_time=8.226e-05, forward_time=0.143, loss_ctc=76.084, loss_interctc_layer6=83.146, loss_interctc_layer12=69.040, loss_interctc_layer15=63.338, loss_interctc_layer21=78.729, loss=74.067, backward_time=0.207, grad_norm=118.702, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.488e-05, train_time=0.993 +[gpua006:0/64] 2024-02-13 21:59:37,262 (trainer:756) INFO: 39epoch:train:301-400batch: iter_time=8.736e-05, forward_time=0.142, loss_ctc=76.511, loss_interctc_layer6=75.007, loss_interctc_layer12=62.672, loss_interctc_layer15=57.744, loss_interctc_layer21=79.336, loss=70.254, backward_time=0.205, grad_norm=79.479, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.488e-05, train_time=1.321 +[gpua006:0/64] 2024-02-13 22:01:34,945 (trainer:756) INFO: 39epoch:train:401-500batch: iter_time=8.541e-05, forward_time=0.142, loss_ctc=73.805, loss_interctc_layer6=73.833, loss_interctc_layer12=60.786, loss_interctc_layer15=55.533, loss_interctc_layer21=76.623, loss=68.116, backward_time=0.206, grad_norm=95.604, clip=100.000, 
loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.487e-05, train_time=1.177 +[gpua006:0/64] 2024-02-13 22:03:33,747 (trainer:756) INFO: 39epoch:train:501-600batch: iter_time=8.617e-05, forward_time=0.144, loss_ctc=91.427, loss_interctc_layer6=90.951, loss_interctc_layer12=75.823, loss_interctc_layer15=69.708, loss_interctc_layer21=94.734, loss=84.528, backward_time=0.210, grad_norm=87.266, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.487e-05, train_time=1.188 +[gpua006:0/64] 2024-02-13 22:05:24,943 (trainer:756) INFO: 39epoch:train:601-700batch: iter_time=8.745e-05, forward_time=0.146, loss_ctc=68.967, loss_interctc_layer6=78.496, loss_interctc_layer12=65.886, loss_interctc_layer15=60.738, loss_interctc_layer21=71.416, loss=69.101, backward_time=0.207, grad_norm=77.497, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.486e-05, train_time=1.110 +[gpua006:0/64] 2024-02-13 22:07:14,762 (trainer:756) INFO: 39epoch:train:701-800batch: iter_time=8.788e-05, forward_time=0.147, loss_ctc=77.892, loss_interctc_layer6=82.065, loss_interctc_layer12=68.350, loss_interctc_layer15=62.720, loss_interctc_layer21=80.592, loss=74.324, backward_time=0.208, grad_norm=96.134, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.485e-05, train_time=1.100 +[gpua006:0/64] 2024-02-13 22:09:39,169 (trainer:756) INFO: 39epoch:train:801-900batch: iter_time=2.105e-04, forward_time=0.159, loss_ctc=71.748, loss_interctc_layer6=75.309, loss_interctc_layer12=62.233, loss_interctc_layer15=56.933, loss_interctc_layer21=74.386, loss=68.122, backward_time=0.225, grad_norm=67.813, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.485e-05, train_time=1.444 +[gpua006:0/64] 2024-02-13 22:12:10,790 (trainer:756) INFO: 39epoch:train:901-1000batch: iter_time=9.911e-05, forward_time=0.163, loss_ctc=72.306, loss_interctc_layer6=80.274, loss_interctc_layer12=66.520, loss_interctc_layer15=60.961, loss_interctc_layer21=74.824, loss=70.977, backward_time=0.219, grad_norm=92.132, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.484e-05, train_time=1.516 +[gpua006:0/64] 2024-02-13 22:14:10,854 (trainer:756) INFO: 39epoch:train:1001-1100batch: iter_time=8.602e-05, forward_time=0.177, loss_ctc=61.638, loss_interctc_layer6=70.485, loss_interctc_layer12=58.291, loss_interctc_layer15=53.318, loss_interctc_layer21=63.833, loss=61.513, backward_time=0.218, grad_norm=58.567, clip=100.000, loss_scale=3.428e+31, optim_step_time=0.139, optim0_lr0=6.484e-05, train_time=1.200 +[gpua006:0/64] 2024-02-13 22:16:28,574 (trainer:756) INFO: 39epoch:train:1101-1200batch: iter_time=8.712e-05, forward_time=0.142, loss_ctc=72.350, loss_interctc_layer6=74.717, loss_interctc_layer12=61.473, loss_interctc_layer15=56.098, loss_interctc_layer21=74.913, loss=67.910, backward_time=0.205, grad_norm=69.213, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.483e-05, train_time=1.377 +[gpua006:0/64] 2024-02-13 22:17:43,348 (multiple_iter_factory:32) INFO: Building 1th iter-factory... 
+[gpua006:0/64] 2024-02-13 22:18:02,099 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 22:18:05,773 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.9", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.9", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.9", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.9", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 22:18:05,773 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.9, +[gpua006:0/64] 2024-02-13 22:18:05,776 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 22:23:02,020 (trainer:756) INFO: 39epoch:train:1201-1300batch: iter_time=2.635, forward_time=0.143, loss_ctc=75.588, loss_interctc_layer6=90.005, loss_interctc_layer12=74.673, loss_interctc_layer15=68.941, loss_interctc_layer21=78.246, loss=77.491, backward_time=0.206, grad_norm=74.708, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.483e-05, train_time=3.934 +[gpua006:0/64] 2024-02-13 22:23:45,371 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-13 22:24:36,174 (trainer:756) INFO: 39epoch:train:1301-1400batch: iter_time=9.595e-05, forward_time=0.143, loss_ctc=79.235, loss_interctc_layer6=84.024, loss_interctc_layer12=69.377, loss_interctc_layer15=63.492, loss_interctc_layer21=82.379, loss=75.701, backward_time=0.208, grad_norm=74.925, clip=100.000, loss_scale=2.950e+31, optim_step_time=0.137, optim0_lr0=6.482e-05, train_time=0.941 +[gpua006:0/64] 2024-02-13 22:26:38,051 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
+[gpua006:0/64] 2024-02-13 22:27:08,931 (trainer:756) INFO: 39epoch:train:1401-1500batch: iter_time=9.659e-05, forward_time=0.143, loss_ctc=67.379, loss_interctc_layer6=82.598, loss_interctc_layer12=68.524, loss_interctc_layer15=62.917, loss_interctc_layer21=69.841, loss=70.252, backward_time=0.207, grad_norm=72.226, clip=100.000, loss_scale=1.793e+31, optim_step_time=0.137, optim0_lr0=6.481e-05, train_time=1.527 +[gpua006:0/64] 2024-02-13 22:29:06,950 (trainer:756) INFO: 39epoch:train:1501-1600batch: iter_time=9.750e-05, forward_time=0.143, loss_ctc=79.449, loss_interctc_layer6=80.873, loss_interctc_layer12=67.242, loss_interctc_layer15=61.770, loss_interctc_layer21=82.416, loss=74.350, backward_time=0.206, grad_norm=68.596, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.481e-05, train_time=1.180 +[gpua006:0/64] 2024-02-13 22:30:57,184 (trainer:756) INFO: 39epoch:train:1601-1700batch: iter_time=9.525e-05, forward_time=0.143, loss_ctc=58.648, loss_interctc_layer6=65.725, loss_interctc_layer12=54.054, loss_interctc_layer15=49.441, loss_interctc_layer21=61.036, loss=57.781, backward_time=0.210, grad_norm=85.160, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.480e-05, train_time=1.102 +[gpua006:0/64] 2024-02-13 22:33:04,216 (trainer:756) INFO: 39epoch:train:1701-1800batch: iter_time=9.830e-05, forward_time=0.145, loss_ctc=87.286, loss_interctc_layer6=92.833, loss_interctc_layer12=77.056, loss_interctc_layer15=70.647, loss_interctc_layer21=90.490, loss=83.662, backward_time=0.206, grad_norm=83.709, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.480e-05, train_time=1.270 +[gpua006:0/64] 2024-02-13 22:35:02,434 (trainer:756) INFO: 39epoch:train:1801-1900batch: iter_time=9.801e-05, forward_time=0.144, loss_ctc=69.073, loss_interctc_layer6=79.180, loss_interctc_layer12=65.727, loss_interctc_layer15=60.489, loss_interctc_layer21=71.459, loss=69.186, backward_time=0.209, grad_norm=78.835, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.479e-05, train_time=1.180 +[gpua006:0/64] 2024-02-13 22:36:47,708 (trainer:756) INFO: 39epoch:train:1901-2000batch: iter_time=3.672e-04, forward_time=0.170, loss_ctc=71.427, loss_interctc_layer6=80.096, loss_interctc_layer12=66.533, loss_interctc_layer15=61.131, loss_interctc_layer21=73.978, loss=70.633, backward_time=0.217, grad_norm=79.599, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.479e-05, train_time=1.054 +[gpua006:0/64] 2024-02-13 22:38:30,466 (trainer:756) INFO: 39epoch:train:2001-2100batch: iter_time=9.805e-05, forward_time=0.149, loss_ctc=60.881, loss_interctc_layer6=72.389, loss_interctc_layer12=60.008, loss_interctc_layer15=54.911, loss_interctc_layer21=63.038, loss=62.245, backward_time=0.209, grad_norm=66.760, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.478e-05, train_time=1.027 +[gpua006:0/64] 2024-02-13 22:41:04,959 (trainer:756) INFO: 39epoch:train:2101-2200batch: iter_time=1.001e-04, forward_time=0.169, loss_ctc=74.612, loss_interctc_layer6=79.608, loss_interctc_layer12=65.887, loss_interctc_layer15=60.418, loss_interctc_layer21=77.600, loss=71.625, backward_time=0.241, grad_norm=74.701, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.141, optim0_lr0=6.477e-05, train_time=1.545 +[gpua006:0/64] 2024-02-13 22:43:07,537 (trainer:756) INFO: 39epoch:train:2201-2300batch: iter_time=9.794e-05, forward_time=0.155, loss_ctc=61.157, loss_interctc_layer6=73.981, loss_interctc_layer12=61.098, 
loss_interctc_layer15=55.854, loss_interctc_layer21=63.126, loss=63.043, backward_time=0.211, grad_norm=60.318, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.477e-05, train_time=1.226 +[gpua006:0/64] 2024-02-13 22:45:55,168 (trainer:756) INFO: 39epoch:train:2301-2400batch: iter_time=9.583e-05, forward_time=0.142, loss_ctc=55.447, loss_interctc_layer6=68.454, loss_interctc_layer12=56.316, loss_interctc_layer15=51.314, loss_interctc_layer21=57.299, loss=57.766, backward_time=0.205, grad_norm=50.348, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.476e-05, train_time=1.676 +[gpua006:0/64] 2024-02-13 22:47:34,075 (trainer:756) INFO: 39epoch:train:2401-2500batch: iter_time=1.012e-04, forward_time=0.143, loss_ctc=81.327, loss_interctc_layer6=88.475, loss_interctc_layer12=73.182, loss_interctc_layer15=67.104, loss_interctc_layer21=84.266, loss=78.871, backward_time=0.207, grad_norm=81.402, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.476e-05, train_time=0.989 +[gpua006:0/64] 2024-02-13 22:47:54,105 (multiple_iter_factory:32) INFO: Building 2th iter-factory... +[gpua006:0/64] 2024-02-13 22:48:12,981 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 22:48:16,355 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.11", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.11", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.11", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.11", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 22:48:16,356 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.11, +[gpua006:0/64] 2024-02-13 22:48:16,362 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 22:54:50,869 (trainer:756) INFO: 39epoch:train:2501-2600batch: iter_time=3.085, forward_time=0.147, loss_ctc=70.665, loss_interctc_layer6=84.394, loss_interctc_layer12=69.816, loss_interctc_layer15=63.980, loss_interctc_layer21=72.994, loss=72.370, backward_time=0.211, grad_norm=85.069, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.475e-05, train_time=4.368 +[gpua006:0/64] 2024-02-13 22:56:46,116 (trainer:756) INFO: 39epoch:train:2601-2700batch: iter_time=8.499e-05, forward_time=0.143, loss_ctc=74.868, loss_interctc_layer6=84.280, loss_interctc_layer12=70.030, loss_interctc_layer15=64.232, loss_interctc_layer21=77.731, loss=74.228, backward_time=0.208, grad_norm=94.946, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.475e-05, train_time=1.152 +[gpua006:0/64] 2024-02-13 22:58:20,693 (trainer:756) INFO: 39epoch:train:2701-2800batch: iter_time=8.613e-05, forward_time=0.143, loss_ctc=72.131, loss_interctc_layer6=81.877, loss_interctc_layer12=67.778, loss_interctc_layer15=62.025, loss_interctc_layer21=74.725, loss=71.707, backward_time=0.208, grad_norm=72.305, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.474e-05, train_time=0.946 +[gpua006:0/64] 2024-02-13 23:00:38,247 (trainer:756) INFO: 39epoch:train:2801-2900batch: iter_time=8.928e-05, forward_time=0.143, loss_ctc=71.697, loss_interctc_layer6=74.078, 
loss_interctc_layer12=61.391, loss_interctc_layer15=56.423, loss_interctc_layer21=74.539, loss=67.626, backward_time=0.210, grad_norm=79.509, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.473e-05, train_time=1.375 +[gpua006:0/64] 2024-02-13 23:02:32,113 (trainer:756) INFO: 39epoch:train:2901-3000batch: iter_time=9.019e-05, forward_time=0.143, loss_ctc=67.141, loss_interctc_layer6=73.837, loss_interctc_layer12=60.694, loss_interctc_layer15=55.351, loss_interctc_layer21=69.740, loss=65.353, backward_time=0.209, grad_norm=64.295, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.473e-05, train_time=1.137 +[gpua006:0/64] 2024-02-13 23:04:34,892 (trainer:756) INFO: 39epoch:train:3001-3100batch: iter_time=8.885e-05, forward_time=0.171, loss_ctc=85.381, loss_interctc_layer6=90.408, loss_interctc_layer12=75.129, loss_interctc_layer15=68.969, loss_interctc_layer21=88.421, loss=81.662, backward_time=0.209, grad_norm=97.272, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.472e-05, train_time=1.229 +[gpua006:0/64] 2024-02-13 23:06:50,402 (trainer:756) INFO: 39epoch:train:3101-3200batch: iter_time=8.531e-05, forward_time=0.192, loss_ctc=64.330, loss_interctc_layer6=77.611, loss_interctc_layer12=64.752, loss_interctc_layer15=59.485, loss_interctc_layer21=66.722, loss=66.580, backward_time=0.226, grad_norm=101.029, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.472e-05, train_time=1.355 +[gpua006:0/64] 2024-02-13 23:08:53,797 (trainer:756) INFO: 39epoch:train:3201-3300batch: iter_time=8.987e-05, forward_time=0.148, loss_ctc=71.860, loss_interctc_layer6=80.107, loss_interctc_layer12=66.351, loss_interctc_layer15=60.715, loss_interctc_layer21=74.291, loss=70.665, backward_time=0.215, grad_norm=96.433, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.471e-05, train_time=1.234 +[gpua006:0/64] 2024-02-13 23:10:44,513 (trainer:756) INFO: 39epoch:train:3301-3400batch: iter_time=8.480e-05, forward_time=0.142, loss_ctc=67.733, loss_interctc_layer6=75.500, loss_interctc_layer12=62.467, loss_interctc_layer15=57.175, loss_interctc_layer21=70.186, loss=66.612, backward_time=0.207, grad_norm=76.942, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.471e-05, train_time=1.107 +[gpua006:0/64] 2024-02-13 23:14:03,454 (trainer:756) INFO: 39epoch:train:3401-3500batch: iter_time=9.423e-05, forward_time=0.142, loss_ctc=65.754, loss_interctc_layer6=79.111, loss_interctc_layer12=65.227, loss_interctc_layer15=59.972, loss_interctc_layer21=68.057, loss=67.624, backward_time=0.206, grad_norm=72.293, clip=100.000, loss_scale=1.247e+31, optim_step_time=0.138, optim0_lr0=6.470e-05, train_time=1.989 +[gpua006:0/64] 2024-02-13 23:15:52,228 (trainer:756) INFO: 39epoch:train:3501-3600batch: iter_time=8.912e-05, forward_time=0.142, loss_ctc=58.140, loss_interctc_layer6=69.809, loss_interctc_layer12=57.660, loss_interctc_layer15=52.746, loss_interctc_layer21=60.299, loss=59.731, backward_time=0.207, grad_norm=76.734, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.470e-05, train_time=1.088 +[gpua006:0/64] 2024-02-13 23:17:37,311 (trainer:756) INFO: 39epoch:train:3601-3700batch: iter_time=8.649e-05, forward_time=0.145, loss_ctc=66.821, loss_interctc_layer6=74.780, loss_interctc_layer12=61.436, loss_interctc_layer15=56.031, loss_interctc_layer21=69.283, loss=65.670, backward_time=0.211, grad_norm=83.250, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, 
optim0_lr0=6.469e-05, train_time=1.051 +[gpua006:0/64] 2024-02-13 23:18:46,754 (multiple_iter_factory:32) INFO: Building 3th iter-factory... +[gpua006:0/64] 2024-02-13 23:19:05,274 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 23:19:08,688 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.10", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.10", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.10", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.10", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 23:19:08,688 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.10, +[gpua006:0/64] 2024-02-13 23:19:08,696 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 23:24:05,036 (trainer:756) INFO: 39epoch:train:3701-3800batch: iter_time=2.763, forward_time=0.162, loss_ctc=74.441, loss_interctc_layer6=89.345, loss_interctc_layer12=74.300, loss_interctc_layer15=68.072, loss_interctc_layer21=76.849, loss=76.601, backward_time=0.212, grad_norm=99.934, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.468e-05, train_time=3.877 +[gpua006:0/64] 2024-02-13 23:25:59,943 (trainer:756) INFO: 39epoch:train:3801-3900batch: iter_time=9.322e-05, forward_time=0.144, loss_ctc=85.359, loss_interctc_layer6=83.898, loss_interctc_layer12=69.142, loss_interctc_layer15=63.099, loss_interctc_layer21=88.737, loss=78.047, backward_time=0.208, grad_norm=86.492, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.468e-05, train_time=1.149 +[gpua006:0/64] 2024-02-13 23:27:35,177 (trainer:756) INFO: 39epoch:train:3901-4000batch: iter_time=9.876e-05, forward_time=0.143, loss_ctc=73.987, loss_interctc_layer6=81.992, loss_interctc_layer12=68.011, loss_interctc_layer15=62.372, loss_interctc_layer21=76.688, loss=72.610, backward_time=0.208, grad_norm=79.382, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.467e-05, train_time=0.952 +[gpua006:0/64] 2024-02-13 23:29:27,537 (trainer:756) INFO: 39epoch:train:4001-4100batch: iter_time=9.632e-05, forward_time=0.145, loss_ctc=82.277, loss_interctc_layer6=79.690, loss_interctc_layer12=65.992, loss_interctc_layer15=60.417, loss_interctc_layer21=85.401, loss=74.755, backward_time=0.207, grad_norm=103.398, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.467e-05, train_time=1.123 +[gpua006:0/64] 2024-02-13 23:31:55,124 (trainer:756) INFO: 39epoch:train:4101-4200batch: iter_time=9.868e-05, forward_time=0.204, loss_ctc=62.996, loss_interctc_layer6=65.162, loss_interctc_layer12=53.357, loss_interctc_layer15=48.755, loss_interctc_layer21=65.589, loss=59.172, backward_time=0.221, grad_norm=57.123, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.466e-05, train_time=1.476 +[gpua006:0/64] 2024-02-13 23:34:13,291 (trainer:756) INFO: 39epoch:train:4201-4300batch: iter_time=1.029e-04, forward_time=0.157, loss_ctc=94.979, loss_interctc_layer6=91.767, loss_interctc_layer12=76.006, loss_interctc_layer15=69.604, loss_interctc_layer21=98.513, loss=86.174, backward_time=0.223, grad_norm=88.671, clip=100.000, loss_scale=2.028e+31, 
optim_step_time=0.139, optim0_lr0=6.466e-05, train_time=1.381 +[gpua006:0/64] 2024-02-13 23:36:32,665 (trainer:756) INFO: 39epoch:train:4301-4400batch: iter_time=1.035e-04, forward_time=0.142, loss_ctc=68.023, loss_interctc_layer6=78.124, loss_interctc_layer12=64.712, loss_interctc_layer15=59.297, loss_interctc_layer21=70.430, loss=68.117, backward_time=0.207, grad_norm=81.089, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.465e-05, train_time=1.394 +[gpua006:0/64] 2024-02-13 23:38:36,208 (trainer:756) INFO: 39epoch:train:4401-4500batch: iter_time=9.235e-05, forward_time=0.142, loss_ctc=77.175, loss_interctc_layer6=79.994, loss_interctc_layer12=66.621, loss_interctc_layer15=61.228, loss_interctc_layer21=80.114, loss=73.026, backward_time=0.206, grad_norm=77.542, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.464e-05, train_time=1.235 +[gpua006:0/64] 2024-02-13 23:40:12,171 (trainer:756) INFO: 39epoch:train:4501-4600batch: iter_time=1.007e-04, forward_time=0.142, loss_ctc=63.347, loss_interctc_layer6=72.456, loss_interctc_layer12=60.010, loss_interctc_layer15=54.922, loss_interctc_layer21=65.470, loss=63.241, backward_time=0.207, grad_norm=71.768, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.464e-05, train_time=0.959 +[gpua006:0/64] 2024-02-13 23:42:20,268 (trainer:756) INFO: 39epoch:train:4601-4700batch: iter_time=9.273e-05, forward_time=0.146, loss_ctc=80.765, loss_interctc_layer6=79.047, loss_interctc_layer12=65.247, loss_interctc_layer15=59.750, loss_interctc_layer21=84.102, loss=73.782, backward_time=0.213, grad_norm=85.368, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.463e-05, train_time=1.281 +[gpua006:0/64] 2024-02-13 23:44:04,953 (trainer:756) INFO: 39epoch:train:4701-4800batch: iter_time=9.267e-05, forward_time=0.142, loss_ctc=64.079, loss_interctc_layer6=73.463, loss_interctc_layer12=60.430, loss_interctc_layer15=55.135, loss_interctc_layer21=66.142, loss=63.850, backward_time=0.208, grad_norm=110.681, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.463e-05, train_time=1.047 +[gpua006:0/64] 2024-02-13 23:46:58,880 (trainer:756) INFO: 39epoch:train:4801-4900batch: iter_time=2.382e-04, forward_time=0.155, loss_ctc=57.823, loss_interctc_layer6=67.457, loss_interctc_layer12=55.213, loss_interctc_layer15=50.290, loss_interctc_layer21=59.814, loss=58.119, backward_time=0.232, grad_norm=130.133, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.462e-05, train_time=1.739 +[gpua006:0/64] 2024-02-13 23:49:07,320 (trainer:756) INFO: 39epoch:train:4901-5000batch: iter_time=9.452e-05, forward_time=0.144, loss_ctc=88.794, loss_interctc_layer6=87.878, loss_interctc_layer12=72.552, loss_interctc_layer15=66.445, loss_interctc_layer21=92.130, loss=81.560, backward_time=0.209, grad_norm=80.621, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.462e-05, train_time=1.284 +[gpua006:0/64] 2024-02-13 23:49:27,407 (multiple_iter_factory:32) INFO: Building 4th iter-factory... 
+[gpua006:0/64] 2024-02-13 23:49:46,113 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-13 23:49:49,474 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.5", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.5", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.5", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.5", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-13 23:49:49,474 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.5, +[gpua006:0/64] 2024-02-13 23:49:49,512 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-13 23:56:21,161 (trainer:756) INFO: 39epoch:train:5001-5100batch: iter_time=2.922, forward_time=0.177, loss_ctc=72.065, loss_interctc_layer6=85.292, loss_interctc_layer12=70.805, loss_interctc_layer15=64.750, loss_interctc_layer21=74.345, loss=73.451, backward_time=0.215, grad_norm=142.933, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.461e-05, train_time=4.338 +[gpua006:0/64] 2024-02-13 23:57:55,028 (trainer:756) INFO: 39epoch:train:5101-5200batch: iter_time=9.185e-05, forward_time=0.143, loss_ctc=74.761, loss_interctc_layer6=83.671, loss_interctc_layer12=69.225, loss_interctc_layer15=63.383, loss_interctc_layer21=77.715, loss=73.751, backward_time=0.209, grad_norm=68.807, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.461e-05, train_time=0.938 +[gpua006:0/64] 2024-02-13 23:59:59,467 (trainer:756) INFO: 39epoch:train:5201-5300batch: iter_time=9.361e-05, forward_time=0.143, loss_ctc=70.285, loss_interctc_layer6=81.337, loss_interctc_layer12=67.168, loss_interctc_layer15=61.532, loss_interctc_layer21=72.829, loss=70.630, backward_time=0.209, grad_norm=73.568, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.460e-05, train_time=1.244 +[gpua006:0/64] 2024-02-14 00:02:23,280 (trainer:756) INFO: 39epoch:train:5301-5400batch: iter_time=9.998e-05, forward_time=0.142, loss_ctc=71.345, loss_interctc_layer6=73.152, loss_interctc_layer12=60.471, loss_interctc_layer15=55.596, loss_interctc_layer21=74.191, loss=66.951, backward_time=0.207, grad_norm=68.069, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.459e-05, train_time=1.438 +[gpua006:0/64] 2024-02-14 00:04:39,442 (trainer:756) INFO: 39epoch:train:5401-5500batch: iter_time=1.074e-04, forward_time=0.143, loss_ctc=66.114, loss_interctc_layer6=72.601, loss_interctc_layer12=59.666, loss_interctc_layer15=54.363, loss_interctc_layer21=68.630, loss=64.274, backward_time=0.208, grad_norm=59.946, clip=100.000, loss_scale=2.495e+31, optim_step_time=0.138, optim0_lr0=6.459e-05, train_time=1.361 +[gpua006:0/64] 2024-02-14 00:06:19,213 (trainer:756) INFO: 39epoch:train:5501-5600batch: iter_time=9.350e-05, forward_time=0.145, loss_ctc=85.213, loss_interctc_layer6=89.867, loss_interctc_layer12=74.501, loss_interctc_layer15=68.358, loss_interctc_layer21=88.322, loss=81.252, backward_time=0.211, grad_norm=87.497, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.458e-05, train_time=0.997 +[gpua006:0/64] 2024-02-14 00:08:09,093 (trainer:756) INFO: 39epoch:train:5601-5700batch: 
iter_time=9.150e-05, forward_time=0.142, loss_ctc=64.603, loss_interctc_layer6=76.775, loss_interctc_layer12=63.973, loss_interctc_layer15=58.796, loss_interctc_layer21=67.093, loss=66.248, backward_time=0.207, grad_norm=74.145, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.458e-05, train_time=1.099 +[gpua006:0/64] 2024-02-14 00:10:01,881 (trainer:756) INFO: 39epoch:train:5701-5800batch: iter_time=9.407e-05, forward_time=0.157, loss_ctc=71.174, loss_interctc_layer6=80.230, loss_interctc_layer12=66.469, loss_interctc_layer15=60.866, loss_interctc_layer21=73.688, loss=70.485, backward_time=0.229, grad_norm=66.421, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.457e-05, train_time=1.127 +[gpua006:0/64] 2024-02-14 00:11:56,145 (trainer:756) INFO: 39epoch:train:5801-5900batch: iter_time=9.282e-05, forward_time=0.143, loss_ctc=68.003, loss_interctc_layer6=75.351, loss_interctc_layer12=62.421, loss_interctc_layer15=57.073, loss_interctc_layer21=70.406, loss=66.651, backward_time=0.207, grad_norm=75.950, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.457e-05, train_time=1.142 +[gpua006:0/64] 2024-02-14 00:14:21,761 (trainer:756) INFO: 39epoch:train:5901-6000batch: iter_time=9.275e-05, forward_time=0.142, loss_ctc=65.559, loss_interctc_layer6=79.328, loss_interctc_layer12=65.468, loss_interctc_layer15=59.902, loss_interctc_layer21=68.012, loss=67.654, backward_time=0.206, grad_norm=111.699, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.456e-05, train_time=1.456 +[gpua006:0/64] 2024-02-14 00:16:41,002 (trainer:756) INFO: 39epoch:train:6001-6100batch: iter_time=9.302e-05, forward_time=0.168, loss_ctc=56.950, loss_interctc_layer6=69.741, loss_interctc_layer12=57.457, loss_interctc_layer15=52.458, loss_interctc_layer21=59.022, loss=59.126, backward_time=0.247, grad_norm=83.657, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.142, optim0_lr0=6.455e-05, train_time=1.393 +[gpua006:0/64] 2024-02-14 00:18:37,552 (trainer:756) INFO: 39epoch:train:6101-6200batch: iter_time=9.571e-05, forward_time=0.142, loss_ctc=66.375, loss_interctc_layer6=74.552, loss_interctc_layer12=61.222, loss_interctc_layer15=55.809, loss_interctc_layer21=68.831, loss=65.358, backward_time=0.207, grad_norm=71.868, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.455e-05, train_time=1.165 +[gpua006:0/64] 2024-02-14 00:19:50,699 (multiple_iter_factory:32) INFO: Building 5th iter-factory... 
+[gpua006:0/64] 2024-02-14 00:20:09,686 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 00:20:13,391 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.3", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.3", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.3", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.3", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 00:20:13,391 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.3, +[gpua006:0/64] 2024-02-14 00:20:13,395 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 00:28:56,750 (trainer:756) INFO: 39epoch:train:6201-6300batch: iter_time=3.045, forward_time=0.143, loss_ctc=72.075, loss_interctc_layer6=89.224, loss_interctc_layer12=73.884, loss_interctc_layer15=67.845, loss_interctc_layer21=74.430, loss=75.492, backward_time=0.207, grad_norm=82.145, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.454e-05, train_time=6.192 +[gpua006:0/64] 2024-02-14 00:30:37,547 (trainer:756) INFO: 39epoch:train:6301-6400batch: iter_time=8.463e-05, forward_time=0.144, loss_ctc=77.916, loss_interctc_layer6=83.921, loss_interctc_layer12=69.211, loss_interctc_layer15=63.134, loss_interctc_layer21=80.948, loss=75.026, backward_time=0.210, grad_norm=75.831, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.454e-05, train_time=1.008 +[gpua006:0/64] 2024-02-14 00:32:45,665 (trainer:756) INFO: 39epoch:train:6401-6500batch: iter_time=8.517e-05, forward_time=0.144, loss_ctc=66.943, loss_interctc_layer6=81.664, loss_interctc_layer12=67.737, loss_interctc_layer15=62.184, loss_interctc_layer21=69.474, loss=69.600, backward_time=0.214, grad_norm=70.194, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.453e-05, train_time=1.281 +[gpua006:0/64] 2024-02-14 00:34:26,860 (trainer:756) INFO: 39epoch:train:6501-6600batch: iter_time=9.200e-05, forward_time=0.144, loss_ctc=78.581, loss_interctc_layer6=79.372, loss_interctc_layer12=65.713, loss_interctc_layer15=60.145, loss_interctc_layer21=81.531, loss=73.069, backward_time=0.209, grad_norm=80.900, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.453e-05, train_time=1.012 +[gpua006:0/64] 2024-02-14 00:36:42,351 (trainer:756) INFO: 39epoch:train:6601-6700batch: iter_time=0.001, forward_time=0.185, loss_ctc=58.282, loss_interctc_layer6=65.159, loss_interctc_layer12=53.322, loss_interctc_layer15=48.727, loss_interctc_layer21=60.696, loss=57.237, backward_time=0.214, grad_norm=55.968, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.452e-05, train_time=1.354 +[gpua006:0/64] 2024-02-14 00:38:50,824 (trainer:756) INFO: 39epoch:train:6701-6800batch: iter_time=8.706e-05, forward_time=0.145, loss_ctc=87.170, loss_interctc_layer6=91.978, loss_interctc_layer12=76.128, loss_interctc_layer15=69.627, loss_interctc_layer21=90.281, loss=83.037, backward_time=0.209, grad_norm=81.997, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.452e-05, train_time=1.284 +[gpua006:0/64] 2024-02-14 00:40:57,044 (trainer:756) INFO: 39epoch:train:6801-6900batch: 
iter_time=8.505e-05, forward_time=0.143, loss_ctc=66.752, loss_interctc_layer6=78.479, loss_interctc_layer12=65.029, loss_interctc_layer15=59.603, loss_interctc_layer21=69.057, loss=67.784, backward_time=0.208, grad_norm=73.997, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.451e-05, train_time=1.263 +[gpua006:0/64] 2024-02-14 00:43:17,313 (trainer:756) INFO: 39epoch:train:6901-7000batch: iter_time=8.490e-05, forward_time=0.164, loss_ctc=71.127, loss_interctc_layer6=80.078, loss_interctc_layer12=66.788, loss_interctc_layer15=61.307, loss_interctc_layer21=73.854, loss=70.631, backward_time=0.254, grad_norm=106.447, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.141, optim0_lr0=6.450e-05, train_time=1.402 +[gpua006:0/64] 2024-02-14 00:45:59,096 (trainer:756) INFO: 39epoch:train:7001-7100batch: iter_time=8.446e-05, forward_time=0.163, loss_ctc=61.199, loss_interctc_layer6=72.114, loss_interctc_layer12=59.632, loss_interctc_layer15=54.424, loss_interctc_layer21=63.385, loss=62.151, backward_time=0.221, grad_norm=72.484, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.450e-05, train_time=1.618 +[gpua006:0/64] 2024-02-14 00:48:08,071 (trainer:756) INFO: 39epoch:train:7101-7200batch: iter_time=8.638e-05, forward_time=0.143, loss_ctc=72.324, loss_interctc_layer6=78.761, loss_interctc_layer12=64.921, loss_interctc_layer15=59.343, loss_interctc_layer21=75.211, loss=70.112, backward_time=0.208, grad_norm=69.570, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.449e-05, train_time=1.290 +[gpua006:0/64] 2024-02-14 00:49:51,231 (trainer:756) INFO: 39epoch:train:7201-7300batch: iter_time=8.408e-05, forward_time=0.143, loss_ctc=62.054, loss_interctc_layer6=74.459, loss_interctc_layer12=61.399, loss_interctc_layer15=56.101, loss_interctc_layer21=64.429, loss=63.688, backward_time=0.209, grad_norm=76.608, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.449e-05, train_time=1.031 +[gpua006:0/64] 2024-02-14 00:52:08,810 (trainer:756) INFO: 39epoch:train:7301-7400batch: iter_time=8.606e-05, forward_time=0.142, loss_ctc=54.904, loss_interctc_layer6=67.449, loss_interctc_layer12=55.226, loss_interctc_layer15=50.245, loss_interctc_layer21=56.877, loss=56.940, backward_time=0.206, grad_norm=78.272, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.448e-05, train_time=1.376 +[gpua006:0/64] 2024-02-14 00:54:19,084 (trainer:756) INFO: 39epoch:train:7401-7500batch: iter_time=8.156e-05, forward_time=0.144, loss_ctc=80.728, loss_interctc_layer6=87.661, loss_interctc_layer12=72.294, loss_interctc_layer15=66.113, loss_interctc_layer21=83.641, loss=78.087, backward_time=0.208, grad_norm=72.559, clip=100.000, loss_scale=4.989e+31, optim_step_time=0.139, optim0_lr0=6.448e-05, train_time=1.303 +[gpua006:0/64] 2024-02-14 00:54:39,113 (multiple_iter_factory:32) INFO: Building 6th iter-factory... 
+[gpua006:0/64] 2024-02-14 00:54:58,020 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 00:55:01,445 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.8", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.8", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.8", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.8", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 00:55:01,446 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.8, +[gpua006:0/64] 2024-02-14 00:55:01,449 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 01:00:51,551 (trainer:756) INFO: 39epoch:train:7501-7600batch: iter_time=2.880, forward_time=0.174, loss_ctc=74.490, loss_interctc_layer6=83.655, loss_interctc_layer12=68.937, loss_interctc_layer15=63.121, loss_interctc_layer21=76.967, loss=73.434, backward_time=0.218, grad_norm=69.090, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.139, optim0_lr0=6.447e-05, train_time=3.924 +[gpua006:0/64] 2024-02-14 01:02:25,466 (trainer:756) INFO: 39epoch:train:7601-7700batch: iter_time=8.557e-05, forward_time=0.143, loss_ctc=85.180, loss_interctc_layer6=83.855, loss_interctc_layer12=69.330, loss_interctc_layer15=63.468, loss_interctc_layer21=88.661, loss=78.099, backward_time=0.210, grad_norm=114.188, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.447e-05, train_time=0.939 +[gpua006:0/64] 2024-02-14 01:04:46,424 (trainer:756) INFO: 39epoch:train:7701-7800batch: iter_time=1.118e-04, forward_time=0.143, loss_ctc=75.252, loss_interctc_layer6=81.938, loss_interctc_layer12=67.686, loss_interctc_layer15=61.924, loss_interctc_layer21=78.002, loss=72.961, backward_time=0.207, grad_norm=82.318, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=6.446e-05, train_time=1.409 +[gpua006:0/64] 2024-02-14 01:07:08,173 (trainer:756) INFO: 39epoch:train:7801-7900batch: iter_time=1.003e-04, forward_time=0.145, loss_ctc=74.120, loss_interctc_layer6=73.249, loss_interctc_layer12=60.527, loss_interctc_layer15=55.577, loss_interctc_layer21=77.139, loss=68.122, backward_time=0.208, grad_norm=90.069, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=6.445e-05, train_time=1.417 +[gpua006:0/64] 2024-02-14 01:09:03,598 (trainer:756) INFO: 39epoch:train:7901-8000batch: iter_time=9.488e-05, forward_time=0.142, loss_ctc=72.067, loss_interctc_layer6=72.506, loss_interctc_layer12=59.424, loss_interctc_layer15=54.142, loss_interctc_layer21=74.932, loss=66.614, backward_time=0.208, grad_norm=61.855, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=6.445e-05, train_time=1.154 +[gpua006:0/64] 2024-02-14 01:11:36,571 (trainer:756) INFO: 39epoch:train:8001-8100batch: iter_time=9.321e-05, forward_time=0.143, loss_ctc=89.745, loss_interctc_layer6=89.401, loss_interctc_layer12=74.097, loss_interctc_layer15=67.863, loss_interctc_layer21=93.190, loss=82.859, backward_time=0.208, grad_norm=97.696, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.444e-05, train_time=1.530 +[gpua006:0/64] 2024-02-14 01:13:36,355 (trainer:756) INFO: 39epoch:train:8101-8200batch: 
iter_time=9.171e-05, forward_time=0.142, loss_ctc=66.918, loss_interctc_layer6=76.611, loss_interctc_layer12=63.760, loss_interctc_layer15=58.563, loss_interctc_layer21=69.267, loss=67.024, backward_time=0.207, grad_norm=69.257, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=6.444e-05, train_time=1.198 +[gpua006:0/64] 2024-02-14 01:15:41,573 (trainer:756) INFO: 39epoch:train:8201-8300batch: iter_time=9.358e-05, forward_time=0.142, loss_ctc=76.522, loss_interctc_layer6=80.392, loss_interctc_layer12=66.547, loss_interctc_layer15=60.848, loss_interctc_layer21=79.194, loss=72.701, backward_time=0.206, grad_norm=88.986, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=6.443e-05, train_time=1.252 +[gpua006:0/64] 2024-02-14 01:17:47,661 (trainer:756) INFO: 39epoch:train:8301-8400batch: iter_time=9.513e-05, forward_time=0.143, loss_ctc=70.909, loss_interctc_layer6=74.286, loss_interctc_layer12=61.253, loss_interctc_layer15=55.922, loss_interctc_layer21=73.625, loss=67.199, backward_time=0.207, grad_norm=96.308, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=6.443e-05, train_time=1.261 +[gpua006:0/64] 2024-02-14 01:20:39,644 (trainer:756) INFO: 39epoch:train:8401-8500batch: iter_time=8.983e-05, forward_time=0.176, loss_ctc=71.123, loss_interctc_layer6=78.693, loss_interctc_layer12=64.883, loss_interctc_layer15=59.448, loss_interctc_layer21=73.604, loss=69.550, backward_time=0.226, grad_norm=74.283, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.442e-05, train_time=1.719 +[gpua006:0/64] 2024-02-14 01:22:54,023 (trainer:756) INFO: 39epoch:train:8501-8600batch: iter_time=8.792e-05, forward_time=0.176, loss_ctc=60.816, loss_interctc_layer6=69.841, loss_interctc_layer12=57.557, loss_interctc_layer15=52.538, loss_interctc_layer21=63.003, loss=60.751, backward_time=0.237, grad_norm=75.587, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.139, optim0_lr0=6.442e-05, train_time=1.344 +[gpua006:0/64] 2024-02-14 01:25:06,895 (trainer:756) INFO: 39epoch:train:8601-8700batch: iter_time=8.510e-05, forward_time=0.142, loss_ctc=71.601, loss_interctc_layer6=74.405, loss_interctc_layer12=60.966, loss_interctc_layer15=55.496, loss_interctc_layer21=74.195, loss=67.333, backward_time=0.206, grad_norm=69.332, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.441e-05, train_time=1.329 +[gpua006:0/64] 2024-02-14 01:26:14,083 (multiple_iter_factory:32) INFO: Building 7th iter-factory... 
+[gpua006:0/64] 2024-02-14 01:26:32,865 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 01:26:36,367 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.6", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.6", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.6", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.6", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 01:26:36,367 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.6, +[gpua006:0/64] 2024-02-14 01:26:36,370 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 01:31:40,210 (trainer:756) INFO: 39epoch:train:8701-8800batch: iter_time=2.870, forward_time=0.171, loss_ctc=77.136, loss_interctc_layer6=88.802, loss_interctc_layer12=73.529, loss_interctc_layer15=67.545, loss_interctc_layer21=79.569, loss=77.316, backward_time=0.214, grad_norm=77.769, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.139, optim0_lr0=6.440e-05, train_time=3.933 +[gpua006:0/64] 2024-02-14 01:33:23,550 (trainer:756) INFO: 39epoch:train:8801-8900batch: iter_time=8.403e-05, forward_time=0.144, loss_ctc=84.818, loss_interctc_layer6=83.427, loss_interctc_layer12=68.740, loss_interctc_layer15=62.846, loss_interctc_layer21=88.180, loss=77.602, backward_time=0.209, grad_norm=68.695, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.440e-05, train_time=1.033 +[gpua006:0/64] 2024-02-14 01:34:22,837 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-14 01:34:57,411 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
+[gpua006:0/64] 2024-02-14 01:35:00,298 (trainer:756) INFO: 39epoch:train:8901-9000batch: iter_time=8.461e-05, forward_time=0.143, loss_ctc=73.103, loss_interctc_layer6=81.584, loss_interctc_layer12=67.656, loss_interctc_layer15=62.051, loss_interctc_layer21=75.723, loss=72.023, backward_time=0.208, grad_norm=84.933, clip=100.000, loss_scale=6.437e+31, optim_step_time=0.138, optim0_lr0=6.439e-05, train_time=0.967 +[gpua006:0/64] 2024-02-14 01:37:00,419 (trainer:756) INFO: 39epoch:train:9001-9100batch: iter_time=8.447e-05, forward_time=0.145, loss_ctc=81.801, loss_interctc_layer6=79.718, loss_interctc_layer12=66.057, loss_interctc_layer15=60.396, loss_interctc_layer21=85.124, loss=74.619, backward_time=0.207, grad_norm=81.588, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.439e-05, train_time=1.201 +[gpua006:0/64] 2024-02-14 01:39:10,573 (trainer:756) INFO: 39epoch:train:9101-9200batch: iter_time=8.671e-05, forward_time=0.141, loss_ctc=61.917, loss_interctc_layer6=64.896, loss_interctc_layer12=53.113, loss_interctc_layer15=48.467, loss_interctc_layer21=64.619, loss=58.603, backward_time=0.206, grad_norm=61.912, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.438e-05, train_time=1.301 +[gpua006:0/64] 2024-02-14 01:41:33,675 (trainer:756) INFO: 39epoch:train:9201-9300batch: iter_time=8.640e-05, forward_time=0.143, loss_ctc=94.294, loss_interctc_layer6=91.684, loss_interctc_layer12=75.838, loss_interctc_layer15=69.471, loss_interctc_layer21=97.830, loss=85.823, backward_time=0.205, grad_norm=76.683, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.438e-05, train_time=1.431 +[gpua006:0/64] 2024-02-14 01:43:44,522 (trainer:756) INFO: 39epoch:train:9301-9400batch: iter_time=8.584e-05, forward_time=0.188, loss_ctc=67.488, loss_interctc_layer6=77.339, loss_interctc_layer12=63.946, loss_interctc_layer15=58.690, loss_interctc_layer21=69.815, loss=67.456, backward_time=0.223, grad_norm=86.198, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.437e-05, train_time=1.308 +[gpua006:0/64] 2024-02-14 01:46:01,479 (trainer:756) INFO: 39epoch:train:9401-9500batch: iter_time=8.723e-05, forward_time=0.142, loss_ctc=76.985, loss_interctc_layer6=79.557, loss_interctc_layer12=66.300, loss_interctc_layer15=60.842, loss_interctc_layer21=79.845, loss=72.706, backward_time=0.206, grad_norm=86.320, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.437e-05, train_time=1.369 +[gpua006:0/64] 2024-02-14 01:48:14,896 (trainer:756) INFO: 39epoch:train:9501-9600batch: iter_time=8.390e-05, forward_time=0.141, loss_ctc=62.853, loss_interctc_layer6=72.489, loss_interctc_layer12=59.933, loss_interctc_layer15=54.757, loss_interctc_layer21=64.964, loss=62.999, backward_time=0.207, grad_norm=82.540, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.436e-05, train_time=1.335 +[gpua006:0/64] 2024-02-14 01:50:25,002 (trainer:756) INFO: 39epoch:train:9601-9700batch: iter_time=8.555e-05, forward_time=0.163, loss_ctc=79.884, loss_interctc_layer6=78.538, loss_interctc_layer12=64.668, loss_interctc_layer15=59.183, loss_interctc_layer21=83.079, loss=73.071, backward_time=0.217, grad_norm=72.938, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.435e-05, train_time=1.301 +[gpua006:0/64] 2024-02-14 01:52:38,177 (trainer:756) INFO: 39epoch:train:9701-9800batch: iter_time=7.622e-05, forward_time=0.153, loss_ctc=64.405, loss_interctc_layer6=73.968, loss_interctc_layer12=61.040, 
loss_interctc_layer15=55.752, loss_interctc_layer21=66.798, loss=64.393, backward_time=0.208, grad_norm=64.187, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.435e-05, train_time=1.331 +[gpua006:0/64] 2024-02-14 01:54:31,602 (trainer:756) INFO: 39epoch:train:9801-9900batch: iter_time=2.285e-04, forward_time=0.162, loss_ctc=58.187, loss_interctc_layer6=68.044, loss_interctc_layer12=55.792, loss_interctc_layer15=50.785, loss_interctc_layer21=60.286, loss=58.618, backward_time=0.221, grad_norm=91.813, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.434e-05, train_time=1.135 +[gpua006:0/64] 2024-02-14 01:56:24,681 (trainer:756) INFO: 39epoch:train:9901-10000batch: iter_time=7.896e-05, forward_time=0.143, loss_ctc=88.712, loss_interctc_layer6=88.243, loss_interctc_layer12=72.777, loss_interctc_layer15=66.557, loss_interctc_layer21=91.841, loss=81.626, backward_time=0.209, grad_norm=77.046, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.434e-05, train_time=1.131 +[gpua006:0/64] 2024-02-14 01:56:44,711 (multiple_iter_factory:32) INFO: Building 8th iter-factory... +[gpua006:0/64] 2024-02-14 01:57:03,700 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 01:57:07,125 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.1", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.1", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.1", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.1", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 01:57:07,125 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.1, +[gpua006:0/64] 2024-02-14 01:57:07,194 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 02:03:03,814 (trainer:756) INFO: 39epoch:train:10001-10100batch: iter_time=2.867, forward_time=0.144, loss_ctc=70.883, loss_interctc_layer6=84.386, loss_interctc_layer12=69.685, loss_interctc_layer15=63.910, loss_interctc_layer21=73.441, loss=72.461, backward_time=0.207, grad_norm=79.481, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.433e-05, train_time=3.991 +[gpua006:0/64] 2024-02-14 02:04:45,310 (trainer:756) INFO: 39epoch:train:10101-10200batch: iter_time=8.476e-05, forward_time=0.143, loss_ctc=74.460, loss_interctc_layer6=83.543, loss_interctc_layer12=69.213, loss_interctc_layer15=63.353, loss_interctc_layer21=77.731, loss=73.660, backward_time=0.207, grad_norm=85.480, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.433e-05, train_time=1.015 +[gpua006:0/64] 2024-02-14 02:07:35,620 (trainer:756) INFO: 39epoch:train:10201-10300batch: iter_time=9.571e-05, forward_time=0.169, loss_ctc=69.943, loss_interctc_layer6=81.077, loss_interctc_layer12=66.879, loss_interctc_layer15=61.086, loss_interctc_layer21=72.432, loss=70.283, backward_time=0.252, grad_norm=111.761, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.142, optim0_lr0=6.432e-05, train_time=1.703 +[gpua006:0/64] 2024-02-14 02:09:34,836 (trainer:756) INFO: 39epoch:train:10301-10400batch: iter_time=8.912e-05, forward_time=0.143, loss_ctc=71.708, loss_interctc_layer6=73.051, 
loss_interctc_layer12=60.505, loss_interctc_layer15=55.474, loss_interctc_layer21=74.623, loss=67.072, backward_time=0.208, grad_norm=65.295, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.432e-05, train_time=1.191 +[gpua006:0/64] 2024-02-14 02:11:43,063 (trainer:756) INFO: 39epoch:train:10401-10500batch: iter_time=8.956e-05, forward_time=0.142, loss_ctc=65.670, loss_interctc_layer6=72.507, loss_interctc_layer12=59.450, loss_interctc_layer15=54.063, loss_interctc_layer21=68.318, loss=64.002, backward_time=0.207, grad_norm=96.943, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.431e-05, train_time=1.283 +[gpua006:0/64] 2024-02-14 02:13:59,576 (trainer:756) INFO: 39epoch:train:10501-10600batch: iter_time=1.029e-04, forward_time=0.165, loss_ctc=84.243, loss_interctc_layer6=89.436, loss_interctc_layer12=74.124, loss_interctc_layer15=67.959, loss_interctc_layer21=87.353, loss=80.623, backward_time=0.213, grad_norm=80.485, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.430e-05, train_time=1.365 +[gpua006:0/64] 2024-02-14 02:16:25,337 (trainer:756) INFO: 39epoch:train:10601-10700batch: iter_time=9.851e-05, forward_time=0.150, loss_ctc=63.965, loss_interctc_layer6=75.839, loss_interctc_layer12=63.031, loss_interctc_layer15=57.918, loss_interctc_layer21=66.495, loss=65.450, backward_time=0.212, grad_norm=68.544, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.430e-05, train_time=1.457 +[gpua006:0/64] 2024-02-14 02:18:59,556 (trainer:756) INFO: 39epoch:train:10701-10800batch: iter_time=9.911e-05, forward_time=0.176, loss_ctc=70.142, loss_interctc_layer6=79.571, loss_interctc_layer12=65.851, loss_interctc_layer15=60.137, loss_interctc_layer21=72.894, loss=69.719, backward_time=0.214, grad_norm=78.833, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.429e-05, train_time=1.542 +[gpua006:0/64] 2024-02-14 02:21:22,167 (trainer:756) INFO: 39epoch:train:10801-10900batch: iter_time=9.356e-05, forward_time=0.143, loss_ctc=67.561, loss_interctc_layer6=74.831, loss_interctc_layer12=61.821, loss_interctc_layer15=56.495, loss_interctc_layer21=70.011, loss=66.144, backward_time=0.208, grad_norm=138.696, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.429e-05, train_time=1.426 +[gpua006:0/64] 2024-02-14 02:23:43,661 (trainer:756) INFO: 39epoch:train:10901-11000batch: iter_time=8.706e-05, forward_time=0.142, loss_ctc=65.917, loss_interctc_layer6=79.253, loss_interctc_layer12=65.432, loss_interctc_layer15=59.828, loss_interctc_layer21=68.384, loss=67.763, backward_time=0.207, grad_norm=108.311, clip=100.000, loss_scale=2.089e+31, optim_step_time=0.137, optim0_lr0=6.428e-05, train_time=1.415 +[gpua006:0/64] 2024-02-14 02:26:03,897 (trainer:756) INFO: 39epoch:train:11001-11100batch: iter_time=9.362e-05, forward_time=0.142, loss_ctc=56.485, loss_interctc_layer6=69.293, loss_interctc_layer12=56.998, loss_interctc_layer15=52.053, loss_interctc_layer21=58.531, loss=58.672, backward_time=0.206, grad_norm=64.073, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.428e-05, train_time=1.402 +[gpua006:0/64] 2024-02-14 02:27:54,533 (trainer:756) INFO: 39epoch:train:11101-11200batch: iter_time=8.802e-05, forward_time=0.144, loss_ctc=66.443, loss_interctc_layer6=74.319, loss_interctc_layer12=60.879, loss_interctc_layer15=55.428, loss_interctc_layer21=68.886, loss=65.191, backward_time=0.208, grad_norm=89.109, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, 
optim0_lr0=6.427e-05, train_time=1.106 +[gpua006:0/64] 2024-02-14 02:29:19,208 (multiple_iter_factory:32) INFO: Building 9th iter-factory... +[gpua006:0/64] 2024-02-14 02:29:37,950 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 02:29:41,462 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.2", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.2", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.2", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.2", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 02:29:41,463 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.2, +[gpua006:0/64] 2024-02-14 02:29:41,466 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 02:34:59,337 (trainer:756) INFO: 39epoch:train:11201-11300batch: iter_time=2.881, forward_time=0.183, loss_ctc=74.207, loss_interctc_layer6=88.388, loss_interctc_layer12=73.092, loss_interctc_layer15=67.449, loss_interctc_layer21=76.870, loss=76.001, backward_time=0.230, grad_norm=92.967, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.427e-05, train_time=4.247 +[gpua006:0/64] 2024-02-14 02:36:33,561 (trainer:756) INFO: 39epoch:train:11301-11400batch: iter_time=9.535e-05, forward_time=0.144, loss_ctc=84.986, loss_interctc_layer6=83.667, loss_interctc_layer12=68.877, loss_interctc_layer15=62.919, loss_interctc_layer21=88.446, loss=77.779, backward_time=0.210, grad_norm=92.890, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.426e-05, train_time=0.942 +[gpua006:0/64] 2024-02-14 02:38:19,612 (trainer:756) INFO: 39epoch:train:11401-11500batch: iter_time=3.253e-04, forward_time=0.163, loss_ctc=73.196, loss_interctc_layer6=81.595, loss_interctc_layer12=67.512, loss_interctc_layer15=61.917, loss_interctc_layer21=75.868, loss=72.018, backward_time=0.216, grad_norm=79.846, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.141, optim0_lr0=6.425e-05, train_time=1.060 +[gpua006:0/64] 2024-02-14 02:40:53,961 (trainer:756) INFO: 39epoch:train:11501-11600batch: iter_time=9.469e-05, forward_time=0.144, loss_ctc=81.248, loss_interctc_layer6=78.719, loss_interctc_layer12=65.045, loss_interctc_layer15=59.411, loss_interctc_layer21=84.433, loss=73.771, backward_time=0.208, grad_norm=101.345, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.425e-05, train_time=1.544 +[gpua006:0/64] 2024-02-14 02:43:03,515 (trainer:756) INFO: 39epoch:train:11601-11700batch: iter_time=1.032e-04, forward_time=0.142, loss_ctc=62.172, loss_interctc_layer6=64.737, loss_interctc_layer12=52.964, loss_interctc_layer15=48.303, loss_interctc_layer21=64.776, loss=58.590, backward_time=0.207, grad_norm=67.512, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.424e-05, train_time=1.295 +[gpua006:0/64] 2024-02-14 02:43:53,311 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
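The warning that closes the entry above ("The grad norm is nan. Skipping updating the model.") and the way the loss_scale column backs off in the entries that follow (from 4.056e+31 down toward 1.014e+31) are the usual signature of dynamic loss scaling in mixed-precision training: a step whose unscaled gradients come out non-finite is discarded and the scale is reduced, while a long run of clean steps grows it again. The sketch below illustrates that mechanism in general; the class name and constants are invented for illustration and this is not ESPnet's trainer code.

```python
# Minimal sketch of dynamic loss scaling (illustrative, not espnet2.train.trainer):
# skip the optimizer step when the gradient norm is nan/inf and back the scale off,
# grow the scale again after a long enough run of finite steps.
import math

class LossScaler:
    def __init__(self, init_scale=2.0**16, growth_factor=2.0,
                 backoff_factor=0.5, growth_interval=2000):
        self.scale = init_scale
        self.growth_factor = growth_factor
        self.backoff_factor = backoff_factor
        self.growth_interval = growth_interval
        self._good_steps = 0

    def update(self, grad_norm: float) -> bool:
        """Return True if the optimizer step should be applied."""
        if not math.isfinite(grad_norm):
            # Mirrors "The grad norm is nan. Skipping updating the model."
            self.scale *= self.backoff_factor
            self._good_steps = 0
            return False
        self._good_steps += 1
        if self._good_steps % self.growth_interval == 0:
            self.scale *= self.growth_factor
        return True
```

With these defaults a nan step halves the scale and a streak of finite steps doubles it, which matches the logged loss_scale climbing between warnings and dropping after them.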
+[gpua006:0/64] 2024-02-14 02:45:06,929 (trainer:756) INFO: 39epoch:train:11701-11800batch: iter_time=9.886e-05, forward_time=0.145, loss_ctc=94.586, loss_interctc_layer6=91.196, loss_interctc_layer12=75.301, loss_interctc_layer15=68.952, loss_interctc_layer21=98.038, loss=85.615, backward_time=0.210, grad_norm=68.736, clip=100.000, loss_scale=3.053e+31, optim_step_time=0.139, optim0_lr0=6.424e-05, train_time=1.234 +[gpua006:0/64] 2024-02-14 02:46:53,583 (trainer:756) INFO: 39epoch:train:11801-11900batch: iter_time=9.078e-05, forward_time=0.143, loss_ctc=68.034, loss_interctc_layer6=77.955, loss_interctc_layer12=64.410, loss_interctc_layer15=58.985, loss_interctc_layer21=70.418, loss=67.960, backward_time=0.209, grad_norm=76.048, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.423e-05, train_time=1.066 +[gpua006:0/64] 2024-02-14 02:48:45,007 (trainer:756) INFO: 39epoch:train:11901-12000batch: iter_time=9.437e-05, forward_time=0.143, loss_ctc=76.241, loss_interctc_layer6=79.056, loss_interctc_layer12=65.660, loss_interctc_layer15=60.211, loss_interctc_layer21=79.106, loss=72.055, backward_time=0.208, grad_norm=80.283, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.423e-05, train_time=1.114 +[gpua006:0/64] 2024-02-14 02:51:05,187 (trainer:756) INFO: 39epoch:train:12001-12100batch: iter_time=1.026e-04, forward_time=0.144, loss_ctc=62.842, loss_interctc_layer6=72.116, loss_interctc_layer12=59.632, loss_interctc_layer15=54.499, loss_interctc_layer21=64.902, loss=62.798, backward_time=0.210, grad_norm=125.016, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.422e-05, train_time=1.402 +[gpua006:0/64] 2024-02-14 02:53:15,729 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
+[gpua006:0/64] 2024-02-14 02:53:15,740 (trainer:756) INFO: 39epoch:train:12101-12200batch: iter_time=9.824e-05, forward_time=0.188, loss_ctc=80.466, loss_interctc_layer6=78.628, loss_interctc_layer12=64.791, loss_interctc_layer15=59.221, loss_interctc_layer21=83.871, loss=73.395, backward_time=0.217, grad_norm=98.099, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.422e-05, train_time=1.305 +[gpua006:0/64] 2024-02-14 02:56:03,389 (trainer:756) INFO: 39epoch:train:12201-12300batch: iter_time=9.038e-05, forward_time=0.143, loss_ctc=63.571, loss_interctc_layer6=73.479, loss_interctc_layer12=60.582, loss_interctc_layer15=55.191, loss_interctc_layer21=65.642, loss=63.693, backward_time=0.208, grad_norm=81.144, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.421e-05, train_time=1.676 +[gpua006:0/64] 2024-02-14 02:58:20,147 (trainer:756) INFO: 39epoch:train:12301-12400batch: iter_time=9.801e-05, forward_time=0.166, loss_ctc=57.708, loss_interctc_layer6=67.501, loss_interctc_layer12=55.297, loss_interctc_layer15=50.287, loss_interctc_layer21=59.761, loss=58.111, backward_time=0.221, grad_norm=68.945, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.420e-05, train_time=1.367 +[gpua006:0/64] 2024-02-14 03:00:27,562 (trainer:756) INFO: 39epoch:train:12401-12500batch: iter_time=9.765e-05, forward_time=0.145, loss_ctc=88.126, loss_interctc_layer6=87.794, loss_interctc_layer12=72.407, loss_interctc_layer15=66.097, loss_interctc_layer21=91.309, loss=81.147, backward_time=0.208, grad_norm=77.204, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.420e-05, train_time=1.274 +[gpua006:0/64] 2024-02-14 03:00:47,654 (multiple_iter_factory:32) INFO: Building 10th iter-factory... 
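Each "Building Nth iter-factory..." line in this log is followed by a dataset description pointing at a different split.K under exp/s2t_stats_raw_bpe50000/splits12 (split.2 earlier, split.0 and split.7 further on), each serving 19027 mini-batches. That is what the multiple-iterator mode amounts to at this level: the epoch is driven by a sequence of per-split iterators rather than one monolithic dataset. The helper below is only an assumption written for illustration, not the multiple_iter_factory implementation; it just enumerates the per-split file paths such a factory would read.

```python
# Illustrative only: enumerate the per-split inputs that each "iter-factory"
# in this log reads. The function and its arguments are assumptions, not the
# espnet2 multiple_iter_factory source.
from pathlib import Path
from typing import Iterator, Sequence

def iter_factories(split_root: str, order: Sequence[int]) -> Iterator[dict]:
    root = Path(split_root)
    for i, k in enumerate(order):
        # One "iter-factory" corresponds to one split index k (split.0 .. split.11 here).
        yield {
            "index": i,
            "speech": root / "wav.scp" / f"split.{k}",
            "text_prev": root / "text.prev" / f"split.{k}",
            "text_ctc": root / "text.ctc" / f"split.{k}",
            "text": root / "text" / f"split.{k}",
        }

# The 9th, 10th and 11th factories of this epoch read split.2, split.0 and
# split.7, so the order of splits within an epoch is evidently shuffled.
```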
+[gpua006:0/64] 2024-02-14 03:01:06,233 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 03:01:09,656 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.0", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.0", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.0", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.0", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 03:01:09,656 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.0, +[gpua006:0/64] 2024-02-14 03:01:09,660 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 03:07:20,408 (trainer:756) INFO: 39epoch:train:12501-12600batch: iter_time=2.986, forward_time=0.162, loss_ctc=74.334, loss_interctc_layer6=83.915, loss_interctc_layer12=69.405, loss_interctc_layer15=63.425, loss_interctc_layer21=76.865, loss=73.589, backward_time=0.213, grad_norm=96.530, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.419e-05, train_time=4.128 +[gpua006:0/64] 2024-02-14 03:09:14,443 (trainer:756) INFO: 39epoch:train:12601-12700batch: iter_time=9.082e-05, forward_time=0.143, loss_ctc=84.099, loss_interctc_layer6=82.704, loss_interctc_layer12=68.406, loss_interctc_layer15=62.597, loss_interctc_layer21=87.362, loss=77.034, backward_time=0.209, grad_norm=92.965, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.419e-05, train_time=1.141 +[gpua006:0/64] 2024-02-14 03:12:05,744 (trainer:756) INFO: 39epoch:train:12701-12800batch: iter_time=9.711e-05, forward_time=0.146, loss_ctc=73.270, loss_interctc_layer6=80.449, loss_interctc_layer12=66.451, loss_interctc_layer15=60.701, loss_interctc_layer21=75.861, loss=71.346, backward_time=0.208, grad_norm=68.923, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.418e-05, train_time=1.713 +[gpua006:0/64] 2024-02-14 03:13:58,169 (trainer:756) INFO: 39epoch:train:12801-12900batch: iter_time=9.852e-05, forward_time=0.142, loss_ctc=73.938, loss_interctc_layer6=72.891, loss_interctc_layer12=60.181, loss_interctc_layer15=55.195, loss_interctc_layer21=76.932, loss=67.827, backward_time=0.208, grad_norm=96.291, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.418e-05, train_time=1.124 +[gpua006:0/64] 2024-02-14 03:15:59,839 (trainer:756) INFO: 39epoch:train:12901-13000batch: iter_time=9.492e-05, forward_time=0.178, loss_ctc=71.691, loss_interctc_layer6=72.090, loss_interctc_layer12=59.071, loss_interctc_layer15=53.750, loss_interctc_layer21=74.549, loss=66.230, backward_time=0.226, grad_norm=68.300, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.417e-05, train_time=1.216 +[gpua006:0/64] 2024-02-14 03:18:15,252 (trainer:756) INFO: 39epoch:train:13001-13100batch: iter_time=9.342e-05, forward_time=0.144, loss_ctc=89.361, loss_interctc_layer6=89.485, loss_interctc_layer12=73.923, loss_interctc_layer15=67.699, loss_interctc_layer21=92.649, loss=82.623, backward_time=0.207, grad_norm=69.692, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.417e-05, train_time=1.354 +[gpua006:0/64] 2024-02-14 03:20:49,410 (trainer:756) INFO: 
39epoch:train:13101-13200batch: iter_time=9.930e-05, forward_time=0.142, loss_ctc=65.811, loss_interctc_layer6=76.117, loss_interctc_layer12=63.198, loss_interctc_layer15=57.943, loss_interctc_layer21=68.280, loss=66.270, backward_time=0.208, grad_norm=150.777, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.416e-05, train_time=1.541 +[gpua006:0/64] 2024-02-14 03:23:05,447 (trainer:756) INFO: 39epoch:train:13201-13300batch: iter_time=3.744e-04, forward_time=0.164, loss_ctc=76.007, loss_interctc_layer6=80.329, loss_interctc_layer12=66.413, loss_interctc_layer15=60.691, loss_interctc_layer21=78.778, loss=72.444, backward_time=0.220, grad_norm=79.213, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.416e-05, train_time=1.360 +[gpua006:0/64] 2024-02-14 03:25:39,745 (trainer:756) INFO: 39epoch:train:13301-13400batch: iter_time=1.043e-04, forward_time=0.143, loss_ctc=70.609, loss_interctc_layer6=74.398, loss_interctc_layer12=61.386, loss_interctc_layer15=56.061, loss_interctc_layer21=73.189, loss=67.128, backward_time=0.207, grad_norm=70.867, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.415e-05, train_time=1.542 +[gpua006:0/64] 2024-02-14 03:27:34,470 (trainer:756) INFO: 39epoch:train:13401-13500batch: iter_time=1.007e-04, forward_time=0.150, loss_ctc=71.724, loss_interctc_layer6=79.575, loss_interctc_layer12=65.722, loss_interctc_layer15=60.162, loss_interctc_layer21=74.435, loss=70.324, backward_time=0.226, grad_norm=174.517, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.414e-05, train_time=1.147 +[gpua006:0/64] 2024-02-14 03:29:40,725 (trainer:756) INFO: 39epoch:train:13501-13600batch: iter_time=9.909e-05, forward_time=0.143, loss_ctc=60.241, loss_interctc_layer6=69.508, loss_interctc_layer12=57.149, loss_interctc_layer15=52.150, loss_interctc_layer21=62.448, loss=60.299, backward_time=0.208, grad_norm=65.555, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.414e-05, train_time=1.263 +[gpua006:0/64] 2024-02-14 03:31:39,612 (trainer:756) INFO: 39epoch:train:13601-13700batch: iter_time=1.043e-04, forward_time=0.143, loss_ctc=71.651, loss_interctc_layer6=74.381, loss_interctc_layer12=61.026, loss_interctc_layer15=55.499, loss_interctc_layer21=74.376, loss=67.387, backward_time=0.208, grad_norm=65.770, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.413e-05, train_time=1.189 +[gpua006:0/64] 2024-02-14 03:33:09,274 (multiple_iter_factory:32) INFO: Building 11th iter-factory... 
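Across these entries the reported loss agrees, to the logged precision, with the plain average of loss_ctc and the four intermediate-layer CTC losses (layers 6, 12, 15 and 21), which is the same as weighting the final CTC branch by 0.2 and the mean of the intermediate branches by 0.8. A quick check against the 39epoch:train:13101-13200batch entry at the start of this block:

```python
# Verify how the combined "loss" column relates to the per-branch CTC losses,
# using the values from the 39epoch:train:13101-13200batch entry above.
loss_ctc = 65.811
interctc = [76.117, 63.198, 57.943, 68.280]   # layers 6, 12, 15, 21

avg_all = (loss_ctc + sum(interctc)) / 5
weighted = 0.2 * loss_ctc + 0.8 * sum(interctc) / len(interctc)   # equivalent form

print(f"{avg_all:.3f} {weighted:.3f}")   # 66.270 66.270, matching loss=66.270
```

The same relation holds for the other training entries in this section.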
+[gpua006:0/64] 2024-02-14 03:33:28,208 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 03:33:31,651 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.7", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.7", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.7", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.7", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 03:33:31,651 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.7, +[gpua006:0/64] 2024-02-14 03:33:31,684 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 03:38:35,148 (trainer:756) INFO: 39epoch:train:13701-13800batch: iter_time=2.878, forward_time=0.167, loss_ctc=74.132, loss_interctc_layer6=88.862, loss_interctc_layer12=73.257, loss_interctc_layer15=67.129, loss_interctc_layer21=76.543, loss=75.985, backward_time=0.211, grad_norm=80.849, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.413e-05, train_time=4.155 +[gpua006:0/64] 2024-02-14 03:40:09,469 (trainer:756) INFO: 39epoch:train:13801-13900batch: iter_time=9.090e-05, forward_time=0.143, loss_ctc=78.244, loss_interctc_layer6=83.341, loss_interctc_layer12=68.530, loss_interctc_layer15=62.574, loss_interctc_layer21=81.256, loss=74.789, backward_time=0.209, grad_norm=70.647, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.412e-05, train_time=0.943 +[gpua006:0/64] 2024-02-14 03:42:10,086 (trainer:756) INFO: 39epoch:train:13901-14000batch: iter_time=9.639e-05, forward_time=0.154, loss_ctc=67.078, loss_interctc_layer6=81.522, loss_interctc_layer12=67.618, loss_interctc_layer15=61.984, loss_interctc_layer21=69.663, loss=69.573, backward_time=0.213, grad_norm=77.317, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.412e-05, train_time=1.206 +[gpua006:0/64] 2024-02-14 03:43:54,920 (trainer:756) INFO: 39epoch:train:14001-14100batch: iter_time=9.225e-05, forward_time=0.171, loss_ctc=79.340, loss_interctc_layer6=79.911, loss_interctc_layer12=66.099, loss_interctc_layer15=60.458, loss_interctc_layer21=82.406, loss=73.643, backward_time=0.211, grad_norm=76.605, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.411e-05, train_time=1.048 +[gpua006:0/64] 2024-02-14 03:46:32,506 (trainer:756) INFO: 39epoch:train:14101-14200batch: iter_time=9.966e-05, forward_time=0.142, loss_ctc=57.723, loss_interctc_layer6=64.547, loss_interctc_layer12=52.783, loss_interctc_layer15=48.042, loss_interctc_layer21=60.183, loss=56.656, backward_time=0.207, grad_norm=62.156, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.411e-05, train_time=1.576 +[gpua006:0/64] 2024-02-14 03:48:28,043 (trainer:756) INFO: 39epoch:train:14201-14300batch: iter_time=9.985e-05, forward_time=0.143, loss_ctc=86.940, loss_interctc_layer6=91.466, loss_interctc_layer12=75.652, loss_interctc_layer15=69.194, loss_interctc_layer21=90.142, loss=82.679, backward_time=0.208, grad_norm=69.109, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.410e-05, train_time=1.155 +[gpua006:0/64] 2024-02-14 03:50:24,347 (trainer:756) INFO: 
39epoch:train:14301-14400batch: iter_time=9.196e-05, forward_time=0.153, loss_ctc=66.279, loss_interctc_layer6=77.669, loss_interctc_layer12=64.173, loss_interctc_layer15=58.772, loss_interctc_layer21=68.589, loss=67.096, backward_time=0.222, grad_norm=78.406, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.409e-05, train_time=1.162 +[gpua006:0/64] 2024-02-14 03:52:33,835 (trainer:756) INFO: 39epoch:train:14401-14500batch: iter_time=9.417e-05, forward_time=0.142, loss_ctc=70.260, loss_interctc_layer6=79.376, loss_interctc_layer12=65.916, loss_interctc_layer15=60.373, loss_interctc_layer21=72.986, loss=69.782, backward_time=0.206, grad_norm=80.372, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.409e-05, train_time=1.295 +[gpua006:0/64] 2024-02-14 03:55:11,219 (trainer:756) INFO: 39epoch:train:14501-14600batch: iter_time=1.090e-04, forward_time=0.142, loss_ctc=60.652, loss_interctc_layer6=71.567, loss_interctc_layer12=58.984, loss_interctc_layer15=53.847, loss_interctc_layer21=62.879, loss=61.586, backward_time=0.206, grad_norm=96.778, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.408e-05, train_time=1.574 +[gpua006:0/64] 2024-02-14 03:57:33,417 (trainer:756) INFO: 39epoch:train:14601-14700batch: iter_time=1.079e-04, forward_time=0.143, loss_ctc=72.163, loss_interctc_layer6=77.786, loss_interctc_layer12=63.990, loss_interctc_layer15=58.509, loss_interctc_layer21=75.062, loss=69.502, backward_time=0.208, grad_norm=65.512, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.408e-05, train_time=1.422 +[gpua006:0/64] 2024-02-14 03:59:45,224 (trainer:756) INFO: 39epoch:train:14701-14800batch: iter_time=9.508e-05, forward_time=0.160, loss_ctc=61.636, loss_interctc_layer6=73.825, loss_interctc_layer12=60.793, loss_interctc_layer15=55.638, loss_interctc_layer21=63.792, loss=63.137, backward_time=0.241, grad_norm=59.569, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.407e-05, train_time=1.318 +[gpua006:0/64] 2024-02-14 04:02:07,248 (trainer:756) INFO: 39epoch:train:14801-14900batch: iter_time=9.345e-05, forward_time=0.147, loss_ctc=55.150, loss_interctc_layer6=67.687, loss_interctc_layer12=55.452, loss_interctc_layer15=50.420, loss_interctc_layer21=57.085, loss=57.159, backward_time=0.207, grad_norm=120.447, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.407e-05, train_time=1.420 +[gpua006:0/64] 2024-02-14 04:04:07,955 (trainer:756) INFO: 39epoch:train:14901-15000batch: iter_time=1.032e-04, forward_time=0.143, loss_ctc=79.939, loss_interctc_layer6=87.659, loss_interctc_layer12=72.293, loss_interctc_layer15=66.062, loss_interctc_layer21=82.924, loss=77.775, backward_time=0.208, grad_norm=76.807, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.406e-05, train_time=1.207 +[gpua006:0/64] 2024-02-14 04:35:07,411 (trainer:355) INFO: 39epoch results: [train] iter_time=0.229, forward_time=0.151, loss_ctc=72.181, loss_interctc_layer6=78.877, loss_interctc_layer12=65.189, loss_interctc_layer15=59.666, loss_interctc_layer21=74.853, loss=70.153, backward_time=0.212, grad_norm=82.662, clip=100.000, loss_scale=2.779e+31, optim_step_time=0.138, optim0_lr0=6.448e-05, train_time=1.515, time=6 hours, 19 minutes and 15.89 seconds, total_count=585000, gpu_max_cached_mem_GB=33.436, [valid] loss_ctc=41.253, cer_ctc=0.191, loss_interctc_layer6=47.239, cer_interctc_layer6=0.208, loss_interctc_layer12=34.462, cer_interctc_layer12=0.142, 
loss_interctc_layer15=30.271, cer_interctc_layer15=0.120, loss_interctc_layer21=43.845, cer_interctc_layer21=0.202, loss=39.414, time=30 minutes and 34.82 seconds, total_count=182169, gpu_max_cached_mem_GB=33.436 +[gpua006:0/64] 2024-02-14 04:35:28,834 (trainer:410) INFO: The best model has been updated: valid.total_count +[gpua006:0/64] 2024-02-14 04:35:29,057 (trainer:464) INFO: The model files were removed: exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/34epoch.pth +[gpua006:0/64] 2024-02-14 04:35:29,132 (trainer:289) INFO: 40/45epoch started. Estimated time to finish: 1 day, 19 hours and 18 minutes +[gpua006:0/64] 2024-02-14 04:35:29,917 (multiple_iter_factory:32) INFO: Building 0th iter-factory... +[gpua006:0/64] 2024-02-14 04:35:48,304 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 04:35:51,871 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.1", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.1", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.1", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.1", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 04:35:51,871 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.1, +[gpua006:0/64] 2024-02-14 04:35:51,875 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 04:41:30,529 (trainer:756) INFO: 40epoch:train:1-100batch: iter_time=2.566, forward_time=0.172, loss_ctc=77.103, loss_interctc_layer6=83.825, loss_interctc_layer12=69.372, loss_interctc_layer15=63.547, loss_interctc_layer21=80.367, loss=74.843, backward_time=0.218, grad_norm=137.113, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.406e-05, train_time=3.611 +[gpua006:0/64] 2024-02-14 04:43:09,432 (trainer:756) INFO: 40epoch:train:101-200batch: iter_time=8.391e-05, forward_time=0.143, loss_ctc=85.010, loss_interctc_layer6=89.759, loss_interctc_layer12=74.688, loss_interctc_layer15=68.677, loss_interctc_layer21=88.117, loss=81.250, backward_time=0.209, grad_norm=108.577, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.405e-05, train_time=0.990 +[gpua006:0/64] 2024-02-14 04:44:44,932 (trainer:756) INFO: 40epoch:train:201-300batch: iter_time=1.026e-04, forward_time=0.143, loss_ctc=67.381, loss_interctc_layer6=76.113, loss_interctc_layer12=62.815, loss_interctc_layer15=57.429, loss_interctc_layer21=69.811, loss=66.710, backward_time=0.209, grad_norm=80.219, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.405e-05, train_time=0.955 +[gpua006:0/64] 2024-02-14 04:46:25,958 (trainer:756) INFO: 40epoch:train:301-400batch: iter_time=8.666e-05, forward_time=0.157, loss_ctc=71.854, loss_interctc_layer6=84.110, loss_interctc_layer12=69.255, loss_interctc_layer15=63.259, loss_interctc_layer21=74.384, loss=72.572, backward_time=0.222, grad_norm=74.806, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.404e-05, train_time=1.010 +[gpua006:0/64] 2024-02-14 04:48:20,844 (trainer:756) INFO: 40epoch:train:401-500batch: iter_time=2.762e-04, forward_time=0.196, loss_ctc=75.785, loss_interctc_layer6=84.454, 
loss_interctc_layer12=70.225, loss_interctc_layer15=64.535, loss_interctc_layer21=78.649, loss=74.730, backward_time=0.234, grad_norm=64.070, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.141, optim0_lr0=6.403e-05, train_time=1.148 +[gpua006:0/64] 2024-02-14 04:50:28,318 (trainer:756) INFO: 40epoch:train:501-600batch: iter_time=8.095e-05, forward_time=0.163, loss_ctc=89.586, loss_interctc_layer6=93.672, loss_interctc_layer12=78.124, loss_interctc_layer15=71.795, loss_interctc_layer21=92.935, loss=85.223, backward_time=0.214, grad_norm=85.728, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.403e-05, train_time=1.275 +[gpua006:0/64] 2024-02-14 04:52:36,686 (trainer:756) INFO: 40epoch:train:601-700batch: iter_time=1.483e-04, forward_time=0.152, loss_ctc=64.916, loss_interctc_layer6=73.572, loss_interctc_layer12=61.455, loss_interctc_layer15=56.610, loss_interctc_layer21=67.353, loss=64.781, backward_time=0.223, grad_norm=71.774, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.402e-05, train_time=1.283 +[gpua006:0/64] 2024-02-14 04:54:27,613 (trainer:756) INFO: 40epoch:train:701-800batch: iter_time=8.337e-05, forward_time=0.143, loss_ctc=65.202, loss_interctc_layer6=80.298, loss_interctc_layer12=66.575, loss_interctc_layer15=61.226, loss_interctc_layer21=67.635, loss=68.187, backward_time=0.209, grad_norm=85.057, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.402e-05, train_time=1.109 +[gpua006:0/64] 2024-02-14 04:56:15,372 (trainer:756) INFO: 40epoch:train:801-900batch: iter_time=8.344e-05, forward_time=0.146, loss_ctc=84.904, loss_interctc_layer6=93.311, loss_interctc_layer12=77.154, loss_interctc_layer15=70.603, loss_interctc_layer21=88.125, loss=82.819, backward_time=0.209, grad_norm=86.326, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.401e-05, train_time=1.077 +[gpua006:0/64] 2024-02-14 04:58:17,135 (trainer:756) INFO: 40epoch:train:901-1000batch: iter_time=8.389e-05, forward_time=0.165, loss_ctc=73.979, loss_interctc_layer6=81.551, loss_interctc_layer12=67.975, loss_interctc_layer15=62.522, loss_interctc_layer21=76.531, loss=72.511, backward_time=0.218, grad_norm=79.059, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.401e-05, train_time=1.217 +[gpua006:0/64] 2024-02-14 05:00:29,015 (trainer:756) INFO: 40epoch:train:1001-1100batch: iter_time=7.900e-05, forward_time=0.195, loss_ctc=70.395, loss_interctc_layer6=73.158, loss_interctc_layer12=60.891, loss_interctc_layer15=55.876, loss_interctc_layer21=73.278, loss=66.720, backward_time=0.216, grad_norm=70.347, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.400e-05, train_time=1.318 +[gpua006:0/64] 2024-02-14 05:02:45,340 (trainer:756) INFO: 40epoch:train:1101-1200batch: iter_time=8.520e-05, forward_time=0.162, loss_ctc=96.019, loss_interctc_layer6=97.991, loss_interctc_layer12=80.941, loss_interctc_layer15=74.117, loss_interctc_layer21=99.723, loss=89.758, backward_time=0.223, grad_norm=92.779, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.400e-05, train_time=1.364 +[gpua006:0/64] 2024-02-14 05:04:00,257 (multiple_iter_factory:32) INFO: Building 1th iter-factory... 
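Back at the epoch boundary above, the trainer updated the "best" marker (valid.total_count), deleted exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/34epoch.pth, and estimated 1 day, 19 hours and 18 minutes for the remaining six of 45 epochs, roughly seven hours per remaining epoch. The snippet below is only a sketch of that keep-recent, keep-best style of checkpoint pruning, with invented names and an invented keep_recent default; it is not the ESPnet trainer's own logic.

```python
# Illustrative sketch only (not espnet2.train.trainer): keep the newest few
# NNepoch.pth checkpoints plus anything still needed as a "best" model, and
# delete the rest, the behaviour suggested by
# "The model files were removed: .../34epoch.pth" right after epoch 39 finished.
import re
from pathlib import Path
from typing import Iterable, List

def prune_checkpoints(exp_dir: str, keep_recent: int = 5,
                      protected: Iterable[str] = ()) -> List[Path]:
    protected = set(protected)
    ckpts = sorted(
        Path(exp_dir).glob("*epoch.pth"),
        key=lambda p: int(re.match(r"(\d+)epoch", p.name).group(1)),
    )
    removed = []
    for path in ckpts[:-keep_recent] if keep_recent else ckpts:
        if path.name not in protected:   # e.g. epochs still linked as a "best" model
            path.unlink()
            removed.append(path)
    return removed
```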
+[gpua006:0/64] 2024-02-14 05:04:19,024 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 05:04:22,442 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.3", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.3", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.3", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.3", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 05:04:22,442 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.3, +[gpua006:0/64] 2024-02-14 05:04:22,535 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 05:09:48,367 (trainer:756) INFO: 40epoch:train:1201-1300batch: iter_time=2.467, forward_time=0.144, loss_ctc=83.195, loss_interctc_layer6=91.065, loss_interctc_layer12=75.556, loss_interctc_layer15=69.290, loss_interctc_layer21=86.054, loss=81.032, backward_time=0.206, grad_norm=79.402, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.399e-05, train_time=4.230 +[gpua006:0/64] 2024-02-14 05:11:23,372 (trainer:756) INFO: 40epoch:train:1301-1400batch: iter_time=1.105e-04, forward_time=0.143, loss_ctc=74.945, loss_interctc_layer6=80.882, loss_interctc_layer12=67.235, loss_interctc_layer15=61.693, loss_interctc_layer21=77.717, loss=72.494, backward_time=0.207, grad_norm=121.910, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.399e-05, train_time=0.950 +[gpua006:0/64] 2024-02-14 05:12:59,945 (trainer:756) INFO: 40epoch:train:1401-1500batch: iter_time=1.045e-04, forward_time=0.143, loss_ctc=77.712, loss_interctc_layer6=80.933, loss_interctc_layer12=66.720, loss_interctc_layer15=61.099, loss_interctc_layer21=80.604, loss=73.414, backward_time=0.208, grad_norm=80.338, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.398e-05, train_time=0.966 +[gpua006:0/64] 2024-02-14 05:14:41,969 (trainer:756) INFO: 40epoch:train:1501-1600batch: iter_time=1.050e-04, forward_time=0.143, loss_ctc=63.259, loss_interctc_layer6=76.436, loss_interctc_layer12=62.988, loss_interctc_layer15=57.584, loss_interctc_layer21=65.470, loss=65.147, backward_time=0.207, grad_norm=74.556, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.397e-05, train_time=1.020 +[gpua006:0/64] 2024-02-14 05:16:46,869 (trainer:756) INFO: 40epoch:train:1601-1700batch: iter_time=9.653e-05, forward_time=0.144, loss_ctc=77.914, loss_interctc_layer6=87.887, loss_interctc_layer12=72.470, loss_interctc_layer15=66.390, loss_interctc_layer21=80.956, loss=77.123, backward_time=0.208, grad_norm=83.589, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.397e-05, train_time=1.249 +[gpua006:0/64] 2024-02-14 05:18:55,108 (trainer:756) INFO: 40epoch:train:1701-1800batch: iter_time=9.301e-05, forward_time=0.154, loss_ctc=81.232, loss_interctc_layer6=90.590, loss_interctc_layer12=75.450, loss_interctc_layer15=69.439, loss_interctc_layer21=84.072, loss=80.157, backward_time=0.211, grad_norm=74.874, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.396e-05, train_time=1.282 +[gpua006:0/64] 2024-02-14 05:20:21,339 (trainer:687) WARNING: The grad norm is nan. 
Skipping updating the model. +[gpua006:0/64] 2024-02-14 05:21:09,001 (trainer:756) INFO: 40epoch:train:1801-1900batch: iter_time=8.623e-05, forward_time=0.185, loss_ctc=78.737, loss_interctc_layer6=83.380, loss_interctc_layer12=69.110, loss_interctc_layer15=63.356, loss_interctc_layer21=81.836, loss=75.284, backward_time=0.222, grad_norm=75.942, clip=100.000, loss_scale=3.298e+31, optim_step_time=0.139, optim0_lr0=6.396e-05, train_time=1.339 +[gpua006:0/64] 2024-02-14 05:23:16,497 (trainer:756) INFO: 40epoch:train:1901-2000batch: iter_time=1.997e-04, forward_time=0.180, loss_ctc=54.987, loss_interctc_layer6=67.303, loss_interctc_layer12=56.271, loss_interctc_layer15=51.748, loss_interctc_layer21=56.913, loss=57.444, backward_time=0.240, grad_norm=86.778, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.395e-05, train_time=1.274 +[gpua006:0/64] 2024-02-14 05:25:10,353 (trainer:756) INFO: 40epoch:train:2001-2100batch: iter_time=9.297e-05, forward_time=0.143, loss_ctc=71.842, loss_interctc_layer6=85.401, loss_interctc_layer12=70.696, loss_interctc_layer15=64.791, loss_interctc_layer21=74.601, loss=73.466, backward_time=0.207, grad_norm=76.305, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.395e-05, train_time=1.139 +[gpua006:0/64] 2024-02-14 05:27:07,750 (trainer:756) INFO: 40epoch:train:2101-2200batch: iter_time=1.038e-04, forward_time=0.144, loss_ctc=93.141, loss_interctc_layer6=93.474, loss_interctc_layer12=77.165, loss_interctc_layer15=70.568, loss_interctc_layer21=96.659, loss=86.202, backward_time=0.208, grad_norm=115.576, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.394e-05, train_time=1.174 +[gpua006:0/64] 2024-02-14 05:29:18,135 (trainer:756) INFO: 40epoch:train:2201-2300batch: iter_time=8.762e-05, forward_time=0.142, loss_ctc=67.878, loss_interctc_layer6=77.481, loss_interctc_layer12=64.545, loss_interctc_layer15=59.479, loss_interctc_layer21=70.399, loss=67.956, backward_time=0.205, grad_norm=69.503, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.394e-05, train_time=1.304 +[gpua006:0/64] 2024-02-14 05:31:50,269 (trainer:756) INFO: 40epoch:train:2301-2400batch: iter_time=8.763e-05, forward_time=0.145, loss_ctc=75.506, loss_interctc_layer6=85.957, loss_interctc_layer12=71.419, loss_interctc_layer15=65.571, loss_interctc_layer21=78.439, loss=75.379, backward_time=0.205, grad_norm=70.613, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.393e-05, train_time=1.521 +[gpua006:0/64] 2024-02-14 05:33:51,204 (trainer:756) INFO: 40epoch:train:2401-2500batch: iter_time=9.110e-05, forward_time=0.143, loss_ctc=94.123, loss_interctc_layer6=95.015, loss_interctc_layer12=78.487, loss_interctc_layer15=71.846, loss_interctc_layer21=97.590, loss=87.412, backward_time=0.208, grad_norm=120.333, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.393e-05, train_time=1.209 +[gpua006:0/64] 2024-02-14 05:34:11,230 (multiple_iter_factory:32) INFO: Building 2th iter-factory... 
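The optim0_lr0 column creeps down very slowly (from about 6.43e-05 to 6.39e-05 across these entries), and it tracks an inverse-square-root decay of the step count: taking total_count=585000 from the epoch-39 summary and 15000 batches per epoch, lr * sqrt(step) comes out essentially constant. This is only a consistency check against the logged numbers; the warmup length and base rate are not visible in this part of the log, so the schedule is inferred, not quoted.

```python
# Check that the logged learning rate is consistent with lr proportional to
# 1/sqrt(step). Steps are taken as cumulative batch counts (15000 per epoch, so
# the epoch-39 summary's total_count=585000 = 39 * 15000); whether the optimizer
# counts batches or accumulated updates only changes the constant, not the fit.
import math

points = [
    (38 * 15000 + 11300, 6.427e-05),   # 39epoch:train:11201-11300batch
    (39 * 15000,         6.406e-05),   # 39epoch:train:14901-15000batch
    (39 * 15000 + 2500,  6.393e-05),   # 40epoch:train:2401-2500batch
]
for step, lr in points:
    print(step, f"{lr * math.sqrt(step):.5f}")   # 0.04900 for each point
```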
+[gpua006:0/64] 2024-02-14 05:34:30,017 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 05:34:33,416 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.4", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.4", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.4", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.4", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 05:34:33,416 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.4, +[gpua006:0/64] 2024-02-14 05:34:33,432 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 05:40:30,372 (trainer:756) INFO: 40epoch:train:2501-2600batch: iter_time=2.851, forward_time=0.144, loss_ctc=80.818, loss_interctc_layer6=82.692, loss_interctc_layer12=68.243, loss_interctc_layer15=62.481, loss_interctc_layer21=83.982, loss=75.643, backward_time=0.208, grad_norm=86.852, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.392e-05, train_time=3.991 +[gpua006:0/64] 2024-02-14 05:42:18,716 (trainer:756) INFO: 40epoch:train:2601-2700batch: iter_time=8.761e-05, forward_time=0.143, loss_ctc=88.285, loss_interctc_layer6=88.778, loss_interctc_layer12=73.489, loss_interctc_layer15=67.374, loss_interctc_layer21=91.526, loss=81.890, backward_time=0.207, grad_norm=129.366, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.391e-05, train_time=1.083 +[gpua006:0/64] 2024-02-14 05:44:31,766 (trainer:756) INFO: 40epoch:train:2701-2800batch: iter_time=8.790e-05, forward_time=0.166, loss_ctc=70.835, loss_interctc_layer6=75.622, loss_interctc_layer12=62.308, loss_interctc_layer15=56.937, loss_interctc_layer21=73.601, loss=67.861, backward_time=0.222, grad_norm=71.983, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.391e-05, train_time=1.330 +[gpua006:0/64] 2024-02-14 05:46:22,757 (trainer:756) INFO: 40epoch:train:2801-2900batch: iter_time=8.796e-05, forward_time=0.202, loss_ctc=76.824, loss_interctc_layer6=83.251, loss_interctc_layer12=68.475, loss_interctc_layer15=62.529, loss_interctc_layer21=79.664, loss=74.149, backward_time=0.231, grad_norm=70.101, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.145, optim0_lr0=6.390e-05, train_time=1.110 +[gpua006:0/64] 2024-02-14 05:48:18,507 (trainer:756) INFO: 40epoch:train:2901-3000batch: iter_time=9.149e-05, forward_time=0.153, loss_ctc=80.370, loss_interctc_layer6=83.427, loss_interctc_layer12=69.353, loss_interctc_layer15=63.801, loss_interctc_layer21=83.488, loss=76.088, backward_time=0.209, grad_norm=80.699, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.390e-05, train_time=1.157 +[gpua006:0/64] 2024-02-14 05:50:23,095 (trainer:756) INFO: 40epoch:train:3001-3100batch: iter_time=9.422e-05, forward_time=0.143, loss_ctc=91.696, loss_interctc_layer6=93.110, loss_interctc_layer12=77.451, loss_interctc_layer15=71.251, loss_interctc_layer21=95.076, loss=85.717, backward_time=0.207, grad_norm=81.031, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.389e-05, train_time=1.247 +[gpua006:0/64] 2024-02-14 05:52:32,787 (trainer:756) INFO: 40epoch:train:3101-3200batch: 
iter_time=9.222e-05, forward_time=0.142, loss_ctc=67.763, loss_interctc_layer6=72.767, loss_interctc_layer12=60.499, loss_interctc_layer15=55.589, loss_interctc_layer21=70.233, loss=65.370, backward_time=0.205, grad_norm=81.249, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.389e-05, train_time=1.297 +[gpua006:0/64] 2024-02-14 05:54:25,816 (trainer:756) INFO: 40epoch:train:3201-3300batch: iter_time=8.741e-05, forward_time=0.160, loss_ctc=67.458, loss_interctc_layer6=78.730, loss_interctc_layer12=65.056, loss_interctc_layer15=59.753, loss_interctc_layer21=69.863, loss=68.172, backward_time=0.206, grad_norm=75.488, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.388e-05, train_time=1.130 +[gpua006:0/64] 2024-02-14 05:56:33,792 (trainer:756) INFO: 40epoch:train:3301-3400batch: iter_time=9.341e-05, forward_time=0.145, loss_ctc=88.029, loss_interctc_layer6=92.470, loss_interctc_layer12=76.336, loss_interctc_layer15=69.841, loss_interctc_layer21=91.367, loss=83.608, backward_time=0.208, grad_norm=100.298, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.388e-05, train_time=1.280 +[gpua006:0/64] 2024-02-14 05:58:44,460 (trainer:756) INFO: 40epoch:train:3401-3500batch: iter_time=9.776e-05, forward_time=0.145, loss_ctc=75.600, loss_interctc_layer6=81.085, loss_interctc_layer12=67.386, loss_interctc_layer15=61.913, loss_interctc_layer21=78.315, loss=72.860, backward_time=0.207, grad_norm=73.333, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.387e-05, train_time=1.306 +[gpua006:0/64] 2024-02-14 06:00:52,870 (trainer:756) INFO: 40epoch:train:3501-3600batch: iter_time=8.833e-05, forward_time=0.142, loss_ctc=75.725, loss_interctc_layer6=72.622, loss_interctc_layer12=60.213, loss_interctc_layer15=55.159, loss_interctc_layer21=78.919, loss=68.528, backward_time=0.205, grad_norm=59.323, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.387e-05, train_time=1.284 +[gpua006:0/64] 2024-02-14 06:02:43,722 (trainer:756) INFO: 40epoch:train:3601-3700batch: iter_time=8.587e-05, forward_time=0.144, loss_ctc=100.303, loss_interctc_layer6=97.815, loss_interctc_layer12=80.833, loss_interctc_layer15=74.058, loss_interctc_layer21=104.309, loss=91.463, backward_time=0.208, grad_norm=116.588, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.386e-05, train_time=1.108 +[gpua006:0/64] 2024-02-14 06:04:05,153 (multiple_iter_factory:32) INFO: Building 3th iter-factory... 
+[gpua006:0/64] 2024-02-14 06:04:23,651 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 06:04:27,093 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.0", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.0", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.0", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.0", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 06:04:27,093 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.0, +[gpua006:0/64] 2024-02-14 06:04:27,110 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 06:09:37,741 (trainer:756) INFO: 40epoch:train:3701-3800batch: iter_time=2.884, forward_time=0.173, loss_ctc=89.195, loss_interctc_layer6=90.218, loss_interctc_layer12=74.693, loss_interctc_layer15=68.424, loss_interctc_layer21=92.382, loss=82.982, backward_time=0.218, grad_norm=118.137, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.385e-05, train_time=4.139 +[gpua006:0/64] 2024-02-14 06:11:40,197 (trainer:756) INFO: 40epoch:train:3801-3900batch: iter_time=8.065e-05, forward_time=0.143, loss_ctc=79.062, loss_interctc_layer6=80.224, loss_interctc_layer12=66.492, loss_interctc_layer15=60.986, loss_interctc_layer21=82.011, loss=73.755, backward_time=0.206, grad_norm=76.894, clip=100.000, loss_scale=2.779e+31, optim_step_time=0.138, optim0_lr0=6.385e-05, train_time=1.225 +[gpua006:0/64] 2024-02-14 06:13:27,122 (trainer:756) INFO: 40epoch:train:3901-4000batch: iter_time=8.051e-05, forward_time=0.143, loss_ctc=79.810, loss_interctc_layer6=80.256, loss_interctc_layer12=66.245, loss_interctc_layer15=60.454, loss_interctc_layer21=82.810, loss=73.915, backward_time=0.207, grad_norm=74.826, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.384e-05, train_time=1.069 +[gpua006:0/64] 2024-02-14 06:15:03,907 (trainer:756) INFO: 40epoch:train:4001-4100batch: iter_time=8.737e-05, forward_time=0.142, loss_ctc=66.508, loss_interctc_layer6=76.100, loss_interctc_layer12=62.694, loss_interctc_layer15=57.390, loss_interctc_layer21=68.987, loss=66.336, backward_time=0.207, grad_norm=65.117, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.384e-05, train_time=0.968 +[gpua006:0/64] 2024-02-14 06:17:11,541 (trainer:756) INFO: 40epoch:train:4101-4200batch: iter_time=8.508e-05, forward_time=0.144, loss_ctc=84.780, loss_interctc_layer6=87.852, loss_interctc_layer12=72.373, loss_interctc_layer15=66.229, loss_interctc_layer21=87.839, loss=79.815, backward_time=0.207, grad_norm=85.617, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.383e-05, train_time=1.276 +[gpua006:0/64] 2024-02-14 06:19:06,550 (trainer:756) INFO: 40epoch:train:4201-4300batch: iter_time=8.763e-05, forward_time=0.143, loss_ctc=84.122, loss_interctc_layer6=90.168, loss_interctc_layer12=75.105, loss_interctc_layer15=69.020, loss_interctc_layer21=86.981, loss=81.079, backward_time=0.207, grad_norm=95.300, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.383e-05, train_time=1.150 +[gpua006:0/64] 2024-02-14 06:21:06,034 (trainer:756) INFO: 40epoch:train:4301-4400batch: 
iter_time=7.765e-05, forward_time=0.143, loss_ctc=82.981, loss_interctc_layer6=82.661, loss_interctc_layer12=68.402, loss_interctc_layer15=62.690, loss_interctc_layer21=86.137, loss=76.574, backward_time=0.207, grad_norm=97.019, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.382e-05, train_time=1.195 +[gpua006:0/64] 2024-02-14 06:23:14,098 (trainer:756) INFO: 40epoch:train:4401-4500batch: iter_time=8.513e-05, forward_time=0.142, loss_ctc=58.200, loss_interctc_layer6=66.711, loss_interctc_layer12=55.576, loss_interctc_layer15=51.065, loss_interctc_layer21=60.022, loss=58.315, backward_time=0.206, grad_norm=61.480, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.382e-05, train_time=1.280 +[gpua006:0/64] 2024-02-14 06:25:43,349 (trainer:756) INFO: 40epoch:train:4501-4600batch: iter_time=8.470e-05, forward_time=0.143, loss_ctc=76.339, loss_interctc_layer6=85.064, loss_interctc_layer12=70.403, loss_interctc_layer15=64.498, loss_interctc_layer21=79.317, loss=75.124, backward_time=0.212, grad_norm=100.547, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.381e-05, train_time=1.492 +[gpua006:0/64] 2024-02-14 06:27:57,449 (trainer:756) INFO: 40epoch:train:4601-4700batch: iter_time=8.467e-05, forward_time=0.144, loss_ctc=95.628, loss_interctc_layer6=92.909, loss_interctc_layer12=76.638, loss_interctc_layer15=70.182, loss_interctc_layer21=99.252, loss=86.922, backward_time=0.207, grad_norm=95.754, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.381e-05, train_time=1.341 +[gpua006:0/64] 2024-02-14 06:29:56,750 (trainer:756) INFO: 40epoch:train:4701-4800batch: iter_time=8.884e-05, forward_time=0.142, loss_ctc=70.317, loss_interctc_layer6=76.863, loss_interctc_layer12=63.989, loss_interctc_layer15=58.894, loss_interctc_layer21=72.787, loss=68.570, backward_time=0.206, grad_norm=71.859, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.380e-05, train_time=1.193 +[gpua006:0/64] 2024-02-14 06:31:54,217 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-14 06:32:10,093 (trainer:756) INFO: 40epoch:train:4801-4900batch: iter_time=8.663e-05, forward_time=0.199, loss_ctc=82.509, loss_interctc_layer6=85.673, loss_interctc_layer12=71.127, loss_interctc_layer15=65.234, loss_interctc_layer21=85.900, loss=78.089, backward_time=0.289, grad_norm=93.647, clip=100.000, loss_scale=3.770e+31, optim_step_time=0.142, optim0_lr0=6.380e-05, train_time=1.332 +[gpua006:0/64] 2024-02-14 06:33:56,559 (trainer:756) INFO: 40epoch:train:4901-5000batch: iter_time=8.318e-05, forward_time=0.147, loss_ctc=98.954, loss_interctc_layer6=95.195, loss_interctc_layer12=78.551, loss_interctc_layer15=71.878, loss_interctc_layer21=102.664, loss=89.448, backward_time=0.207, grad_norm=100.958, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.379e-05, train_time=1.065 +[gpua006:0/64] 2024-02-14 06:34:16,589 (multiple_iter_factory:32) INFO: Building 4th iter-factory... 
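Every iter-factory in this log reports the same sampler line, UnsortedBatchSampler(N-batch=19027, batch_size=256, ...), with mini-batch sizes mean=256.0, min=256, max=257. That summary implies batches are formed by cutting the key file into fixed 256-utterance chunks in order, with the leftover utterances absorbed by letting a few batches grow to 257 rather than emitting a short final batch. The function below is an assumption about that behaviour, written for illustration; it is not the UnsortedBatchSampler source.

```python
# Illustrative fixed-size batching with the remainder folded into slightly
# larger batches, matching a "min=256, max=257" size summary. Assumed logic,
# not espnet2's UnsortedBatchSampler implementation.
from typing import List, Sequence

def make_batches(keys: Sequence[str], batch_size: int = 256) -> List[List[str]]:
    n_batch = max(1, len(keys) // batch_size)     # 19027 per split in this run
    base, extra = divmod(len(keys), n_batch)      # base=256; `extra` batches get +1
    batches, start = [], 0
    for i in range(n_batch):
        size = base + (1 if i < extra else 0)
        batches.append(list(keys[start:start + size]))
        start += size
    return batches

# e.g. 19027 * 256 + 123 keys -> 19027 batches, 123 of size 257, the rest 256.
```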
+[gpua006:0/64] 2024-02-14 06:34:35,520 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 06:34:38,897 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.7", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.7", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.7", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.7", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 06:34:38,897 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.7, +[gpua006:0/64] 2024-02-14 06:34:38,934 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 06:40:55,827 (trainer:756) INFO: 40epoch:train:5001-5100batch: iter_time=3.068, forward_time=0.145, loss_ctc=75.702, loss_interctc_layer6=82.323, loss_interctc_layer12=67.846, loss_interctc_layer15=62.197, loss_interctc_layer21=78.601, loss=73.334, backward_time=0.209, grad_norm=74.142, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.378e-05, train_time=4.192 +[gpua006:0/64] 2024-02-14 06:42:48,220 (trainer:756) INFO: 40epoch:train:5101-5200batch: iter_time=1.021e-04, forward_time=0.145, loss_ctc=83.378, loss_interctc_layer6=87.792, loss_interctc_layer12=72.576, loss_interctc_layer15=66.600, loss_interctc_layer21=86.436, loss=79.356, backward_time=0.209, grad_norm=72.370, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.378e-05, train_time=1.124 +[gpua006:0/64] 2024-02-14 06:44:46,960 (trainer:756) INFO: 40epoch:train:5201-5300batch: iter_time=1.020e-04, forward_time=0.144, loss_ctc=66.568, loss_interctc_layer6=75.133, loss_interctc_layer12=61.790, loss_interctc_layer15=56.451, loss_interctc_layer21=69.138, loss=65.816, backward_time=0.208, grad_norm=78.961, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.377e-05, train_time=1.187 +[gpua006:0/64] 2024-02-14 06:47:10,325 (trainer:756) INFO: 40epoch:train:5301-5400batch: iter_time=8.373e-05, forward_time=0.158, loss_ctc=70.334, loss_interctc_layer6=82.530, loss_interctc_layer12=67.792, loss_interctc_layer15=61.776, loss_interctc_layer21=72.770, loss=71.041, backward_time=0.206, grad_norm=72.096, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.377e-05, train_time=1.433 +[gpua006:0/64] 2024-02-14 06:48:56,471 (trainer:756) INFO: 40epoch:train:5401-5500batch: iter_time=8.643e-05, forward_time=0.145, loss_ctc=74.208, loss_interctc_layer6=82.948, loss_interctc_layer12=68.723, loss_interctc_layer15=63.115, loss_interctc_layer21=77.247, loss=73.248, backward_time=0.207, grad_norm=72.929, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.376e-05, train_time=1.061 +[gpua006:0/64] 2024-02-14 06:50:49,854 (trainer:756) INFO: 40epoch:train:5501-5600batch: iter_time=8.692e-05, forward_time=0.142, loss_ctc=87.650, loss_interctc_layer6=91.702, loss_interctc_layer12=76.167, loss_interctc_layer15=69.923, loss_interctc_layer21=90.998, loss=83.288, backward_time=0.206, grad_norm=77.902, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.376e-05, train_time=1.134 +[gpua006:0/64] 2024-02-14 06:52:41,787 (trainer:756) INFO: 40epoch:train:5601-5700batch: 
iter_time=8.797e-05, forward_time=0.142, loss_ctc=63.326, loss_interctc_layer6=72.541, loss_interctc_layer12=60.299, loss_interctc_layer15=55.400, loss_interctc_layer21=65.689, loss=63.451, backward_time=0.206, grad_norm=115.582, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.375e-05, train_time=1.119 +[gpua006:0/64] 2024-02-14 06:54:58,221 (trainer:756) INFO: 40epoch:train:5701-5800batch: iter_time=8.694e-05, forward_time=0.142, loss_ctc=63.444, loss_interctc_layer6=77.692, loss_interctc_layer12=64.315, loss_interctc_layer15=59.173, loss_interctc_layer21=65.975, loss=66.120, backward_time=0.205, grad_norm=81.206, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.375e-05, train_time=1.364 +[gpua006:0/64] 2024-02-14 06:57:02,352 (trainer:756) INFO: 40epoch:train:5801-5900batch: iter_time=8.612e-05, forward_time=0.232, loss_ctc=84.204, loss_interctc_layer6=92.746, loss_interctc_layer12=76.687, loss_interctc_layer15=70.081, loss_interctc_layer21=87.408, loss=82.225, backward_time=0.249, grad_norm=97.274, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.143, optim0_lr0=6.374e-05, train_time=1.240 +[gpua006:0/64] 2024-02-14 06:59:01,559 (trainer:756) INFO: 40epoch:train:5901-6000batch: iter_time=8.181e-05, forward_time=0.149, loss_ctc=73.179, loss_interctc_layer6=80.752, loss_interctc_layer12=66.978, loss_interctc_layer15=61.556, loss_interctc_layer21=75.833, loss=71.660, backward_time=0.206, grad_norm=88.672, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.374e-05, train_time=1.193 +[gpua006:0/64] 2024-02-14 07:01:04,844 (trainer:756) INFO: 40epoch:train:6001-6100batch: iter_time=8.796e-05, forward_time=0.142, loss_ctc=69.131, loss_interctc_layer6=72.104, loss_interctc_layer12=59.589, loss_interctc_layer15=54.607, loss_interctc_layer21=71.793, loss=65.445, backward_time=0.206, grad_norm=132.944, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.373e-05, train_time=1.233 +[gpua006:0/64] 2024-02-14 07:03:22,016 (trainer:756) INFO: 40epoch:train:6101-6200batch: iter_time=9.036e-05, forward_time=0.144, loss_ctc=94.339, loss_interctc_layer6=97.458, loss_interctc_layer12=80.532, loss_interctc_layer15=73.616, loss_interctc_layer21=98.153, loss=88.819, backward_time=0.207, grad_norm=97.160, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.373e-05, train_time=1.371 +[gpua006:0/64] 2024-02-14 07:04:44,740 (multiple_iter_factory:32) INFO: Building 5th iter-factory... 
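grad_norm values well above 100 keep appearing in this epoch (137.113, 129.366, 132.944) while the clip column stays pinned at 100.000, which fits gradient clipping by global norm with a threshold around 100: the norm that gets logged is the pre-clipping value, so it can exceed the limit even though the applied update is rescaled. A minimal sketch of such a step follows; the meaning of the clip column and the exact threshold are inferred from the log, not taken from the configuration.

```python
# Minimal clip-by-global-norm training step (illustrative). clip_grad_norm_
# returns the total norm *before* clipping, which is why a log can report
# grad_norm > 100 even when gradients are clipped to max_norm=100.
import torch

def clipped_step(model: torch.nn.Module, optimizer: torch.optim.Optimizer,
                 loss: torch.Tensor, max_norm: float = 100.0) -> float:
    optimizer.zero_grad()
    loss.backward()
    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
    if torch.isfinite(grad_norm):
        optimizer.step()                 # skip the update if the norm is nan/inf
    return float(grad_norm)
```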
+[gpua006:0/64] 2024-02-14 07:05:03,400 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 07:05:06,823 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.8", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.8", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.8", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.8", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 07:05:06,823 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.8, +[gpua006:0/64] 2024-02-14 07:05:06,855 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 07:10:03,477 (trainer:756) INFO: 40epoch:train:6201-6300batch: iter_time=2.808, forward_time=0.143, loss_ctc=86.049, loss_interctc_layer6=90.782, loss_interctc_layer12=75.199, loss_interctc_layer15=68.984, loss_interctc_layer21=89.240, loss=82.051, backward_time=0.206, grad_norm=114.110, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.372e-05, train_time=4.014 +[gpua006:0/64] 2024-02-14 07:11:38,321 (trainer:756) INFO: 40epoch:train:6301-6400batch: iter_time=8.179e-05, forward_time=0.145, loss_ctc=78.095, loss_interctc_layer6=79.242, loss_interctc_layer12=65.500, loss_interctc_layer15=60.108, loss_interctc_layer21=81.072, loss=72.804, backward_time=0.208, grad_norm=71.949, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.371e-05, train_time=0.948 +[gpua006:0/64] 2024-02-14 07:13:18,853 (trainer:756) INFO: 40epoch:train:6401-6500batch: iter_time=8.736e-05, forward_time=0.144, loss_ctc=79.367, loss_interctc_layer6=79.775, loss_interctc_layer12=65.562, loss_interctc_layer15=59.859, loss_interctc_layer21=82.352, loss=73.383, backward_time=0.210, grad_norm=113.255, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.371e-05, train_time=1.005 +[gpua006:0/64] 2024-02-14 07:15:28,006 (trainer:756) INFO: 40epoch:train:6501-6600batch: iter_time=8.495e-05, forward_time=0.144, loss_ctc=66.425, loss_interctc_layer6=76.082, loss_interctc_layer12=62.565, loss_interctc_layer15=57.099, loss_interctc_layer21=68.835, loss=66.201, backward_time=0.208, grad_norm=78.145, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.370e-05, train_time=1.291 +[gpua006:0/64] 2024-02-14 07:17:33,871 (trainer:756) INFO: 40epoch:train:6601-6700batch: iter_time=8.762e-05, forward_time=0.282, loss_ctc=84.153, loss_interctc_layer6=87.636, loss_interctc_layer12=72.130, loss_interctc_layer15=66.105, loss_interctc_layer21=87.321, loss=79.469, backward_time=0.232, grad_norm=92.924, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.143, optim0_lr0=6.370e-05, train_time=1.258 +[gpua006:0/64] 2024-02-14 07:19:54,497 (trainer:756) INFO: 40epoch:train:6701-6800batch: iter_time=8.521e-05, forward_time=0.143, loss_ctc=83.341, loss_interctc_layer6=89.343, loss_interctc_layer12=74.164, loss_interctc_layer15=68.140, loss_interctc_layer21=86.262, loss=80.250, backward_time=0.208, grad_norm=68.789, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.369e-05, train_time=1.405 +[gpua006:0/64] 2024-02-14 07:22:16,467 (trainer:756) INFO: 40epoch:train:6801-6900batch: 
iter_time=8.510e-05, forward_time=0.143, loss_ctc=82.467, loss_interctc_layer6=82.676, loss_interctc_layer12=68.402, loss_interctc_layer15=62.659, loss_interctc_layer21=85.679, loss=76.377, backward_time=0.207, grad_norm=70.690, clip=100.000, loss_scale=2.312e+31, optim_step_time=0.138, optim0_lr0=6.369e-05, train_time=1.420 +[gpua006:0/64] 2024-02-14 07:24:18,328 (trainer:756) INFO: 40epoch:train:6901-7000batch: iter_time=9.279e-05, forward_time=0.142, loss_ctc=57.625, loss_interctc_layer6=66.437, loss_interctc_layer12=55.330, loss_interctc_layer15=50.875, loss_interctc_layer21=59.632, loss=57.980, backward_time=0.208, grad_norm=60.628, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.368e-05, train_time=1.218 +[gpua006:0/64] 2024-02-14 07:26:04,139 (trainer:756) INFO: 40epoch:train:7001-7100batch: iter_time=8.391e-05, forward_time=0.142, loss_ctc=76.278, loss_interctc_layer6=84.648, loss_interctc_layer12=70.034, loss_interctc_layer15=64.269, loss_interctc_layer21=79.203, loss=74.886, backward_time=0.206, grad_norm=99.387, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.368e-05, train_time=1.058 +[gpua006:0/64] 2024-02-14 07:28:11,747 (trainer:756) INFO: 40epoch:train:7101-7200batch: iter_time=8.453e-05, forward_time=0.143, loss_ctc=95.556, loss_interctc_layer6=93.563, loss_interctc_layer12=76.990, loss_interctc_layer15=70.365, loss_interctc_layer21=99.069, loss=87.108, backward_time=0.206, grad_norm=95.456, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.367e-05, train_time=1.276 +[gpua006:0/64] 2024-02-14 07:30:23,027 (trainer:756) INFO: 40epoch:train:7201-7300batch: iter_time=6.085e-04, forward_time=0.144, loss_ctc=69.582, loss_interctc_layer6=76.351, loss_interctc_layer12=63.345, loss_interctc_layer15=58.291, loss_interctc_layer21=72.009, loss=67.915, backward_time=0.208, grad_norm=73.006, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.367e-05, train_time=1.312 +[gpua006:0/64] 2024-02-14 07:32:40,858 (trainer:756) INFO: 40epoch:train:7301-7400batch: iter_time=9.176e-05, forward_time=0.144, loss_ctc=82.427, loss_interctc_layer6=85.464, loss_interctc_layer12=70.839, loss_interctc_layer15=64.919, loss_interctc_layer21=85.630, loss=77.856, backward_time=0.208, grad_norm=84.593, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.366e-05, train_time=1.378 +[gpua006:0/64] 2024-02-14 07:34:27,701 (trainer:756) INFO: 40epoch:train:7401-7500batch: iter_time=8.268e-05, forward_time=0.143, loss_ctc=97.866, loss_interctc_layer6=95.289, loss_interctc_layer12=78.386, loss_interctc_layer15=71.645, loss_interctc_layer21=101.451, loss=88.927, backward_time=0.207, grad_norm=87.849, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.366e-05, train_time=1.068 +[gpua006:0/64] 2024-02-14 07:34:47,731 (multiple_iter_factory:32) INFO: Building 6th iter-factory... 
+[gpua006:0/64] 2024-02-14 07:35:06,487 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 07:35:09,833 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.9", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.9", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.9", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.9", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 07:35:09,833 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.9, +[gpua006:0/64] 2024-02-14 07:35:09,958 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 07:41:17,688 (trainer:756) INFO: 40epoch:train:7501-7600batch: iter_time=2.976, forward_time=0.189, loss_ctc=75.518, loss_interctc_layer6=82.173, loss_interctc_layer12=67.667, loss_interctc_layer15=61.875, loss_interctc_layer21=78.415, loss=73.130, backward_time=0.218, grad_norm=74.464, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.141, optim0_lr0=6.365e-05, train_time=4.100 +[gpua006:0/64] 2024-02-14 07:42:54,672 (trainer:756) INFO: 40epoch:train:7601-7700batch: iter_time=9.385e-05, forward_time=0.144, loss_ctc=82.737, loss_interctc_layer6=87.843, loss_interctc_layer12=72.598, loss_interctc_layer15=66.535, loss_interctc_layer21=85.895, loss=79.122, backward_time=0.210, grad_norm=89.659, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.364e-05, train_time=0.968 +[gpua006:0/64] 2024-02-14 07:44:40,955 (trainer:756) INFO: 40epoch:train:7701-7800batch: iter_time=9.247e-05, forward_time=0.145, loss_ctc=66.396, loss_interctc_layer6=75.065, loss_interctc_layer12=61.833, loss_interctc_layer15=56.333, loss_interctc_layer21=69.010, loss=65.727, backward_time=0.210, grad_norm=65.694, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.364e-05, train_time=1.064 +[gpua006:0/64] 2024-02-14 07:46:27,754 (trainer:756) INFO: 40epoch:train:7801-7900batch: iter_time=9.787e-05, forward_time=0.144, loss_ctc=70.151, loss_interctc_layer6=82.250, loss_interctc_layer12=67.324, loss_interctc_layer15=61.417, loss_interctc_layer21=72.811, loss=70.791, backward_time=0.209, grad_norm=117.852, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.363e-05, train_time=1.068 +[gpua006:0/64] 2024-02-14 07:48:21,303 (trainer:756) INFO: 40epoch:train:7901-8000batch: iter_time=1.006e-04, forward_time=0.144, loss_ctc=73.951, loss_interctc_layer6=82.394, loss_interctc_layer12=68.201, loss_interctc_layer15=62.529, loss_interctc_layer21=76.838, loss=72.783, backward_time=0.209, grad_norm=72.363, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.363e-05, train_time=1.135 +[gpua006:0/64] 2024-02-14 07:51:08,588 (trainer:756) INFO: 40epoch:train:8001-8100batch: iter_time=9.806e-05, forward_time=0.144, loss_ctc=87.400, loss_interctc_layer6=91.363, loss_interctc_layer12=75.725, loss_interctc_layer15=69.440, loss_interctc_layer21=90.636, loss=82.913, backward_time=0.207, grad_norm=109.207, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.362e-05, train_time=1.673 +[gpua006:0/64] 2024-02-14 07:53:09,958 (trainer:756) INFO: 40epoch:train:8101-8200batch: 
iter_time=8.939e-05, forward_time=0.144, loss_ctc=63.107, loss_interctc_layer6=72.015, loss_interctc_layer12=59.702, loss_interctc_layer15=54.862, loss_interctc_layer21=65.516, loss=63.040, backward_time=0.206, grad_norm=71.734, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.362e-05, train_time=1.213 +[gpua006:0/64] 2024-02-14 07:55:03,104 (trainer:756) INFO: 40epoch:train:8201-8300batch: iter_time=8.678e-05, forward_time=0.142, loss_ctc=62.771, loss_interctc_layer6=77.401, loss_interctc_layer12=64.004, loss_interctc_layer15=58.641, loss_interctc_layer21=65.044, loss=65.572, backward_time=0.206, grad_norm=71.098, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.361e-05, train_time=1.131 +[gpua006:0/64] 2024-02-14 07:57:33,976 (trainer:756) INFO: 40epoch:train:8301-8400batch: iter_time=8.747e-05, forward_time=0.143, loss_ctc=83.070, loss_interctc_layer6=91.836, loss_interctc_layer12=75.681, loss_interctc_layer15=69.222, loss_interctc_layer21=86.259, loss=81.214, backward_time=0.207, grad_norm=85.569, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.361e-05, train_time=1.508 +[gpua006:0/64] 2024-02-14 07:59:41,689 (trainer:756) INFO: 40epoch:train:8401-8500batch: iter_time=8.741e-05, forward_time=0.142, loss_ctc=72.687, loss_interctc_layer6=80.372, loss_interctc_layer12=66.436, loss_interctc_layer15=60.941, loss_interctc_layer21=75.356, loss=71.158, backward_time=0.206, grad_norm=78.498, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.360e-05, train_time=1.277 +[gpua006:0/64] 2024-02-14 08:01:46,974 (trainer:756) INFO: 40epoch:train:8501-8600batch: iter_time=9.231e-05, forward_time=0.168, loss_ctc=69.321, loss_interctc_layer6=71.947, loss_interctc_layer12=59.398, loss_interctc_layer15=54.460, loss_interctc_layer21=72.044, loss=65.434, backward_time=0.257, grad_norm=67.575, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.142, optim0_lr0=6.360e-05, train_time=1.252 +[gpua006:0/64] 2024-02-14 08:03:35,398 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-14 08:03:48,251 (trainer:756) INFO: 40epoch:train:8601-8700batch: iter_time=8.581e-05, forward_time=0.165, loss_ctc=95.046, loss_interctc_layer6=97.047, loss_interctc_layer12=80.096, loss_interctc_layer15=73.119, loss_interctc_layer21=98.819, loss=88.825, backward_time=0.220, grad_norm=94.286, clip=100.000, loss_scale=3.811e+31, optim_step_time=0.141, optim0_lr0=6.359e-05, train_time=1.214 +[gpua006:0/64] 2024-02-14 08:04:59,688 (multiple_iter_factory:32) INFO: Building 7th iter-factory... 
+[gpua006:0/64] 2024-02-14 08:05:18,500 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 08:05:21,947 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.11", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.11", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.11", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.11", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 08:05:21,947 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.11, +[gpua006:0/64] 2024-02-14 08:05:21,951 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 08:10:21,316 (trainer:756) INFO: 40epoch:train:8701-8800batch: iter_time=2.714, forward_time=0.144, loss_ctc=82.798, loss_interctc_layer6=90.038, loss_interctc_layer12=74.582, loss_interctc_layer15=68.391, loss_interctc_layer21=85.825, loss=80.327, backward_time=0.208, grad_norm=82.757, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.359e-05, train_time=3.930 +[gpua006:0/64] 2024-02-14 08:12:12,644 (trainer:756) INFO: 40epoch:train:8801-8900batch: iter_time=9.884e-05, forward_time=0.144, loss_ctc=73.281, loss_interctc_layer6=79.857, loss_interctc_layer12=65.988, loss_interctc_layer15=60.469, loss_interctc_layer21=76.099, loss=71.139, backward_time=0.208, grad_norm=99.849, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.358e-05, train_time=1.113 +[gpua006:0/64] 2024-02-14 08:14:09,386 (trainer:756) INFO: 40epoch:train:8901-9000batch: iter_time=9.620e-05, forward_time=0.143, loss_ctc=76.076, loss_interctc_layer6=79.643, loss_interctc_layer12=65.426, loss_interctc_layer15=59.649, loss_interctc_layer21=78.882, loss=71.935, backward_time=0.209, grad_norm=81.684, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.357e-05, train_time=1.167 +[gpua006:0/64] 2024-02-14 08:15:56,972 (trainer:756) INFO: 40epoch:train:9001-9100batch: iter_time=9.828e-05, forward_time=0.142, loss_ctc=63.080, loss_interctc_layer6=76.243, loss_interctc_layer12=62.749, loss_interctc_layer15=57.405, loss_interctc_layer21=65.442, loss=64.984, backward_time=0.206, grad_norm=77.409, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.357e-05, train_time=1.076 +[gpua006:0/64] 2024-02-14 08:17:46,234 (trainer:756) INFO: 40epoch:train:9101-9200batch: iter_time=1.044e-04, forward_time=0.143, loss_ctc=76.629, loss_interctc_layer6=87.009, loss_interctc_layer12=71.382, loss_interctc_layer15=65.255, loss_interctc_layer21=79.371, loss=75.929, backward_time=0.207, grad_norm=76.060, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.356e-05, train_time=1.092 +[gpua006:0/64] 2024-02-14 08:19:16,120 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
+[gpua006:0/64] 2024-02-14 08:20:19,072 (trainer:756) INFO: 40epoch:train:9201-9300batch: iter_time=9.615e-05, forward_time=0.143, loss_ctc=79.770, loss_interctc_layer6=89.294, loss_interctc_layer12=74.153, loss_interctc_layer15=68.075, loss_interctc_layer21=82.658, loss=78.790, backward_time=0.206, grad_norm=77.113, clip=100.000, loss_scale=1.516e+31, optim_step_time=0.138, optim0_lr0=6.356e-05, train_time=1.528 +[gpua006:0/64] 2024-02-14 08:23:06,720 (trainer:756) INFO: 40epoch:train:9301-9400batch: iter_time=1.001e-04, forward_time=0.214, loss_ctc=78.866, loss_interctc_layer6=82.957, loss_interctc_layer12=68.495, loss_interctc_layer15=62.782, loss_interctc_layer21=81.924, loss=75.005, backward_time=0.216, grad_norm=75.968, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.355e-05, train_time=1.676 +[gpua006:0/64] 2024-02-14 08:25:00,170 (trainer:756) INFO: 40epoch:train:9401-9500batch: iter_time=1.010e-04, forward_time=0.170, loss_ctc=54.768, loss_interctc_layer6=67.077, loss_interctc_layer12=55.918, loss_interctc_layer15=51.400, loss_interctc_layer21=56.645, loss=57.161, backward_time=0.232, grad_norm=95.872, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.355e-05, train_time=1.134 +[gpua006:0/64] 2024-02-14 08:26:37,443 (trainer:756) INFO: 40epoch:train:9501-9600batch: iter_time=9.294e-05, forward_time=0.142, loss_ctc=70.577, loss_interctc_layer6=84.263, loss_interctc_layer12=69.620, loss_interctc_layer15=63.965, loss_interctc_layer21=73.357, loss=72.356, backward_time=0.207, grad_norm=123.685, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.354e-05, train_time=0.973 +[gpua006:0/64] 2024-02-14 08:28:34,758 (trainer:756) INFO: 40epoch:train:9601-9700batch: iter_time=9.304e-05, forward_time=0.144, loss_ctc=92.168, loss_interctc_layer6=93.111, loss_interctc_layer12=76.519, loss_interctc_layer15=69.907, loss_interctc_layer21=95.760, loss=85.493, backward_time=0.208, grad_norm=94.024, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.354e-05, train_time=1.172 +[gpua006:0/64] 2024-02-14 08:30:42,749 (trainer:756) INFO: 40epoch:train:9701-9800batch: iter_time=9.129e-05, forward_time=0.142, loss_ctc=66.401, loss_interctc_layer6=76.212, loss_interctc_layer12=63.257, loss_interctc_layer15=58.193, loss_interctc_layer21=68.729, loss=66.558, backward_time=0.207, grad_norm=89.601, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.353e-05, train_time=1.280 +[gpua006:0/64] 2024-02-14 08:33:04,553 (trainer:756) INFO: 40epoch:train:9801-9900batch: iter_time=9.340e-05, forward_time=0.144, loss_ctc=74.588, loss_interctc_layer6=85.064, loss_interctc_layer12=70.448, loss_interctc_layer15=64.522, loss_interctc_layer21=77.571, loss=74.439, backward_time=0.207, grad_norm=85.823, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.353e-05, train_time=1.418 +[gpua006:0/64] 2024-02-14 08:34:53,682 (trainer:756) INFO: 40epoch:train:9901-10000batch: iter_time=8.625e-05, forward_time=0.142, loss_ctc=92.853, loss_interctc_layer6=94.351, loss_interctc_layer12=77.793, loss_interctc_layer15=71.165, loss_interctc_layer21=96.239, loss=86.480, backward_time=0.207, grad_norm=112.654, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.352e-05, train_time=1.091 +[gpua006:0/64] 2024-02-14 08:35:13,713 (multiple_iter_factory:32) INFO: Building 8th iter-factory... 
+[gpua006:0/64] 2024-02-14 08:35:32,365 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 08:35:36,055 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.10", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.10", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.10", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.10", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 08:35:36,055 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.10, +[gpua006:0/64] 2024-02-14 08:35:36,059 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 08:41:37,091 (trainer:756) INFO: 40epoch:train:10001-10100batch: iter_time=2.913, forward_time=0.174, loss_ctc=79.790, loss_interctc_layer6=81.022, loss_interctc_layer12=66.645, loss_interctc_layer15=60.895, loss_interctc_layer21=83.125, loss=74.295, backward_time=0.216, grad_norm=73.665, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.352e-05, train_time=4.034 +[gpua006:0/64] 2024-02-14 08:43:32,518 (trainer:756) INFO: 40epoch:train:10101-10200batch: iter_time=9.255e-05, forward_time=0.143, loss_ctc=86.426, loss_interctc_layer6=87.534, loss_interctc_layer12=72.274, loss_interctc_layer15=66.196, loss_interctc_layer21=89.560, loss=80.398, backward_time=0.208, grad_norm=81.188, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.351e-05, train_time=1.154 +[gpua006:0/64] 2024-02-14 08:45:36,082 (trainer:756) INFO: 40epoch:train:10201-10300batch: iter_time=8.970e-05, forward_time=0.190, loss_ctc=69.367, loss_interctc_layer6=75.328, loss_interctc_layer12=61.851, loss_interctc_layer15=56.412, loss_interctc_layer21=71.994, loss=66.990, backward_time=0.215, grad_norm=75.369, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.351e-05, train_time=1.235 +[gpua006:0/64] 2024-02-14 08:48:00,991 (trainer:756) INFO: 40epoch:train:10301-10400batch: iter_time=8.998e-05, forward_time=0.143, loss_ctc=75.118, loss_interctc_layer6=82.144, loss_interctc_layer12=67.294, loss_interctc_layer15=61.361, loss_interctc_layer21=77.975, loss=72.779, backward_time=0.207, grad_norm=76.488, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.350e-05, train_time=1.449 +[gpua006:0/64] 2024-02-14 08:50:16,782 (trainer:756) INFO: 40epoch:train:10401-10500batch: iter_time=9.612e-05, forward_time=0.144, loss_ctc=79.673, loss_interctc_layer6=82.327, loss_interctc_layer12=67.943, loss_interctc_layer15=62.213, loss_interctc_layer21=82.767, loss=74.984, backward_time=0.208, grad_norm=88.803, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.349e-05, train_time=1.358 +[gpua006:0/64] 2024-02-14 08:52:22,125 (trainer:756) INFO: 40epoch:train:10501-10600batch: iter_time=9.450e-05, forward_time=0.144, loss_ctc=90.317, loss_interctc_layer6=91.311, loss_interctc_layer12=75.560, loss_interctc_layer15=69.323, loss_interctc_layer21=93.632, loss=84.029, backward_time=0.207, grad_norm=82.283, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.349e-05, train_time=1.253 +[gpua006:0/64] 2024-02-14 08:54:30,150 (trainer:756) INFO: 
40epoch:train:10601-10700batch: iter_time=9.688e-05, forward_time=0.141, loss_ctc=67.246, loss_interctc_layer6=72.032, loss_interctc_layer12=59.665, loss_interctc_layer15=54.796, loss_interctc_layer21=69.773, loss=64.702, backward_time=0.206, grad_norm=67.448, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.348e-05, train_time=1.280 +[gpua006:0/64] 2024-02-14 08:56:24,991 (trainer:756) INFO: 40epoch:train:10701-10800batch: iter_time=9.798e-05, forward_time=0.143, loss_ctc=66.628, loss_interctc_layer6=77.473, loss_interctc_layer12=64.097, loss_interctc_layer15=58.820, loss_interctc_layer21=69.084, loss=67.220, backward_time=0.207, grad_norm=80.103, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.348e-05, train_time=1.148 +[gpua006:0/64] 2024-02-14 08:59:41,606 (trainer:756) INFO: 40epoch:train:10801-10900batch: iter_time=9.925e-05, forward_time=0.143, loss_ctc=86.912, loss_interctc_layer6=91.410, loss_interctc_layer12=75.046, loss_interctc_layer15=68.599, loss_interctc_layer21=90.298, loss=82.453, backward_time=0.206, grad_norm=79.098, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.347e-05, train_time=1.966 +[gpua006:0/64] 2024-02-14 09:01:46,017 (trainer:756) INFO: 40epoch:train:10901-11000batch: iter_time=3.067e-04, forward_time=0.187, loss_ctc=75.244, loss_interctc_layer6=80.605, loss_interctc_layer12=66.788, loss_interctc_layer15=61.325, loss_interctc_layer21=77.947, loss=72.382, backward_time=0.232, grad_norm=65.873, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.141, optim0_lr0=6.347e-05, train_time=1.244 +[gpua006:0/64] 2024-02-14 09:03:45,265 (trainer:756) INFO: 40epoch:train:11001-11100batch: iter_time=9.463e-05, forward_time=0.143, loss_ctc=73.581, loss_interctc_layer6=71.631, loss_interctc_layer12=59.059, loss_interctc_layer15=54.089, loss_interctc_layer21=76.571, loss=66.986, backward_time=0.208, grad_norm=64.431, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.346e-05, train_time=1.192 +[gpua006:0/64] 2024-02-14 09:06:03,647 (trainer:756) INFO: 40epoch:train:11101-11200batch: iter_time=1.004e-04, forward_time=0.144, loss_ctc=98.628, loss_interctc_layer6=96.844, loss_interctc_layer12=79.633, loss_interctc_layer15=72.819, loss_interctc_layer21=102.634, loss=90.111, backward_time=0.209, grad_norm=93.575, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.346e-05, train_time=1.384 +[gpua006:0/64] 2024-02-14 09:08:02,602 (multiple_iter_factory:32) INFO: Building 9th iter-factory... 
+[gpua006:0/64] 2024-02-14 09:08:21,344 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 09:08:25,025 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.6", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.6", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.6", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.6", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 09:08:25,025 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.6, +[gpua006:0/64] 2024-02-14 09:08:25,028 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 09:13:45,131 (trainer:756) INFO: 40epoch:train:11201-11300batch: iter_time=2.963, forward_time=0.162, loss_ctc=89.210, loss_interctc_layer6=89.874, loss_interctc_layer12=74.347, loss_interctc_layer15=68.075, loss_interctc_layer21=92.499, loss=82.801, backward_time=0.210, grad_norm=79.133, clip=100.000, loss_scale=1.521e+31, optim_step_time=0.138, optim0_lr0=6.345e-05, train_time=4.614 +[gpua006:0/64] 2024-02-14 09:15:19,242 (trainer:756) INFO: 40epoch:train:11301-11400batch: iter_time=8.386e-05, forward_time=0.143, loss_ctc=77.590, loss_interctc_layer6=79.170, loss_interctc_layer12=65.322, loss_interctc_layer15=59.793, loss_interctc_layer21=80.657, loss=72.506, backward_time=0.208, grad_norm=85.930, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.345e-05, train_time=0.941 +[gpua006:0/64] 2024-02-14 09:17:13,093 (trainer:756) INFO: 40epoch:train:11401-11500batch: iter_time=9.496e-05, forward_time=0.144, loss_ctc=78.212, loss_interctc_layer6=79.737, loss_interctc_layer12=65.570, loss_interctc_layer15=59.861, loss_interctc_layer21=81.057, loss=72.887, backward_time=0.209, grad_norm=74.782, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.344e-05, train_time=1.138 +[gpua006:0/64] 2024-02-14 09:19:23,387 (trainer:756) INFO: 40epoch:train:11501-11600batch: iter_time=1.052e-04, forward_time=0.144, loss_ctc=66.463, loss_interctc_layer6=76.198, loss_interctc_layer12=62.662, loss_interctc_layer15=57.268, loss_interctc_layer21=69.026, loss=66.323, backward_time=0.207, grad_norm=143.576, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.344e-05, train_time=1.303 +[gpua006:0/64] 2024-02-14 09:22:07,810 (trainer:756) INFO: 40epoch:train:11601-11700batch: iter_time=1.037e-04, forward_time=0.144, loss_ctc=83.112, loss_interctc_layer6=86.892, loss_interctc_layer12=71.519, loss_interctc_layer15=65.307, loss_interctc_layer21=86.230, loss=78.612, backward_time=0.205, grad_norm=147.324, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.343e-05, train_time=1.644 +[gpua006:0/64] 2024-02-14 09:24:33,805 (trainer:756) INFO: 40epoch:train:11701-11800batch: iter_time=1.015e-04, forward_time=0.194, loss_ctc=83.364, loss_interctc_layer6=89.280, loss_interctc_layer12=73.927, loss_interctc_layer15=67.830, loss_interctc_layer21=86.353, loss=80.151, backward_time=0.223, grad_norm=81.287, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.343e-05, train_time=1.460 +[gpua006:0/64] 2024-02-14 09:26:43,892 (trainer:756) INFO: 
40epoch:train:11801-11900batch: iter_time=9.491e-05, forward_time=0.144, loss_ctc=82.574, loss_interctc_layer6=82.231, loss_interctc_layer12=67.869, loss_interctc_layer15=62.143, loss_interctc_layer21=85.820, loss=76.127, backward_time=0.207, grad_norm=72.830, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.342e-05, train_time=1.301 +[gpua006:0/64] 2024-02-14 09:28:53,179 (trainer:756) INFO: 40epoch:train:11901-12000batch: iter_time=9.894e-05, forward_time=0.144, loss_ctc=58.314, loss_interctc_layer6=66.821, loss_interctc_layer12=55.723, loss_interctc_layer15=51.269, loss_interctc_layer21=60.310, loss=58.487, backward_time=0.208, grad_norm=91.415, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.341e-05, train_time=1.293 +[gpua006:0/64] 2024-02-14 09:31:12,223 (trainer:756) INFO: 40epoch:train:12001-12100batch: iter_time=1.042e-04, forward_time=0.144, loss_ctc=75.225, loss_interctc_layer6=83.912, loss_interctc_layer12=69.253, loss_interctc_layer15=63.462, loss_interctc_layer21=78.173, loss=74.005, backward_time=0.208, grad_norm=74.436, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.341e-05, train_time=1.390 +[gpua006:0/64] 2024-02-14 09:33:31,160 (trainer:756) INFO: 40epoch:train:12101-12200batch: iter_time=1.041e-04, forward_time=0.173, loss_ctc=94.835, loss_interctc_layer6=92.398, loss_interctc_layer12=75.922, loss_interctc_layer15=69.374, loss_interctc_layer21=98.325, loss=86.171, backward_time=0.221, grad_norm=94.288, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.141, optim0_lr0=6.340e-05, train_time=1.389 +[gpua006:0/64] 2024-02-14 09:36:10,282 (trainer:756) INFO: 40epoch:train:12201-12300batch: iter_time=1.046e-04, forward_time=0.150, loss_ctc=69.703, loss_interctc_layer6=76.132, loss_interctc_layer12=63.306, loss_interctc_layer15=58.150, loss_interctc_layer21=72.173, loss=67.893, backward_time=0.219, grad_norm=67.276, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.340e-05, train_time=1.591 +[gpua006:0/64] 2024-02-14 09:38:03,980 (trainer:756) INFO: 40epoch:train:12301-12400batch: iter_time=1.037e-04, forward_time=0.145, loss_ctc=80.246, loss_interctc_layer6=84.126, loss_interctc_layer12=69.582, loss_interctc_layer15=63.699, loss_interctc_layer21=83.494, loss=76.230, backward_time=0.209, grad_norm=74.566, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.339e-05, train_time=1.137 +[gpua006:0/64] 2024-02-14 09:39:48,078 (trainer:756) INFO: 40epoch:train:12401-12500batch: iter_time=1.010e-04, forward_time=0.145, loss_ctc=98.637, loss_interctc_layer6=94.892, loss_interctc_layer12=78.260, loss_interctc_layer15=71.445, loss_interctc_layer21=102.263, loss=89.100, backward_time=0.209, grad_norm=89.436, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.339e-05, train_time=1.041 +[gpua006:0/64] 2024-02-14 09:40:08,214 (multiple_iter_factory:32) INFO: Building 10th iter-factory... 
+[gpua006:0/64] 2024-02-14 09:40:26,802 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 09:40:30,199 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.2", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.2", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.2", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.2", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 09:40:30,199 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.2, +[gpua006:0/64] 2024-02-14 09:40:30,204 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 09:46:49,446 (trainer:756) INFO: 40epoch:train:12501-12600batch: iter_time=2.912, forward_time=0.168, loss_ctc=79.568, loss_interctc_layer6=81.523, loss_interctc_layer12=67.103, loss_interctc_layer15=61.341, loss_interctc_layer21=82.660, loss=74.439, backward_time=0.216, grad_norm=87.780, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.338e-05, train_time=4.214 +[gpua006:0/64] 2024-02-14 09:48:40,343 (trainer:756) INFO: 40epoch:train:12601-12700batch: iter_time=9.075e-05, forward_time=0.143, loss_ctc=85.146, loss_interctc_layer6=87.256, loss_interctc_layer12=71.964, loss_interctc_layer15=65.896, loss_interctc_layer21=88.296, loss=79.712, backward_time=0.208, grad_norm=82.148, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.338e-05, train_time=1.109 +[gpua006:0/64] 2024-02-14 09:50:31,693 (trainer:756) INFO: 40epoch:train:12701-12800batch: iter_time=9.951e-05, forward_time=0.143, loss_ctc=68.531, loss_interctc_layer6=74.603, loss_interctc_layer12=61.440, loss_interctc_layer15=56.048, loss_interctc_layer21=71.076, loss=66.340, backward_time=0.207, grad_norm=68.968, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.337e-05, train_time=1.113 +[gpua006:0/64] 2024-02-14 09:52:42,600 (trainer:756) INFO: 40epoch:train:12801-12900batch: iter_time=8.733e-05, forward_time=0.180, loss_ctc=75.141, loss_interctc_layer6=82.029, loss_interctc_layer12=67.107, loss_interctc_layer15=61.162, loss_interctc_layer21=77.927, loss=72.673, backward_time=0.215, grad_norm=93.004, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.337e-05, train_time=1.309 +[gpua006:0/64] 2024-02-14 09:54:54,441 (trainer:756) INFO: 40epoch:train:12901-13000batch: iter_time=8.331e-05, forward_time=0.149, loss_ctc=79.046, loss_interctc_layer6=82.573, loss_interctc_layer12=68.391, loss_interctc_layer15=62.620, loss_interctc_layer21=82.048, loss=74.936, backward_time=0.214, grad_norm=86.703, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.336e-05, train_time=1.318 +[gpua006:0/64] 2024-02-14 09:57:23,142 (trainer:756) INFO: 40epoch:train:13001-13100batch: iter_time=8.225e-05, forward_time=0.144, loss_ctc=89.884, loss_interctc_layer6=91.225, loss_interctc_layer12=75.563, loss_interctc_layer15=69.259, loss_interctc_layer21=93.296, loss=83.846, backward_time=0.208, grad_norm=76.269, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.336e-05, train_time=1.487 +[gpua006:0/64] 2024-02-14 10:00:02,172 (trainer:756) INFO: 
40epoch:train:13101-13200batch: iter_time=8.395e-05, forward_time=0.142, loss_ctc=67.541, loss_interctc_layer6=71.949, loss_interctc_layer12=59.758, loss_interctc_layer15=54.935, loss_interctc_layer21=69.928, loss=64.822, backward_time=0.207, grad_norm=74.495, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.335e-05, train_time=1.590 +[gpua006:0/64] 2024-02-14 10:02:21,894 (trainer:756) INFO: 40epoch:train:13201-13300batch: iter_time=7.917e-05, forward_time=0.142, loss_ctc=66.150, loss_interctc_layer6=77.219, loss_interctc_layer12=63.856, loss_interctc_layer15=58.577, loss_interctc_layer21=68.510, loss=66.862, backward_time=0.208, grad_norm=86.707, clip=100.000, loss_scale=3.042e+31, optim_step_time=0.138, optim0_lr0=6.335e-05, train_time=1.397 +[gpua006:0/64] 2024-02-14 10:04:26,346 (trainer:756) INFO: 40epoch:train:13301-13400batch: iter_time=8.537e-05, forward_time=0.154, loss_ctc=86.698, loss_interctc_layer6=91.207, loss_interctc_layer12=75.003, loss_interctc_layer15=68.424, loss_interctc_layer21=90.077, loss=82.282, backward_time=0.240, grad_norm=110.407, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.334e-05, train_time=1.244 +[gpua006:0/64] 2024-02-14 10:06:52,206 (trainer:756) INFO: 40epoch:train:13401-13500batch: iter_time=8.414e-05, forward_time=0.148, loss_ctc=75.422, loss_interctc_layer6=80.440, loss_interctc_layer12=66.760, loss_interctc_layer15=61.380, loss_interctc_layer21=78.084, loss=72.417, backward_time=0.209, grad_norm=81.925, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.334e-05, train_time=1.459 +[gpua006:0/64] 2024-02-14 10:08:46,301 (trainer:756) INFO: 40epoch:train:13501-13600batch: iter_time=8.459e-05, forward_time=0.142, loss_ctc=73.434, loss_interctc_layer6=71.727, loss_interctc_layer12=59.238, loss_interctc_layer15=54.158, loss_interctc_layer21=76.230, loss=66.957, backward_time=0.208, grad_norm=87.265, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.333e-05, train_time=1.141 +[gpua006:0/64] 2024-02-14 10:11:12,706 (trainer:756) INFO: 40epoch:train:13601-13700batch: iter_time=8.282e-05, forward_time=0.143, loss_ctc=98.951, loss_interctc_layer6=97.004, loss_interctc_layer12=79.943, loss_interctc_layer15=73.089, loss_interctc_layer21=102.950, loss=90.388, backward_time=0.208, grad_norm=85.739, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.332e-05, train_time=1.464 +[gpua006:0/64] 2024-02-14 10:12:35,419 (multiple_iter_factory:32) INFO: Building 11th iter-factory... 
+[gpua006:0/64] 2024-02-14 10:12:54,417 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 10:12:57,843 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.5", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.5", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.5", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.5", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 10:12:57,843 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.5, +[gpua006:0/64] 2024-02-14 10:12:57,851 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 10:18:02,733 (trainer:756) INFO: 40epoch:train:13701-13800batch: iter_time=2.803, forward_time=0.170, loss_ctc=86.257, loss_interctc_layer6=89.516, loss_interctc_layer12=73.868, loss_interctc_layer15=67.474, loss_interctc_layer21=89.652, loss=81.353, backward_time=0.213, grad_norm=79.858, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.332e-05, train_time=4.099 +[gpua006:0/64] 2024-02-14 10:19:36,954 (trainer:756) INFO: 40epoch:train:13801-13900batch: iter_time=9.152e-05, forward_time=0.143, loss_ctc=72.417, loss_interctc_layer6=79.045, loss_interctc_layer12=65.253, loss_interctc_layer15=59.716, loss_interctc_layer21=75.308, loss=70.348, backward_time=0.208, grad_norm=73.801, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.331e-05, train_time=0.943 +[gpua006:0/64] 2024-02-14 10:21:44,818 (trainer:756) INFO: 40epoch:train:13901-14000batch: iter_time=8.867e-05, forward_time=0.142, loss_ctc=75.848, loss_interctc_layer6=79.667, loss_interctc_layer12=65.450, loss_interctc_layer15=59.660, loss_interctc_layer21=78.631, loss=71.851, backward_time=0.207, grad_norm=83.151, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.331e-05, train_time=1.278 +[gpua006:0/64] 2024-02-14 10:23:47,522 (trainer:756) INFO: 40epoch:train:14001-14100batch: iter_time=9.321e-05, forward_time=0.142, loss_ctc=63.522, loss_interctc_layer6=75.841, loss_interctc_layer12=62.337, loss_interctc_layer15=57.025, loss_interctc_layer21=65.855, loss=64.916, backward_time=0.207, grad_norm=91.374, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.330e-05, train_time=1.227 +[gpua006:0/64] 2024-02-14 10:25:47,501 (trainer:756) INFO: 40epoch:train:14101-14200batch: iter_time=9.594e-05, forward_time=0.143, loss_ctc=77.027, loss_interctc_layer6=86.440, loss_interctc_layer12=71.074, loss_interctc_layer15=64.899, loss_interctc_layer21=79.881, loss=75.864, backward_time=0.207, grad_norm=85.741, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.330e-05, train_time=1.200 +[gpua006:0/64] 2024-02-14 10:28:47,332 (trainer:756) INFO: 40epoch:train:14201-14300batch: iter_time=9.389e-05, forward_time=0.170, loss_ctc=80.121, loss_interctc_layer6=88.864, loss_interctc_layer12=73.719, loss_interctc_layer15=67.615, loss_interctc_layer21=83.093, loss=78.683, backward_time=0.248, grad_norm=76.054, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.329e-05, train_time=1.798 +[gpua006:0/64] 2024-02-14 10:31:53,183 (trainer:756) INFO: 
40epoch:train:14301-14400batch: iter_time=1.024e-04, forward_time=0.143, loss_ctc=77.836, loss_interctc_layer6=81.341, loss_interctc_layer12=67.112, loss_interctc_layer15=61.442, loss_interctc_layer21=80.970, loss=73.740, backward_time=0.208, grad_norm=73.146, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.329e-05, train_time=1.859 +[gpua006:0/64] 2024-02-14 10:34:10,348 (trainer:756) INFO: 40epoch:train:14401-14500batch: iter_time=9.839e-05, forward_time=0.143, loss_ctc=54.230, loss_interctc_layer6=66.151, loss_interctc_layer12=55.049, loss_interctc_layer15=50.670, loss_interctc_layer21=56.153, loss=56.450, backward_time=0.206, grad_norm=104.695, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.328e-05, train_time=1.371 +[gpua006:0/64] 2024-02-14 10:36:34,899 (trainer:756) INFO: 40epoch:train:14501-14600batch: iter_time=9.439e-05, forward_time=0.144, loss_ctc=70.136, loss_interctc_layer6=83.419, loss_interctc_layer12=68.748, loss_interctc_layer15=62.928, loss_interctc_layer21=73.094, loss=71.665, backward_time=0.207, grad_norm=76.277, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.328e-05, train_time=1.445 +[gpua006:0/64] 2024-02-14 10:38:31,301 (trainer:756) INFO: 40epoch:train:14601-14700batch: iter_time=1.002e-04, forward_time=0.150, loss_ctc=92.229, loss_interctc_layer6=92.143, loss_interctc_layer12=75.714, loss_interctc_layer15=69.167, loss_interctc_layer21=95.881, loss=85.027, backward_time=0.216, grad_norm=73.105, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.327e-05, train_time=1.164 +[gpua006:0/64] 2024-02-14 10:40:49,485 (trainer:756) INFO: 40epoch:train:14701-14800batch: iter_time=1.011e-04, forward_time=0.160, loss_ctc=66.348, loss_interctc_layer6=75.972, loss_interctc_layer12=63.081, loss_interctc_layer15=58.033, loss_interctc_layer21=68.702, loss=66.427, backward_time=0.229, grad_norm=75.625, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.150, optim0_lr0=6.327e-05, train_time=1.381 +[gpua006:0/64] 2024-02-14 10:43:06,579 (trainer:756) INFO: 40epoch:train:14801-14900batch: iter_time=9.963e-05, forward_time=0.142, loss_ctc=74.845, loss_interctc_layer6=84.434, loss_interctc_layer12=69.804, loss_interctc_layer15=63.797, loss_interctc_layer21=77.777, loss=74.132, backward_time=0.206, grad_norm=77.110, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.326e-05, train_time=1.371 +[gpua006:0/64] 2024-02-14 10:45:39,366 (trainer:756) INFO: 40epoch:train:14901-15000batch: iter_time=9.416e-05, forward_time=0.143, loss_ctc=93.142, loss_interctc_layer6=94.184, loss_interctc_layer12=77.553, loss_interctc_layer15=70.792, loss_interctc_layer21=96.635, loss=86.461, backward_time=0.207, grad_norm=83.726, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.326e-05, train_time=1.528 +[gpua006:0/64] 2024-02-14 11:18:28,909 (trainer:355) INFO: 40epoch results: [train] iter_time=0.226, forward_time=0.152, loss_ctc=77.822, loss_interctc_layer6=83.312, loss_interctc_layer12=68.868, loss_interctc_layer15=63.091, loss_interctc_layer21=80.754, loss=74.769, backward_time=0.212, grad_norm=85.675, clip=100.000, loss_scale=2.601e+31, optim_step_time=0.139, optim0_lr0=6.365e-05, train_time=1.480, time=6 hours, 10 minutes and 34.17 seconds, total_count=600000, gpu_max_cached_mem_GB=33.436, [valid] loss_ctc=40.502, cer_ctc=0.188, loss_interctc_layer6=45.827, cer_interctc_layer6=0.205, loss_interctc_layer12=33.369, cer_interctc_layer12=0.139, 
loss_interctc_layer15=29.220, cer_interctc_layer15=0.115, loss_interctc_layer21=42.853, cer_interctc_layer21=0.200, loss=38.354, time=32 minutes and 25.5 seconds, total_count=186840, gpu_max_cached_mem_GB=33.436 +[gpua006:0/64] 2024-02-14 11:18:49,215 (trainer:410) INFO: The best model has been updated: valid.cer_ctc, valid.total_count +[gpua006:0/64] 2024-02-14 11:18:49,273 (average_nbest_models:69) INFO: Averaging 5best models: criterion="valid.cer_ctc": exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/valid.cer_ctc.ave_5best.till40epoch.pth +[gpua006:0/64] 2024-02-14 11:19:38,202 (average_nbest_models:69) INFO: Averaging 5best models: criterion="valid.loss_ctc": exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/valid.loss_ctc.ave_5best.till40epoch.pth +[gpua006:0/64] 2024-02-14 11:20:06,662 (average_nbest_models:69) INFO: Averaging 5best models: criterion="valid.total_count": exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/valid.total_count.ave_5best.till40epoch.pth +[gpua006:0/64] 2024-02-14 11:20:28,917 (trainer:289) INFO: 41/45epoch started. Estimated time to finish: 1 day, 11 hours and 18 minutes +[gpua006:0/64] 2024-02-14 11:20:29,842 (multiple_iter_factory:32) INFO: Building 0th iter-factory... +[gpua006:0/64] 2024-02-14 11:20:48,581 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 11:20:52,228 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.6", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.6", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.6", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.6", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 11:20:52,228 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.6, +[gpua006:0/64] 2024-02-14 11:20:52,285 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 11:28:06,485 (trainer:756) INFO: 41epoch:train:1-100batch: iter_time=3.550, forward_time=0.188, loss_ctc=63.944, loss_interctc_layer6=69.443, loss_interctc_layer12=56.983, loss_interctc_layer15=52.043, loss_interctc_layer21=66.350, loss=61.753, backward_time=0.220, grad_norm=103.425, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.325e-05, train_time=4.566 +[gpua006:0/64] 2024-02-14 11:29:41,646 (trainer:756) INFO: 41epoch:train:101-200batch: iter_time=9.499e-05, forward_time=0.141, loss_ctc=90.177, loss_interctc_layer6=82.215, loss_interctc_layer12=68.708, loss_interctc_layer15=63.250, loss_interctc_layer21=93.828, loss=79.636, backward_time=0.207, grad_norm=87.014, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.325e-05, train_time=0.952 +[gpua006:0/64] 2024-02-14 11:33:22,524 (trainer:756) INFO: 41epoch:train:201-300batch: iter_time=1.060e-04, forward_time=0.143, loss_ctc=90.810, loss_interctc_layer6=94.176, loss_interctc_layer12=77.748, loss_interctc_layer15=71.213, loss_interctc_layer21=94.049, loss=85.599, backward_time=0.207, grad_norm=90.295, clip=100.000, loss_scale=6.085e+31, optim_step_time=0.138, optim0_lr0=6.324e-05, train_time=2.209 +[gpua006:0/64] 2024-02-14 11:35:16,660 (trainer:756) INFO: 
41epoch:train:301-400batch: iter_time=9.991e-05, forward_time=0.143, loss_ctc=84.456, loss_interctc_layer6=96.097, loss_interctc_layer12=79.601, loss_interctc_layer15=73.047, loss_interctc_layer21=87.421, loss=84.125, backward_time=0.206, grad_norm=106.347, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=6.323e-05, train_time=1.141 +[gpua006:0/64] 2024-02-14 11:38:00,464 (trainer:756) INFO: 41epoch:train:401-500batch: iter_time=1.139e-04, forward_time=0.142, loss_ctc=81.527, loss_interctc_layer6=81.818, loss_interctc_layer12=67.406, loss_interctc_layer15=61.614, loss_interctc_layer21=84.521, loss=75.377, backward_time=0.207, grad_norm=91.441, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.323e-05, train_time=1.638 +[gpua006:0/64] 2024-02-14 11:39:47,442 (trainer:756) INFO: 41epoch:train:501-600batch: iter_time=9.144e-05, forward_time=0.141, loss_ctc=74.888, loss_interctc_layer6=72.851, loss_interctc_layer12=60.795, loss_interctc_layer15=55.716, loss_interctc_layer21=77.629, loss=68.376, backward_time=0.206, grad_norm=78.161, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=6.322e-05, train_time=1.070 +[gpua006:0/64] 2024-02-14 11:43:11,341 (trainer:756) INFO: 41epoch:train:601-700batch: iter_time=9.833e-05, forward_time=0.142, loss_ctc=76.138, loss_interctc_layer6=77.265, loss_interctc_layer12=63.570, loss_interctc_layer15=58.134, loss_interctc_layer21=78.879, loss=70.797, backward_time=0.210, grad_norm=71.625, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=6.322e-05, train_time=2.039 +[gpua006:0/64] 2024-02-14 11:45:28,481 (trainer:756) INFO: 41epoch:train:701-800batch: iter_time=9.463e-05, forward_time=0.142, loss_ctc=81.508, loss_interctc_layer6=85.744, loss_interctc_layer12=71.569, loss_interctc_layer15=66.071, loss_interctc_layer21=84.708, loss=77.920, backward_time=0.205, grad_norm=120.593, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.321e-05, train_time=1.371 +[gpua006:0/64] 2024-02-14 11:47:49,736 (trainer:756) INFO: 41epoch:train:801-900batch: iter_time=1.036e-04, forward_time=0.201, loss_ctc=81.904, loss_interctc_layer6=88.720, loss_interctc_layer12=74.138, loss_interctc_layer15=68.228, loss_interctc_layer21=84.913, loss=79.581, backward_time=0.213, grad_norm=85.087, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.141, optim0_lr0=6.321e-05, train_time=1.412 +[gpua006:0/64] 2024-02-14 11:50:21,142 (trainer:756) INFO: 41epoch:train:901-1000batch: iter_time=2.568e-04, forward_time=0.179, loss_ctc=76.230, loss_interctc_layer6=81.154, loss_interctc_layer12=67.135, loss_interctc_layer15=61.549, loss_interctc_layer21=79.031, loss=73.020, backward_time=0.221, grad_norm=77.320, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.142, optim0_lr0=6.320e-05, train_time=1.515 +[gpua006:0/64] 2024-02-14 11:52:57,943 (trainer:756) INFO: 41epoch:train:1001-1100batch: iter_time=1.087e-04, forward_time=0.193, loss_ctc=66.109, loss_interctc_layer6=73.142, loss_interctc_layer12=60.146, loss_interctc_layer15=54.927, loss_interctc_layer21=68.597, loss=64.584, backward_time=0.259, grad_norm=68.310, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.142, optim0_lr0=6.320e-05, train_time=1.568 +[gpua006:0/64] 2024-02-14 11:55:15,554 (trainer:756) INFO: 41epoch:train:1101-1200batch: iter_time=1.142e-04, forward_time=0.145, loss_ctc=78.862, loss_interctc_layer6=83.287, loss_interctc_layer12=70.080, loss_interctc_layer15=64.986, loss_interctc_layer21=81.445, loss=75.732, 
backward_time=0.207, grad_norm=90.393, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.139, optim0_lr0=6.319e-05, train_time=1.376 +[gpua006:0/64] 2024-02-14 11:56:31,130 (multiple_iter_factory:32) INFO: Building 1th iter-factory... +[gpua006:0/64] 2024-02-14 11:56:49,917 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 11:56:53,372 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.5", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.5", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.5", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.5", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 11:56:53,372 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.5, +[gpua006:0/64] 2024-02-14 11:56:53,384 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 12:02:48,972 (trainer:756) INFO: 41epoch:train:1201-1300batch: iter_time=3.340, forward_time=0.143, loss_ctc=61.775, loss_interctc_layer6=67.923, loss_interctc_layer12=55.870, loss_interctc_layer15=51.097, loss_interctc_layer21=63.976, loss=60.128, backward_time=0.209, grad_norm=70.221, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.319e-05, train_time=4.534 +[gpua006:0/64] 2024-02-14 12:04:26,731 (trainer:756) INFO: 41epoch:train:1301-1400batch: iter_time=9.777e-05, forward_time=0.142, loss_ctc=72.373, loss_interctc_layer6=72.531, loss_interctc_layer12=60.188, loss_interctc_layer15=55.419, loss_interctc_layer21=75.444, loss=67.191, backward_time=0.208, grad_norm=73.009, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=6.318e-05, train_time=0.977 +[gpua006:0/64] 2024-02-14 12:07:00,176 (trainer:756) INFO: 41epoch:train:1401-1500batch: iter_time=1.002e-04, forward_time=0.142, loss_ctc=83.493, loss_interctc_layer6=86.075, loss_interctc_layer12=71.234, loss_interctc_layer15=65.412, loss_interctc_layer21=86.631, loss=78.569, backward_time=0.206, grad_norm=79.820, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=6.318e-05, train_time=1.534 +[gpua006:0/64] 2024-02-14 12:08:33,297 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
+[gpua006:0/64] 2024-02-14 12:09:13,237 (trainer:756) INFO: 41epoch:train:1501-1600batch: iter_time=8.925e-05, forward_time=0.144, loss_ctc=73.429, loss_interctc_layer6=92.693, loss_interctc_layer12=76.269, loss_interctc_layer15=69.699, loss_interctc_layer21=76.058, loss=77.630, backward_time=0.209, grad_norm=84.785, clip=100.000, loss_scale=7.212e+31, optim_step_time=0.138, optim0_lr0=6.317e-05, train_time=1.330 +[gpua006:0/64] 2024-02-14 12:11:27,903 (trainer:756) INFO: 41epoch:train:1601-1700batch: iter_time=8.585e-05, forward_time=0.146, loss_ctc=88.235, loss_interctc_layer6=94.373, loss_interctc_layer12=78.152, loss_interctc_layer15=71.770, loss_interctc_layer21=91.711, loss=84.848, backward_time=0.210, grad_norm=88.426, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.317e-05, train_time=1.347 +[gpua006:0/64] 2024-02-14 12:13:31,743 (trainer:756) INFO: 41epoch:train:1701-1800batch: iter_time=8.110e-05, forward_time=0.144, loss_ctc=75.656, loss_interctc_layer6=81.839, loss_interctc_layer12=67.918, loss_interctc_layer15=62.287, loss_interctc_layer21=78.572, loss=73.254, backward_time=0.209, grad_norm=77.035, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.316e-05, train_time=1.238 +[gpua006:0/64] 2024-02-14 12:14:27,610 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-14 12:15:20,217 (trainer:756) INFO: 41epoch:train:1801-1900batch: iter_time=8.446e-05, forward_time=0.143, loss_ctc=74.274, loss_interctc_layer6=73.382, loss_interctc_layer12=60.160, loss_interctc_layer15=54.804, loss_interctc_layer21=77.052, loss=67.935, backward_time=0.209, grad_norm=85.583, clip=100.000, loss_scale=3.073e+31, optim_step_time=0.138, optim0_lr0=6.316e-05, train_time=1.084 +[gpua006:0/64] 2024-02-14 12:17:53,968 (trainer:756) INFO: 41epoch:train:1901-2000batch: iter_time=8.318e-05, forward_time=0.167, loss_ctc=64.025, loss_interctc_layer6=76.632, loss_interctc_layer12=63.883, loss_interctc_layer15=58.777, loss_interctc_layer21=66.440, loss=65.951, backward_time=0.226, grad_norm=128.449, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.145, optim0_lr0=6.315e-05, train_time=1.538 +[gpua006:0/64] 2024-02-14 12:19:54,289 (trainer:756) INFO: 41epoch:train:2001-2100batch: iter_time=8.540e-05, forward_time=0.164, loss_ctc=72.175, loss_interctc_layer6=84.082, loss_interctc_layer12=69.846, loss_interctc_layer15=64.105, loss_interctc_layer21=74.849, loss=73.012, backward_time=0.210, grad_norm=104.675, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.315e-05, train_time=1.203 +[gpua006:0/64] 2024-02-14 12:22:43,119 (trainer:756) INFO: 41epoch:train:2101-2200batch: iter_time=8.088e-05, forward_time=0.191, loss_ctc=78.980, loss_interctc_layer6=91.801, loss_interctc_layer12=76.319, loss_interctc_layer15=70.126, loss_interctc_layer21=81.774, loss=79.800, backward_time=0.228, grad_norm=92.308, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.314e-05, train_time=1.688 +[gpua006:0/64] 2024-02-14 12:25:29,573 (trainer:756) INFO: 41epoch:train:2201-2300batch: iter_time=8.200e-05, forward_time=0.143, loss_ctc=71.239, loss_interctc_layer6=72.600, loss_interctc_layer12=59.591, loss_interctc_layer15=54.496, loss_interctc_layer21=74.131, loss=66.411, backward_time=0.208, grad_norm=73.784, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.314e-05, train_time=1.664 +[gpua006:0/64] 2024-02-14 12:27:16,616 (trainer:756) INFO: 41epoch:train:2301-2400batch: 
iter_time=8.067e-05, forward_time=0.142, loss_ctc=71.068, loss_interctc_layer6=82.632, loss_interctc_layer12=69.153, loss_interctc_layer15=63.951, loss_interctc_layer21=73.496, loss=72.060, backward_time=0.209, grad_norm=83.289, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.313e-05, train_time=1.070 +[gpua006:0/64] 2024-02-14 12:29:08,150 (trainer:756) INFO: 41epoch:train:2401-2500batch: iter_time=7.557e-05, forward_time=0.142, loss_ctc=60.018, loss_interctc_layer6=68.180, loss_interctc_layer12=56.276, loss_interctc_layer15=51.552, loss_interctc_layer21=62.166, loss=59.638, backward_time=0.208, grad_norm=63.408, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.312e-05, train_time=1.116 +[gpua006:0/64] 2024-02-14 12:29:28,181 (multiple_iter_factory:32) INFO: Building 2th iter-factory... +[gpua006:0/64] 2024-02-14 12:29:47,084 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 12:29:50,503 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.11", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.11", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.11", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.11", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 12:29:50,503 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.11, +[gpua006:0/64] 2024-02-14 12:29:50,507 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 12:37:26,703 (trainer:756) INFO: 41epoch:train:2501-2600batch: iter_time=3.710, forward_time=0.143, loss_ctc=61.361, loss_interctc_layer6=68.911, loss_interctc_layer12=56.493, loss_interctc_layer15=51.639, loss_interctc_layer21=63.748, loss=60.431, backward_time=0.213, grad_norm=67.233, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.312e-05, train_time=4.985 +[gpua006:0/64] 2024-02-14 12:39:06,078 (trainer:756) INFO: 41epoch:train:2601-2700batch: iter_time=9.385e-05, forward_time=0.142, loss_ctc=84.254, loss_interctc_layer6=80.988, loss_interctc_layer12=67.475, loss_interctc_layer15=62.041, loss_interctc_layer21=87.588, loss=76.469, backward_time=0.208, grad_norm=81.537, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.311e-05, train_time=0.994 +[gpua006:0/64] 2024-02-14 12:41:31,403 (trainer:756) INFO: 41epoch:train:2701-2800batch: iter_time=9.441e-05, forward_time=0.143, loss_ctc=82.974, loss_interctc_layer6=93.987, loss_interctc_layer12=77.477, loss_interctc_layer15=70.877, loss_interctc_layer21=85.905, loss=82.244, backward_time=0.206, grad_norm=75.496, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.311e-05, train_time=1.453 +[gpua006:0/64] 2024-02-14 12:43:52,503 (trainer:756) INFO: 41epoch:train:2801-2900batch: iter_time=9.347e-05, forward_time=0.144, loss_ctc=78.680, loss_interctc_layer6=94.715, loss_interctc_layer12=78.283, loss_interctc_layer15=71.693, loss_interctc_layer21=81.389, loss=80.952, backward_time=0.207, grad_norm=72.028, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.310e-05, train_time=1.411 +[gpua006:0/64] 2024-02-14 12:46:22,191 (trainer:756) INFO: 
41epoch:train:2901-3000batch: iter_time=1.015e-04, forward_time=0.142, loss_ctc=76.232, loss_interctc_layer6=81.852, loss_interctc_layer12=67.603, loss_interctc_layer15=61.780, loss_interctc_layer21=79.066, loss=73.307, backward_time=0.207, grad_norm=76.848, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.310e-05, train_time=1.497 +[gpua006:0/64] 2024-02-14 12:46:55,997 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-14 12:49:58,819 (trainer:756) INFO: 41epoch:train:3001-3100batch: iter_time=9.656e-05, forward_time=0.192, loss_ctc=68.933, loss_interctc_layer6=71.822, loss_interctc_layer12=59.585, loss_interctc_layer15=54.589, loss_interctc_layer21=71.424, loss=65.271, backward_time=0.213, grad_norm=75.834, clip=100.000, loss_scale=1.158e+31, optim_step_time=0.142, optim0_lr0=6.309e-05, train_time=2.165 +[gpua006:0/64] 2024-02-14 12:52:16,609 (trainer:756) INFO: 41epoch:train:3101-3200batch: iter_time=8.768e-05, forward_time=0.152, loss_ctc=71.500, loss_interctc_layer6=77.131, loss_interctc_layer12=63.378, loss_interctc_layer15=57.882, loss_interctc_layer21=74.226, loss=68.824, backward_time=0.207, grad_norm=67.244, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.309e-05, train_time=1.378 +[gpua006:0/64] 2024-02-14 12:54:49,188 (trainer:756) INFO: 41epoch:train:3201-3300batch: iter_time=8.661e-05, forward_time=0.236, loss_ctc=73.736, loss_interctc_layer6=84.460, loss_interctc_layer12=70.405, loss_interctc_layer15=64.803, loss_interctc_layer21=76.789, loss=74.039, backward_time=0.270, grad_norm=114.571, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.142, optim0_lr0=6.308e-05, train_time=1.526 +[gpua006:0/64] 2024-02-14 12:56:55,211 (trainer:756) INFO: 41epoch:train:3301-3400batch: iter_time=9.385e-05, forward_time=0.144, loss_ctc=76.670, loss_interctc_layer6=88.404, loss_interctc_layer12=73.628, loss_interctc_layer15=67.631, loss_interctc_layer21=79.465, loss=77.160, backward_time=0.209, grad_norm=77.032, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.308e-05, train_time=1.260 +[gpua006:0/64] 2024-02-14 12:59:14,675 (trainer:756) INFO: 41epoch:train:3401-3500batch: iter_time=8.985e-05, forward_time=0.142, loss_ctc=71.772, loss_interctc_layer6=80.738, loss_interctc_layer12=66.598, loss_interctc_layer15=60.884, loss_interctc_layer21=74.426, loss=70.884, backward_time=0.207, grad_norm=99.586, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.307e-05, train_time=1.394 +[gpua006:0/64] 2024-02-14 13:01:28,821 (trainer:756) INFO: 41epoch:train:3501-3600batch: iter_time=1.013e-04, forward_time=0.143, loss_ctc=62.846, loss_interctc_layer6=72.544, loss_interctc_layer12=59.685, loss_interctc_layer15=54.409, loss_interctc_layer21=65.199, loss=62.936, backward_time=0.208, grad_norm=70.853, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.307e-05, train_time=1.341 +[gpua006:0/64] 2024-02-14 13:03:48,405 (trainer:756) INFO: 41epoch:train:3601-3700batch: iter_time=9.342e-05, forward_time=0.144, loss_ctc=74.781, loss_interctc_layer6=81.588, loss_interctc_layer12=68.345, loss_interctc_layer15=63.357, loss_interctc_layer21=77.354, loss=73.085, backward_time=0.214, grad_norm=90.792, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.306e-05, train_time=1.396 +[gpua006:0/64] 2024-02-14 13:05:40,970 (multiple_iter_factory:32) INFO: Building 3th iter-factory... 
+[gpua006:0/64] 2024-02-14 13:05:59,996 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 13:06:03,690 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.1", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.1", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.1", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.1", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 13:06:03,690 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.1, +[gpua006:0/64] 2024-02-14 13:06:03,694 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 13:12:20,630 (trainer:756) INFO: 41epoch:train:3701-3800batch: iter_time=3.214, forward_time=0.143, loss_ctc=61.341, loss_interctc_layer6=67.236, loss_interctc_layer12=55.296, loss_interctc_layer15=50.425, loss_interctc_layer21=63.499, loss=59.560, backward_time=0.208, grad_norm=71.268, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.306e-05, train_time=5.122 +[gpua006:0/64] 2024-02-14 13:14:01,828 (trainer:756) INFO: 41epoch:train:3801-3900batch: iter_time=8.031e-05, forward_time=0.142, loss_ctc=71.908, loss_interctc_layer6=72.010, loss_interctc_layer12=59.849, loss_interctc_layer15=54.791, loss_interctc_layer21=75.026, loss=66.717, backward_time=0.208, grad_norm=84.961, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.305e-05, train_time=1.012 +[gpua006:0/64] 2024-02-14 13:16:26,598 (trainer:756) INFO: 41epoch:train:3901-4000batch: iter_time=9.495e-05, forward_time=0.142, loss_ctc=82.752, loss_interctc_layer6=85.775, loss_interctc_layer12=71.111, loss_interctc_layer15=65.192, loss_interctc_layer21=85.878, loss=78.141, backward_time=0.207, grad_norm=80.552, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.305e-05, train_time=1.447 +[gpua006:0/64] 2024-02-14 13:18:24,560 (trainer:756) INFO: 41epoch:train:4001-4100batch: iter_time=9.204e-05, forward_time=0.144, loss_ctc=72.539, loss_interctc_layer6=92.246, loss_interctc_layer12=76.010, loss_interctc_layer15=69.284, loss_interctc_layer21=75.001, loss=77.016, backward_time=0.208, grad_norm=86.856, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.304e-05, train_time=1.179 +[gpua006:0/64] 2024-02-14 13:20:33,001 (trainer:756) INFO: 41epoch:train:4101-4200batch: iter_time=8.838e-05, forward_time=0.169, loss_ctc=88.689, loss_interctc_layer6=94.407, loss_interctc_layer12=78.209, loss_interctc_layer15=71.701, loss_interctc_layer21=92.041, loss=85.010, backward_time=0.221, grad_norm=96.932, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.304e-05, train_time=1.283 +[gpua006:0/64] 2024-02-14 13:22:46,004 (trainer:756) INFO: 41epoch:train:4201-4300batch: iter_time=8.665e-05, forward_time=0.147, loss_ctc=74.954, loss_interctc_layer6=81.897, loss_interctc_layer12=67.644, loss_interctc_layer15=62.040, loss_interctc_layer21=77.880, loss=72.883, backward_time=0.212, grad_norm=88.738, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.303e-05, train_time=1.331 +[gpua006:0/64] 2024-02-14 13:25:40,173 (trainer:756) INFO: 41epoch:train:4301-4400batch: 
iter_time=8.800e-05, forward_time=0.214, loss_ctc=73.177, loss_interctc_layer6=73.147, loss_interctc_layer12=59.959, loss_interctc_layer15=54.601, loss_interctc_layer21=76.088, loss=67.394, backward_time=0.244, grad_norm=82.094, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.143, optim0_lr0=6.303e-05, train_time=1.741 +[gpua006:0/64] 2024-02-14 13:27:49,120 (trainer:756) INFO: 41epoch:train:4401-4500batch: iter_time=8.872e-05, forward_time=0.143, loss_ctc=63.383, loss_interctc_layer6=76.020, loss_interctc_layer12=63.493, loss_interctc_layer15=58.497, loss_interctc_layer21=65.892, loss=65.457, backward_time=0.208, grad_norm=93.170, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.302e-05, train_time=1.289 +[gpua006:0/64] 2024-02-14 13:30:07,873 (trainer:756) INFO: 41epoch:train:4501-4600batch: iter_time=8.574e-05, forward_time=0.143, loss_ctc=72.300, loss_interctc_layer6=84.680, loss_interctc_layer12=70.259, loss_interctc_layer15=64.521, loss_interctc_layer21=74.925, loss=73.337, backward_time=0.207, grad_norm=198.007, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.302e-05, train_time=1.387 +[gpua006:0/64] 2024-02-14 13:33:32,676 (trainer:756) INFO: 41epoch:train:4601-4700batch: iter_time=9.401e-05, forward_time=0.144, loss_ctc=77.200, loss_interctc_layer6=90.156, loss_interctc_layer12=74.659, loss_interctc_layer15=68.470, loss_interctc_layer21=80.100, loss=78.117, backward_time=0.208, grad_norm=84.901, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.301e-05, train_time=2.047 +[gpua006:0/64] 2024-02-14 13:35:26,084 (trainer:756) INFO: 41epoch:train:4701-4800batch: iter_time=9.504e-05, forward_time=0.143, loss_ctc=70.828, loss_interctc_layer6=72.471, loss_interctc_layer12=59.427, loss_interctc_layer15=54.147, loss_interctc_layer21=73.617, loss=66.098, backward_time=0.209, grad_norm=64.989, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.300e-05, train_time=1.134 +[gpua006:0/64] 2024-02-14 13:37:10,344 (trainer:756) INFO: 41epoch:train:4801-4900batch: iter_time=9.761e-05, forward_time=0.143, loss_ctc=70.492, loss_interctc_layer6=81.543, loss_interctc_layer12=68.176, loss_interctc_layer15=62.789, loss_interctc_layer21=72.790, loss=71.158, backward_time=0.209, grad_norm=83.831, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.300e-05, train_time=1.042 +[gpua006:0/64] 2024-02-14 13:39:16,049 (trainer:756) INFO: 41epoch:train:4901-5000batch: iter_time=9.561e-05, forward_time=0.142, loss_ctc=59.836, loss_interctc_layer6=68.185, loss_interctc_layer12=56.312, loss_interctc_layer15=51.457, loss_interctc_layer21=61.992, loss=59.556, backward_time=0.208, grad_norm=69.799, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.299e-05, train_time=1.257 +[gpua006:0/64] 2024-02-14 13:39:36,081 (multiple_iter_factory:32) INFO: Building 4th iter-factory... 
+[gpua006:0/64] 2024-02-14 13:39:54,994 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 13:39:58,432 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.0", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.0", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.0", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.0", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 13:39:58,432 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.0, +[gpua006:0/64] 2024-02-14 13:39:58,450 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 13:47:41,323 (trainer:756) INFO: 41epoch:train:5001-5100batch: iter_time=3.735, forward_time=0.170, loss_ctc=62.786, loss_interctc_layer6=68.503, loss_interctc_layer12=56.190, loss_interctc_layer15=51.311, loss_interctc_layer21=65.281, loss=60.814, backward_time=0.213, grad_norm=102.692, clip=100.000, loss_scale=1.876e+31, optim_step_time=0.140, optim0_lr0=6.299e-05, train_time=5.052 +[gpua006:0/64] 2024-02-14 13:49:31,774 (trainer:756) INFO: 41epoch:train:5101-5200batch: iter_time=1.109e-04, forward_time=0.143, loss_ctc=88.839, loss_interctc_layer6=80.949, loss_interctc_layer12=67.420, loss_interctc_layer15=62.101, loss_interctc_layer21=92.599, loss=78.382, backward_time=0.209, grad_norm=76.811, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.298e-05, train_time=1.105 +[gpua006:0/64] 2024-02-14 13:51:37,181 (trainer:756) INFO: 41epoch:train:5201-5300batch: iter_time=1.025e-04, forward_time=0.274, loss_ctc=90.528, loss_interctc_layer6=93.978, loss_interctc_layer12=77.301, loss_interctc_layer15=70.563, loss_interctc_layer21=93.905, loss=85.255, backward_time=0.254, grad_norm=71.616, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.143, optim0_lr0=6.298e-05, train_time=1.254 +[gpua006:0/64] 2024-02-14 13:54:48,371 (trainer:756) INFO: 41epoch:train:5301-5400batch: iter_time=1.009e-04, forward_time=0.145, loss_ctc=82.597, loss_interctc_layer6=94.631, loss_interctc_layer12=78.095, loss_interctc_layer15=71.516, loss_interctc_layer21=85.498, loss=82.467, backward_time=0.206, grad_norm=78.909, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.297e-05, train_time=1.912 +[gpua006:0/64] 2024-02-14 13:56:52,169 (trainer:756) INFO: 41epoch:train:5401-5500batch: iter_time=1.168e-04, forward_time=0.146, loss_ctc=81.337, loss_interctc_layer6=82.115, loss_interctc_layer12=67.630, loss_interctc_layer15=61.797, loss_interctc_layer21=84.405, loss=75.457, backward_time=0.209, grad_norm=85.414, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.297e-05, train_time=1.238 +[gpua006:0/64] 2024-02-14 13:58:44,947 (trainer:756) INFO: 41epoch:train:5501-5600batch: iter_time=9.587e-05, forward_time=0.142, loss_ctc=73.317, loss_interctc_layer6=71.838, loss_interctc_layer12=59.491, loss_interctc_layer15=54.487, loss_interctc_layer21=76.158, loss=67.058, backward_time=0.207, grad_norm=73.150, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.296e-05, train_time=1.128 +[gpua006:0/64] 2024-02-14 14:01:12,044 (trainer:756) INFO: 41epoch:train:5601-5700batch: 
iter_time=9.071e-05, forward_time=0.147, loss_ctc=75.842, loss_interctc_layer6=76.795, loss_interctc_layer12=63.113, loss_interctc_layer15=57.569, loss_interctc_layer21=78.739, loss=70.412, backward_time=0.208, grad_norm=87.653, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.296e-05, train_time=1.470 +[gpua006:0/64] 2024-02-14 14:03:32,372 (trainer:756) INFO: 41epoch:train:5701-5800batch: iter_time=9.177e-05, forward_time=0.142, loss_ctc=81.130, loss_interctc_layer6=84.471, loss_interctc_layer12=70.843, loss_interctc_layer15=64.972, loss_interctc_layer21=84.626, loss=77.208, backward_time=0.206, grad_norm=105.274, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.295e-05, train_time=1.404 +[gpua006:0/64] 2024-02-14 14:05:52,684 (trainer:756) INFO: 41epoch:train:5801-5900batch: iter_time=9.845e-05, forward_time=0.144, loss_ctc=79.508, loss_interctc_layer6=87.534, loss_interctc_layer12=72.599, loss_interctc_layer15=66.587, loss_interctc_layer21=82.463, loss=77.738, backward_time=0.208, grad_norm=70.416, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.295e-05, train_time=1.403 +[gpua006:0/64] 2024-02-14 14:08:24,045 (trainer:756) INFO: 41epoch:train:5901-6000batch: iter_time=8.809e-05, forward_time=0.142, loss_ctc=75.980, loss_interctc_layer6=80.687, loss_interctc_layer12=66.611, loss_interctc_layer15=60.923, loss_interctc_layer21=78.828, loss=72.606, backward_time=0.206, grad_norm=89.919, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.294e-05, train_time=1.513 +[gpua006:0/64] 2024-02-14 14:10:34,380 (trainer:756) INFO: 41epoch:train:6001-6100batch: iter_time=1.039e-04, forward_time=0.169, loss_ctc=66.230, loss_interctc_layer6=72.504, loss_interctc_layer12=59.423, loss_interctc_layer15=54.059, loss_interctc_layer21=68.705, loss=64.184, backward_time=0.225, grad_norm=152.093, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.141, optim0_lr0=6.294e-05, train_time=1.302 +[gpua006:0/64] 2024-02-14 14:12:42,835 (trainer:756) INFO: 41epoch:train:6101-6200batch: iter_time=1.033e-04, forward_time=0.147, loss_ctc=77.312, loss_interctc_layer6=81.188, loss_interctc_layer12=68.074, loss_interctc_layer15=62.820, loss_interctc_layer21=80.051, loss=73.889, backward_time=0.209, grad_norm=84.674, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.293e-05, train_time=1.285 +[gpua006:0/64] 2024-02-14 14:13:49,884 (multiple_iter_factory:32) INFO: Building 5th iter-factory... 
+[gpua006:0/64] 2024-02-14 14:14:08,825 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 14:14:12,250 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.9", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.9", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.9", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.9", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 14:14:12,250 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.9, +[gpua006:0/64] 2024-02-14 14:14:12,255 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 14:20:46,396 (trainer:756) INFO: 41epoch:train:6201-6300batch: iter_time=3.557, forward_time=0.186, loss_ctc=61.514, loss_interctc_layer6=67.455, loss_interctc_layer12=55.427, loss_interctc_layer15=50.624, loss_interctc_layer21=63.878, loss=59.779, backward_time=0.213, grad_norm=123.743, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.293e-05, train_time=4.836 +[gpua006:0/64] 2024-02-14 14:22:20,616 (trainer:756) INFO: 41epoch:train:6301-6400batch: iter_time=8.656e-05, forward_time=0.142, loss_ctc=71.146, loss_interctc_layer6=71.399, loss_interctc_layer12=59.241, loss_interctc_layer15=54.117, loss_interctc_layer21=74.050, loss=65.991, backward_time=0.208, grad_norm=66.259, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.292e-05, train_time=0.942 +[gpua006:0/64] 2024-02-14 14:24:32,551 (trainer:756) INFO: 41epoch:train:6401-6500batch: iter_time=9.845e-05, forward_time=0.145, loss_ctc=82.333, loss_interctc_layer6=85.295, loss_interctc_layer12=70.461, loss_interctc_layer15=64.553, loss_interctc_layer21=85.427, loss=77.614, backward_time=0.209, grad_norm=72.672, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.292e-05, train_time=1.319 +[gpua006:0/64] 2024-02-14 14:26:41,961 (trainer:756) INFO: 41epoch:train:6501-6600batch: iter_time=9.892e-05, forward_time=0.143, loss_ctc=72.390, loss_interctc_layer6=92.034, loss_interctc_layer12=75.685, loss_interctc_layer15=69.044, loss_interctc_layer21=74.778, loss=76.786, backward_time=0.207, grad_norm=94.848, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.291e-05, train_time=1.292 +[gpua006:0/64] 2024-02-14 14:29:08,182 (trainer:756) INFO: 41epoch:train:6601-6700batch: iter_time=9.913e-05, forward_time=0.143, loss_ctc=88.046, loss_interctc_layer6=93.648, loss_interctc_layer12=77.561, loss_interctc_layer15=71.079, loss_interctc_layer21=91.366, loss=84.340, backward_time=0.206, grad_norm=72.699, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.291e-05, train_time=1.464 +[gpua006:0/64] 2024-02-14 14:31:10,716 (trainer:756) INFO: 41epoch:train:6701-6800batch: iter_time=9.955e-05, forward_time=0.142, loss_ctc=74.049, loss_interctc_layer6=80.258, loss_interctc_layer12=66.501, loss_interctc_layer15=61.082, loss_interctc_layer21=76.901, loss=71.758, backward_time=0.207, grad_norm=80.033, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.290e-05, train_time=1.225 +[gpua006:0/64] 2024-02-14 14:33:01,823 (trainer:756) INFO: 41epoch:train:6801-6900batch: 
iter_time=9.476e-05, forward_time=0.150, loss_ctc=73.815, loss_interctc_layer6=72.971, loss_interctc_layer12=59.762, loss_interctc_layer15=54.440, loss_interctc_layer21=76.810, loss=67.560, backward_time=0.242, grad_norm=61.802, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.290e-05, train_time=1.111 +[gpua006:0/64] 2024-02-14 14:35:46,255 (trainer:756) INFO: 41epoch:train:6901-7000batch: iter_time=9.721e-05, forward_time=0.169, loss_ctc=63.490, loss_interctc_layer6=76.307, loss_interctc_layer12=63.637, loss_interctc_layer15=58.487, loss_interctc_layer21=66.073, loss=65.599, backward_time=0.209, grad_norm=79.559, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.289e-05, train_time=1.641 +[gpua006:0/64] 2024-02-14 14:38:18,828 (trainer:756) INFO: 41epoch:train:7001-7100batch: iter_time=9.501e-05, forward_time=0.143, loss_ctc=72.058, loss_interctc_layer6=84.462, loss_interctc_layer12=69.923, loss_interctc_layer15=64.194, loss_interctc_layer21=74.665, loss=73.060, backward_time=0.206, grad_norm=89.200, clip=100.000, loss_scale=3.752e+31, optim_step_time=0.137, optim0_lr0=6.289e-05, train_time=1.528 +[gpua006:0/64] 2024-02-14 14:41:13,951 (trainer:756) INFO: 41epoch:train:7101-7200batch: iter_time=1.096e-04, forward_time=0.144, loss_ctc=77.104, loss_interctc_layer6=90.312, loss_interctc_layer12=74.607, loss_interctc_layer15=68.298, loss_interctc_layer21=79.931, loss=78.051, backward_time=0.212, grad_norm=108.629, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.288e-05, train_time=1.751 +[gpua006:0/64] 2024-02-14 14:44:03,238 (trainer:756) INFO: 41epoch:train:7201-7300batch: iter_time=1.005e-04, forward_time=0.224, loss_ctc=70.586, loss_interctc_layer6=72.059, loss_interctc_layer12=59.109, loss_interctc_layer15=53.856, loss_interctc_layer21=73.293, loss=65.781, backward_time=0.248, grad_norm=73.277, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.141, optim0_lr0=6.287e-05, train_time=1.693 +[gpua006:0/64] 2024-02-14 14:46:10,514 (trainer:756) INFO: 41epoch:train:7301-7400batch: iter_time=9.461e-05, forward_time=0.142, loss_ctc=69.822, loss_interctc_layer6=81.038, loss_interctc_layer12=67.861, loss_interctc_layer15=62.431, loss_interctc_layer21=72.304, loss=70.691, backward_time=0.208, grad_norm=78.504, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.287e-05, train_time=1.273 +[gpua006:0/64] 2024-02-14 14:48:26,857 (trainer:756) INFO: 41epoch:train:7401-7500batch: iter_time=9.477e-05, forward_time=0.146, loss_ctc=59.848, loss_interctc_layer6=67.958, loss_interctc_layer12=56.101, loss_interctc_layer15=51.334, loss_interctc_layer21=61.926, loss=59.433, backward_time=0.211, grad_norm=71.111, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.286e-05, train_time=1.363 +[gpua006:0/64] 2024-02-14 14:48:46,887 (multiple_iter_factory:32) INFO: Building 6th iter-factory... 
+[gpua006:0/64] 2024-02-14 14:49:05,670 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 14:49:09,124 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.8", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.8", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.8", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.8", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 14:49:09,124 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.8, +[gpua006:0/64] 2024-02-14 14:49:09,127 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 14:56:51,097 (trainer:756) INFO: 41epoch:train:7501-7600batch: iter_time=3.690, forward_time=0.144, loss_ctc=63.077, loss_interctc_layer6=68.955, loss_interctc_layer12=56.388, loss_interctc_layer15=51.395, loss_interctc_layer21=65.483, loss=61.060, backward_time=0.213, grad_norm=70.851, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.286e-05, train_time=5.042 +[gpua006:0/64] 2024-02-14 14:58:32,220 (trainer:756) INFO: 41epoch:train:7601-7700batch: iter_time=8.127e-05, forward_time=0.142, loss_ctc=87.786, loss_interctc_layer6=80.209, loss_interctc_layer12=66.767, loss_interctc_layer15=61.224, loss_interctc_layer21=91.702, loss=77.538, backward_time=0.209, grad_norm=91.861, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.285e-05, train_time=1.011 +[gpua006:0/64] 2024-02-14 14:59:03,368 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
+[gpua006:0/64] 2024-02-14 15:01:01,059 (trainer:756) INFO: 41epoch:train:7701-7800batch: iter_time=8.599e-05, forward_time=0.159, loss_ctc=89.461, loss_interctc_layer6=93.692, loss_interctc_layer12=77.155, loss_interctc_layer15=70.518, loss_interctc_layer21=92.786, loss=84.723, backward_time=0.226, grad_norm=80.202, clip=100.000, loss_scale=2.581e+31, optim_step_time=0.140, optim0_lr0=6.285e-05, train_time=1.488 +[gpua006:0/64] 2024-02-14 15:03:31,777 (trainer:756) INFO: 41epoch:train:7801-7900batch: iter_time=8.922e-05, forward_time=0.157, loss_ctc=82.311, loss_interctc_layer6=94.356, loss_interctc_layer12=77.839, loss_interctc_layer15=71.309, loss_interctc_layer21=85.136, loss=82.190, backward_time=0.231, grad_norm=86.177, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.141, optim0_lr0=6.284e-05, train_time=1.507 +[gpua006:0/64] 2024-02-14 15:06:04,841 (trainer:756) INFO: 41epoch:train:7901-8000batch: iter_time=9.334e-05, forward_time=0.144, loss_ctc=80.159, loss_interctc_layer6=81.318, loss_interctc_layer12=66.860, loss_interctc_layer15=60.979, loss_interctc_layer21=83.211, loss=74.505, backward_time=0.208, grad_norm=77.641, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.284e-05, train_time=1.530 +[gpua006:0/64] 2024-02-14 15:08:09,798 (trainer:756) INFO: 41epoch:train:8001-8100batch: iter_time=9.395e-05, forward_time=0.145, loss_ctc=72.129, loss_interctc_layer6=71.256, loss_interctc_layer12=58.935, loss_interctc_layer15=53.919, loss_interctc_layer21=75.000, loss=66.248, backward_time=0.208, grad_norm=66.534, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.283e-05, train_time=1.250 +[gpua006:0/64] 2024-02-14 15:10:13,802 (trainer:756) INFO: 41epoch:train:8101-8200batch: iter_time=8.811e-05, forward_time=0.227, loss_ctc=74.946, loss_interctc_layer6=76.747, loss_interctc_layer12=62.989, loss_interctc_layer15=57.539, loss_interctc_layer21=77.660, loss=69.976, backward_time=0.223, grad_norm=141.595, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.283e-05, train_time=1.240 +[gpua006:0/64] 2024-02-14 15:12:30,241 (trainer:756) INFO: 41epoch:train:8201-8300batch: iter_time=8.816e-05, forward_time=0.143, loss_ctc=81.413, loss_interctc_layer6=83.764, loss_interctc_layer12=70.048, loss_interctc_layer15=64.315, loss_interctc_layer21=84.488, loss=76.806, backward_time=0.208, grad_norm=152.549, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.282e-05, train_time=1.364 +[gpua006:0/64] 2024-02-14 15:14:56,721 (trainer:756) INFO: 41epoch:train:8301-8400batch: iter_time=9.911e-05, forward_time=0.147, loss_ctc=79.228, loss_interctc_layer6=87.628, loss_interctc_layer12=72.712, loss_interctc_layer15=66.668, loss_interctc_layer21=82.249, loss=77.697, backward_time=0.208, grad_norm=81.011, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.282e-05, train_time=1.465 +[gpua006:0/64] 2024-02-14 15:17:17,773 (trainer:756) INFO: 41epoch:train:8401-8500batch: iter_time=8.984e-05, forward_time=0.143, loss_ctc=74.861, loss_interctc_layer6=80.250, loss_interctc_layer12=66.048, loss_interctc_layer15=60.350, loss_interctc_layer21=77.750, loss=71.852, backward_time=0.208, grad_norm=108.065, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.281e-05, train_time=1.410 +[gpua006:0/64] 2024-02-14 15:19:11,295 (trainer:756) INFO: 41epoch:train:8501-8600batch: iter_time=8.636e-05, forward_time=0.143, loss_ctc=65.535, loss_interctc_layer6=72.116, loss_interctc_layer12=59.098, 
loss_interctc_layer15=53.824, loss_interctc_layer21=67.964, loss=63.707, backward_time=0.210, grad_norm=64.146, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.281e-05, train_time=1.135 +[gpua006:0/64] 2024-02-14 15:21:18,771 (trainer:756) INFO: 41epoch:train:8601-8700batch: iter_time=9.400e-05, forward_time=0.144, loss_ctc=77.522, loss_interctc_layer6=81.080, loss_interctc_layer12=67.770, loss_interctc_layer15=62.678, loss_interctc_layer21=80.338, loss=73.878, backward_time=0.208, grad_norm=97.180, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.280e-05, train_time=1.275 +[gpua006:0/64] 2024-02-14 15:22:36,696 (multiple_iter_factory:32) INFO: Building 7th iter-factory... +[gpua006:0/64] 2024-02-14 15:22:55,429 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 15:22:58,826 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.10", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.10", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.10", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.10", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 15:22:58,826 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.10, +[gpua006:0/64] 2024-02-14 15:22:58,854 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 15:27:52,926 (trainer:756) INFO: 41epoch:train:8701-8800batch: iter_time=2.731, forward_time=0.221, loss_ctc=62.002, loss_interctc_layer6=67.143, loss_interctc_layer12=55.044, loss_interctc_layer15=50.266, loss_interctc_layer21=64.321, loss=59.755, backward_time=0.215, grad_norm=82.983, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.280e-05, train_time=3.941 +[gpua006:0/64] 2024-02-14 15:29:29,043 (trainer:756) INFO: 41epoch:train:8801-8900batch: iter_time=8.653e-05, forward_time=0.142, loss_ctc=74.107, loss_interctc_layer6=71.479, loss_interctc_layer12=59.022, loss_interctc_layer15=54.103, loss_interctc_layer21=77.291, loss=67.200, backward_time=0.208, grad_norm=75.437, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.279e-05, train_time=0.961 +[gpua006:0/64] 2024-02-14 15:31:22,375 (trainer:756) INFO: 41epoch:train:8901-9000batch: iter_time=8.992e-05, forward_time=0.165, loss_ctc=87.758, loss_interctc_layer6=85.162, loss_interctc_layer12=70.451, loss_interctc_layer15=64.417, loss_interctc_layer21=91.066, loss=79.771, backward_time=0.212, grad_norm=91.848, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.279e-05, train_time=1.133 +[gpua006:0/64] 2024-02-14 15:33:34,337 (trainer:756) INFO: 41epoch:train:9001-9100batch: iter_time=9.216e-05, forward_time=0.185, loss_ctc=76.206, loss_interctc_layer6=91.485, loss_interctc_layer12=75.337, loss_interctc_layer15=68.800, loss_interctc_layer21=78.846, loss=78.135, backward_time=0.214, grad_norm=78.366, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.278e-05, train_time=1.319 +[gpua006:0/64] 2024-02-14 15:35:56,343 (trainer:756) INFO: 41epoch:train:9101-9200batch: iter_time=9.149e-05, forward_time=0.143, loss_ctc=92.258, loss_interctc_layer6=92.671, 
loss_interctc_layer12=76.649, loss_interctc_layer15=70.215, loss_interctc_layer21=95.737, loss=85.506, backward_time=0.208, grad_norm=93.836, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.278e-05, train_time=1.420 +[gpua006:0/64] 2024-02-14 15:38:10,926 (trainer:756) INFO: 41epoch:train:9201-9300batch: iter_time=9.116e-05, forward_time=0.145, loss_ctc=81.037, loss_interctc_layer6=80.821, loss_interctc_layer12=66.829, loss_interctc_layer15=61.223, loss_interctc_layer21=84.229, loss=74.828, backward_time=0.205, grad_norm=101.550, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.277e-05, train_time=1.346 +[gpua006:0/64] 2024-02-14 15:40:17,216 (trainer:756) INFO: 41epoch:train:9301-9400batch: iter_time=9.311e-05, forward_time=0.142, loss_ctc=78.224, loss_interctc_layer6=72.332, loss_interctc_layer12=59.161, loss_interctc_layer15=53.815, loss_interctc_layer21=81.540, loss=69.014, backward_time=0.209, grad_norm=66.587, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.277e-05, train_time=1.262 +[gpua006:0/64] 2024-02-14 15:42:11,921 (trainer:756) INFO: 41epoch:train:9401-9500batch: iter_time=9.333e-05, forward_time=0.142, loss_ctc=68.193, loss_interctc_layer6=75.729, loss_interctc_layer12=62.914, loss_interctc_layer15=57.888, loss_interctc_layer21=70.614, loss=67.068, backward_time=0.206, grad_norm=78.293, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.276e-05, train_time=1.147 +[gpua006:0/64] 2024-02-14 15:45:07,110 (trainer:756) INFO: 41epoch:train:9501-9600batch: iter_time=9.300e-05, forward_time=0.142, loss_ctc=75.247, loss_interctc_layer6=84.237, loss_interctc_layer12=69.617, loss_interctc_layer15=63.953, loss_interctc_layer21=77.879, loss=74.187, backward_time=0.205, grad_norm=88.013, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.276e-05, train_time=1.752 +[gpua006:0/64] 2024-02-14 15:47:16,959 (trainer:756) INFO: 41epoch:train:9601-9700batch: iter_time=9.284e-05, forward_time=0.144, loss_ctc=81.846, loss_interctc_layer6=90.085, loss_interctc_layer12=74.444, loss_interctc_layer15=68.038, loss_interctc_layer21=84.919, loss=79.866, backward_time=0.206, grad_norm=154.347, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.275e-05, train_time=1.298 +[gpua006:0/64] 2024-02-14 15:50:06,615 (trainer:756) INFO: 41epoch:train:9701-9800batch: iter_time=8.941e-05, forward_time=0.175, loss_ctc=72.947, loss_interctc_layer6=72.019, loss_interctc_layer12=59.003, loss_interctc_layer15=53.779, loss_interctc_layer21=75.785, loss=66.707, backward_time=0.260, grad_norm=69.178, clip=100.000, loss_scale=3.489e+31, optim_step_time=0.144, optim0_lr0=6.275e-05, train_time=1.696 +[gpua006:0/64] 2024-02-14 15:52:19,879 (trainer:756) INFO: 41epoch:train:9801-9900batch: iter_time=9.251e-05, forward_time=0.142, loss_ctc=71.737, loss_interctc_layer6=81.032, loss_interctc_layer12=67.612, loss_interctc_layer15=62.294, loss_interctc_layer21=74.256, loss=71.386, backward_time=0.206, grad_norm=119.053, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.274e-05, train_time=1.333 +[gpua006:0/64] 2024-02-14 15:54:23,906 (trainer:756) INFO: 41epoch:train:9901-10000batch: iter_time=8.664e-05, forward_time=0.146, loss_ctc=60.179, loss_interctc_layer6=67.228, loss_interctc_layer12=55.284, loss_interctc_layer15=50.481, loss_interctc_layer21=62.390, loss=59.112, backward_time=0.208, grad_norm=61.518, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, 
optim0_lr0=6.274e-05, train_time=1.240 +[gpua006:0/64] 2024-02-14 15:54:43,936 (multiple_iter_factory:32) INFO: Building 8th iter-factory... +[gpua006:0/64] 2024-02-14 15:55:03,138 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 15:55:06,575 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.3", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.3", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.3", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.3", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 15:55:06,575 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.3, +[gpua006:0/64] 2024-02-14 15:55:06,579 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 16:02:09,227 (trainer:756) INFO: 41epoch:train:10001-10100batch: iter_time=3.346, forward_time=0.182, loss_ctc=60.123, loss_interctc_layer6=68.029, loss_interctc_layer12=55.655, loss_interctc_layer15=50.738, loss_interctc_layer21=62.559, loss=59.421, backward_time=0.217, grad_norm=71.813, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.142, optim0_lr0=6.273e-05, train_time=4.653 +[gpua006:0/64] 2024-02-14 16:03:46,672 (trainer:756) INFO: 41epoch:train:10101-10200batch: iter_time=9.491e-05, forward_time=0.143, loss_ctc=83.286, loss_interctc_layer6=80.181, loss_interctc_layer12=66.637, loss_interctc_layer15=61.164, loss_interctc_layer21=86.540, loss=75.562, backward_time=0.208, grad_norm=106.950, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.273e-05, train_time=0.975 +[gpua006:0/64] 2024-02-14 16:06:00,194 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-14 16:06:08,776 (trainer:756) INFO: 41epoch:train:10201-10300batch: iter_time=9.988e-05, forward_time=0.325, loss_ctc=82.741, loss_interctc_layer6=93.927, loss_interctc_layer12=77.245, loss_interctc_layer15=70.505, loss_interctc_layer21=85.863, loss=82.056, backward_time=0.293, grad_norm=78.492, clip=100.000, loss_scale=3.872e+31, optim_step_time=0.147, optim0_lr0=6.272e-05, train_time=1.420 +[gpua006:0/64] 2024-02-14 16:06:53,260 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
+[gpua006:0/64] 2024-02-14 16:08:59,266 (trainer:756) INFO: 41epoch:train:10301-10400batch: iter_time=1.010e-04, forward_time=0.145, loss_ctc=78.166, loss_interctc_layer6=94.189, loss_interctc_layer12=77.750, loss_interctc_layer15=71.191, loss_interctc_layer21=80.901, loss=80.440, backward_time=0.208, grad_norm=85.170, clip=100.000, loss_scale=1.291e+31, optim_step_time=0.139, optim0_lr0=6.271e-05, train_time=1.705 +[gpua006:0/64] 2024-02-14 16:11:40,932 (trainer:756) INFO: 41epoch:train:10401-10500batch: iter_time=9.538e-05, forward_time=0.143, loss_ctc=75.430, loss_interctc_layer6=81.297, loss_interctc_layer12=66.826, loss_interctc_layer15=60.958, loss_interctc_layer21=78.360, loss=72.574, backward_time=0.208, grad_norm=80.333, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.271e-05, train_time=1.617 +[gpua006:0/64] 2024-02-14 16:14:20,312 (trainer:756) INFO: 41epoch:train:10501-10600batch: iter_time=1.093e-04, forward_time=0.197, loss_ctc=67.947, loss_interctc_layer6=71.375, loss_interctc_layer12=59.177, loss_interctc_layer15=54.129, loss_interctc_layer21=70.820, loss=64.689, backward_time=0.303, grad_norm=69.850, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.144, optim0_lr0=6.270e-05, train_time=1.594 +[gpua006:0/64] 2024-02-14 16:16:22,249 (trainer:756) INFO: 41epoch:train:10601-10700batch: iter_time=9.916e-05, forward_time=0.162, loss_ctc=71.080, loss_interctc_layer6=76.729, loss_interctc_layer12=62.903, loss_interctc_layer15=57.323, loss_interctc_layer21=73.823, loss=68.372, backward_time=0.210, grad_norm=80.422, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.270e-05, train_time=1.219 +[gpua006:0/64] 2024-02-14 16:18:30,074 (trainer:756) INFO: 41epoch:train:10701-10800batch: iter_time=9.679e-05, forward_time=0.143, loss_ctc=73.932, loss_interctc_layer6=84.058, loss_interctc_layer12=70.270, loss_interctc_layer15=64.495, loss_interctc_layer21=76.974, loss=73.946, backward_time=0.207, grad_norm=137.451, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.269e-05, train_time=1.278 +[gpua006:0/64] 2024-02-14 16:21:06,842 (trainer:756) INFO: 41epoch:train:10801-10900batch: iter_time=9.685e-05, forward_time=0.143, loss_ctc=75.526, loss_interctc_layer6=87.559, loss_interctc_layer12=72.686, loss_interctc_layer15=66.602, loss_interctc_layer21=78.312, loss=76.137, backward_time=0.207, grad_norm=72.255, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.269e-05, train_time=1.568 +[gpua006:0/64] 2024-02-14 16:23:57,129 (trainer:756) INFO: 41epoch:train:10901-11000batch: iter_time=1.056e-04, forward_time=0.277, loss_ctc=70.826, loss_interctc_layer6=80.042, loss_interctc_layer12=65.956, loss_interctc_layer15=60.209, loss_interctc_layer21=73.446, loss=70.096, backward_time=0.239, grad_norm=81.383, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.143, optim0_lr0=6.268e-05, train_time=1.702 +[gpua006:0/64] 2024-02-14 16:25:47,630 (trainer:756) INFO: 41epoch:train:11001-11100batch: iter_time=1.071e-04, forward_time=0.142, loss_ctc=62.885, loss_interctc_layer6=72.565, loss_interctc_layer12=59.578, loss_interctc_layer15=54.297, loss_interctc_layer21=65.324, loss=62.930, backward_time=0.208, grad_norm=64.660, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.268e-05, train_time=1.105 +[gpua006:0/64] 2024-02-14 16:28:01,156 (trainer:756) INFO: 41epoch:train:11101-11200batch: iter_time=9.485e-05, forward_time=0.144, loss_ctc=73.451, loss_interctc_layer6=80.273, 
loss_interctc_layer12=67.026, loss_interctc_layer15=61.816, loss_interctc_layer21=76.284, loss=71.770, backward_time=0.208, grad_norm=104.743, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.267e-05, train_time=1.336 +[gpua006:0/64] 2024-02-14 16:29:40,234 (multiple_iter_factory:32) INFO: Building 9th iter-factory... +[gpua006:0/64] 2024-02-14 16:29:59,353 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 16:30:02,791 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.2", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.2", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.2", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.2", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 16:30:02,791 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.2, +[gpua006:0/64] 2024-02-14 16:30:02,987 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 16:36:08,075 (trainer:756) INFO: 41epoch:train:11201-11300batch: iter_time=3.449, forward_time=0.142, loss_ctc=61.825, loss_interctc_layer6=66.902, loss_interctc_layer12=54.908, loss_interctc_layer15=49.958, loss_interctc_layer21=64.139, loss=59.546, backward_time=0.208, grad_norm=92.176, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.267e-05, train_time=4.869 +[gpua006:0/64] 2024-02-14 16:38:08,168 (trainer:756) INFO: 41epoch:train:11301-11400batch: iter_time=9.750e-05, forward_time=0.199, loss_ctc=75.125, loss_interctc_layer6=71.523, loss_interctc_layer12=59.288, loss_interctc_layer15=54.053, loss_interctc_layer21=78.211, loss=67.640, backward_time=0.222, grad_norm=78.393, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.266e-05, train_time=1.201 +[gpua006:0/64] 2024-02-14 16:40:07,066 (trainer:756) INFO: 41epoch:train:11401-11500batch: iter_time=9.163e-05, forward_time=0.225, loss_ctc=87.741, loss_interctc_layer6=85.409, loss_interctc_layer12=70.471, loss_interctc_layer15=64.542, loss_interctc_layer21=91.102, loss=79.853, backward_time=0.231, grad_norm=82.093, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.141, optim0_lr0=6.266e-05, train_time=1.188 +[gpua006:0/64] 2024-02-14 16:42:34,354 (trainer:756) INFO: 41epoch:train:11501-11600batch: iter_time=1.033e-04, forward_time=0.144, loss_ctc=76.714, loss_interctc_layer6=92.112, loss_interctc_layer12=75.764, loss_interctc_layer15=69.116, loss_interctc_layer21=79.372, loss=78.616, backward_time=0.208, grad_norm=75.025, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.265e-05, train_time=1.474 +[gpua006:0/64] 2024-02-14 16:44:54,226 (trainer:756) INFO: 41epoch:train:11601-11700batch: iter_time=9.775e-05, forward_time=0.154, loss_ctc=92.117, loss_interctc_layer6=92.160, loss_interctc_layer12=76.114, loss_interctc_layer15=69.651, loss_interctc_layer21=95.611, loss=85.130, backward_time=0.208, grad_norm=75.581, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.265e-05, train_time=1.398 +[gpua006:0/64] 2024-02-14 16:46:46,962 (trainer:756) INFO: 41epoch:train:11701-11800batch: iter_time=1.004e-04, forward_time=0.144, loss_ctc=80.325, 
loss_interctc_layer6=80.541, loss_interctc_layer12=66.610, loss_interctc_layer15=60.935, loss_interctc_layer21=83.785, loss=74.439, backward_time=0.209, grad_norm=80.442, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.264e-05, train_time=1.127 +[gpua006:0/64] 2024-02-14 16:49:47,735 (trainer:756) INFO: 41epoch:train:11801-11900batch: iter_time=9.979e-05, forward_time=0.143, loss_ctc=78.232, loss_interctc_layer6=72.842, loss_interctc_layer12=59.622, loss_interctc_layer15=54.304, loss_interctc_layer21=81.477, loss=69.295, backward_time=0.208, grad_norm=67.689, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.264e-05, train_time=1.808 +[gpua006:0/64] 2024-02-14 16:52:43,106 (trainer:756) INFO: 41epoch:train:11901-12000batch: iter_time=1.067e-04, forward_time=0.142, loss_ctc=68.097, loss_interctc_layer6=75.977, loss_interctc_layer12=63.058, loss_interctc_layer15=58.006, loss_interctc_layer21=70.601, loss=67.148, backward_time=0.208, grad_norm=84.283, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.263e-05, train_time=1.753 +[gpua006:0/64] 2024-02-14 16:55:19,683 (trainer:756) INFO: 41epoch:train:12001-12100batch: iter_time=9.345e-05, forward_time=0.153, loss_ctc=75.202, loss_interctc_layer6=84.316, loss_interctc_layer12=69.763, loss_interctc_layer15=64.057, loss_interctc_layer21=78.154, loss=74.298, backward_time=0.208, grad_norm=104.575, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.263e-05, train_time=1.566 +[gpua006:0/64] 2024-02-14 16:57:42,238 (trainer:756) INFO: 41epoch:train:12101-12200batch: iter_time=1.000e-04, forward_time=0.192, loss_ctc=81.757, loss_interctc_layer6=89.772, loss_interctc_layer12=74.161, loss_interctc_layer15=67.916, loss_interctc_layer21=84.915, loss=79.704, backward_time=0.238, grad_norm=85.560, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.262e-05, train_time=1.425 +[gpua006:0/64] 2024-02-14 16:59:37,490 (trainer:756) INFO: 41epoch:train:12201-12300batch: iter_time=9.782e-05, forward_time=0.209, loss_ctc=73.028, loss_interctc_layer6=71.676, loss_interctc_layer12=58.679, loss_interctc_layer15=53.461, loss_interctc_layer21=75.874, loss=66.544, backward_time=0.228, grad_norm=85.823, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.141, optim0_lr0=6.262e-05, train_time=1.152 +[gpua006:0/64] 2024-02-14 17:01:46,091 (trainer:756) INFO: 41epoch:train:12301-12400batch: iter_time=9.431e-05, forward_time=0.149, loss_ctc=71.174, loss_interctc_layer6=80.064, loss_interctc_layer12=66.869, loss_interctc_layer15=61.382, loss_interctc_layer21=73.613, loss=70.620, backward_time=0.212, grad_norm=83.276, clip=100.000, loss_scale=1.744e+31, optim_step_time=0.138, optim0_lr0=6.261e-05, train_time=1.285 +[gpua006:0/64] 2024-02-14 17:03:55,480 (trainer:756) INFO: 41epoch:train:12401-12500batch: iter_time=9.206e-05, forward_time=0.142, loss_ctc=61.721, loss_interctc_layer6=67.779, loss_interctc_layer12=55.865, loss_interctc_layer15=51.233, loss_interctc_layer21=63.759, loss=60.071, backward_time=0.207, grad_norm=78.455, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.261e-05, train_time=1.294 +[gpua006:0/64] 2024-02-14 17:04:15,514 (multiple_iter_factory:32) INFO: Building 10th iter-factory... 
+[gpua006:0/64] 2024-02-14 17:04:34,241 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 17:04:37,651 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.4", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.4", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.4", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.4", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 17:04:37,651 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.4, +[gpua006:0/64] 2024-02-14 17:04:37,677 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 17:11:40,002 (trainer:756) INFO: 41epoch:train:12501-12600batch: iter_time=3.554, forward_time=0.145, loss_ctc=62.280, loss_interctc_layer6=67.874, loss_interctc_layer12=55.527, loss_interctc_layer15=50.625, loss_interctc_layer21=64.696, loss=60.200, backward_time=0.210, grad_norm=62.229, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.260e-05, train_time=4.645 +[gpua006:0/64] 2024-02-14 17:13:26,508 (trainer:756) INFO: 41epoch:train:12601-12700batch: iter_time=1.009e-04, forward_time=0.143, loss_ctc=86.886, loss_interctc_layer6=79.836, loss_interctc_layer12=66.157, loss_interctc_layer15=60.696, loss_interctc_layer21=90.368, loss=76.789, backward_time=0.209, grad_norm=87.594, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.260e-05, train_time=1.065 +[gpua006:0/64] 2024-02-14 17:16:03,232 (trainer:756) INFO: 41epoch:train:12701-12800batch: iter_time=1.039e-04, forward_time=0.161, loss_ctc=89.108, loss_interctc_layer6=93.613, loss_interctc_layer12=76.865, loss_interctc_layer15=70.268, loss_interctc_layer21=92.317, loss=84.434, backward_time=0.229, grad_norm=87.786, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.259e-05, train_time=1.567 +[gpua006:0/64] 2024-02-14 17:18:05,894 (trainer:756) INFO: 41epoch:train:12801-12900batch: iter_time=9.132e-05, forward_time=0.158, loss_ctc=81.518, loss_interctc_layer6=93.422, loss_interctc_layer12=76.934, loss_interctc_layer15=70.360, loss_interctc_layer21=84.318, loss=81.310, backward_time=0.220, grad_norm=90.200, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.259e-05, train_time=1.226 +[gpua006:0/64] 2024-02-14 17:20:50,596 (trainer:756) INFO: 41epoch:train:12901-13000batch: iter_time=8.842e-05, forward_time=0.173, loss_ctc=79.232, loss_interctc_layer6=80.558, loss_interctc_layer12=66.166, loss_interctc_layer15=60.450, loss_interctc_layer21=82.271, loss=73.736, backward_time=0.248, grad_norm=120.157, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.258e-05, train_time=1.647 +[gpua006:0/64] 2024-02-14 17:22:48,966 (trainer:756) INFO: 41epoch:train:13001-13100batch: iter_time=9.589e-05, forward_time=0.148, loss_ctc=72.168, loss_interctc_layer6=71.458, loss_interctc_layer12=58.805, loss_interctc_layer15=53.799, loss_interctc_layer21=75.063, loss=66.259, backward_time=0.210, grad_norm=85.308, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.258e-05, train_time=1.183 +[gpua006:0/64] 2024-02-14 17:25:01,675 (trainer:756) INFO: 
41epoch:train:13101-13200batch: iter_time=8.617e-05, forward_time=0.143, loss_ctc=74.537, loss_interctc_layer6=76.304, loss_interctc_layer12=62.579, loss_interctc_layer15=57.105, loss_interctc_layer21=77.354, loss=69.576, backward_time=0.208, grad_norm=169.654, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.257e-05, train_time=1.327 +[gpua006:0/64] 2024-02-14 17:27:33,298 (trainer:756) INFO: 41epoch:train:13201-13300batch: iter_time=8.896e-05, forward_time=0.142, loss_ctc=80.779, loss_interctc_layer6=83.890, loss_interctc_layer12=70.083, loss_interctc_layer15=64.545, loss_interctc_layer21=83.722, loss=76.604, backward_time=0.207, grad_norm=87.100, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.257e-05, train_time=1.516 +[gpua006:0/64] 2024-02-14 17:29:35,780 (trainer:756) INFO: 41epoch:train:13301-13400batch: iter_time=8.847e-05, forward_time=0.143, loss_ctc=79.141, loss_interctc_layer6=87.517, loss_interctc_layer12=72.509, loss_interctc_layer15=66.479, loss_interctc_layer21=82.074, loss=77.544, backward_time=0.207, grad_norm=74.969, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.256e-05, train_time=1.225 +[gpua006:0/64] 2024-02-14 17:32:44,349 (trainer:756) INFO: 41epoch:train:13401-13500batch: iter_time=8.894e-05, forward_time=0.142, loss_ctc=74.801, loss_interctc_layer6=79.656, loss_interctc_layer12=65.460, loss_interctc_layer15=59.782, loss_interctc_layer21=77.663, loss=71.472, backward_time=0.206, grad_norm=125.181, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.256e-05, train_time=1.885 +[gpua006:0/64] 2024-02-14 17:34:49,920 (trainer:756) INFO: 41epoch:train:13501-13600batch: iter_time=9.491e-05, forward_time=0.144, loss_ctc=65.095, loss_interctc_layer6=72.106, loss_interctc_layer12=59.084, loss_interctc_layer15=53.710, loss_interctc_layer21=67.595, loss=63.518, backward_time=0.210, grad_norm=67.260, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.255e-05, train_time=1.256 +[gpua006:0/64] 2024-02-14 17:37:32,032 (trainer:756) INFO: 41epoch:train:13601-13700batch: iter_time=1.090e-04, forward_time=0.183, loss_ctc=76.632, loss_interctc_layer6=80.212, loss_interctc_layer12=67.027, loss_interctc_layer15=61.920, loss_interctc_layer21=79.356, loss=73.029, backward_time=0.214, grad_norm=73.674, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.255e-05, train_time=1.621 +[gpua006:0/64] 2024-02-14 17:39:30,213 (multiple_iter_factory:32) INFO: Building 11th iter-factory... 
+[gpua006:0/64] 2024-02-14 17:39:49,500 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 17:39:52,973 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.7", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.7", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.7", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.7", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 17:39:52,973 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.7, +[gpua006:0/64] 2024-02-14 17:39:53,025 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 17:46:07,657 (trainer:756) INFO: 41epoch:train:13701-13800batch: iter_time=3.503, forward_time=0.207, loss_ctc=61.801, loss_interctc_layer6=67.077, loss_interctc_layer12=54.918, loss_interctc_layer15=50.076, loss_interctc_layer21=64.060, loss=59.586, backward_time=0.232, grad_norm=71.773, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.254e-05, train_time=5.156 +[gpua006:0/64] 2024-02-14 17:47:51,785 (trainer:756) INFO: 41epoch:train:13801-13900batch: iter_time=8.547e-05, forward_time=0.142, loss_ctc=70.675, loss_interctc_layer6=71.230, loss_interctc_layer12=58.766, loss_interctc_layer15=53.750, loss_interctc_layer21=73.672, loss=65.619, backward_time=0.209, grad_norm=89.683, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.254e-05, train_time=1.041 +[gpua006:0/64] 2024-02-14 17:49:57,570 (trainer:756) INFO: 41epoch:train:13901-14000batch: iter_time=7.953e-05, forward_time=0.142, loss_ctc=81.322, loss_interctc_layer6=84.780, loss_interctc_layer12=69.920, loss_interctc_layer15=64.042, loss_interctc_layer21=84.592, loss=76.931, backward_time=0.209, grad_norm=89.152, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.253e-05, train_time=1.258 +[gpua006:0/64] 2024-02-14 17:53:23,732 (trainer:756) INFO: 41epoch:train:14001-14100batch: iter_time=8.424e-05, forward_time=0.143, loss_ctc=72.035, loss_interctc_layer6=91.869, loss_interctc_layer12=75.446, loss_interctc_layer15=68.817, loss_interctc_layer21=74.612, loss=76.556, backward_time=0.208, grad_norm=81.715, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.253e-05, train_time=2.061 +[gpua006:0/64] 2024-02-14 17:55:36,270 (trainer:756) INFO: 41epoch:train:14101-14200batch: iter_time=9.477e-05, forward_time=0.144, loss_ctc=87.279, loss_interctc_layer6=92.823, loss_interctc_layer12=76.737, loss_interctc_layer15=70.245, loss_interctc_layer21=90.779, loss=83.572, backward_time=0.208, grad_norm=80.961, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.252e-05, train_time=1.325 +[gpua006:0/64] 2024-02-14 17:58:29,672 (trainer:756) INFO: 41epoch:train:14201-14300batch: iter_time=8.916e-05, forward_time=0.142, loss_ctc=72.862, loss_interctc_layer6=80.056, loss_interctc_layer12=66.175, loss_interctc_layer15=60.522, loss_interctc_layer21=75.815, loss=71.086, backward_time=0.206, grad_norm=80.344, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.252e-05, train_time=1.734 +[gpua006:0/64] 2024-02-14 18:00:11,828 (trainer:756) INFO: 
41epoch:train:14301-14400batch: iter_time=8.586e-05, forward_time=0.144, loss_ctc=73.288, loss_interctc_layer6=72.560, loss_interctc_layer12=59.300, loss_interctc_layer15=53.922, loss_interctc_layer21=76.334, loss=67.081, backward_time=0.209, grad_norm=72.779, clip=100.000, loss_scale=3.489e+31, optim_step_time=0.137, optim0_lr0=6.251e-05, train_time=1.021 +[gpua006:0/64] 2024-02-14 18:02:25,757 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-14 18:03:25,656 (trainer:756) INFO: 41epoch:train:14401-14500batch: iter_time=8.937e-05, forward_time=0.142, loss_ctc=63.255, loss_interctc_layer6=75.618, loss_interctc_layer12=62.938, loss_interctc_layer15=57.868, loss_interctc_layer21=65.802, loss=65.096, backward_time=0.208, grad_norm=87.196, clip=100.000, loss_scale=3.442e+31, optim_step_time=0.139, optim0_lr0=6.251e-05, train_time=1.938 +[gpua006:0/64] 2024-02-14 18:06:16,659 (trainer:756) INFO: 41epoch:train:14501-14600batch: iter_time=9.167e-05, forward_time=0.179, loss_ctc=71.439, loss_interctc_layer6=84.171, loss_interctc_layer12=69.777, loss_interctc_layer15=64.000, loss_interctc_layer21=73.917, loss=72.661, backward_time=0.211, grad_norm=101.931, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.250e-05, train_time=1.710 +[gpua006:0/64] 2024-02-14 18:08:16,634 (trainer:756) INFO: 41epoch:train:14601-14700batch: iter_time=8.516e-05, forward_time=0.162, loss_ctc=76.413, loss_interctc_layer6=89.930, loss_interctc_layer12=74.452, loss_interctc_layer15=68.103, loss_interctc_layer21=79.318, loss=77.643, backward_time=0.210, grad_norm=93.132, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.250e-05, train_time=1.200 +[gpua006:0/64] 2024-02-14 18:10:19,742 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
+[gpua006:0/64] 2024-02-14 18:11:37,356 (trainer:756) INFO: 41epoch:train:14701-14800batch: iter_time=8.820e-05, forward_time=0.201, loss_ctc=69.374, loss_interctc_layer6=71.996, loss_interctc_layer12=58.946, loss_interctc_layer15=53.665, loss_interctc_layer21=72.097, loss=65.216, backward_time=0.265, grad_norm=72.117, clip=100.000, loss_scale=1.690e+31, optim_step_time=0.141, optim0_lr0=6.249e-05, train_time=2.007 +[gpua006:0/64] 2024-02-14 18:13:30,283 (trainer:756) INFO: 41epoch:train:14801-14900batch: iter_time=9.255e-05, forward_time=0.142, loss_ctc=69.683, loss_interctc_layer6=80.665, loss_interctc_layer12=67.395, loss_interctc_layer15=61.977, loss_interctc_layer21=72.290, loss=70.402, backward_time=0.208, grad_norm=70.616, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.249e-05, train_time=1.129 +[gpua006:0/64] 2024-02-14 18:15:17,117 (trainer:756) INFO: 41epoch:train:14901-15000batch: iter_time=9.511e-05, forward_time=0.142, loss_ctc=60.219, loss_interctc_layer6=67.874, loss_interctc_layer12=55.968, loss_interctc_layer15=51.109, loss_interctc_layer21=62.394, loss=59.513, backward_time=0.209, grad_norm=79.620, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.248e-05, train_time=1.068 +[gpua006:0/64] 2024-02-14 18:48:14,477 (trainer:355) INFO: 41epoch results: [train] iter_time=0.276, forward_time=0.157, loss_ctc=74.723, loss_interctc_layer6=80.394, loss_interctc_layer12=66.464, loss_interctc_layer15=60.866, loss_interctc_layer21=77.550, loss=71.999, backward_time=0.215, grad_norm=87.128, clip=100.000, loss_scale=2.543e+31, optim_step_time=0.139, optim0_lr0=6.286e-05, train_time=1.659, time=6 hours, 55 minutes and 13.64 seconds, total_count=615000, gpu_max_cached_mem_GB=33.436, [valid] loss_ctc=40.188, cer_ctc=0.186, loss_interctc_layer6=46.428, cer_interctc_layer6=0.205, loss_interctc_layer12=33.430, cer_interctc_layer12=0.138, loss_interctc_layer15=29.144, cer_interctc_layer15=0.114, loss_interctc_layer21=42.738, cer_interctc_layer21=0.199, loss=38.386, time=32 minutes and 31.45 seconds, total_count=191511, gpu_max_cached_mem_GB=33.436 +[gpua006:0/64] 2024-02-14 18:48:35,325 (trainer:410) INFO: The best model has been updated: valid.cer_ctc, valid.total_count +[gpua006:0/64] 2024-02-14 18:48:35,654 (trainer:464) INFO: The model files were removed: exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/35epoch.pth, exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/36epoch.pth +[gpua006:0/64] 2024-02-14 18:48:35,654 (trainer:289) INFO: 42/45epoch started. Estimated time to finish: 1 day, 4 hours and 39 minutes +[gpua006:0/64] 2024-02-14 18:48:35,670 (multiple_iter_factory:32) INFO: Building 0th iter-factory... 
+[gpua006:0/64] 2024-02-14 18:48:53,633 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 18:48:57,367 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.0", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.0", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.0", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.0", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 18:48:57,367 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.0, +[gpua006:0/64] 2024-02-14 18:48:57,370 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 18:56:09,621 (trainer:756) INFO: 42epoch:train:1-100batch: iter_time=3.308, forward_time=0.241, loss_ctc=69.332, loss_interctc_layer6=74.363, loss_interctc_layer12=62.547, loss_interctc_layer15=57.862, loss_interctc_layer21=72.409, loss=67.303, backward_time=0.222, grad_norm=101.614, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.247e-05, train_time=4.538 +[gpua006:0/64] 2024-02-14 18:57:44,781 (trainer:756) INFO: 42epoch:train:101-200batch: iter_time=8.757e-05, forward_time=0.142, loss_ctc=75.050, loss_interctc_layer6=77.596, loss_interctc_layer12=64.023, loss_interctc_layer15=58.555, loss_interctc_layer21=77.774, loss=70.600, backward_time=0.208, grad_norm=76.509, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.247e-05, train_time=0.953 +[gpua006:0/64] 2024-02-14 18:59:21,044 (trainer:756) INFO: 42epoch:train:201-300batch: iter_time=9.733e-05, forward_time=0.145, loss_ctc=95.309, loss_interctc_layer6=92.035, loss_interctc_layer12=76.135, loss_interctc_layer15=69.616, loss_interctc_layer21=98.922, loss=86.403, backward_time=0.211, grad_norm=103.266, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.246e-05, train_time=0.962 +[gpua006:0/64] 2024-02-14 19:01:05,909 (trainer:756) INFO: 42epoch:train:301-400batch: iter_time=1.023e-04, forward_time=0.146, loss_ctc=73.340, loss_interctc_layer6=82.445, loss_interctc_layer12=68.099, loss_interctc_layer15=62.356, loss_interctc_layer21=76.248, loss=72.498, backward_time=0.212, grad_norm=74.798, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.246e-05, train_time=1.049 +[gpua006:0/64] 2024-02-14 19:03:23,589 (trainer:756) INFO: 42epoch:train:401-500batch: iter_time=9.544e-05, forward_time=0.144, loss_ctc=91.425, loss_interctc_layer6=91.868, loss_interctc_layer12=76.984, loss_interctc_layer15=71.005, loss_interctc_layer21=94.684, loss=85.193, backward_time=0.209, grad_norm=147.001, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.245e-05, train_time=1.377 +[gpua006:0/64] 2024-02-14 19:05:31,275 (trainer:756) INFO: 42epoch:train:501-600batch: iter_time=9.654e-05, forward_time=0.161, loss_ctc=83.753, loss_interctc_layer6=86.781, loss_interctc_layer12=72.428, loss_interctc_layer15=66.685, loss_interctc_layer21=86.620, loss=79.253, backward_time=0.224, grad_norm=94.810, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.245e-05, train_time=1.277 +[gpua006:0/64] 2024-02-14 19:07:30,861 (trainer:756) INFO: 42epoch:train:601-700batch: 
iter_time=9.193e-05, forward_time=0.196, loss_ctc=83.128, loss_interctc_layer6=86.581, loss_interctc_layer12=72.150, loss_interctc_layer15=66.242, loss_interctc_layer21=85.949, loss=78.810, backward_time=0.255, grad_norm=96.223, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.142, optim0_lr0=6.244e-05, train_time=1.195 +[gpua006:0/64] 2024-02-14 19:10:18,253 (trainer:756) INFO: 42epoch:train:701-800batch: iter_time=9.171e-05, forward_time=0.153, loss_ctc=74.432, loss_interctc_layer6=78.456, loss_interctc_layer12=64.794, loss_interctc_layer15=59.221, loss_interctc_layer21=77.135, loss=70.808, backward_time=0.210, grad_norm=95.080, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.244e-05, train_time=1.675 +[gpua006:0/64] 2024-02-14 19:12:47,867 (trainer:756) INFO: 42epoch:train:801-900batch: iter_time=8.932e-05, forward_time=0.144, loss_ctc=90.662, loss_interctc_layer6=96.639, loss_interctc_layer12=81.816, loss_interctc_layer15=76.036, loss_interctc_layer21=93.415, loss=87.714, backward_time=0.207, grad_norm=164.219, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.243e-05, train_time=1.495 +[gpua006:0/64] 2024-02-14 19:15:02,699 (trainer:756) INFO: 42epoch:train:901-1000batch: iter_time=8.846e-05, forward_time=0.157, loss_ctc=78.745, loss_interctc_layer6=82.517, loss_interctc_layer12=68.592, loss_interctc_layer15=63.016, loss_interctc_layer21=81.483, loss=74.870, backward_time=0.209, grad_norm=70.937, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.243e-05, train_time=1.349 +[gpua006:0/64] 2024-02-14 19:17:18,613 (trainer:756) INFO: 42epoch:train:1001-1100batch: iter_time=8.857e-05, forward_time=0.156, loss_ctc=74.613, loss_interctc_layer6=87.986, loss_interctc_layer12=73.824, loss_interctc_layer15=67.926, loss_interctc_layer21=77.118, loss=76.293, backward_time=0.226, grad_norm=96.735, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.242e-05, train_time=1.359 +[gpua006:0/64] 2024-02-14 19:19:28,712 (trainer:756) INFO: 42epoch:train:1101-1200batch: iter_time=9.336e-05, forward_time=0.164, loss_ctc=80.364, loss_interctc_layer6=82.700, loss_interctc_layer12=69.314, loss_interctc_layer15=63.974, loss_interctc_layer21=83.348, loss=75.940, backward_time=0.242, grad_norm=83.686, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.141, optim0_lr0=6.242e-05, train_time=1.299 +[gpua006:0/64] 2024-02-14 19:20:49,944 (multiple_iter_factory:32) INFO: Building 1th iter-factory... 
+[gpua006:0/64] 2024-02-14 19:21:09,082 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 19:21:12,455 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.7", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.7", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.7", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.7", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 19:21:12,455 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.7, +[gpua006:0/64] 2024-02-14 19:21:12,541 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 19:27:28,962 (trainer:756) INFO: 42epoch:train:1201-1300batch: iter_time=3.427, forward_time=0.145, loss_ctc=66.879, loss_interctc_layer6=74.926, loss_interctc_layer12=61.415, loss_interctc_layer15=55.980, loss_interctc_layer21=69.372, loss=65.714, backward_time=0.207, grad_norm=74.651, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.241e-05, train_time=4.804 +[gpua006:0/64] 2024-02-14 19:29:12,983 (trainer:756) INFO: 42epoch:train:1301-1400batch: iter_time=8.426e-05, forward_time=0.169, loss_ctc=68.276, loss_interctc_layer6=78.301, loss_interctc_layer12=65.117, loss_interctc_layer15=60.002, loss_interctc_layer21=70.792, loss=68.497, backward_time=0.213, grad_norm=65.492, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.241e-05, train_time=1.040 +[gpua006:0/64] 2024-02-14 19:31:01,724 (trainer:756) INFO: 42epoch:train:1401-1500batch: iter_time=8.599e-05, forward_time=0.150, loss_ctc=91.859, loss_interctc_layer6=89.105, loss_interctc_layer12=73.596, loss_interctc_layer15=67.195, loss_interctc_layer21=95.240, loss=83.399, backward_time=0.217, grad_norm=78.306, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.240e-05, train_time=1.087 +[gpua006:0/64] 2024-02-14 19:33:39,041 (trainer:756) INFO: 42epoch:train:1501-1600batch: iter_time=9.375e-05, forward_time=0.144, loss_ctc=79.764, loss_interctc_layer6=85.939, loss_interctc_layer12=71.025, loss_interctc_layer15=64.948, loss_interctc_layer21=83.083, loss=76.952, backward_time=0.210, grad_norm=73.733, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.240e-05, train_time=1.573 +[gpua006:0/64] 2024-02-14 19:36:01,807 (trainer:756) INFO: 42epoch:train:1601-1700batch: iter_time=9.747e-05, forward_time=0.145, loss_ctc=78.164, loss_interctc_layer6=87.880, loss_interctc_layer12=72.552, loss_interctc_layer15=66.501, loss_interctc_layer21=81.306, loss=77.280, backward_time=0.211, grad_norm=82.323, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.239e-05, train_time=1.427 +[gpua006:0/64] 2024-02-14 19:38:23,053 (trainer:756) INFO: 42epoch:train:1701-1800batch: iter_time=2.291e-04, forward_time=0.172, loss_ctc=79.061, loss_interctc_layer6=83.615, loss_interctc_layer12=69.701, loss_interctc_layer15=64.459, loss_interctc_layer21=82.279, loss=75.823, backward_time=0.217, grad_norm=86.067, clip=100.000, loss_scale=1.349e+31, optim_step_time=0.143, optim0_lr0=6.239e-05, train_time=1.413 +[gpua006:0/64] 2024-02-14 19:40:15,815 (trainer:756) INFO: 42epoch:train:1801-1900batch: 
iter_time=8.920e-05, forward_time=0.178, loss_ctc=78.206, loss_interctc_layer6=88.626, loss_interctc_layer12=74.097, loss_interctc_layer15=68.340, loss_interctc_layer21=80.874, loss=78.029, backward_time=0.210, grad_norm=74.997, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.238e-05, train_time=1.127 +[gpua006:0/64] 2024-02-14 19:42:21,965 (trainer:756) INFO: 42epoch:train:1901-2000batch: iter_time=9.487e-05, forward_time=0.168, loss_ctc=66.796, loss_interctc_layer6=74.554, loss_interctc_layer12=61.140, loss_interctc_layer15=55.700, loss_interctc_layer21=69.134, loss=65.465, backward_time=0.225, grad_norm=60.055, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.238e-05, train_time=1.259 +[gpua006:0/64] 2024-02-14 19:44:57,659 (trainer:756) INFO: 42epoch:train:2001-2100batch: iter_time=9.777e-05, forward_time=0.144, loss_ctc=83.186, loss_interctc_layer6=87.202, loss_interctc_layer12=72.288, loss_interctc_layer15=66.314, loss_interctc_layer21=86.111, loss=79.020, backward_time=0.207, grad_norm=73.967, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.237e-05, train_time=1.559 +[gpua006:0/64] 2024-02-14 19:47:42,007 (trainer:756) INFO: 42epoch:train:2101-2200batch: iter_time=9.018e-05, forward_time=0.150, loss_ctc=78.690, loss_interctc_layer6=93.082, loss_interctc_layer12=78.790, loss_interctc_layer15=72.974, loss_interctc_layer21=81.561, loss=81.019, backward_time=0.207, grad_norm=93.124, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.237e-05, train_time=1.643 +[gpua006:0/64] 2024-02-14 19:49:38,116 (trainer:756) INFO: 42epoch:train:2201-2300batch: iter_time=0.001, forward_time=0.178, loss_ctc=72.536, loss_interctc_layer6=76.888, loss_interctc_layer12=63.538, loss_interctc_layer15=58.220, loss_interctc_layer21=75.212, loss=69.279, backward_time=0.212, grad_norm=75.978, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.236e-05, train_time=1.161 +[gpua006:0/64] 2024-02-14 19:51:43,490 (trainer:756) INFO: 42epoch:train:2301-2400batch: iter_time=1.895e-04, forward_time=0.174, loss_ctc=68.455, loss_interctc_layer6=90.027, loss_interctc_layer12=75.005, loss_interctc_layer15=68.910, loss_interctc_layer21=70.939, loss=74.667, backward_time=0.227, grad_norm=84.801, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.236e-05, train_time=1.253 +[gpua006:0/64] 2024-02-14 19:53:42,188 (trainer:756) INFO: 42epoch:train:2401-2500batch: iter_time=9.199e-05, forward_time=0.163, loss_ctc=77.021, loss_interctc_layer6=85.737, loss_interctc_layer12=70.982, loss_interctc_layer15=65.014, loss_interctc_layer21=79.875, loss=75.726, backward_time=0.211, grad_norm=85.905, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.141, optim0_lr0=6.235e-05, train_time=1.187 +[gpua006:0/64] 2024-02-14 19:54:02,242 (multiple_iter_factory:32) INFO: Building 2th iter-factory... 
+[gpua006:0/64] 2024-02-14 19:54:21,247 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 19:54:24,646 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.1", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.1", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.1", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.1", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 19:54:24,646 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.1, +[gpua006:0/64] 2024-02-14 19:54:24,764 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 20:01:00,970 (trainer:756) INFO: 42epoch:train:2501-2600batch: iter_time=3.347, forward_time=0.182, loss_ctc=62.473, loss_interctc_layer6=72.380, loss_interctc_layer12=60.043, loss_interctc_layer15=55.126, loss_interctc_layer21=64.769, loss=62.958, backward_time=0.218, grad_norm=79.096, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.141, optim0_lr0=6.235e-05, train_time=4.387 +[gpua006:0/64] 2024-02-14 20:02:41,931 (trainer:756) INFO: 42epoch:train:2601-2700batch: iter_time=9.124e-05, forward_time=0.142, loss_ctc=71.985, loss_interctc_layer6=77.257, loss_interctc_layer12=63.643, loss_interctc_layer15=58.108, loss_interctc_layer21=74.616, loss=69.122, backward_time=0.209, grad_norm=99.743, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.234e-05, train_time=1.010 +[gpua006:0/64] 2024-02-14 20:05:21,537 (trainer:756) INFO: 42epoch:train:2701-2800batch: iter_time=8.793e-05, forward_time=0.198, loss_ctc=93.868, loss_interctc_layer6=90.988, loss_interctc_layer12=75.041, loss_interctc_layer15=68.490, loss_interctc_layer21=97.471, loss=85.172, backward_time=0.273, grad_norm=86.783, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.145, optim0_lr0=6.234e-05, train_time=1.596 +[gpua006:0/64] 2024-02-14 20:07:27,219 (trainer:756) INFO: 42epoch:train:2801-2900batch: iter_time=9.411e-05, forward_time=0.143, loss_ctc=69.885, loss_interctc_layer6=81.755, loss_interctc_layer12=67.428, loss_interctc_layer15=61.584, loss_interctc_layer21=72.611, loss=70.653, backward_time=0.208, grad_norm=91.079, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.233e-05, train_time=1.257 +[gpua006:0/64] 2024-02-14 20:09:15,027 (trainer:756) INFO: 42epoch:train:2901-3000batch: iter_time=9.180e-05, forward_time=0.144, loss_ctc=84.361, loss_interctc_layer6=90.842, loss_interctc_layer12=75.637, loss_interctc_layer15=69.561, loss_interctc_layer21=87.514, loss=81.583, backward_time=0.209, grad_norm=86.455, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.233e-05, train_time=1.077 +[gpua006:0/64] 2024-02-14 20:11:28,006 (trainer:756) INFO: 42epoch:train:3001-3100batch: iter_time=9.537e-04, forward_time=0.250, loss_ctc=80.165, loss_interctc_layer6=85.319, loss_interctc_layer12=70.667, loss_interctc_layer15=64.828, loss_interctc_layer21=83.008, loss=76.798, backward_time=0.237, grad_norm=84.164, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.161, optim0_lr0=6.232e-05, train_time=1.330 +[gpua006:0/64] 2024-02-14 20:13:42,034 (trainer:756) INFO: 42epoch:train:3101-3200batch: 
iter_time=8.523e-05, forward_time=0.144, loss_ctc=78.051, loss_interctc_layer6=86.440, loss_interctc_layer12=71.581, loss_interctc_layer15=65.684, loss_interctc_layer21=80.902, loss=76.532, backward_time=0.208, grad_norm=79.822, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.232e-05, train_time=1.340 +[gpua006:0/64] 2024-02-14 20:16:09,063 (trainer:756) INFO: 42epoch:train:3201-3300batch: iter_time=8.946e-05, forward_time=0.143, loss_ctc=68.300, loss_interctc_layer6=77.255, loss_interctc_layer12=63.477, loss_interctc_layer15=57.947, loss_interctc_layer21=70.819, loss=67.560, backward_time=0.207, grad_norm=82.343, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.231e-05, train_time=1.470 +[gpua006:0/64] 2024-02-14 20:18:30,734 (trainer:756) INFO: 42epoch:train:3301-3400batch: iter_time=8.291e-05, forward_time=0.208, loss_ctc=83.351, loss_interctc_layer6=95.495, loss_interctc_layer12=80.163, loss_interctc_layer15=74.423, loss_interctc_layer21=86.251, loss=83.936, backward_time=0.298, grad_norm=89.178, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.148, optim0_lr0=6.231e-05, train_time=1.416 +[gpua006:0/64] 2024-02-14 20:21:04,475 (trainer:756) INFO: 42epoch:train:3401-3500batch: iter_time=9.290e-05, forward_time=0.144, loss_ctc=73.160, loss_interctc_layer6=82.258, loss_interctc_layer12=68.254, loss_interctc_layer15=62.659, loss_interctc_layer21=75.841, loss=72.434, backward_time=0.207, grad_norm=90.784, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.230e-05, train_time=1.538 +[gpua006:0/64] 2024-02-14 20:23:31,059 (trainer:756) INFO: 42epoch:train:3501-3600batch: iter_time=9.657e-05, forward_time=0.143, loss_ctc=70.412, loss_interctc_layer6=86.872, loss_interctc_layer12=72.295, loss_interctc_layer15=66.262, loss_interctc_layer21=72.958, loss=73.760, backward_time=0.207, grad_norm=87.050, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.230e-05, train_time=1.466 +[gpua006:0/64] 2024-02-14 20:25:20,478 (trainer:756) INFO: 42epoch:train:3601-3700batch: iter_time=1.010e-04, forward_time=0.143, loss_ctc=75.003, loss_interctc_layer6=82.361, loss_interctc_layer12=68.597, loss_interctc_layer15=63.204, loss_interctc_layer21=77.848, loss=73.403, backward_time=0.209, grad_norm=86.349, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.229e-05, train_time=1.093 +[gpua006:0/64] 2024-02-14 20:26:58,764 (multiple_iter_factory:32) INFO: Building 3th iter-factory... 
+[gpua006:0/64] 2024-02-14 20:27:17,348 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 20:27:20,763 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.2", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.2", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.2", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.2", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 20:27:20,763 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.2, +[gpua006:0/64] 2024-02-14 20:27:20,900 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 20:33:50,903 (trainer:756) INFO: 42epoch:train:3701-3800batch: iter_time=3.727, forward_time=0.273, loss_ctc=62.287, loss_interctc_layer6=74.092, loss_interctc_layer12=60.648, loss_interctc_layer15=55.257, loss_interctc_layer21=64.615, loss=63.380, backward_time=0.244, grad_norm=67.887, clip=100.000, loss_scale=2.698e+31, optim_step_time=0.157, optim0_lr0=6.229e-05, train_time=5.105 +[gpua006:0/64] 2024-02-14 20:35:51,147 (trainer:756) INFO: 42epoch:train:3801-3900batch: iter_time=9.079e-05, forward_time=0.143, loss_ctc=70.443, loss_interctc_layer6=76.705, loss_interctc_layer12=63.407, loss_interctc_layer15=58.229, loss_interctc_layer21=73.082, loss=68.373, backward_time=0.208, grad_norm=72.308, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.228e-05, train_time=1.202 +[gpua006:0/64] 2024-02-14 20:37:35,814 (trainer:756) INFO: 42epoch:train:3901-4000batch: iter_time=8.822e-05, forward_time=0.143, loss_ctc=94.222, loss_interctc_layer6=89.113, loss_interctc_layer12=73.358, loss_interctc_layer15=66.948, loss_interctc_layer21=97.709, loss=84.270, backward_time=0.208, grad_norm=83.112, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.228e-05, train_time=1.046 +[gpua006:0/64] 2024-02-14 20:39:36,541 (trainer:756) INFO: 42epoch:train:4001-4100batch: iter_time=8.901e-05, forward_time=0.143, loss_ctc=82.135, loss_interctc_layer6=85.653, loss_interctc_layer12=70.613, loss_interctc_layer15=64.461, loss_interctc_layer21=85.543, loss=77.681, backward_time=0.208, grad_norm=71.905, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.227e-05, train_time=1.207 +[gpua006:0/64] 2024-02-14 20:42:14,240 (trainer:756) INFO: 42epoch:train:4101-4200batch: iter_time=8.758e-05, forward_time=0.143, loss_ctc=81.094, loss_interctc_layer6=87.417, loss_interctc_layer12=72.035, loss_interctc_layer15=65.928, loss_interctc_layer21=84.091, loss=78.113, backward_time=0.207, grad_norm=81.539, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.227e-05, train_time=1.577 +[gpua006:0/64] 2024-02-14 20:44:33,856 (trainer:756) INFO: 42epoch:train:4201-4300batch: iter_time=8.711e-05, forward_time=0.265, loss_ctc=81.133, loss_interctc_layer6=82.494, loss_interctc_layer12=68.755, loss_interctc_layer15=63.346, loss_interctc_layer21=84.046, loss=75.955, backward_time=0.236, grad_norm=95.947, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.153, optim0_lr0=6.226e-05, train_time=1.395 +[gpua006:0/64] 2024-02-14 20:47:02,772 (trainer:756) INFO: 42epoch:train:4301-4400batch: 
iter_time=7.941e-05, forward_time=0.143, loss_ctc=79.929, loss_interctc_layer6=86.994, loss_interctc_layer12=72.468, loss_interctc_layer15=66.721, loss_interctc_layer21=82.339, loss=77.690, backward_time=0.207, grad_norm=102.450, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.226e-05, train_time=1.489 +[gpua006:0/64] 2024-02-14 20:49:24,039 (trainer:756) INFO: 42epoch:train:4401-4500batch: iter_time=8.576e-05, forward_time=0.142, loss_ctc=73.190, loss_interctc_layer6=74.730, loss_interctc_layer12=61.145, loss_interctc_layer15=55.755, loss_interctc_layer21=75.865, loss=68.137, backward_time=0.206, grad_norm=72.243, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.225e-05, train_time=1.413 +[gpua006:0/64] 2024-02-14 20:51:36,564 (trainer:756) INFO: 42epoch:train:4501-4600batch: iter_time=8.643e-05, forward_time=0.143, loss_ctc=86.485, loss_interctc_layer6=86.387, loss_interctc_layer12=71.529, loss_interctc_layer15=65.560, loss_interctc_layer21=89.726, loss=79.937, backward_time=0.206, grad_norm=134.920, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.225e-05, train_time=1.325 +[gpua006:0/64] 2024-02-14 20:53:41,578 (trainer:756) INFO: 42epoch:train:4601-4700batch: iter_time=8.587e-05, forward_time=0.142, loss_ctc=84.554, loss_interctc_layer6=92.379, loss_interctc_layer12=77.827, loss_interctc_layer15=72.288, loss_interctc_layer21=88.105, loss=83.031, backward_time=0.206, grad_norm=105.054, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.224e-05, train_time=1.250 +[gpua006:0/64] 2024-02-14 20:55:24,439 (trainer:756) INFO: 42epoch:train:4701-4800batch: iter_time=9.518e-05, forward_time=0.142, loss_ctc=76.887, loss_interctc_layer6=76.581, loss_interctc_layer12=63.207, loss_interctc_layer15=57.703, loss_interctc_layer21=79.691, loss=70.814, backward_time=0.208, grad_norm=76.524, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.224e-05, train_time=1.028 +[gpua006:0/64] 2024-02-14 20:57:13,812 (trainer:756) INFO: 42epoch:train:4801-4900batch: iter_time=9.567e-05, forward_time=0.146, loss_ctc=71.769, loss_interctc_layer6=88.936, loss_interctc_layer12=74.106, loss_interctc_layer15=68.079, loss_interctc_layer21=74.356, loss=75.449, backward_time=0.208, grad_norm=84.560, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.223e-05, train_time=1.094 +[gpua006:0/64] 2024-02-14 20:59:43,505 (trainer:756) INFO: 42epoch:train:4901-5000batch: iter_time=1.019e-04, forward_time=0.276, loss_ctc=83.668, loss_interctc_layer6=85.165, loss_interctc_layer12=70.281, loss_interctc_layer15=64.351, loss_interctc_layer21=86.721, loss=78.037, backward_time=0.252, grad_norm=115.754, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.143, optim0_lr0=6.223e-05, train_time=1.496 +[gpua006:0/64] 2024-02-14 21:00:03,535 (multiple_iter_factory:32) INFO: Building 4th iter-factory... 
+[gpua006:0/64] 2024-02-14 21:00:22,707 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 21:00:26,132 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.9", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.9", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.9", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.9", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 21:00:26,132 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.9, +[gpua006:0/64] 2024-02-14 21:00:26,212 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 21:07:48,255 (trainer:756) INFO: 42epoch:train:5001-5100batch: iter_time=3.519, forward_time=0.144, loss_ctc=60.511, loss_interctc_layer6=70.735, loss_interctc_layer12=58.455, loss_interctc_layer15=53.657, loss_interctc_layer21=62.840, loss=61.240, backward_time=0.209, grad_norm=66.181, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.222e-05, train_time=4.847 +[gpua006:0/64] 2024-02-14 21:09:29,049 (trainer:756) INFO: 42epoch:train:5101-5200batch: iter_time=1.017e-04, forward_time=0.144, loss_ctc=71.430, loss_interctc_layer6=76.550, loss_interctc_layer12=62.980, loss_interctc_layer15=57.601, loss_interctc_layer21=74.187, loss=68.550, backward_time=0.210, grad_norm=74.635, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.222e-05, train_time=1.008 +[gpua006:0/64] 2024-02-14 21:12:11,349 (trainer:756) INFO: 42epoch:train:5201-5300batch: iter_time=9.430e-05, forward_time=0.146, loss_ctc=92.975, loss_interctc_layer6=91.106, loss_interctc_layer12=75.170, loss_interctc_layer15=68.652, loss_interctc_layer21=96.749, loss=84.930, backward_time=0.209, grad_norm=103.372, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.221e-05, train_time=1.624 +[gpua006:0/64] 2024-02-14 21:14:31,010 (trainer:756) INFO: 42epoch:train:5301-5400batch: iter_time=1.023e-04, forward_time=0.143, loss_ctc=69.252, loss_interctc_layer6=80.943, loss_interctc_layer12=66.590, loss_interctc_layer15=60.925, loss_interctc_layer21=72.055, loss=69.953, backward_time=0.208, grad_norm=78.178, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.221e-05, train_time=1.396 +[gpua006:0/64] 2024-02-14 21:16:14,499 (trainer:756) INFO: 42epoch:train:5401-5500batch: iter_time=1.036e-04, forward_time=0.144, loss_ctc=83.557, loss_interctc_layer6=89.953, loss_interctc_layer12=74.815, loss_interctc_layer15=68.791, loss_interctc_layer21=86.819, loss=80.787, backward_time=0.208, grad_norm=91.526, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.220e-05, train_time=1.035 +[gpua006:0/64] 2024-02-14 21:17:59,222 (trainer:756) INFO: 42epoch:train:5501-5600batch: iter_time=9.917e-05, forward_time=0.144, loss_ctc=79.734, loss_interctc_layer6=84.625, loss_interctc_layer12=70.270, loss_interctc_layer15=64.369, loss_interctc_layer21=82.548, loss=76.309, backward_time=0.210, grad_norm=74.648, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.220e-05, train_time=1.047 +[gpua006:0/64] 2024-02-14 21:20:37,684 (trainer:756) INFO: 42epoch:train:5601-5700batch: 
iter_time=1.042e-04, forward_time=0.318, loss_ctc=77.256, loss_interctc_layer6=85.250, loss_interctc_layer12=70.780, loss_interctc_layer15=64.949, loss_interctc_layer21=79.940, loss=75.635, backward_time=0.250, grad_norm=70.996, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.152, optim0_lr0=6.219e-05, train_time=1.584 +[gpua006:0/64] 2024-02-14 21:22:58,723 (trainer:756) INFO: 42epoch:train:5701-5800batch: iter_time=1.065e-04, forward_time=0.142, loss_ctc=68.572, loss_interctc_layer6=77.594, loss_interctc_layer12=63.881, loss_interctc_layer15=58.233, loss_interctc_layer21=71.141, loss=67.884, backward_time=0.207, grad_norm=62.294, clip=100.000, loss_scale=5.395e+31, optim_step_time=0.137, optim0_lr0=6.219e-05, train_time=1.410 +[gpua006:0/64] 2024-02-14 21:25:37,854 (trainer:756) INFO: 42epoch:train:5801-5900batch: iter_time=1.091e-04, forward_time=0.150, loss_ctc=82.567, loss_interctc_layer6=94.499, loss_interctc_layer12=79.279, loss_interctc_layer15=73.228, loss_interctc_layer21=85.691, loss=83.053, backward_time=0.207, grad_norm=91.351, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=6.218e-05, train_time=1.592 +[gpua006:0/64] 2024-02-14 21:27:59,814 (trainer:756) INFO: 42epoch:train:5901-6000batch: iter_time=9.656e-05, forward_time=0.143, loss_ctc=72.263, loss_interctc_layer6=81.764, loss_interctc_layer12=67.717, loss_interctc_layer15=62.067, loss_interctc_layer21=74.966, loss=71.756, backward_time=0.208, grad_norm=64.143, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=6.218e-05, train_time=1.419 +[gpua006:0/64] 2024-02-14 21:29:52,716 (trainer:756) INFO: 42epoch:train:6001-6100batch: iter_time=1.046e-04, forward_time=0.144, loss_ctc=69.789, loss_interctc_layer6=86.023, loss_interctc_layer12=71.618, loss_interctc_layer15=66.327, loss_interctc_layer21=72.349, loss=73.221, backward_time=0.208, grad_norm=103.904, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.217e-05, train_time=1.129 +[gpua006:0/64] 2024-02-14 21:32:34,630 (trainer:756) INFO: 42epoch:train:6101-6200batch: iter_time=1.131e-04, forward_time=0.143, loss_ctc=74.285, loss_interctc_layer6=81.526, loss_interctc_layer12=67.933, loss_interctc_layer15=62.510, loss_interctc_layer21=77.015, loss=72.654, backward_time=0.208, grad_norm=82.163, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.217e-05, train_time=1.618 +[gpua006:0/64] 2024-02-14 21:33:52,755 (multiple_iter_factory:32) INFO: Building 5th iter-factory... 
+[gpua006:0/64] 2024-02-14 21:34:11,591 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 21:34:14,984 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.3", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.3", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.3", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.3", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 21:34:14,984 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.3, +[gpua006:0/64] 2024-02-14 21:34:15,004 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 21:40:27,013 (trainer:756) INFO: 42epoch:train:6201-6300batch: iter_time=3.455, forward_time=0.144, loss_ctc=60.867, loss_interctc_layer6=74.684, loss_interctc_layer12=61.014, loss_interctc_layer15=55.515, loss_interctc_layer21=63.113, loss=63.039, backward_time=0.209, grad_norm=63.724, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.216e-05, train_time=4.725 +[gpua006:0/64] 2024-02-14 21:42:24,374 (trainer:756) INFO: 42epoch:train:6301-6400batch: iter_time=9.389e-05, forward_time=0.145, loss_ctc=65.940, loss_interctc_layer6=75.928, loss_interctc_layer12=62.750, loss_interctc_layer15=57.695, loss_interctc_layer21=68.509, loss=66.164, backward_time=0.208, grad_norm=72.227, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=6.216e-05, train_time=1.173 +[gpua006:0/64] 2024-02-14 21:44:01,818 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
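The recurring "The grad norm is nan. Skipping updating the model." warnings together with the loss_scale column are characteristic of mixed-precision training with a dynamic loss scaler: when scaled gradients overflow, the optimizer step is skipped and the scale is reduced, which is why loss_scale tends to drop right after such a warning. Below is a minimal sketch of that pattern using torch.cuda.amp with a generic model and optimizer; it is not the exact ESPnet trainer loop.

# Minimal mixed-precision step with dynamic loss scaling: on overflow the
# scaler skips optimizer.step() and lowers the scale, mirroring the
# "Skipping updating the model" / shrinking loss_scale pattern above.
import torch

scaler = torch.cuda.amp.GradScaler()

def train_step(model, optimizer, batch, max_norm=100.0):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = model(**batch)          # assumed to return a scalar loss
    scaler.scale(loss).backward()
    scaler.unscale_(optimizer)         # gradients back at fp32 scale
    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
    if not torch.isfinite(grad_norm):
        print("WARNING: The grad norm is nan. Skipping updating the model.")
    scaler.step(optimizer)             # no-op if gradients are inf/nan
    scaler.update()                    # shrinks the loss scale after overflow
    return float(grad_norm), float(scaler.get_scale())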
+[gpua006:0/64] 2024-02-14 21:44:22,717 (trainer:756) INFO: 42epoch:train:6401-6500batch: iter_time=9.449e-05, forward_time=0.291, loss_ctc=91.675, loss_interctc_layer6=87.899, loss_interctc_layer12=72.323, loss_interctc_layer15=66.008, loss_interctc_layer21=95.043, loss=82.590, backward_time=0.263, grad_norm=79.597, clip=100.000, loss_scale=7.212e+31, optim_step_time=0.144, optim0_lr0=6.215e-05, train_time=1.183 +[gpua006:0/64] 2024-02-14 21:46:25,925 (trainer:756) INFO: 42epoch:train:6501-6600batch: iter_time=9.870e-05, forward_time=0.142, loss_ctc=78.385, loss_interctc_layer6=84.957, loss_interctc_layer12=69.872, loss_interctc_layer15=63.721, loss_interctc_layer21=81.747, loss=75.737, backward_time=0.207, grad_norm=76.765, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.215e-05, train_time=1.232 +[gpua006:0/64] 2024-02-14 21:49:10,437 (trainer:756) INFO: 42epoch:train:6601-6700batch: iter_time=1.068e-04, forward_time=0.142, loss_ctc=77.193, loss_interctc_layer6=86.948, loss_interctc_layer12=71.716, loss_interctc_layer15=65.559, loss_interctc_layer21=80.190, loss=76.321, backward_time=0.207, grad_norm=83.181, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.214e-05, train_time=1.645 +[gpua006:0/64] 2024-02-14 21:51:21,974 (trainer:756) INFO: 42epoch:train:6701-6800batch: iter_time=1.035e-04, forward_time=0.142, loss_ctc=78.104, loss_interctc_layer6=82.369, loss_interctc_layer12=68.624, loss_interctc_layer15=63.296, loss_interctc_layer21=80.896, loss=74.658, backward_time=0.207, grad_norm=72.061, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.214e-05, train_time=1.315 +[gpua006:0/64] 2024-02-14 21:53:26,570 (trainer:756) INFO: 42epoch:train:6801-6900batch: iter_time=9.849e-05, forward_time=0.145, loss_ctc=76.885, loss_interctc_layer6=87.611, loss_interctc_layer12=72.989, loss_interctc_layer15=67.140, loss_interctc_layer21=79.361, loss=76.797, backward_time=0.208, grad_norm=78.824, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.213e-05, train_time=1.246 +[gpua006:0/64] 2024-02-14 21:55:33,585 (trainer:756) INFO: 42epoch:train:6901-7000batch: iter_time=9.645e-05, forward_time=0.142, loss_ctc=66.790, loss_interctc_layer6=74.536, loss_interctc_layer12=61.061, loss_interctc_layer15=55.633, loss_interctc_layer21=69.245, loss=65.453, backward_time=0.208, grad_norm=58.935, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.213e-05, train_time=1.270 +[gpua006:0/64] 2024-02-14 21:57:25,136 (trainer:756) INFO: 42epoch:train:7001-7100batch: iter_time=9.760e-05, forward_time=0.144, loss_ctc=81.824, loss_interctc_layer6=86.304, loss_interctc_layer12=71.347, loss_interctc_layer15=65.359, loss_interctc_layer21=84.832, loss=77.933, backward_time=0.209, grad_norm=86.736, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.212e-05, train_time=1.115 +[gpua006:0/64] 2024-02-14 21:59:29,729 (trainer:756) INFO: 42epoch:train:7101-7200batch: iter_time=9.630e-05, forward_time=0.144, loss_ctc=77.199, loss_interctc_layer6=92.246, loss_interctc_layer12=77.620, loss_interctc_layer15=72.172, loss_interctc_layer21=80.083, loss=79.864, backward_time=0.208, grad_norm=82.598, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.212e-05, train_time=1.246 +[gpua006:0/64] 2024-02-14 22:02:26,049 (trainer:756) INFO: 42epoch:train:7201-7300batch: iter_time=1.052e-04, forward_time=0.277, loss_ctc=72.381, loss_interctc_layer6=76.937, loss_interctc_layer12=63.429, 
loss_interctc_layer15=57.958, loss_interctc_layer21=75.020, loss=69.145, backward_time=0.247, grad_norm=76.195, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.144, optim0_lr0=6.211e-05, train_time=1.763 +[gpua006:0/64] 2024-02-14 22:04:38,931 (trainer:756) INFO: 42epoch:train:7301-7400batch: iter_time=1.004e-04, forward_time=0.144, loss_ctc=67.746, loss_interctc_layer6=89.013, loss_interctc_layer12=74.220, loss_interctc_layer15=68.081, loss_interctc_layer21=70.389, loss=73.890, backward_time=0.207, grad_norm=94.212, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.211e-05, train_time=1.328 +[gpua006:0/64] 2024-02-14 22:06:45,962 (trainer:756) INFO: 42epoch:train:7401-7500batch: iter_time=9.168e-05, forward_time=0.143, loss_ctc=76.556, loss_interctc_layer6=85.104, loss_interctc_layer12=70.153, loss_interctc_layer15=64.189, loss_interctc_layer21=79.478, loss=75.096, backward_time=0.207, grad_norm=75.815, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.210e-05, train_time=1.271 +[gpua006:0/64] 2024-02-14 22:07:05,992 (multiple_iter_factory:32) INFO: Building 6th iter-factory... +[gpua006:0/64] 2024-02-14 22:07:24,644 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 22:07:28,090 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.4", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.4", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.4", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.4", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 22:07:28,090 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.4, +[gpua006:0/64] 2024-02-14 22:07:28,106 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 22:14:00,032 (trainer:756) INFO: 42epoch:train:7501-7600batch: iter_time=3.259, forward_time=0.143, loss_ctc=66.238, loss_interctc_layer6=71.159, loss_interctc_layer12=58.918, loss_interctc_layer15=54.124, loss_interctc_layer21=68.838, loss=63.855, backward_time=0.208, grad_norm=66.617, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.210e-05, train_time=4.340 +[gpua006:0/64] 2024-02-14 22:15:33,830 (trainer:756) INFO: 42epoch:train:7601-7700batch: iter_time=8.987e-05, forward_time=0.142, loss_ctc=73.839, loss_interctc_layer6=76.611, loss_interctc_layer12=62.927, loss_interctc_layer15=57.446, loss_interctc_layer21=76.538, loss=69.472, backward_time=0.208, grad_norm=86.976, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.209e-05, train_time=0.938 +[gpua006:0/64] 2024-02-14 22:17:27,541 (trainer:756) INFO: 42epoch:train:7701-7800batch: iter_time=9.295e-05, forward_time=0.144, loss_ctc=94.981, loss_interctc_layer6=91.102, loss_interctc_layer12=75.049, loss_interctc_layer15=68.504, loss_interctc_layer21=98.717, loss=85.671, backward_time=0.209, grad_norm=76.498, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.209e-05, train_time=1.136 +[gpua006:0/64] 2024-02-14 22:19:49,147 (trainer:756) INFO: 42epoch:train:7801-7900batch: iter_time=9.572e-05, forward_time=0.212, loss_ctc=72.104, loss_interctc_layer6=80.455, 
loss_interctc_layer12=66.172, loss_interctc_layer15=60.418, loss_interctc_layer21=75.131, loss=70.856, backward_time=0.302, grad_norm=77.762, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.146, optim0_lr0=6.208e-05, train_time=1.416 +[gpua006:0/64] 2024-02-14 22:22:08,446 (trainer:756) INFO: 42epoch:train:7901-8000batch: iter_time=9.473e-05, forward_time=0.144, loss_ctc=89.483, loss_interctc_layer6=90.312, loss_interctc_layer12=74.959, loss_interctc_layer15=68.901, loss_interctc_layer21=93.178, loss=83.367, backward_time=0.207, grad_norm=76.947, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.208e-05, train_time=1.393 +[gpua006:0/64] 2024-02-14 22:24:05,381 (trainer:756) INFO: 42epoch:train:8001-8100batch: iter_time=8.908e-05, forward_time=0.143, loss_ctc=81.238, loss_interctc_layer6=84.482, loss_interctc_layer12=70.044, loss_interctc_layer15=64.117, loss_interctc_layer21=84.022, loss=76.781, backward_time=0.207, grad_norm=75.390, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.207e-05, train_time=1.169 +[gpua006:0/64] 2024-02-14 22:27:06,037 (trainer:756) INFO: 42epoch:train:8101-8200batch: iter_time=8.793e-05, forward_time=0.143, loss_ctc=81.897, loss_interctc_layer6=85.416, loss_interctc_layer12=70.710, loss_interctc_layer15=64.847, loss_interctc_layer21=84.740, loss=77.522, backward_time=0.206, grad_norm=82.575, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.207e-05, train_time=1.806 +[gpua006:0/64] 2024-02-14 22:29:08,678 (trainer:756) INFO: 42epoch:train:8201-8300batch: iter_time=8.764e-05, forward_time=0.142, loss_ctc=73.064, loss_interctc_layer6=77.022, loss_interctc_layer12=63.278, loss_interctc_layer15=57.687, loss_interctc_layer21=75.818, loss=69.374, backward_time=0.206, grad_norm=73.366, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.206e-05, train_time=1.227 +[gpua006:0/64] 2024-02-14 22:31:31,518 (trainer:756) INFO: 42epoch:train:8301-8400batch: iter_time=9.003e-05, forward_time=0.143, loss_ctc=88.211, loss_interctc_layer6=93.670, loss_interctc_layer12=78.591, loss_interctc_layer15=72.862, loss_interctc_layer21=91.326, loss=84.932, backward_time=0.207, grad_norm=92.239, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.206e-05, train_time=1.428 +[gpua006:0/64] 2024-02-14 22:33:41,452 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
+[gpua006:0/64] 2024-02-14 22:33:59,101 (trainer:756) INFO: 42epoch:train:8401-8500batch: iter_time=9.659e-05, forward_time=0.143, loss_ctc=79.099, loss_interctc_layer6=81.971, loss_interctc_layer12=67.873, loss_interctc_layer15=62.296, loss_interctc_layer21=81.791, loss=74.606, backward_time=0.206, grad_norm=74.161, clip=100.000, loss_scale=4.466e+31, optim_step_time=0.138, optim0_lr0=6.205e-05, train_time=1.476 +[gpua006:0/64] 2024-02-14 22:35:53,864 (trainer:756) INFO: 42epoch:train:8501-8600batch: iter_time=9.325e-05, forward_time=0.143, loss_ctc=73.425, loss_interctc_layer6=86.297, loss_interctc_layer12=72.201, loss_interctc_layer15=66.334, loss_interctc_layer21=76.062, loss=74.864, backward_time=0.208, grad_norm=90.808, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.205e-05, train_time=1.147 +[gpua006:0/64] 2024-02-14 22:38:09,963 (trainer:756) INFO: 42epoch:train:8601-8700batch: iter_time=9.124e-05, forward_time=0.143, loss_ctc=79.067, loss_interctc_layer6=81.357, loss_interctc_layer12=67.739, loss_interctc_layer15=62.281, loss_interctc_layer21=82.055, loss=74.500, backward_time=0.207, grad_norm=74.413, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.204e-05, train_time=1.360 +[gpua006:0/64] 2024-02-14 22:39:48,208 (multiple_iter_factory:32) INFO: Building 7th iter-factory... +[gpua006:0/64] 2024-02-14 22:40:06,984 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 22:40:10,638 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.10", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.10", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.10", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.10", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 22:40:10,638 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.10, +[gpua006:0/64] 2024-02-14 22:40:10,642 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 22:46:19,508 (trainer:756) INFO: 42epoch:train:8701-8800batch: iter_time=3.484, forward_time=0.327, loss_ctc=67.190, loss_interctc_layer6=73.940, loss_interctc_layer12=60.316, loss_interctc_layer15=54.842, loss_interctc_layer21=69.672, loss=65.192, backward_time=0.252, grad_norm=61.122, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.144, optim0_lr0=6.204e-05, train_time=4.896 +[gpua006:0/64] 2024-02-14 22:48:06,862 (trainer:756) INFO: 42epoch:train:8801-8900batch: iter_time=8.366e-05, forward_time=0.145, loss_ctc=69.175, loss_interctc_layer6=75.109, loss_interctc_layer12=62.148, loss_interctc_layer15=57.065, loss_interctc_layer21=71.719, loss=67.043, backward_time=0.208, grad_norm=71.933, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.203e-05, train_time=1.074 +[gpua006:0/64] 2024-02-14 22:50:16,716 (trainer:756) INFO: 42epoch:train:8901-9000batch: iter_time=8.837e-05, forward_time=0.144, loss_ctc=94.062, loss_interctc_layer6=88.092, loss_interctc_layer12=72.469, loss_interctc_layer15=66.136, loss_interctc_layer21=97.560, loss=83.664, backward_time=0.209, grad_norm=85.397, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.203e-05, 
train_time=1.298 +[gpua006:0/64] 2024-02-14 22:52:39,101 (trainer:756) INFO: 42epoch:train:9001-9100batch: iter_time=9.030e-05, forward_time=0.142, loss_ctc=81.382, loss_interctc_layer6=85.180, loss_interctc_layer12=70.210, loss_interctc_layer15=64.158, loss_interctc_layer21=84.816, loss=77.149, backward_time=0.206, grad_norm=70.395, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.202e-05, train_time=1.424 +[gpua006:0/64] 2024-02-14 22:54:54,496 (trainer:756) INFO: 42epoch:train:9101-9200batch: iter_time=8.583e-05, forward_time=0.143, loss_ctc=80.233, loss_interctc_layer6=86.343, loss_interctc_layer12=71.045, loss_interctc_layer15=65.031, loss_interctc_layer21=83.287, loss=77.188, backward_time=0.207, grad_norm=85.608, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.202e-05, train_time=1.354 +[gpua006:0/64] 2024-02-14 22:57:22,253 (trainer:756) INFO: 42epoch:train:9201-9300batch: iter_time=8.477e-05, forward_time=0.155, loss_ctc=80.598, loss_interctc_layer6=82.208, loss_interctc_layer12=68.440, loss_interctc_layer15=62.992, loss_interctc_layer21=83.636, loss=75.575, backward_time=0.207, grad_norm=87.512, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.201e-05, train_time=1.477 +[gpua006:0/64] 2024-02-14 22:59:41,644 (trainer:756) INFO: 42epoch:train:9301-9400batch: iter_time=8.840e-05, forward_time=0.144, loss_ctc=79.222, loss_interctc_layer6=86.764, loss_interctc_layer12=72.145, loss_interctc_layer15=66.331, loss_interctc_layer21=82.113, loss=77.315, backward_time=0.209, grad_norm=78.883, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.201e-05, train_time=1.394 +[gpua006:0/64] 2024-02-14 23:02:04,670 (trainer:756) INFO: 42epoch:train:9401-9500batch: iter_time=9.002e-05, forward_time=0.239, loss_ctc=72.985, loss_interctc_layer6=74.268, loss_interctc_layer12=60.663, loss_interctc_layer15=55.165, loss_interctc_layer21=75.644, loss=67.745, backward_time=0.274, grad_norm=60.743, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.146, optim0_lr0=6.200e-05, train_time=1.429 +[gpua006:0/64] 2024-02-14 23:05:09,297 (trainer:756) INFO: 42epoch:train:9501-9600batch: iter_time=9.210e-05, forward_time=0.143, loss_ctc=85.926, loss_interctc_layer6=85.770, loss_interctc_layer12=70.905, loss_interctc_layer15=65.075, loss_interctc_layer21=88.998, loss=79.335, backward_time=0.208, grad_norm=87.038, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.200e-05, train_time=1.847 +[gpua006:0/64] 2024-02-14 23:07:58,723 (trainer:756) INFO: 42epoch:train:9601-9700batch: iter_time=9.381e-05, forward_time=0.144, loss_ctc=84.345, loss_interctc_layer6=91.438, loss_interctc_layer12=76.770, loss_interctc_layer15=71.127, loss_interctc_layer21=87.078, loss=82.151, backward_time=0.209, grad_norm=83.038, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.199e-05, train_time=1.693 +[gpua006:0/64] 2024-02-14 23:08:25,844 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
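Each "Building Nth iter-factory" block above constructs a fresh dataset and loader over one of the 12 pre-split training shards (splits12/.../split.K), visited in a shuffled order within the epoch (split.0, split.7, split.1, split.2, split.9, split.3, split.4, split.10, split.6 so far), and every shard built so far reports the same N-batch=19027 with batch_size=256. A rough sketch of that per-epoch shard loop follows; shard loading and batching are stubbed out, and the names are illustrative rather than ESPnet's MultipleIterFactory classes. Rebuilding the loader per shard keeps only one shard's bookkeeping in memory at a time while the epoch still covers all shards.

# Rough sketch of per-epoch iteration over pre-split training shards,
# in the spirit of the "Building Nth iter-factory" messages above.
import random

def run_epoch(shard_dirs, build_loader, train_on_batch, seed):
    order = list(range(len(shard_dirs)))
    random.Random(seed).shuffle(order)      # shards visited in shuffled order
    for i, shard_idx in enumerate(order):
        print(f"Building {i}th iter-factory for {shard_dirs[shard_idx]}")
        loader = build_loader(shard_dirs[shard_idx])  # e.g. 256-utt mini-batches
        for batch in loader:
            train_on_batch(batch)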
+[gpua006:0/64] 2024-02-14 23:10:28,056 (trainer:756) INFO: 42epoch:train:9701-9800batch: iter_time=9.010e-05, forward_time=0.143, loss_ctc=76.656, loss_interctc_layer6=75.985, loss_interctc_layer12=62.649, loss_interctc_layer15=57.323, loss_interctc_layer21=79.440, loss=70.411, backward_time=0.209, grad_norm=74.106, clip=100.000, loss_scale=2.520e+31, optim_step_time=0.139, optim0_lr0=6.199e-05, train_time=1.494 +[gpua006:0/64] 2024-02-14 23:12:03,616 (trainer:756) INFO: 42epoch:train:9801-9900batch: iter_time=8.794e-05, forward_time=0.144, loss_ctc=71.035, loss_interctc_layer6=88.410, loss_interctc_layer12=73.581, loss_interctc_layer15=67.533, loss_interctc_layer21=73.576, loss=74.827, backward_time=0.209, grad_norm=86.446, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.198e-05, train_time=0.955 +[gpua006:0/64] 2024-02-14 23:12:07,400 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-14 23:13:39,756 (trainer:756) INFO: 42epoch:train:9901-10000batch: iter_time=8.626e-05, forward_time=0.143, loss_ctc=83.261, loss_interctc_layer6=84.763, loss_interctc_layer12=69.864, loss_interctc_layer15=63.951, loss_interctc_layer21=86.308, loss=77.630, backward_time=0.208, grad_norm=78.779, clip=100.000, loss_scale=1.045e+31, optim_step_time=0.138, optim0_lr0=6.198e-05, train_time=0.961 +[gpua006:0/64] 2024-02-14 23:13:59,785 (multiple_iter_factory:32) INFO: Building 8th iter-factory... +[gpua006:0/64] 2024-02-14 23:14:18,406 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 23:14:21,844 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.6", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.6", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.6", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.6", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 23:14:21,844 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.6, +[gpua006:0/64] 2024-02-14 23:14:21,935 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 23:21:33,216 (trainer:756) INFO: 42epoch:train:10001-10100batch: iter_time=3.407, forward_time=0.221, loss_ctc=63.634, loss_interctc_layer6=70.008, loss_interctc_layer12=57.827, loss_interctc_layer15=53.057, loss_interctc_layer21=65.950, loss=62.095, backward_time=0.222, grad_norm=69.974, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.141, optim0_lr0=6.197e-05, train_time=4.734 +[gpua006:0/64] 2024-02-14 23:23:07,942 (trainer:756) INFO: 42epoch:train:10101-10200batch: iter_time=9.557e-05, forward_time=0.143, loss_ctc=74.520, loss_interctc_layer6=76.344, loss_interctc_layer12=62.719, loss_interctc_layer15=57.243, loss_interctc_layer21=77.272, loss=69.620, backward_time=0.208, grad_norm=64.995, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.197e-05, train_time=0.947 +[gpua006:0/64] 2024-02-14 23:25:04,544 (trainer:756) INFO: 42epoch:train:10201-10300batch: iter_time=1.048e-04, forward_time=0.144, loss_ctc=94.158, loss_interctc_layer6=89.816, loss_interctc_layer12=73.955, loss_interctc_layer15=67.440, loss_interctc_layer21=97.797, loss=84.633, 
backward_time=0.208, grad_norm=88.581, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.196e-05, train_time=1.165 +[gpua006:0/64] 2024-02-14 23:26:57,305 (trainer:756) INFO: 42epoch:train:10301-10400batch: iter_time=9.944e-05, forward_time=0.145, loss_ctc=71.919, loss_interctc_layer6=80.310, loss_interctc_layer12=66.044, loss_interctc_layer15=60.200, loss_interctc_layer21=74.875, loss=70.670, backward_time=0.208, grad_norm=67.518, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.196e-05, train_time=1.129 +[gpua006:0/64] 2024-02-14 23:30:08,313 (trainer:756) INFO: 42epoch:train:10401-10500batch: iter_time=9.879e-05, forward_time=0.144, loss_ctc=87.845, loss_interctc_layer6=89.071, loss_interctc_layer12=74.007, loss_interctc_layer15=67.851, loss_interctc_layer21=91.313, loss=82.017, backward_time=0.207, grad_norm=84.020, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.195e-05, train_time=1.910 +[gpua006:0/64] 2024-02-14 23:32:11,712 (trainer:756) INFO: 42epoch:train:10501-10600batch: iter_time=1.013e-04, forward_time=0.198, loss_ctc=80.440, loss_interctc_layer6=83.744, loss_interctc_layer12=69.091, loss_interctc_layer15=63.439, loss_interctc_layer21=83.475, loss=76.038, backward_time=0.234, grad_norm=92.432, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.143, optim0_lr0=6.195e-05, train_time=1.234 +[gpua006:0/64] 2024-02-14 23:34:35,299 (trainer:756) INFO: 42epoch:train:10601-10700batch: iter_time=1.045e-04, forward_time=0.207, loss_ctc=82.153, loss_interctc_layer6=85.159, loss_interctc_layer12=70.457, loss_interctc_layer15=64.551, loss_interctc_layer21=85.035, loss=77.471, backward_time=0.232, grad_norm=75.129, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.194e-05, train_time=1.435 +[gpua006:0/64] 2024-02-14 23:36:41,141 (trainer:756) INFO: 42epoch:train:10701-10800batch: iter_time=1.028e-04, forward_time=0.144, loss_ctc=73.796, loss_interctc_layer6=77.368, loss_interctc_layer12=63.642, loss_interctc_layer15=57.995, loss_interctc_layer21=76.573, loss=69.875, backward_time=0.208, grad_norm=60.987, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.194e-05, train_time=1.259 +[gpua006:0/64] 2024-02-14 23:38:45,089 (trainer:756) INFO: 42epoch:train:10801-10900batch: iter_time=9.940e-05, forward_time=0.144, loss_ctc=88.220, loss_interctc_layer6=94.053, loss_interctc_layer12=78.764, loss_interctc_layer15=72.889, loss_interctc_layer21=91.362, loss=85.058, backward_time=0.208, grad_norm=91.065, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.193e-05, train_time=1.238 +[gpua006:0/64] 2024-02-14 23:40:44,556 (trainer:756) INFO: 42epoch:train:10901-11000batch: iter_time=1.030e-04, forward_time=0.145, loss_ctc=78.281, loss_interctc_layer6=81.204, loss_interctc_layer12=67.256, loss_interctc_layer15=61.620, loss_interctc_layer21=81.146, loss=73.902, backward_time=0.208, grad_norm=76.172, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.193e-05, train_time=1.195 +[gpua006:0/64] 2024-02-14 23:43:11,152 (trainer:756) INFO: 42epoch:train:11001-11100batch: iter_time=9.356e-05, forward_time=0.144, loss_ctc=72.987, loss_interctc_layer6=86.034, loss_interctc_layer12=71.337, loss_interctc_layer15=65.758, loss_interctc_layer21=75.713, loss=74.366, backward_time=0.208, grad_norm=100.868, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.192e-05, train_time=1.466 +[gpua006:0/64] 2024-02-14 23:45:57,914 (trainer:756) INFO: 
42epoch:train:11101-11200batch: iter_time=7.783e-04, forward_time=0.165, loss_ctc=78.573, loss_interctc_layer6=80.884, loss_interctc_layer12=67.364, loss_interctc_layer15=61.915, loss_interctc_layer21=81.472, loss=74.042, backward_time=0.215, grad_norm=82.023, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.192e-05, train_time=1.666 +[gpua006:0/64] 2024-02-14 23:47:20,490 (multiple_iter_factory:32) INFO: Building 9th iter-factory... +[gpua006:0/64] 2024-02-14 23:47:39,525 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-14 23:47:42,956 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.11", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.11", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.11", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.11", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-14 23:47:42,956 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.11, +[gpua006:0/64] 2024-02-14 23:47:43,018 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-14 23:53:44,944 (trainer:756) INFO: 42epoch:train:11201-11300batch: iter_time=3.392, forward_time=0.211, loss_ctc=65.919, loss_interctc_layer6=73.531, loss_interctc_layer12=60.087, loss_interctc_layer15=54.649, loss_interctc_layer21=68.498, loss=64.537, backward_time=0.241, grad_norm=65.613, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.191e-05, train_time=4.671 +[gpua006:0/64] 2024-02-14 23:55:19,304 (trainer:756) INFO: 42epoch:train:11301-11400batch: iter_time=9.126e-05, forward_time=0.143, loss_ctc=65.794, loss_interctc_layer6=75.103, loss_interctc_layer12=62.104, loss_interctc_layer15=57.253, loss_interctc_layer21=68.138, loss=65.678, backward_time=0.209, grad_norm=101.525, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.191e-05, train_time=0.943 +[gpua006:0/64] 2024-02-14 23:57:20,535 (trainer:756) INFO: 42epoch:train:11401-11500batch: iter_time=9.077e-05, forward_time=0.145, loss_ctc=91.418, loss_interctc_layer6=88.018, loss_interctc_layer12=72.356, loss_interctc_layer15=66.100, loss_interctc_layer21=94.865, loss=82.551, backward_time=0.210, grad_norm=78.671, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.190e-05, train_time=1.213 +[gpua006:0/64] 2024-02-14 23:59:36,738 (trainer:756) INFO: 42epoch:train:11501-11600batch: iter_time=9.431e-05, forward_time=0.143, loss_ctc=78.896, loss_interctc_layer6=85.041, loss_interctc_layer12=70.138, loss_interctc_layer15=64.053, loss_interctc_layer21=82.372, loss=76.100, backward_time=0.207, grad_norm=73.048, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.190e-05, train_time=1.361 +[gpua006:0/64] 2024-02-15 00:01:48,365 (trainer:756) INFO: 42epoch:train:11601-11700batch: iter_time=9.211e-05, forward_time=0.145, loss_ctc=76.762, loss_interctc_layer6=86.224, loss_interctc_layer12=71.090, loss_interctc_layer15=64.942, loss_interctc_layer21=79.855, loss=75.775, backward_time=0.209, grad_norm=80.313, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.189e-05, train_time=1.317 +[gpua006:0/64] 2024-02-15 
00:05:10,580 (trainer:756) INFO: 42epoch:train:11701-11800batch: iter_time=9.412e-05, forward_time=0.145, loss_ctc=77.225, loss_interctc_layer6=81.862, loss_interctc_layer12=68.007, loss_interctc_layer15=62.805, loss_interctc_layer21=80.294, loss=74.039, backward_time=0.206, grad_norm=74.245, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.189e-05, train_time=2.022 +[gpua006:0/64] 2024-02-15 00:07:09,195 (trainer:756) INFO: 42epoch:train:11801-11900batch: iter_time=9.481e-05, forward_time=0.177, loss_ctc=76.733, loss_interctc_layer6=87.064, loss_interctc_layer12=72.515, loss_interctc_layer15=66.679, loss_interctc_layer21=79.407, loss=76.479, backward_time=0.213, grad_norm=85.748, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.188e-05, train_time=1.184 +[gpua006:0/64] 2024-02-15 00:08:53,426 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-15 00:09:54,210 (trainer:756) INFO: 42epoch:train:11901-12000batch: iter_time=8.929e-05, forward_time=0.158, loss_ctc=66.038, loss_interctc_layer6=73.482, loss_interctc_layer12=59.965, loss_interctc_layer15=54.542, loss_interctc_layer21=68.542, loss=64.514, backward_time=0.211, grad_norm=63.013, clip=100.000, loss_scale=1.639e+31, optim_step_time=0.138, optim0_lr0=6.188e-05, train_time=1.651 +[gpua006:0/64] 2024-02-15 00:12:22,553 (trainer:756) INFO: 42epoch:train:12001-12100batch: iter_time=9.165e-05, forward_time=0.180, loss_ctc=81.330, loss_interctc_layer6=85.303, loss_interctc_layer12=70.414, loss_interctc_layer15=64.444, loss_interctc_layer21=84.460, loss=77.190, backward_time=0.250, grad_norm=79.450, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.187e-05, train_time=1.483 +[gpua006:0/64] 2024-02-15 00:14:42,795 (trainer:756) INFO: 42epoch:train:12101-12200batch: iter_time=1.015e-04, forward_time=0.142, loss_ctc=76.671, loss_interctc_layer6=91.343, loss_interctc_layer12=76.524, loss_interctc_layer15=70.919, loss_interctc_layer21=79.576, loss=79.007, backward_time=0.207, grad_norm=100.413, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.187e-05, train_time=1.402 +[gpua006:0/64] 2024-02-15 00:16:59,093 (trainer:756) INFO: 42epoch:train:12201-12300batch: iter_time=9.169e-05, forward_time=0.142, loss_ctc=72.113, loss_interctc_layer6=76.319, loss_interctc_layer12=62.905, loss_interctc_layer15=57.578, loss_interctc_layer21=74.853, loss=68.754, backward_time=0.206, grad_norm=67.937, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.186e-05, train_time=1.363 +[gpua006:0/64] 2024-02-15 00:21:01,406 (trainer:756) INFO: 42epoch:train:12301-12400batch: iter_time=1.034e-04, forward_time=0.143, loss_ctc=67.139, loss_interctc_layer6=88.245, loss_interctc_layer12=73.292, loss_interctc_layer15=67.408, loss_interctc_layer21=69.700, loss=73.157, backward_time=0.206, grad_norm=74.074, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.186e-05, train_time=2.423 +[gpua006:0/64] 2024-02-15 00:22:50,539 (trainer:756) INFO: 42epoch:train:12401-12500batch: iter_time=8.800e-05, forward_time=0.155, loss_ctc=76.314, loss_interctc_layer6=84.358, loss_interctc_layer12=69.428, loss_interctc_layer15=63.415, loss_interctc_layer21=79.299, loss=74.563, backward_time=0.210, grad_norm=91.776, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.185e-05, train_time=1.091 +[gpua006:0/64] 2024-02-15 00:23:10,585 (multiple_iter_factory:32) INFO: Building 10th iter-factory... 
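Note on the loss columns: in every entry above, the reported loss is, to the logged precision, the unweighted mean of loss_ctc and the four intermediate-CTC losses (layers 6, 12, 15 and 21). For example, the 42epoch:train:11901-12000batch entry gives (66.038 + 73.482 + 59.965 + 54.542 + 68.542) / 5 = 64.514, exactly the logged loss. This is an observation about the logged numbers, not a statement about how ESPnet weights the terms internally; a minimal check in Python:

# Values copied from the 42epoch:train:11901-12000batch entry above.
terms = {
    "loss_ctc": 66.038,
    "loss_interctc_layer6": 73.482,
    "loss_interctc_layer12": 59.965,
    "loss_interctc_layer15": 54.542,
    "loss_interctc_layer21": 68.542,
}
mean_loss = sum(terms.values()) / len(terms)
print(round(mean_loss, 3))  # 64.514 -> matches the logged loss=64.514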
+[gpua006:0/64] 2024-02-15 00:23:29,296 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 00:23:32,770 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.8", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.8", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.8", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.8", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 00:23:32,770 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.8, +[gpua006:0/64] 2024-02-15 00:23:32,785 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 00:30:19,355 (trainer:756) INFO: 42epoch:train:12501-12600batch: iter_time=3.391, forward_time=0.241, loss_ctc=64.810, loss_interctc_layer6=69.789, loss_interctc_layer12=58.005, loss_interctc_layer15=53.263, loss_interctc_layer21=67.442, loss=62.662, backward_time=0.228, grad_norm=66.479, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.141, optim0_lr0=6.185e-05, train_time=4.487 +[gpua006:0/64] 2024-02-15 00:32:25,894 (trainer:756) INFO: 42epoch:train:12601-12700batch: iter_time=1.052e-04, forward_time=0.143, loss_ctc=73.992, loss_interctc_layer6=76.435, loss_interctc_layer12=62.947, loss_interctc_layer15=57.445, loss_interctc_layer21=76.809, loss=69.526, backward_time=0.208, grad_norm=81.077, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.184e-05, train_time=1.267 +[gpua006:0/64] 2024-02-15 00:35:18,564 (trainer:756) INFO: 42epoch:train:12701-12800batch: iter_time=1.100e-04, forward_time=0.257, loss_ctc=93.368, loss_interctc_layer6=89.508, loss_interctc_layer12=73.524, loss_interctc_layer15=66.961, loss_interctc_layer21=97.049, loss=84.082, backward_time=0.253, grad_norm=69.732, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.145, optim0_lr0=6.184e-05, train_time=1.725 +[gpua006:0/64] 2024-02-15 00:37:21,257 (trainer:756) INFO: 42epoch:train:12801-12900batch: iter_time=1.112e-04, forward_time=0.144, loss_ctc=70.858, loss_interctc_layer6=79.835, loss_interctc_layer12=65.513, loss_interctc_layer15=59.738, loss_interctc_layer21=73.816, loss=69.952, backward_time=0.209, grad_norm=75.511, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.183e-05, train_time=1.229 +[gpua006:0/64] 2024-02-15 00:40:23,446 (trainer:756) INFO: 42epoch:train:12901-13000batch: iter_time=1.191e-04, forward_time=0.147, loss_ctc=88.271, loss_interctc_layer6=89.181, loss_interctc_layer12=73.988, loss_interctc_layer15=67.923, loss_interctc_layer21=91.635, loss=82.200, backward_time=0.208, grad_norm=82.401, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.183e-05, train_time=1.821 +[gpua006:0/64] 2024-02-15 00:43:14,396 (trainer:756) INFO: 42epoch:train:13001-13100batch: iter_time=1.119e-04, forward_time=0.144, loss_ctc=80.092, loss_interctc_layer6=83.930, loss_interctc_layer12=69.201, loss_interctc_layer15=63.525, loss_interctc_layer21=83.119, loss=75.973, backward_time=0.208, grad_norm=76.774, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.183e-05, train_time=1.710 +[gpua006:0/64] 2024-02-15 00:46:20,548 (trainer:756) INFO: 
42epoch:train:13101-13200batch: iter_time=6.082e-04, forward_time=0.289, loss_ctc=81.468, loss_interctc_layer6=84.608, loss_interctc_layer12=69.836, loss_interctc_layer15=64.055, loss_interctc_layer21=84.299, loss=76.853, backward_time=0.244, grad_norm=88.723, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.143, optim0_lr0=6.182e-05, train_time=1.860 +[gpua006:0/64] 2024-02-15 00:48:17,219 (trainer:756) INFO: 42epoch:train:13201-13300batch: iter_time=1.054e-04, forward_time=0.143, loss_ctc=72.928, loss_interctc_layer6=76.881, loss_interctc_layer12=63.176, loss_interctc_layer15=57.577, loss_interctc_layer21=75.647, loss=69.242, backward_time=0.208, grad_norm=67.463, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.182e-05, train_time=1.166 +[gpua006:0/64] 2024-02-15 00:50:59,170 (trainer:756) INFO: 42epoch:train:13301-13400batch: iter_time=1.034e-04, forward_time=0.145, loss_ctc=86.601, loss_interctc_layer6=92.732, loss_interctc_layer12=77.587, loss_interctc_layer15=71.495, loss_interctc_layer21=89.899, loss=83.663, backward_time=0.208, grad_norm=122.140, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.181e-05, train_time=1.621 +[gpua006:0/64] 2024-02-15 00:53:29,656 (trainer:756) INFO: 42epoch:train:13401-13500batch: iter_time=8.457e-05, forward_time=0.143, loss_ctc=76.909, loss_interctc_layer6=80.746, loss_interctc_layer12=66.669, loss_interctc_layer15=61.048, loss_interctc_layer21=79.737, loss=73.022, backward_time=0.208, grad_norm=73.622, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.181e-05, train_time=1.504 +[gpua006:0/64] 2024-02-15 00:56:19,959 (trainer:756) INFO: 42epoch:train:13501-13600batch: iter_time=8.686e-05, forward_time=0.176, loss_ctc=73.345, loss_interctc_layer6=86.319, loss_interctc_layer12=71.933, loss_interctc_layer15=65.963, loss_interctc_layer21=76.044, loss=74.721, backward_time=0.243, grad_norm=114.107, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.141, optim0_lr0=6.180e-05, train_time=1.703 +[gpua006:0/64] 2024-02-15 00:58:53,349 (trainer:756) INFO: 42epoch:train:13601-13700batch: iter_time=9.468e-05, forward_time=0.214, loss_ctc=78.048, loss_interctc_layer6=80.740, loss_interctc_layer12=67.103, loss_interctc_layer15=61.515, loss_interctc_layer21=80.945, loss=73.670, backward_time=0.238, grad_norm=75.500, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.141, optim0_lr0=6.180e-05, train_time=1.532 +[gpua006:0/64] 2024-02-15 01:00:05,069 (multiple_iter_factory:32) INFO: Building 11th iter-factory... 
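Each "Building Nth iter-factory" line above re-creates the [train] dataset from (path, name, type) triples: "speech" is read from a Kaldi scp/ark pair (type kaldi_ark), while text, text_prev and text_ctc are plain text files. A rough way to peek at one of these splits outside the trainer is sketched below; it uses kaldiio directly and is only an illustration, not ESPnet's own loading code, and whether each value comes back as a plain array or as a (sample_rate, array) tuple depends on how the arks were dumped.

from kaldiio import ReadHelper

# One of the speech scp files listed in the dataset summaries above.
scp = "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.8"
with ReadHelper(f"scp:{scp}") as reader:
    for utt_id, value in reader:
        if isinstance(value, tuple):          # waveform arks: (sample_rate, samples)
            rate, array = value
            print(utt_id, rate, array.shape)
        else:                                 # feature-style arks: a numpy matrix
            print(utt_id, value.shape)
        break  # inspect only the first utterance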
+[gpua006:0/64] 2024-02-15 01:00:24,128 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 01:00:27,878 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.5", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.5", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.5", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.5", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 01:00:27,878 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.5, +[gpua006:0/64] 2024-02-15 01:00:27,881 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 01:06:37,393 (trainer:756) INFO: 42epoch:train:13701-13800batch: iter_time=3.493, forward_time=0.144, loss_ctc=64.895, loss_interctc_layer6=73.699, loss_interctc_layer12=60.204, loss_interctc_layer15=54.627, loss_interctc_layer21=67.485, loss=64.182, backward_time=0.209, grad_norm=524.082, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.179e-05, train_time=4.641 +[gpua006:0/64] 2024-02-15 01:06:44,901 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-15 01:08:11,959 (trainer:756) INFO: 42epoch:train:13801-13900batch: iter_time=9.537e-05, forward_time=0.141, loss_ctc=65.155, loss_interctc_layer6=74.119, loss_interctc_layer12=61.351, loss_interctc_layer15=56.226, loss_interctc_layer21=67.602, loss=64.891, backward_time=0.207, grad_norm=66.966, clip=100.000, loss_scale=5.429e+30, optim_step_time=0.138, optim0_lr0=6.179e-05, train_time=0.946 +[gpua006:0/64] 2024-02-15 01:11:03,976 (trainer:756) INFO: 42epoch:train:13901-14000batch: iter_time=9.486e-05, forward_time=0.144, loss_ctc=91.914, loss_interctc_layer6=87.445, loss_interctc_layer12=71.828, loss_interctc_layer15=65.400, loss_interctc_layer21=95.329, loss=82.383, backward_time=0.207, grad_norm=72.369, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.178e-05, train_time=1.720 +[gpua006:0/64] 2024-02-15 01:12:58,805 (trainer:756) INFO: 42epoch:train:14001-14100batch: iter_time=9.482e-05, forward_time=0.143, loss_ctc=78.674, loss_interctc_layer6=84.932, loss_interctc_layer12=69.911, loss_interctc_layer15=63.814, loss_interctc_layer21=82.115, loss=75.889, backward_time=0.208, grad_norm=81.329, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.178e-05, train_time=1.148 +[gpua006:0/64] 2024-02-15 01:15:19,747 (trainer:756) INFO: 42epoch:train:14101-14200batch: iter_time=6.248e-04, forward_time=0.245, loss_ctc=74.499, loss_interctc_layer6=85.311, loss_interctc_layer12=70.038, loss_interctc_layer15=63.926, loss_interctc_layer21=77.516, loss=74.258, backward_time=0.255, grad_norm=66.857, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.143, optim0_lr0=6.177e-05, train_time=1.409 +[gpua006:0/64] 2024-02-15 01:17:38,596 (trainer:756) INFO: 42epoch:train:14201-14300batch: iter_time=9.635e-05, forward_time=0.144, loss_ctc=77.647, loss_interctc_layer6=82.175, loss_interctc_layer12=68.337, loss_interctc_layer15=62.782, loss_interctc_layer21=80.971, loss=74.383, backward_time=0.209, grad_norm=84.552, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, 
optim0_lr0=6.177e-05, train_time=1.387 +[gpua006:0/64] 2024-02-15 01:19:50,009 (trainer:756) INFO: 42epoch:train:14301-14400batch: iter_time=9.703e-05, forward_time=0.144, loss_ctc=76.848, loss_interctc_layer6=86.893, loss_interctc_layer12=72.351, loss_interctc_layer15=66.473, loss_interctc_layer21=79.530, loss=76.419, backward_time=0.209, grad_norm=80.728, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.176e-05, train_time=1.315 +[gpua006:0/64] 2024-02-15 01:22:28,270 (trainer:756) INFO: 42epoch:train:14401-14500batch: iter_time=9.909e-05, forward_time=0.142, loss_ctc=65.730, loss_interctc_layer6=73.477, loss_interctc_layer12=59.971, loss_interctc_layer15=54.542, loss_interctc_layer21=68.066, loss=64.357, backward_time=0.208, grad_norm=66.268, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.176e-05, train_time=1.583 +[gpua006:0/64] 2024-02-15 01:24:52,435 (trainer:756) INFO: 42epoch:train:14501-14600batch: iter_time=9.541e-05, forward_time=0.144, loss_ctc=80.839, loss_interctc_layer6=85.559, loss_interctc_layer12=70.700, loss_interctc_layer15=64.649, loss_interctc_layer21=83.902, loss=77.130, backward_time=0.208, grad_norm=96.868, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.175e-05, train_time=1.441 +[gpua006:0/64] 2024-02-15 01:26:52,883 (trainer:756) INFO: 42epoch:train:14601-14700batch: iter_time=1.045e-04, forward_time=0.143, loss_ctc=76.024, loss_interctc_layer6=91.019, loss_interctc_layer12=76.321, loss_interctc_layer15=70.579, loss_interctc_layer21=78.993, loss=78.587, backward_time=0.207, grad_norm=79.867, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.175e-05, train_time=1.204 +[gpua006:0/64] 2024-02-15 01:29:56,102 (trainer:756) INFO: 42epoch:train:14701-14800batch: iter_time=4.960e-04, forward_time=0.232, loss_ctc=71.811, loss_interctc_layer6=76.677, loss_interctc_layer12=63.292, loss_interctc_layer15=57.895, loss_interctc_layer21=74.465, loss=68.828, backward_time=0.256, grad_norm=75.662, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.147, optim0_lr0=6.174e-05, train_time=1.832 +[gpua006:0/64] 2024-02-15 01:32:32,514 (trainer:756) INFO: 42epoch:train:14801-14900batch: iter_time=1.063e-04, forward_time=0.144, loss_ctc=67.002, loss_interctc_layer6=88.668, loss_interctc_layer12=73.976, loss_interctc_layer15=68.053, loss_interctc_layer21=69.520, loss=73.444, backward_time=0.208, grad_norm=100.229, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.174e-05, train_time=1.564 +[gpua006:0/64] 2024-02-15 01:34:14,299 (trainer:756) INFO: 42epoch:train:14901-15000batch: iter_time=9.758e-05, forward_time=0.144, loss_ctc=75.011, loss_interctc_layer6=83.996, loss_interctc_layer12=69.189, loss_interctc_layer15=63.327, loss_interctc_layer21=77.811, loss=73.867, backward_time=0.210, grad_norm=82.197, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.139, optim0_lr0=6.173e-05, train_time=1.016 +[gpua006:0/64] 2024-02-15 02:06:17,906 (trainer:355) INFO: 42epoch results: [train] iter_time=0.275, forward_time=0.163, loss_ctc=77.173, loss_interctc_layer6=83.397, loss_interctc_layer12=69.071, loss_interctc_layer15=63.342, loss_interctc_layer21=80.062, loss=74.609, backward_time=0.216, grad_norm=85.375, clip=100.000, loss_scale=2.528e+31, optim_step_time=0.139, optim0_lr0=6.210e-05, train_time=1.622, time=6 hours, 46 minutes and 2.35 seconds, total_count=630000, gpu_max_cached_mem_GB=33.436, [valid] loss_ctc=40.197, cer_ctc=0.186, loss_interctc_layer6=46.094, 
cer_interctc_layer6=0.205, loss_interctc_layer12=33.246, cer_interctc_layer12=0.140, loss_interctc_layer15=29.010, cer_interctc_layer15=0.116, loss_interctc_layer21=42.634, cer_interctc_layer21=0.199, loss=38.236, time=31 minutes and 39.73 seconds, total_count=196182, gpu_max_cached_mem_GB=33.436 +[gpua006:0/64] 2024-02-15 02:06:39,098 (trainer:410) INFO: The best model has been updated: valid.total_count +[gpua006:0/64] 2024-02-15 02:06:39,348 (trainer:464) INFO: The model files were removed: exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/32epoch.pth, exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/33epoch.pth, exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/37epoch.pth +[gpua006:0/64] 2024-02-15 02:06:39,348 (trainer:289) INFO: 43/45epoch started. Estimated time to finish: 21 hours, 34 minutes and 28.93 seconds +[gpua006:0/64] 2024-02-15 02:06:39,364 (multiple_iter_factory:32) INFO: Building 0th iter-factory... +[gpua006:0/64] 2024-02-15 02:06:57,449 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 02:07:00,911 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.3", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.3", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.3", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.3", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 02:07:00,911 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.3, +[gpua006:0/64] 2024-02-15 02:07:00,915 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 02:13:09,594 (trainer:756) INFO: 43epoch:train:1-100batch: iter_time=2.813, forward_time=0.195, loss_ctc=76.331, loss_interctc_layer6=86.193, loss_interctc_layer12=71.712, loss_interctc_layer15=66.017, loss_interctc_layer21=78.974, loss=75.845, backward_time=0.222, grad_norm=81.284, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.168, optim0_lr0=6.173e-05, train_time=3.902 +[gpua006:0/64] 2024-02-15 02:14:49,898 (trainer:756) INFO: 43epoch:train:101-200batch: iter_time=9.161e-05, forward_time=0.143, loss_ctc=67.770, loss_interctc_layer6=78.515, loss_interctc_layer12=64.953, loss_interctc_layer15=59.535, loss_interctc_layer21=70.188, loss=68.192, backward_time=0.208, grad_norm=64.206, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.172e-05, train_time=1.003 +[gpua006:0/64] 2024-02-15 02:17:09,879 (trainer:756) INFO: 43epoch:train:201-300batch: iter_time=9.160e-05, forward_time=0.292, loss_ctc=65.530, loss_interctc_layer6=80.278, loss_interctc_layer12=66.203, loss_interctc_layer15=60.565, loss_interctc_layer21=67.663, loss=68.048, backward_time=0.259, grad_norm=95.962, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.144, optim0_lr0=6.172e-05, train_time=1.398 +[gpua006:0/64] 2024-02-15 02:19:12,707 (trainer:756) INFO: 43epoch:train:301-400batch: iter_time=9.466e-05, forward_time=0.143, loss_ctc=65.588, loss_interctc_layer6=80.157, loss_interctc_layer12=66.156, loss_interctc_layer15=60.546, loss_interctc_layer21=67.838, loss=68.057, backward_time=0.208, grad_norm=98.494, clip=100.000, loss_scale=5.071e+30, 
optim_step_time=0.137, optim0_lr0=6.171e-05, train_time=1.229 +[gpua006:0/64] 2024-02-15 02:21:09,122 (trainer:756) INFO: 43epoch:train:401-500batch: iter_time=8.844e-05, forward_time=0.144, loss_ctc=77.227, loss_interctc_layer6=90.729, loss_interctc_layer12=74.894, loss_interctc_layer15=68.488, loss_interctc_layer21=80.041, loss=78.276, backward_time=0.207, grad_norm=90.474, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=6.171e-05, train_time=1.165 +[gpua006:0/64] 2024-02-15 02:23:05,587 (trainer:756) INFO: 43epoch:train:501-600batch: iter_time=9.295e-05, forward_time=0.142, loss_ctc=72.050, loss_interctc_layer6=88.411, loss_interctc_layer12=73.486, loss_interctc_layer15=67.403, loss_interctc_layer21=74.429, loss=75.156, backward_time=0.206, grad_norm=86.526, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=6.170e-05, train_time=1.164 +[gpua006:0/64] 2024-02-15 02:25:24,244 (trainer:756) INFO: 43epoch:train:601-700batch: iter_time=9.864e-05, forward_time=0.170, loss_ctc=62.617, loss_interctc_layer6=76.679, loss_interctc_layer12=64.024, loss_interctc_layer15=58.888, loss_interctc_layer21=64.594, loss=65.360, backward_time=0.225, grad_norm=81.755, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.170e-05, train_time=1.386 +[gpua006:0/64] 2024-02-15 02:27:37,372 (trainer:756) INFO: 43epoch:train:701-800batch: iter_time=1.955e-04, forward_time=0.185, loss_ctc=70.887, loss_interctc_layer6=80.031, loss_interctc_layer12=66.043, loss_interctc_layer15=60.447, loss_interctc_layer21=73.206, loss=70.123, backward_time=0.296, grad_norm=83.180, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.143, optim0_lr0=6.169e-05, train_time=1.331 +[gpua006:0/64] 2024-02-15 02:30:09,980 (trainer:756) INFO: 43epoch:train:801-900batch: iter_time=8.913e-05, forward_time=0.142, loss_ctc=62.893, loss_interctc_layer6=72.560, loss_interctc_layer12=59.841, loss_interctc_layer15=54.709, loss_interctc_layer21=65.209, loss=63.042, backward_time=0.204, grad_norm=86.012, clip=100.000, loss_scale=9.736e+30, optim_step_time=0.137, optim0_lr0=6.169e-05, train_time=1.525 +[gpua006:0/64] 2024-02-15 02:32:19,409 (trainer:756) INFO: 43epoch:train:901-1000batch: iter_time=8.988e-05, forward_time=0.163, loss_ctc=60.579, loss_interctc_layer6=70.229, loss_interctc_layer12=58.268, loss_interctc_layer15=53.400, loss_interctc_layer21=62.870, loss=61.069, backward_time=0.205, grad_norm=63.300, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.168e-05, train_time=1.295 +[gpua006:0/64] 2024-02-15 02:34:26,337 (trainer:756) INFO: 43epoch:train:1001-1100batch: iter_time=9.527e-05, forward_time=0.142, loss_ctc=79.817, loss_interctc_layer6=83.522, loss_interctc_layer12=69.522, loss_interctc_layer15=64.107, loss_interctc_layer21=82.814, loss=75.957, backward_time=0.206, grad_norm=89.035, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.168e-05, train_time=1.268 +[gpua006:0/64] 2024-02-15 02:36:43,983 (trainer:756) INFO: 43epoch:train:1101-1200batch: iter_time=9.462e-05, forward_time=0.143, loss_ctc=57.467, loss_interctc_layer6=70.738, loss_interctc_layer12=58.304, loss_interctc_layer15=53.283, loss_interctc_layer21=59.453, loss=59.849, backward_time=0.207, grad_norm=78.030, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.167e-05, train_time=1.377 +[gpua006:0/64] 2024-02-15 02:38:08,420 (multiple_iter_factory:32) INFO: Building 1th iter-factory... 
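The 42epoch results entry above is internally consistent: train_time=1.622 is the average number of seconds per iteration over the epoch, and with 15,000 training iterations per epoch (the per-epoch batch counter runs 1-15000) that reproduces the reported wall-clock training time of 6 hours 46 minutes to within about a minute. A back-of-the-envelope check using only numbers from this log:

iters_per_epoch = 15000   # the per-epoch counter above runs up to 15000 batches
sec_per_iter = 1.622      # "train_time" from the 42epoch results line
total_sec = iters_per_epoch * sec_per_iter
hours, rem = divmod(total_sec, 3600)
print(f"{int(hours)}h {rem / 60:.1f}m")  # ~6h 45.5m vs. logged "6 hours, 46 minutes and 2.35 seconds"

The "Estimated time to finish: 21 hours, 34 minutes" line is likewise roughly what the three remaining epochs (43-45) would take at a little over seven hours each once the ~32-minute validation pass is included.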
+[gpua006:0/64] 2024-02-15 02:38:27,001 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 02:38:30,401 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.2", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.2", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.2", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.2", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 02:38:30,401 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.2, +[gpua006:0/64] 2024-02-15 02:38:30,409 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 02:44:14,178 (trainer:756) INFO: 43epoch:train:1201-1300batch: iter_time=3.102, forward_time=0.182, loss_ctc=63.597, loss_interctc_layer6=73.908, loss_interctc_layer12=60.771, loss_interctc_layer15=55.588, loss_interctc_layer21=66.040, loss=63.981, backward_time=0.218, grad_norm=61.963, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.167e-05, train_time=4.502 +[gpua006:0/64] 2024-02-15 02:45:54,142 (trainer:756) INFO: 43epoch:train:1301-1400batch: iter_time=9.485e-05, forward_time=0.143, loss_ctc=81.451, loss_interctc_layer6=86.824, loss_interctc_layer12=71.777, loss_interctc_layer15=65.719, loss_interctc_layer21=84.357, loss=78.026, backward_time=0.208, grad_norm=80.590, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.166e-05, train_time=0.999 +[gpua006:0/64] 2024-02-15 02:48:17,092 (trainer:756) INFO: 43epoch:train:1401-1500batch: iter_time=9.432e-05, forward_time=0.142, loss_ctc=66.772, loss_interctc_layer6=78.143, loss_interctc_layer12=64.433, loss_interctc_layer15=58.989, loss_interctc_layer21=68.947, loss=67.457, backward_time=0.207, grad_norm=62.597, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.166e-05, train_time=1.429 +[gpua006:0/64] 2024-02-15 02:51:12,398 (trainer:756) INFO: 43epoch:train:1501-1600batch: iter_time=9.594e-05, forward_time=0.143, loss_ctc=67.878, loss_interctc_layer6=78.343, loss_interctc_layer12=64.641, loss_interctc_layer15=59.201, loss_interctc_layer21=70.254, loss=68.064, backward_time=0.208, grad_norm=69.402, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.165e-05, train_time=1.753 +[gpua006:0/64] 2024-02-15 02:53:09,980 (trainer:756) INFO: 43epoch:train:1601-1700batch: iter_time=8.905e-05, forward_time=0.155, loss_ctc=74.573, loss_interctc_layer6=84.082, loss_interctc_layer12=69.150, loss_interctc_layer15=63.089, loss_interctc_layer21=77.119, loss=73.603, backward_time=0.213, grad_norm=80.763, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.165e-05, train_time=1.176 +[gpua006:0/64] 2024-02-15 02:55:27,570 (trainer:756) INFO: 43epoch:train:1701-1800batch: iter_time=8.865e-05, forward_time=0.244, loss_ctc=75.242, loss_interctc_layer6=93.341, loss_interctc_layer12=77.379, loss_interctc_layer15=71.036, loss_interctc_layer21=77.745, loss=78.949, backward_time=0.233, grad_norm=106.405, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.142, optim0_lr0=6.164e-05, train_time=1.375 +[gpua006:0/64] 2024-02-15 02:57:40,286 (trainer:756) INFO: 43epoch:train:1801-1900batch: 
iter_time=8.561e-05, forward_time=0.143, loss_ctc=68.141, loss_interctc_layer6=79.200, loss_interctc_layer12=65.939, loss_interctc_layer15=60.477, loss_interctc_layer21=70.487, loss=68.849, backward_time=0.208, grad_norm=78.235, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.164e-05, train_time=1.327 +[gpua006:0/64] 2024-02-15 02:59:59,358 (trainer:756) INFO: 43epoch:train:1901-2000batch: iter_time=8.956e-05, forward_time=0.144, loss_ctc=69.770, loss_interctc_layer6=79.995, loss_interctc_layer12=66.034, loss_interctc_layer15=60.410, loss_interctc_layer21=72.097, loss=69.661, backward_time=0.208, grad_norm=94.029, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.163e-05, train_time=1.390 +[gpua006:0/64] 2024-02-15 03:02:24,137 (trainer:756) INFO: 43epoch:train:2001-2100batch: iter_time=9.475e-05, forward_time=0.143, loss_ctc=70.192, loss_interctc_layer6=75.589, loss_interctc_layer12=61.927, loss_interctc_layer15=56.448, loss_interctc_layer21=72.835, loss=67.398, backward_time=0.206, grad_norm=80.210, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.163e-05, train_time=1.448 +[gpua006:0/64] 2024-02-15 03:04:47,167 (trainer:756) INFO: 43epoch:train:2101-2200batch: iter_time=8.976e-05, forward_time=0.141, loss_ctc=58.304, loss_interctc_layer6=65.838, loss_interctc_layer12=54.216, loss_interctc_layer15=49.668, loss_interctc_layer21=60.537, loss=57.713, backward_time=0.207, grad_norm=62.272, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.162e-05, train_time=1.430 +[gpua006:0/64] 2024-02-15 03:05:28,334 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-15 03:07:13,652 (trainer:756) INFO: 43epoch:train:2201-2300batch: iter_time=8.880e-05, forward_time=0.163, loss_ctc=77.254, loss_interctc_layer6=80.167, loss_interctc_layer12=66.470, loss_interctc_layer15=60.913, loss_interctc_layer21=80.017, loss=72.964, backward_time=0.211, grad_norm=64.267, clip=100.000, loss_scale=6.505e+30, optim_step_time=0.147, optim0_lr0=6.162e-05, train_time=1.464 +[gpua006:0/64] 2024-02-15 03:09:34,146 (trainer:756) INFO: 43epoch:train:2301-2400batch: iter_time=2.155e-04, forward_time=0.186, loss_ctc=69.233, loss_interctc_layer6=74.433, loss_interctc_layer12=62.204, loss_interctc_layer15=57.364, loss_interctc_layer21=71.661, loss=66.979, backward_time=0.240, grad_norm=93.522, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.143, optim0_lr0=6.161e-05, train_time=1.405 +[gpua006:0/64] 2024-02-15 03:11:22,979 (trainer:756) INFO: 43epoch:train:2401-2500batch: iter_time=8.614e-05, forward_time=0.160, loss_ctc=64.810, loss_interctc_layer6=73.855, loss_interctc_layer12=60.634, loss_interctc_layer15=55.503, loss_interctc_layer21=67.265, loss=64.413, backward_time=0.213, grad_norm=69.570, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.140, optim0_lr0=6.161e-05, train_time=1.088 +[gpua006:0/64] 2024-02-15 03:11:43,056 (multiple_iter_factory:32) INFO: Building 2th iter-factory... 
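The recurring "WARNING: The grad norm is nan. Skipping updating the model." lines, read together with the loss_scale column, match the standard mixed-precision training pattern: when the unscaled gradients come out non-finite, the optimizer step is skipped and the loss scale is cut, otherwise it is periodically grown. The underlying scale values here sit at power-of-two multiples of one another (5.071e+30, 1.014e+31, 2.028e+31); in-between numbers such as 6.505e+30 are presumably averages over the 100-batch reporting window. The sketch below shows the generic PyTorch recipe with torch.cuda.amp.GradScaler; it illustrates the mechanism and is not a copy of the ESPnet trainer, with model and batch as placeholders.

import torch

scaler = torch.cuda.amp.GradScaler()  # halves the scale on inf/nan grads, doubles it after enough clean steps

def train_step(model, optimizer, batch, max_norm=100.0):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = model(**batch)  # placeholder forward returning a scalar loss
    scaler.scale(loss).backward()
    scaler.unscale_(optimizer)  # bring gradients back to fp32 units before clipping
    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
    if not torch.isfinite(grad_norm):
        print("The grad norm is nan. Skipping updating the model.")
    scaler.step(optimizer)  # internally skipped if the gradients were non-finite
    scaler.update()         # backs off the scale after a skip, grows it otherwise
    return grad_norm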
+[gpua006:0/64] 2024-02-15 03:12:01,627 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 03:12:05,023 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.0", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.0", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.0", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.0", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 03:12:05,023 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.0, +[gpua006:0/64] 2024-02-15 03:12:05,104 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 03:18:39,529 (trainer:756) INFO: 43epoch:train:2501-2600batch: iter_time=2.978, forward_time=0.144, loss_ctc=77.990, loss_interctc_layer6=85.623, loss_interctc_layer12=70.961, loss_interctc_layer15=64.989, loss_interctc_layer21=80.743, loss=76.061, backward_time=0.208, grad_norm=103.796, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.139, optim0_lr0=6.160e-05, train_time=4.365 +[gpua006:0/64] 2024-02-15 03:20:46,388 (trainer:756) INFO: 43epoch:train:2601-2700batch: iter_time=1.024e-04, forward_time=0.143, loss_ctc=68.035, loss_interctc_layer6=76.459, loss_interctc_layer12=62.991, loss_interctc_layer15=57.549, loss_interctc_layer21=70.419, loss=67.091, backward_time=0.207, grad_norm=73.488, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.139, optim0_lr0=6.160e-05, train_time=1.268 +[gpua006:0/64] 2024-02-15 03:22:46,820 (trainer:756) INFO: 43epoch:train:2701-2800batch: iter_time=8.630e-05, forward_time=0.143, loss_ctc=67.251, loss_interctc_layer6=79.553, loss_interctc_layer12=65.658, loss_interctc_layer15=60.070, loss_interctc_layer21=69.403, loss=68.387, backward_time=0.209, grad_norm=73.266, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.160e-05, train_time=1.204 +[gpua006:0/64] 2024-02-15 03:24:44,383 (trainer:756) INFO: 43epoch:train:2801-2900batch: iter_time=8.819e-05, forward_time=0.143, loss_ctc=69.828, loss_interctc_layer6=79.477, loss_interctc_layer12=65.403, loss_interctc_layer15=59.806, loss_interctc_layer21=72.445, loss=69.392, backward_time=0.209, grad_norm=71.625, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.139, optim0_lr0=6.159e-05, train_time=1.175 +[gpua006:0/64] 2024-02-15 03:27:05,411 (trainer:756) INFO: 43epoch:train:2901-3000batch: iter_time=8.560e-05, forward_time=0.144, loss_ctc=79.534, loss_interctc_layer6=89.632, loss_interctc_layer12=73.726, loss_interctc_layer15=67.427, loss_interctc_layer21=82.440, loss=78.552, backward_time=0.208, grad_norm=80.958, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.139, optim0_lr0=6.159e-05, train_time=1.411 +[gpua006:0/64] 2024-02-15 03:29:36,449 (trainer:756) INFO: 43epoch:train:3001-3100batch: iter_time=8.587e-05, forward_time=0.159, loss_ctc=72.479, loss_interctc_layer6=87.603, loss_interctc_layer12=72.689, loss_interctc_layer15=66.657, loss_interctc_layer21=74.867, loss=74.859, backward_time=0.216, grad_norm=102.337, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.139, optim0_lr0=6.158e-05, train_time=1.510 +[gpua006:0/64] 2024-02-15 03:31:52,709 (trainer:756) INFO: 43epoch:train:3101-3200batch: 
iter_time=8.631e-05, forward_time=0.142, loss_ctc=62.962, loss_interctc_layer6=76.375, loss_interctc_layer12=63.560, loss_interctc_layer15=58.381, loss_interctc_layer21=64.959, loss=65.247, backward_time=0.208, grad_norm=183.788, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.158e-05, train_time=1.362 +[gpua006:0/64] 2024-02-15 03:34:38,190 (trainer:756) INFO: 43epoch:train:3201-3300batch: iter_time=8.441e-05, forward_time=0.187, loss_ctc=72.115, loss_interctc_layer6=79.206, loss_interctc_layer12=65.244, loss_interctc_layer15=59.589, loss_interctc_layer21=74.589, loss=70.149, backward_time=0.231, grad_norm=76.735, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.140, optim0_lr0=6.157e-05, train_time=1.654 +[gpua006:0/64] 2024-02-15 03:36:31,893 (trainer:756) INFO: 43epoch:train:3301-3400batch: iter_time=8.953e-05, forward_time=0.196, loss_ctc=64.315, loss_interctc_layer6=71.519, loss_interctc_layer12=58.866, loss_interctc_layer15=53.646, loss_interctc_layer21=66.736, loss=63.016, backward_time=0.217, grad_norm=97.500, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.143, optim0_lr0=6.157e-05, train_time=1.137 +[gpua006:0/64] 2024-02-15 03:38:10,887 (trainer:756) INFO: 43epoch:train:3401-3500batch: iter_time=8.443e-05, forward_time=0.142, loss_ctc=63.844, loss_interctc_layer6=69.785, loss_interctc_layer12=57.663, loss_interctc_layer15=52.773, loss_interctc_layer21=66.246, loss=62.062, backward_time=0.208, grad_norm=165.572, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.156e-05, train_time=0.990 +[gpua006:0/64] 2024-02-15 03:40:01,695 (trainer:756) INFO: 43epoch:train:3501-3600batch: iter_time=1.003e-04, forward_time=0.142, loss_ctc=78.384, loss_interctc_layer6=81.061, loss_interctc_layer12=67.442, loss_interctc_layer15=62.094, loss_interctc_layer21=81.298, loss=74.056, backward_time=0.208, grad_norm=85.827, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=6.156e-05, train_time=1.108 +[gpua006:0/64] 2024-02-15 03:43:22,895 (trainer:756) INFO: 43epoch:train:3601-3700batch: iter_time=9.996e-05, forward_time=0.143, loss_ctc=62.201, loss_interctc_layer6=70.771, loss_interctc_layer12=58.289, loss_interctc_layer15=53.331, loss_interctc_layer21=64.454, loss=61.809, backward_time=0.206, grad_norm=68.772, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=6.155e-05, train_time=2.012 +[gpua006:0/64] 2024-02-15 03:44:36,649 (multiple_iter_factory:32) INFO: Building 3th iter-factory... 
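grad_norm values above the clip threshold (183.788 and 165.572 in the entries above, against clip=100.000) are not a contradiction: the usual clipping call returns the total gradient norm before clipping, so the logged grad_norm can exceed 100 even though the gradients the optimizer actually applies were rescaled down to norm 100. A minimal, self-contained illustration of that reporting convention in plain PyTorch (not ESPnet-specific):

import torch

param = torch.nn.Parameter(torch.zeros(10))
param.grad = torch.full((10,), 100.0)  # gradient with L2 norm ~316.2
total_norm = torch.nn.utils.clip_grad_norm_([param], max_norm=100.0)
print(float(total_norm))         # ~316.2 -> the pre-clipping norm, i.e. what a grad_norm column reports
print(float(param.grad.norm()))  # ~100.0 -> the clipped gradient the optimizer would actually use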
+[gpua006:0/64] 2024-02-15 03:44:55,217 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 03:44:58,729 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.4", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.4", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.4", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.4", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 03:44:58,729 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.4, +[gpua006:0/64] 2024-02-15 03:44:58,964 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 03:50:34,862 (trainer:756) INFO: 43epoch:train:3701-3800batch: iter_time=3.135, forward_time=0.199, loss_ctc=66.200, loss_interctc_layer6=74.023, loss_interctc_layer12=60.890, loss_interctc_layer15=55.541, loss_interctc_layer21=68.541, loss=65.039, backward_time=0.217, grad_norm=80.594, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.140, optim0_lr0=6.155e-05, train_time=4.319 +[gpua006:0/64] 2024-02-15 03:52:53,204 (trainer:756) INFO: 43epoch:train:3801-3900batch: iter_time=8.464e-05, forward_time=0.144, loss_ctc=80.472, loss_interctc_layer6=86.327, loss_interctc_layer12=71.392, loss_interctc_layer15=65.231, loss_interctc_layer21=83.264, loss=77.337, backward_time=0.209, grad_norm=80.172, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.139, optim0_lr0=6.154e-05, train_time=1.384 +[gpua006:0/64] 2024-02-15 03:54:56,539 (trainer:756) INFO: 43epoch:train:3901-4000batch: iter_time=8.167e-05, forward_time=0.143, loss_ctc=66.409, loss_interctc_layer6=77.682, loss_interctc_layer12=64.012, loss_interctc_layer15=58.552, loss_interctc_layer21=68.829, loss=67.097, backward_time=0.209, grad_norm=67.973, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.139, optim0_lr0=6.154e-05, train_time=1.233 +[gpua006:0/64] 2024-02-15 03:56:41,963 (trainer:756) INFO: 43epoch:train:4001-4100batch: iter_time=9.055e-05, forward_time=0.143, loss_ctc=67.810, loss_interctc_layer6=78.698, loss_interctc_layer12=64.986, loss_interctc_layer15=59.455, loss_interctc_layer21=70.125, loss=68.215, backward_time=0.208, grad_norm=81.962, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.139, optim0_lr0=6.153e-05, train_time=1.054 +[gpua006:0/64] 2024-02-15 03:59:11,535 (trainer:756) INFO: 43epoch:train:4101-4200batch: iter_time=9.158e-05, forward_time=0.143, loss_ctc=73.440, loss_interctc_layer6=83.325, loss_interctc_layer12=68.427, loss_interctc_layer15=62.389, loss_interctc_layer21=76.178, loss=72.752, backward_time=0.208, grad_norm=73.520, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.153e-05, train_time=1.495 +[gpua006:0/64] 2024-02-15 04:00:45,513 (trainer:756) INFO: 43epoch:train:4201-4300batch: iter_time=9.196e-05, forward_time=0.143, loss_ctc=74.427, loss_interctc_layer6=92.276, loss_interctc_layer12=76.427, loss_interctc_layer15=70.077, loss_interctc_layer21=76.882, loss=78.018, backward_time=0.209, grad_norm=78.045, clip=100.000, loss_scale=8.671e+30, optim_step_time=0.137, optim0_lr0=6.152e-05, train_time=0.940 +[gpua006:0/64] 2024-02-15 04:03:41,310 (trainer:756) INFO: 43epoch:train:4301-4400batch: 
iter_time=8.509e-05, forward_time=0.276, loss_ctc=67.657, loss_interctc_layer6=78.716, loss_interctc_layer12=65.400, loss_interctc_layer15=59.907, loss_interctc_layer21=69.939, loss=68.324, backward_time=0.245, grad_norm=87.737, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.150, optim0_lr0=6.152e-05, train_time=1.757 +[gpua006:0/64] 2024-02-15 04:05:48,424 (trainer:756) INFO: 43epoch:train:4401-4500batch: iter_time=8.411e-05, forward_time=0.148, loss_ctc=68.596, loss_interctc_layer6=79.340, loss_interctc_layer12=65.487, loss_interctc_layer15=59.907, loss_interctc_layer21=70.936, loss=68.854, backward_time=0.220, grad_norm=71.742, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.151e-05, train_time=1.271 +[gpua006:0/64] 2024-02-15 04:08:05,143 (trainer:756) INFO: 43epoch:train:4501-4600batch: iter_time=9.403e-05, forward_time=0.143, loss_ctc=69.077, loss_interctc_layer6=74.926, loss_interctc_layer12=61.267, loss_interctc_layer15=55.794, loss_interctc_layer21=71.658, loss=66.544, backward_time=0.208, grad_norm=69.726, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.151e-05, train_time=1.367 +[gpua006:0/64] 2024-02-15 04:10:40,369 (trainer:756) INFO: 43epoch:train:4601-4700batch: iter_time=9.008e-05, forward_time=0.141, loss_ctc=58.122, loss_interctc_layer6=65.075, loss_interctc_layer12=53.796, loss_interctc_layer15=49.346, loss_interctc_layer21=60.347, loss=57.337, backward_time=0.206, grad_norm=79.236, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.150e-05, train_time=1.552 +[gpua006:0/64] 2024-02-15 04:13:07,635 (trainer:756) INFO: 43epoch:train:4701-4800batch: iter_time=9.188e-05, forward_time=0.142, loss_ctc=75.943, loss_interctc_layer6=80.031, loss_interctc_layer12=66.410, loss_interctc_layer15=60.941, loss_interctc_layer21=78.791, loss=72.423, backward_time=0.205, grad_norm=70.253, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.150e-05, train_time=1.472 +[gpua006:0/64] 2024-02-15 04:15:05,905 (trainer:756) INFO: 43epoch:train:4801-4900batch: iter_time=8.912e-05, forward_time=0.142, loss_ctc=66.995, loss_interctc_layer6=74.015, loss_interctc_layer12=61.139, loss_interctc_layer15=56.100, loss_interctc_layer21=69.319, loss=65.514, backward_time=0.207, grad_norm=75.804, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.149e-05, train_time=1.182 +[gpua006:0/64] 2024-02-15 04:15:45,952 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-15 04:17:39,571 (trainer:756) INFO: 43epoch:train:4901-5000batch: iter_time=9.496e-05, forward_time=0.144, loss_ctc=65.537, loss_interctc_layer6=73.905, loss_interctc_layer12=60.600, loss_interctc_layer15=55.305, loss_interctc_layer21=68.032, loss=64.676, backward_time=0.206, grad_norm=72.813, clip=100.000, loss_scale=6.556e+30, optim_step_time=0.137, optim0_lr0=6.149e-05, train_time=1.536 +[gpua006:0/64] 2024-02-15 04:17:59,630 (multiple_iter_factory:32) INFO: Building 4th iter-factory... 
+[gpua006:0/64] 2024-02-15 04:18:18,436 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 04:18:21,899 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.1", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.1", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.1", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.1", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 04:18:21,900 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.1, +[gpua006:0/64] 2024-02-15 04:18:21,933 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 04:24:58,151 (trainer:756) INFO: 43epoch:train:5001-5100batch: iter_time=3.229, forward_time=0.205, loss_ctc=75.896, loss_interctc_layer6=84.902, loss_interctc_layer12=70.263, loss_interctc_layer15=64.498, loss_interctc_layer21=78.653, loss=74.843, backward_time=0.219, grad_norm=76.999, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.140, optim0_lr0=6.148e-05, train_time=4.384 +[gpua006:0/64] 2024-02-15 04:26:45,985 (trainer:756) INFO: 43epoch:train:5101-5200batch: iter_time=8.440e-05, forward_time=0.142, loss_ctc=66.878, loss_interctc_layer6=76.814, loss_interctc_layer12=63.472, loss_interctc_layer15=58.065, loss_interctc_layer21=69.174, loss=66.881, backward_time=0.207, grad_norm=70.878, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=6.148e-05, train_time=1.080 +[gpua006:0/64] 2024-02-15 04:29:05,513 (trainer:756) INFO: 43epoch:train:5201-5300batch: iter_time=9.441e-05, forward_time=0.143, loss_ctc=65.948, loss_interctc_layer6=79.943, loss_interctc_layer12=65.973, loss_interctc_layer15=60.292, loss_interctc_layer21=68.307, loss=68.093, backward_time=0.208, grad_norm=77.765, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=6.147e-05, train_time=1.395 +[gpua006:0/64] 2024-02-15 04:31:12,939 (trainer:756) INFO: 43epoch:train:5301-5400batch: iter_time=9.024e-05, forward_time=0.145, loss_ctc=64.528, loss_interctc_layer6=79.277, loss_interctc_layer12=65.177, loss_interctc_layer15=59.543, loss_interctc_layer21=66.946, loss=67.094, backward_time=0.209, grad_norm=68.986, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=6.147e-05, train_time=1.274 +[gpua006:0/64] 2024-02-15 04:32:56,493 (trainer:756) INFO: 43epoch:train:5401-5500batch: iter_time=8.961e-05, forward_time=0.143, loss_ctc=76.867, loss_interctc_layer6=90.148, loss_interctc_layer12=74.289, loss_interctc_layer15=67.921, loss_interctc_layer21=79.759, loss=77.797, backward_time=0.209, grad_norm=76.749, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=6.146e-05, train_time=1.035 +[gpua006:0/64] 2024-02-15 04:35:46,977 (trainer:756) INFO: 43epoch:train:5501-5600batch: iter_time=8.947e-05, forward_time=0.143, loss_ctc=70.908, loss_interctc_layer6=87.419, loss_interctc_layer12=72.566, loss_interctc_layer15=66.344, loss_interctc_layer21=73.269, loss=74.101, backward_time=0.206, grad_norm=83.135, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=6.146e-05, train_time=1.705 +[gpua006:0/64] 2024-02-15 04:39:00,246 (trainer:756) INFO: 43epoch:train:5601-5700batch: 
iter_time=9.134e-05, forward_time=0.142, loss_ctc=61.349, loss_interctc_layer6=75.631, loss_interctc_layer12=62.899, loss_interctc_layer15=57.845, loss_interctc_layer21=63.222, loss=64.189, backward_time=0.205, grad_norm=73.891, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=6.145e-05, train_time=1.932 +[gpua006:0/64] 2024-02-15 04:41:16,223 (trainer:756) INFO: 43epoch:train:5701-5800batch: iter_time=1.013e-04, forward_time=0.143, loss_ctc=71.061, loss_interctc_layer6=79.108, loss_interctc_layer12=65.014, loss_interctc_layer15=59.341, loss_interctc_layer21=73.637, loss=69.632, backward_time=0.207, grad_norm=78.845, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=6.145e-05, train_time=1.360 +[gpua006:0/64] 2024-02-15 04:43:36,604 (trainer:756) INFO: 43epoch:train:5801-5900batch: iter_time=9.653e-05, forward_time=0.144, loss_ctc=61.413, loss_interctc_layer6=71.413, loss_interctc_layer12=58.704, loss_interctc_layer15=53.494, loss_interctc_layer21=63.760, loss=61.757, backward_time=0.208, grad_norm=71.353, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=6.144e-05, train_time=1.404 +[gpua006:0/64] 2024-02-15 04:45:49,532 (trainer:756) INFO: 43epoch:train:5901-6000batch: iter_time=1.005e-04, forward_time=0.284, loss_ctc=59.912, loss_interctc_layer6=68.791, loss_interctc_layer12=56.633, loss_interctc_layer15=51.743, loss_interctc_layer21=62.293, loss=59.874, backward_time=0.248, grad_norm=57.803, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.142, optim0_lr0=6.144e-05, train_time=1.328 +[gpua006:0/64] 2024-02-15 04:48:13,729 (trainer:756) INFO: 43epoch:train:6001-6100batch: iter_time=1.005e-04, forward_time=0.143, loss_ctc=78.681, loss_interctc_layer6=81.303, loss_interctc_layer12=67.487, loss_interctc_layer15=62.076, loss_interctc_layer21=81.563, loss=74.222, backward_time=0.208, grad_norm=88.961, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.144e-05, train_time=1.442 +[gpua006:0/64] 2024-02-15 04:50:28,086 (trainer:756) INFO: 43epoch:train:6101-6200batch: iter_time=8.979e-05, forward_time=0.143, loss_ctc=57.941, loss_interctc_layer6=70.593, loss_interctc_layer12=58.148, loss_interctc_layer15=53.168, loss_interctc_layer21=60.107, loss=59.991, backward_time=0.208, grad_norm=76.557, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=6.143e-05, train_time=1.344 +[gpua006:0/64] 2024-02-15 04:51:48,960 (multiple_iter_factory:32) INFO: Building 5th iter-factory... 
+[gpua006:0/64] 2024-02-15 04:52:07,711 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 04:52:11,098 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.5", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.5", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.5", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.5", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 04:52:11,098 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.5, +[gpua006:0/64] 2024-02-15 04:52:11,132 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 04:57:47,689 (trainer:756) INFO: 43epoch:train:6201-6300batch: iter_time=3.195, forward_time=0.185, loss_ctc=62.242, loss_interctc_layer6=73.220, loss_interctc_layer12=60.184, loss_interctc_layer15=54.897, loss_interctc_layer21=64.723, loss=63.053, backward_time=0.217, grad_norm=66.231, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.143e-05, train_time=4.395 +[gpua006:0/64] 2024-02-15 04:59:55,997 (trainer:756) INFO: 43epoch:train:6301-6400batch: iter_time=9.293e-05, forward_time=0.143, loss_ctc=78.046, loss_interctc_layer6=86.088, loss_interctc_layer12=71.152, loss_interctc_layer15=65.161, loss_interctc_layer21=80.924, loss=76.274, backward_time=0.207, grad_norm=93.006, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.142e-05, train_time=1.283 +[gpua006:0/64] 2024-02-15 05:02:22,747 (trainer:756) INFO: 43epoch:train:6401-6500batch: iter_time=9.032e-05, forward_time=0.195, loss_ctc=64.574, loss_interctc_layer6=76.817, loss_interctc_layer12=63.315, loss_interctc_layer15=57.797, loss_interctc_layer21=66.862, loss=65.873, backward_time=0.304, grad_norm=101.440, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.144, optim0_lr0=6.142e-05, train_time=1.467 +[gpua006:0/64] 2024-02-15 05:05:07,711 (trainer:756) INFO: 43epoch:train:6501-6600batch: iter_time=9.660e-05, forward_time=0.143, loss_ctc=64.342, loss_interctc_layer6=78.524, loss_interctc_layer12=64.748, loss_interctc_layer15=59.526, loss_interctc_layer21=66.479, loss=66.724, backward_time=0.205, grad_norm=81.176, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.139, optim0_lr0=6.141e-05, train_time=1.649 +[gpua006:0/64] 2024-02-15 05:07:55,832 (trainer:756) INFO: 43epoch:train:6601-6700batch: iter_time=1.079e-04, forward_time=0.256, loss_ctc=70.042, loss_interctc_layer6=82.954, loss_interctc_layer12=68.186, loss_interctc_layer15=62.075, loss_interctc_layer21=72.580, loss=71.167, backward_time=0.247, grad_norm=70.874, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.155, optim0_lr0=6.141e-05, train_time=1.681 +[gpua006:0/64] 2024-02-15 05:09:51,035 (trainer:756) INFO: 43epoch:train:6701-6800batch: iter_time=9.903e-05, forward_time=0.143, loss_ctc=72.043, loss_interctc_layer6=91.595, loss_interctc_layer12=75.972, loss_interctc_layer15=69.700, loss_interctc_layer21=74.375, loss=76.737, backward_time=0.208, grad_norm=131.954, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=6.140e-05, train_time=1.152 +[gpua006:0/64] 2024-02-15 05:12:15,283 (trainer:756) INFO: 43epoch:train:6801-6900batch: 
iter_time=1.001e-04, forward_time=0.143, loss_ctc=65.653, loss_interctc_layer6=78.074, loss_interctc_layer12=64.902, loss_interctc_layer15=59.349, loss_interctc_layer21=67.919, loss=67.179, backward_time=0.208, grad_norm=75.592, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.139, optim0_lr0=6.140e-05, train_time=1.441 +[gpua006:0/64] 2024-02-15 05:14:35,467 (trainer:756) INFO: 43epoch:train:6901-7000batch: iter_time=3.129e-04, forward_time=0.200, loss_ctc=65.338, loss_interctc_layer6=78.892, loss_interctc_layer12=64.980, loss_interctc_layer15=59.436, loss_interctc_layer21=67.494, loss=67.228, backward_time=0.328, grad_norm=65.476, clip=100.000, loss_scale=8.620e+30, optim_step_time=0.143, optim0_lr0=6.139e-05, train_time=1.402 +[gpua006:0/64] 2024-02-15 05:17:03,942 (trainer:756) INFO: 43epoch:train:7001-7100batch: iter_time=8.716e-05, forward_time=0.142, loss_ctc=67.467, loss_interctc_layer6=74.782, loss_interctc_layer12=61.148, loss_interctc_layer15=55.608, loss_interctc_layer21=70.008, loss=65.802, backward_time=0.207, grad_norm=69.164, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.139e-05, train_time=1.485 +[gpua006:0/64] 2024-02-15 05:19:12,499 (trainer:756) INFO: 43epoch:train:7101-7200batch: iter_time=1.003e-04, forward_time=0.141, loss_ctc=56.272, loss_interctc_layer6=65.253, loss_interctc_layer12=53.889, loss_interctc_layer15=49.230, loss_interctc_layer21=58.464, loss=56.622, backward_time=0.206, grad_norm=81.165, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.138e-05, train_time=1.285 +[gpua006:0/64] 2024-02-15 05:21:25,322 (trainer:756) INFO: 43epoch:train:7201-7300batch: iter_time=8.967e-05, forward_time=0.143, loss_ctc=71.519, loss_interctc_layer6=79.712, loss_interctc_layer12=66.114, loss_interctc_layer15=60.707, loss_interctc_layer21=74.343, loss=70.479, backward_time=0.206, grad_norm=94.466, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.138e-05, train_time=1.328 +[gpua006:0/64] 2024-02-15 05:24:09,801 (trainer:756) INFO: 43epoch:train:7301-7400batch: iter_time=8.777e-05, forward_time=0.201, loss_ctc=66.183, loss_interctc_layer6=73.891, loss_interctc_layer12=61.350, loss_interctc_layer15=56.232, loss_interctc_layer21=68.486, loss=65.228, backward_time=0.299, grad_norm=76.510, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.142, optim0_lr0=6.137e-05, train_time=1.644 +[gpua006:0/64] 2024-02-15 05:26:34,292 (trainer:756) INFO: 43epoch:train:7401-7500batch: iter_time=9.395e-05, forward_time=0.186, loss_ctc=61.287, loss_interctc_layer6=73.728, loss_interctc_layer12=60.433, loss_interctc_layer15=55.121, loss_interctc_layer21=63.683, loss=62.850, backward_time=0.215, grad_norm=90.905, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.137e-05, train_time=1.445 +[gpua006:0/64] 2024-02-15 05:26:54,321 (multiple_iter_factory:32) INFO: Building 6th iter-factory... 
+[gpua006:0/64] 2024-02-15 05:27:12,938 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 05:27:16,580 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.10", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.10", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.10", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.10", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 05:27:16,580 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.10, +[gpua006:0/64] 2024-02-15 05:27:16,634 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 05:34:28,982 (trainer:756) INFO: 43epoch:train:7501-7600batch: iter_time=3.567, forward_time=0.145, loss_ctc=78.697, loss_interctc_layer6=84.979, loss_interctc_layer12=70.313, loss_interctc_layer15=64.507, loss_interctc_layer21=81.559, loss=76.011, backward_time=0.209, grad_norm=119.319, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.136e-05, train_time=4.747 +[gpua006:0/64] 2024-02-15 05:36:06,980 (trainer:756) INFO: 43epoch:train:7601-7700batch: iter_time=1.013e-04, forward_time=0.144, loss_ctc=67.392, loss_interctc_layer6=76.530, loss_interctc_layer12=63.002, loss_interctc_layer15=57.614, loss_interctc_layer21=69.843, loss=66.876, backward_time=0.209, grad_norm=111.032, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.136e-05, train_time=0.980 +[gpua006:0/64] 2024-02-15 05:38:11,828 (trainer:756) INFO: 43epoch:train:7701-7800batch: iter_time=1.001e-04, forward_time=0.144, loss_ctc=66.514, loss_interctc_layer6=79.630, loss_interctc_layer12=65.545, loss_interctc_layer15=59.923, loss_interctc_layer21=68.818, loss=68.086, backward_time=0.209, grad_norm=78.041, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.135e-05, train_time=1.248 +[gpua006:0/64] 2024-02-15 05:40:19,422 (trainer:756) INFO: 43epoch:train:7801-7900batch: iter_time=1.062e-04, forward_time=0.144, loss_ctc=69.736, loss_interctc_layer6=79.083, loss_interctc_layer12=65.076, loss_interctc_layer15=59.446, loss_interctc_layer21=72.239, loss=69.116, backward_time=0.208, grad_norm=72.218, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.135e-05, train_time=1.276 +[gpua006:0/64] 2024-02-15 05:42:19,654 (trainer:756) INFO: 43epoch:train:7901-8000batch: iter_time=8.308e-05, forward_time=0.221, loss_ctc=78.671, loss_interctc_layer6=89.118, loss_interctc_layer12=73.314, loss_interctc_layer15=67.012, loss_interctc_layer21=81.650, loss=77.953, backward_time=0.298, grad_norm=100.833, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.145, optim0_lr0=6.134e-05, train_time=1.201 +[gpua006:0/64] 2024-02-15 05:45:13,113 (trainer:756) INFO: 43epoch:train:8001-8100batch: iter_time=8.506e-05, forward_time=0.142, loss_ctc=71.340, loss_interctc_layer6=86.441, loss_interctc_layer12=71.315, loss_interctc_layer15=65.362, loss_interctc_layer21=73.852, loss=73.662, backward_time=0.207, grad_norm=76.131, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.134e-05, train_time=1.735 +[gpua006:0/64] 2024-02-15 05:47:52,540 (trainer:756) INFO: 
43epoch:train:8101-8200batch: iter_time=8.289e-05, forward_time=0.142, loss_ctc=63.193, loss_interctc_layer6=75.543, loss_interctc_layer12=62.827, loss_interctc_layer15=57.672, loss_interctc_layer21=65.240, loss=64.895, backward_time=0.206, grad_norm=76.951, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.133e-05, train_time=1.594 +[gpua006:0/64] 2024-02-15 05:49:51,006 (trainer:756) INFO: 43epoch:train:8201-8300batch: iter_time=8.207e-05, forward_time=0.143, loss_ctc=72.984, loss_interctc_layer6=79.001, loss_interctc_layer12=64.968, loss_interctc_layer15=59.311, loss_interctc_layer21=75.802, loss=70.413, backward_time=0.208, grad_norm=120.607, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.133e-05, train_time=1.184 +[gpua006:0/64] 2024-02-15 05:52:13,713 (trainer:756) INFO: 43epoch:train:8301-8400batch: iter_time=7.896e-05, forward_time=0.142, loss_ctc=64.294, loss_interctc_layer6=71.636, loss_interctc_layer12=59.050, loss_interctc_layer15=53.735, loss_interctc_layer21=66.695, loss=63.082, backward_time=0.208, grad_norm=65.952, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.132e-05, train_time=1.427 +[gpua006:0/64] 2024-02-15 05:54:46,008 (trainer:756) INFO: 43epoch:train:8401-8500batch: iter_time=8.526e-05, forward_time=0.142, loss_ctc=64.714, loss_interctc_layer6=69.462, loss_interctc_layer12=57.400, loss_interctc_layer15=52.504, loss_interctc_layer21=67.252, loss=62.267, backward_time=0.208, grad_norm=109.761, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.132e-05, train_time=1.523 +[gpua006:0/64] 2024-02-15 05:56:36,894 (trainer:756) INFO: 43epoch:train:8501-8600batch: iter_time=8.564e-05, forward_time=0.142, loss_ctc=81.091, loss_interctc_layer6=81.376, loss_interctc_layer12=67.951, loss_interctc_layer15=62.548, loss_interctc_layer21=83.881, loss=75.369, backward_time=0.207, grad_norm=94.556, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.131e-05, train_time=1.109 +[gpua006:0/64] 2024-02-15 05:58:14,106 (trainer:756) INFO: 43epoch:train:8601-8700batch: iter_time=8.340e-05, forward_time=0.141, loss_ctc=61.653, loss_interctc_layer6=70.504, loss_interctc_layer12=58.061, loss_interctc_layer15=53.098, loss_interctc_layer21=63.916, loss=61.446, backward_time=0.207, grad_norm=87.269, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.131e-05, train_time=0.972 +[gpua006:0/64] 2024-02-15 05:59:59,132 (multiple_iter_factory:32) INFO: Building 7th iter-factory... 
+[gpua006:0/64] 2024-02-15 06:00:17,892 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 06:00:21,423 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.7", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.7", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.7", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.7", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 06:00:21,423 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.7, +[gpua006:0/64] 2024-02-15 06:00:21,441 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 06:06:05,037 (trainer:756) INFO: 43epoch:train:8701-8800batch: iter_time=3.267, forward_time=0.195, loss_ctc=64.944, loss_interctc_layer6=73.887, loss_interctc_layer12=60.697, loss_interctc_layer15=55.454, loss_interctc_layer21=67.406, loss=64.478, backward_time=0.218, grad_norm=176.182, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=6.131e-05, train_time=4.708 +[gpua006:0/64] 2024-02-15 06:07:41,998 (trainer:756) INFO: 43epoch:train:8801-8900batch: iter_time=8.592e-05, forward_time=0.142, loss_ctc=78.037, loss_interctc_layer6=86.240, loss_interctc_layer12=71.184, loss_interctc_layer15=65.071, loss_interctc_layer21=80.903, loss=76.287, backward_time=0.207, grad_norm=99.958, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.130e-05, train_time=0.970 +[gpua006:0/64] 2024-02-15 06:09:27,231 (trainer:756) INFO: 43epoch:train:8901-9000batch: iter_time=8.425e-05, forward_time=0.141, loss_ctc=64.605, loss_interctc_layer6=77.215, loss_interctc_layer12=63.494, loss_interctc_layer15=57.930, loss_interctc_layer21=66.891, loss=66.027, backward_time=0.207, grad_norm=82.679, clip=100.000, loss_scale=1.724e+31, optim_step_time=0.137, optim0_lr0=6.130e-05, train_time=1.052 +[gpua006:0/64] 2024-02-15 06:12:14,004 (trainer:756) INFO: 43epoch:train:9001-9100batch: iter_time=8.636e-05, forward_time=0.142, loss_ctc=64.425, loss_interctc_layer6=78.564, loss_interctc_layer12=64.920, loss_interctc_layer15=59.434, loss_interctc_layer21=66.548, loss=66.778, backward_time=0.206, grad_norm=77.357, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.129e-05, train_time=1.667 +[gpua006:0/64] 2024-02-15 06:14:07,523 (trainer:756) INFO: 43epoch:train:9101-9200batch: iter_time=8.580e-05, forward_time=0.142, loss_ctc=69.612, loss_interctc_layer6=82.740, loss_interctc_layer12=67.991, loss_interctc_layer15=61.868, loss_interctc_layer21=72.280, loss=70.898, backward_time=0.207, grad_norm=112.482, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.129e-05, train_time=1.135 +[gpua006:0/64] 2024-02-15 06:16:25,350 (trainer:756) INFO: 43epoch:train:9201-9300batch: iter_time=8.390e-05, forward_time=0.142, loss_ctc=72.186, loss_interctc_layer6=91.116, loss_interctc_layer12=75.449, loss_interctc_layer15=69.213, loss_interctc_layer21=74.564, loss=76.506, backward_time=0.206, grad_norm=98.484, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.128e-05, train_time=1.378 +[gpua006:0/64] 2024-02-15 06:18:52,979 (trainer:756) INFO: 43epoch:train:9301-9400batch: 
iter_time=8.529e-05, forward_time=0.143, loss_ctc=66.332, loss_interctc_layer6=78.485, loss_interctc_layer12=65.161, loss_interctc_layer15=59.904, loss_interctc_layer21=68.494, loss=67.675, backward_time=0.206, grad_norm=78.905, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.128e-05, train_time=1.476 +[gpua006:0/64] 2024-02-15 06:21:46,402 (trainer:756) INFO: 43epoch:train:9401-9500batch: iter_time=8.662e-05, forward_time=0.154, loss_ctc=65.614, loss_interctc_layer6=79.333, loss_interctc_layer12=65.487, loss_interctc_layer15=59.885, loss_interctc_layer21=67.865, loss=67.637, backward_time=0.218, grad_norm=68.576, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.127e-05, train_time=1.734 +[gpua006:0/64] 2024-02-15 06:23:48,498 (trainer:756) INFO: 43epoch:train:9501-9600batch: iter_time=8.641e-05, forward_time=0.289, loss_ctc=67.637, loss_interctc_layer6=74.415, loss_interctc_layer12=60.880, loss_interctc_layer15=55.347, loss_interctc_layer21=70.296, loss=65.715, backward_time=0.254, grad_norm=82.166, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.151, optim0_lr0=6.127e-05, train_time=1.220 +[gpua006:0/64] 2024-02-15 06:25:42,648 (trainer:756) INFO: 43epoch:train:9601-9700batch: iter_time=8.659e-05, forward_time=0.142, loss_ctc=55.978, loss_interctc_layer6=65.792, loss_interctc_layer12=54.371, loss_interctc_layer15=49.736, loss_interctc_layer21=58.173, loss=56.810, backward_time=0.207, grad_norm=71.234, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.126e-05, train_time=1.142 +[gpua006:0/64] 2024-02-15 06:27:49,783 (trainer:756) INFO: 43epoch:train:9701-9800batch: iter_time=8.553e-05, forward_time=0.142, loss_ctc=71.592, loss_interctc_layer6=79.386, loss_interctc_layer12=65.836, loss_interctc_layer15=60.454, loss_interctc_layer21=74.320, loss=70.318, backward_time=0.205, grad_norm=82.480, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.126e-05, train_time=1.271 +[gpua006:0/64] 2024-02-15 06:30:18,468 (trainer:756) INFO: 43epoch:train:9801-9900batch: iter_time=8.691e-05, forward_time=0.141, loss_ctc=65.478, loss_interctc_layer6=73.237, loss_interctc_layer12=60.790, loss_interctc_layer15=55.862, loss_interctc_layer21=67.792, loss=64.632, backward_time=0.205, grad_norm=76.652, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.125e-05, train_time=1.487 +[gpua006:0/64] 2024-02-15 06:31:54,960 (trainer:756) INFO: 43epoch:train:9901-10000batch: iter_time=8.553e-05, forward_time=0.142, loss_ctc=60.365, loss_interctc_layer6=73.137, loss_interctc_layer12=59.945, loss_interctc_layer15=54.618, loss_interctc_layer21=62.574, loss=62.128, backward_time=0.207, grad_norm=73.944, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.125e-05, train_time=0.965 +[gpua006:0/64] 2024-02-15 06:32:14,990 (multiple_iter_factory:32) INFO: Building 8th iter-factory... 
+[gpua006:0/64] 2024-02-15 06:32:34,287 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 06:32:37,686 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.8", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.8", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.8", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.8", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 06:32:37,686 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.8, +[gpua006:0/64] 2024-02-15 06:32:37,752 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 06:39:09,994 (trainer:756) INFO: 43epoch:train:10001-10100batch: iter_time=3.228, forward_time=0.145, loss_ctc=77.787, loss_interctc_layer6=84.996, loss_interctc_layer12=70.279, loss_interctc_layer15=64.584, loss_interctc_layer21=80.514, loss=75.632, backward_time=0.209, grad_norm=154.261, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.124e-05, train_time=4.350 +[gpua006:0/64] 2024-02-15 06:41:11,061 (trainer:756) INFO: 43epoch:train:10101-10200batch: iter_time=6.971e-04, forward_time=0.291, loss_ctc=67.635, loss_interctc_layer6=75.919, loss_interctc_layer12=62.503, loss_interctc_layer15=57.062, loss_interctc_layer21=70.158, loss=66.655, backward_time=0.247, grad_norm=74.077, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.143, optim0_lr0=6.124e-05, train_time=1.210 +[gpua006:0/64] 2024-02-15 06:43:03,370 (trainer:756) INFO: 43epoch:train:10201-10300batch: iter_time=9.205e-05, forward_time=0.144, loss_ctc=66.845, loss_interctc_layer6=79.490, loss_interctc_layer12=65.447, loss_interctc_layer15=59.814, loss_interctc_layer21=69.187, loss=68.157, backward_time=0.208, grad_norm=78.368, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.123e-05, train_time=1.122 +[gpua006:0/64] 2024-02-15 06:44:54,231 (trainer:756) INFO: 43epoch:train:10301-10400batch: iter_time=9.451e-05, forward_time=0.143, loss_ctc=69.297, loss_interctc_layer6=78.865, loss_interctc_layer12=64.683, loss_interctc_layer15=59.052, loss_interctc_layer21=71.787, loss=68.737, backward_time=0.207, grad_norm=162.554, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.123e-05, train_time=1.110 +[gpua006:0/64] 2024-02-15 06:47:31,909 (trainer:756) INFO: 43epoch:train:10401-10500batch: iter_time=9.456e-05, forward_time=0.143, loss_ctc=78.716, loss_interctc_layer6=88.946, loss_interctc_layer12=72.981, loss_interctc_layer15=66.630, loss_interctc_layer21=81.645, loss=77.784, backward_time=0.207, grad_norm=78.190, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.122e-05, train_time=1.577 +[gpua006:0/64] 2024-02-15 06:49:31,912 (trainer:756) INFO: 43epoch:train:10501-10600batch: iter_time=9.763e-05, forward_time=0.143, loss_ctc=70.870, loss_interctc_layer6=86.541, loss_interctc_layer12=71.468, loss_interctc_layer15=65.271, loss_interctc_layer21=73.275, loss=73.485, backward_time=0.207, grad_norm=86.570, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.122e-05, train_time=1.200 +[gpua006:0/64] 2024-02-15 06:51:15,789 (trainer:756) INFO: 
43epoch:train:10601-10700batch: iter_time=9.316e-05, forward_time=0.143, loss_ctc=62.361, loss_interctc_layer6=75.603, loss_interctc_layer12=62.761, loss_interctc_layer15=57.518, loss_interctc_layer21=64.332, loss=64.515, backward_time=0.211, grad_norm=69.250, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.121e-05, train_time=1.039 +[gpua006:0/64] 2024-02-15 06:53:53,290 (trainer:756) INFO: 43epoch:train:10701-10800batch: iter_time=9.622e-05, forward_time=0.144, loss_ctc=71.902, loss_interctc_layer6=78.872, loss_interctc_layer12=64.805, loss_interctc_layer15=59.117, loss_interctc_layer21=74.406, loss=69.820, backward_time=0.209, grad_norm=84.300, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.121e-05, train_time=1.575 +[gpua006:0/64] 2024-02-15 06:56:05,370 (trainer:756) INFO: 43epoch:train:10801-10900batch: iter_time=9.768e-05, forward_time=0.144, loss_ctc=63.843, loss_interctc_layer6=70.485, loss_interctc_layer12=57.908, loss_interctc_layer15=52.681, loss_interctc_layer21=66.249, loss=62.233, backward_time=0.209, grad_norm=172.972, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.120e-05, train_time=1.321 +[gpua006:0/64] 2024-02-15 06:59:04,998 (trainer:756) INFO: 43epoch:train:10901-11000batch: iter_time=1.046e-04, forward_time=0.251, loss_ctc=62.907, loss_interctc_layer6=68.554, loss_interctc_layer12=56.520, loss_interctc_layer15=51.659, loss_interctc_layer21=65.304, loss=60.989, backward_time=0.243, grad_norm=105.384, clip=100.000, loss_scale=3.448e+31, optim_step_time=0.142, optim0_lr0=6.120e-05, train_time=1.796 +[gpua006:0/64] 2024-02-15 07:01:17,455 (trainer:756) INFO: 43epoch:train:11001-11100batch: iter_time=9.482e-05, forward_time=0.143, loss_ctc=79.478, loss_interctc_layer6=81.947, loss_interctc_layer12=68.439, loss_interctc_layer15=62.858, loss_interctc_layer21=82.371, loss=75.019, backward_time=0.207, grad_norm=84.070, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.119e-05, train_time=1.324 +[gpua006:0/64] 2024-02-15 07:03:19,246 (trainer:756) INFO: 43epoch:train:11101-11200batch: iter_time=9.524e-05, forward_time=0.142, loss_ctc=61.743, loss_interctc_layer6=70.005, loss_interctc_layer12=57.610, loss_interctc_layer15=52.614, loss_interctc_layer21=63.872, loss=61.169, backward_time=0.207, grad_norm=67.516, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.119e-05, train_time=1.217 +[gpua006:0/64] 2024-02-15 07:04:39,058 (multiple_iter_factory:32) INFO: Building 9th iter-factory... 
+[gpua006:0/64] 2024-02-15 07:04:57,938 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 07:05:01,327 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.11", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.11", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.11", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.11", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 07:05:01,327 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.11, +[gpua006:0/64] 2024-02-15 07:05:01,333 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 07:10:35,848 (trainer:756) INFO: 43epoch:train:11201-11300batch: iter_time=3.112, forward_time=0.143, loss_ctc=66.097, loss_interctc_layer6=74.383, loss_interctc_layer12=61.226, loss_interctc_layer15=55.986, loss_interctc_layer21=68.531, loss=65.245, backward_time=0.208, grad_norm=61.970, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.119e-05, train_time=4.367 +[gpua006:0/64] 2024-02-15 07:12:10,793 (trainer:756) INFO: 43epoch:train:11301-11400batch: iter_time=8.881e-05, forward_time=0.143, loss_ctc=77.730, loss_interctc_layer6=86.445, loss_interctc_layer12=71.432, loss_interctc_layer15=65.404, loss_interctc_layer21=80.514, loss=76.305, backward_time=0.209, grad_norm=80.371, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.118e-05, train_time=0.949 +[gpua006:0/64] 2024-02-15 07:14:08,988 (trainer:756) INFO: 43epoch:train:11401-11500batch: iter_time=8.869e-05, forward_time=0.143, loss_ctc=64.396, loss_interctc_layer6=76.981, loss_interctc_layer12=63.341, loss_interctc_layer15=57.969, loss_interctc_layer21=66.722, loss=65.882, backward_time=0.210, grad_norm=93.752, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.118e-05, train_time=1.182 +[gpua006:0/64] 2024-02-15 07:16:29,766 (trainer:756) INFO: 43epoch:train:11501-11600batch: iter_time=9.835e-05, forward_time=0.142, loss_ctc=63.665, loss_interctc_layer6=77.934, loss_interctc_layer12=64.270, loss_interctc_layer15=58.827, loss_interctc_layer21=65.857, loss=66.111, backward_time=0.207, grad_norm=74.878, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.117e-05, train_time=1.408 +[gpua006:0/64] 2024-02-15 07:18:51,040 (trainer:756) INFO: 43epoch:train:11601-11700batch: iter_time=9.916e-05, forward_time=0.150, loss_ctc=69.862, loss_interctc_layer6=83.044, loss_interctc_layer12=68.126, loss_interctc_layer15=62.085, loss_interctc_layer21=72.432, loss=71.110, backward_time=0.208, grad_norm=107.474, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.117e-05, train_time=1.413 +[gpua006:0/64] 2024-02-15 07:21:34,649 (trainer:756) INFO: 43epoch:train:11701-11800batch: iter_time=9.127e-05, forward_time=0.230, loss_ctc=72.579, loss_interctc_layer6=91.585, loss_interctc_layer12=75.849, loss_interctc_layer15=69.500, loss_interctc_layer21=75.026, loss=76.908, backward_time=0.255, grad_norm=87.274, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.143, optim0_lr0=6.116e-05, train_time=1.636 +[gpua006:0/64] 2024-02-15 07:24:08,572 (trainer:756) INFO: 
43epoch:train:11801-11900batch: iter_time=9.610e-05, forward_time=0.143, loss_ctc=65.860, loss_interctc_layer6=78.046, loss_interctc_layer12=64.704, loss_interctc_layer15=59.068, loss_interctc_layer21=68.170, loss=67.170, backward_time=0.207, grad_norm=72.988, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.116e-05, train_time=1.539 +[gpua006:0/64] 2024-02-15 07:26:12,013 (trainer:756) INFO: 43epoch:train:11901-12000batch: iter_time=9.605e-05, forward_time=0.143, loss_ctc=65.017, loss_interctc_layer6=79.226, loss_interctc_layer12=65.314, loss_interctc_layer15=59.630, loss_interctc_layer21=67.249, loss=67.287, backward_time=0.208, grad_norm=86.483, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.115e-05, train_time=1.233 +[gpua006:0/64] 2024-02-15 07:28:30,558 (trainer:756) INFO: 43epoch:train:12001-12100batch: iter_time=9.559e-05, forward_time=0.144, loss_ctc=67.569, loss_interctc_layer6=74.528, loss_interctc_layer12=60.831, loss_interctc_layer15=55.271, loss_interctc_layer21=70.056, loss=65.651, backward_time=0.208, grad_norm=63.753, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.115e-05, train_time=1.386 +[gpua006:0/64] 2024-02-15 07:30:12,732 (trainer:756) INFO: 43epoch:train:12101-12200batch: iter_time=8.909e-05, forward_time=0.141, loss_ctc=54.826, loss_interctc_layer6=64.688, loss_interctc_layer12=53.327, loss_interctc_layer15=48.732, loss_interctc_layer21=56.938, loss=55.702, backward_time=0.208, grad_norm=59.080, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.114e-05, train_time=1.022 +[gpua006:0/64] 2024-02-15 07:32:32,799 (trainer:756) INFO: 43epoch:train:12201-12300batch: iter_time=9.085e-05, forward_time=0.142, loss_ctc=71.924, loss_interctc_layer6=79.390, loss_interctc_layer12=65.793, loss_interctc_layer15=60.347, loss_interctc_layer21=74.756, loss=70.442, backward_time=0.208, grad_norm=87.616, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.114e-05, train_time=1.400 +[gpua006:0/64] 2024-02-15 07:35:09,405 (trainer:756) INFO: 43epoch:train:12301-12400batch: iter_time=9.855e-05, forward_time=0.141, loss_ctc=65.521, loss_interctc_layer6=73.570, loss_interctc_layer12=60.892, loss_interctc_layer15=55.918, loss_interctc_layer21=67.763, loss=64.733, backward_time=0.206, grad_norm=71.924, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.113e-05, train_time=1.566 +[gpua006:0/64] 2024-02-15 07:37:30,070 (trainer:756) INFO: 43epoch:train:12401-12500batch: iter_time=9.277e-05, forward_time=0.142, loss_ctc=60.844, loss_interctc_layer6=73.619, loss_interctc_layer12=60.360, loss_interctc_layer15=55.026, loss_interctc_layer21=63.266, loss=62.623, backward_time=0.207, grad_norm=63.127, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=6.113e-05, train_time=1.406 +[gpua006:0/64] 2024-02-15 07:37:50,100 (multiple_iter_factory:32) INFO: Building 10th iter-factory... 
+[gpua006:0/64] 2024-02-15 07:38:09,075 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 07:38:12,468 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.9", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.9", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.9", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.9", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 07:38:12,468 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.9, +[gpua006:0/64] 2024-02-15 07:38:12,480 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 07:45:00,269 (trainer:756) INFO: 43epoch:train:12501-12600batch: iter_time=3.192, forward_time=0.173, loss_ctc=74.819, loss_interctc_layer6=84.143, loss_interctc_layer12=69.446, loss_interctc_layer15=63.578, loss_interctc_layer21=77.509, loss=73.899, backward_time=0.215, grad_norm=84.937, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.112e-05, train_time=4.500 +[gpua006:0/64] 2024-02-15 07:46:57,001 (trainer:756) INFO: 43epoch:train:12601-12700batch: iter_time=9.925e-05, forward_time=0.143, loss_ctc=65.282, loss_interctc_layer6=75.586, loss_interctc_layer12=62.061, loss_interctc_layer15=56.587, loss_interctc_layer21=67.613, loss=65.426, backward_time=0.207, grad_norm=78.442, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.112e-05, train_time=1.169 +[gpua006:0/64] 2024-02-15 07:49:20,213 (trainer:756) INFO: 43epoch:train:12701-12800batch: iter_time=9.841e-05, forward_time=0.144, loss_ctc=65.363, loss_interctc_layer6=79.707, loss_interctc_layer12=65.566, loss_interctc_layer15=59.871, loss_interctc_layer21=67.644, loss=67.630, backward_time=0.208, grad_norm=85.701, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.111e-05, train_time=1.432 +[gpua006:0/64] 2024-02-15 07:51:51,118 (trainer:756) INFO: 43epoch:train:12801-12900batch: iter_time=9.954e-05, forward_time=0.144, loss_ctc=63.888, loss_interctc_layer6=78.892, loss_interctc_layer12=64.726, loss_interctc_layer15=59.122, loss_interctc_layer21=66.258, loss=66.577, backward_time=0.209, grad_norm=114.462, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.111e-05, train_time=1.509 +[gpua006:0/64] 2024-02-15 07:54:10,118 (trainer:756) INFO: 43epoch:train:12901-13000batch: iter_time=9.557e-05, forward_time=0.194, loss_ctc=75.594, loss_interctc_layer6=88.688, loss_interctc_layer12=72.908, loss_interctc_layer15=66.522, loss_interctc_layer21=78.524, loss=76.447, backward_time=0.244, grad_norm=78.304, clip=100.000, loss_scale=6.896e+31, optim_step_time=0.143, optim0_lr0=6.110e-05, train_time=1.390 +[gpua006:0/64] 2024-02-15 07:55:42,889 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
+[gpua006:0/64] 2024-02-15 07:56:41,161 (trainer:756) INFO: 43epoch:train:13001-13100batch: iter_time=9.522e-05, forward_time=0.202, loss_ctc=69.381, loss_interctc_layer6=85.655, loss_interctc_layer12=70.763, loss_interctc_layer15=64.693, loss_interctc_layer21=71.657, loss=72.430, backward_time=0.224, grad_norm=107.409, clip=100.000, loss_scale=6.392e+31, optim_step_time=0.142, optim0_lr0=6.110e-05, train_time=1.510 +[gpua006:0/64] 2024-02-15 07:59:26,438 (trainer:756) INFO: 43epoch:train:13101-13200batch: iter_time=9.246e-05, forward_time=0.144, loss_ctc=60.877, loss_interctc_layer6=75.511, loss_interctc_layer12=62.668, loss_interctc_layer15=57.435, loss_interctc_layer21=62.835, loss=63.865, backward_time=0.209, grad_norm=82.637, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.110e-05, train_time=1.653 +[gpua006:0/64] 2024-02-15 08:02:05,044 (trainer:756) INFO: 43epoch:train:13201-13300batch: iter_time=9.396e-05, forward_time=0.147, loss_ctc=70.516, loss_interctc_layer6=78.840, loss_interctc_layer12=64.698, loss_interctc_layer15=58.977, loss_interctc_layer21=73.216, loss=69.249, backward_time=0.209, grad_norm=74.077, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.109e-05, train_time=1.586 +[gpua006:0/64] 2024-02-15 08:04:15,362 (trainer:756) INFO: 43epoch:train:13301-13400batch: iter_time=9.341e-05, forward_time=0.143, loss_ctc=61.273, loss_interctc_layer6=71.139, loss_interctc_layer12=58.376, loss_interctc_layer15=53.171, loss_interctc_layer21=63.755, loss=61.543, backward_time=0.209, grad_norm=77.289, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.109e-05, train_time=1.303 +[gpua006:0/64] 2024-02-15 08:06:18,052 (trainer:756) INFO: 43epoch:train:13401-13500batch: iter_time=9.719e-05, forward_time=0.143, loss_ctc=60.070, loss_interctc_layer6=69.011, loss_interctc_layer12=56.931, loss_interctc_layer15=52.049, loss_interctc_layer21=62.468, loss=60.106, backward_time=0.209, grad_norm=66.006, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.108e-05, train_time=1.227 +[gpua006:0/64] 2024-02-15 08:08:55,666 (trainer:756) INFO: 43epoch:train:13501-13600batch: iter_time=9.863e-05, forward_time=0.172, loss_ctc=78.536, loss_interctc_layer6=81.100, loss_interctc_layer12=67.293, loss_interctc_layer15=61.717, loss_interctc_layer21=81.389, loss=74.007, backward_time=0.212, grad_norm=80.252, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.143, optim0_lr0=6.108e-05, train_time=1.576 +[gpua006:0/64] 2024-02-15 08:11:01,519 (trainer:756) INFO: 43epoch:train:13601-13700batch: iter_time=9.787e-05, forward_time=0.176, loss_ctc=57.386, loss_interctc_layer6=69.899, loss_interctc_layer12=57.450, loss_interctc_layer15=52.523, loss_interctc_layer21=59.509, loss=59.353, backward_time=0.232, grad_norm=58.856, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.107e-05, train_time=1.258 +[gpua006:0/64] 2024-02-15 08:13:05,705 (multiple_iter_factory:32) INFO: Building 11th iter-factory... 
+[gpua006:0/64] 2024-02-15 08:13:24,582 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 08:13:28,046 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.6", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.6", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.6", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.6", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 08:13:28,046 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.6, +[gpua006:0/64] 2024-02-15 08:13:28,050 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 08:19:31,445 (trainer:756) INFO: 43epoch:train:13701-13800batch: iter_time=3.208, forward_time=0.184, loss_ctc=62.656, loss_interctc_layer6=73.065, loss_interctc_layer12=59.919, loss_interctc_layer15=54.726, loss_interctc_layer21=65.168, loss=63.107, backward_time=0.219, grad_norm=93.542, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.107e-05, train_time=5.099 +[gpua006:0/64] 2024-02-15 08:21:05,475 (trainer:756) INFO: 43epoch:train:13801-13900batch: iter_time=7.972e-05, forward_time=0.143, loss_ctc=80.472, loss_interctc_layer6=85.612, loss_interctc_layer12=70.501, loss_interctc_layer15=64.505, loss_interctc_layer21=83.408, loss=76.900, backward_time=0.209, grad_norm=96.503, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.106e-05, train_time=0.940 +[gpua006:0/64] 2024-02-15 08:22:47,639 (trainer:756) INFO: 43epoch:train:13901-14000batch: iter_time=8.839e-05, forward_time=0.185, loss_ctc=65.298, loss_interctc_layer6=76.752, loss_interctc_layer12=63.159, loss_interctc_layer15=57.674, loss_interctc_layer21=67.671, loss=66.111, backward_time=0.236, grad_norm=134.606, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=6.106e-05, train_time=1.021 +[gpua006:0/64] 2024-02-15 08:25:25,187 (trainer:756) INFO: 43epoch:train:14001-14100batch: iter_time=8.488e-05, forward_time=0.167, loss_ctc=67.325, loss_interctc_layer6=78.123, loss_interctc_layer12=64.519, loss_interctc_layer15=58.940, loss_interctc_layer21=69.629, loss=67.707, backward_time=0.224, grad_norm=87.292, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.141, optim0_lr0=6.105e-05, train_time=1.575 +[gpua006:0/64] 2024-02-15 08:27:24,318 (trainer:756) INFO: 43epoch:train:14101-14200batch: iter_time=8.817e-05, forward_time=0.162, loss_ctc=73.515, loss_interctc_layer6=83.065, loss_interctc_layer12=68.031, loss_interctc_layer15=62.028, loss_interctc_layer21=76.339, loss=72.596, backward_time=0.219, grad_norm=71.859, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.105e-05, train_time=1.191 +[gpua006:0/64] 2024-02-15 08:29:43,325 (trainer:756) INFO: 43epoch:train:14201-14300batch: iter_time=8.778e-05, forward_time=0.143, loss_ctc=73.491, loss_interctc_layer6=90.906, loss_interctc_layer12=75.138, loss_interctc_layer15=68.920, loss_interctc_layer21=76.158, loss=76.923, backward_time=0.209, grad_norm=92.765, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.104e-05, train_time=1.390 +[gpua006:0/64] 2024-02-15 08:32:18,892 (trainer:756) INFO: 
43epoch:train:14301-14400batch: iter_time=8.571e-05, forward_time=0.142, loss_ctc=66.886, loss_interctc_layer6=77.699, loss_interctc_layer12=64.384, loss_interctc_layer15=58.924, loss_interctc_layer21=69.257, loss=67.430, backward_time=0.208, grad_norm=84.358, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=6.104e-05, train_time=1.555 +[gpua006:0/64] 2024-02-15 08:35:13,670 (trainer:756) INFO: 43epoch:train:14401-14500batch: iter_time=8.560e-05, forward_time=0.170, loss_ctc=68.316, loss_interctc_layer6=78.316, loss_interctc_layer12=64.481, loss_interctc_layer15=58.857, loss_interctc_layer21=70.667, loss=68.127, backward_time=0.227, grad_norm=94.539, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=6.103e-05, train_time=1.748 +[gpua006:0/64] 2024-02-15 08:35:44,900 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. +[gpua006:0/64] 2024-02-15 08:37:10,228 (trainer:756) INFO: 43epoch:train:14501-14600batch: iter_time=9.184e-05, forward_time=0.190, loss_ctc=68.983, loss_interctc_layer6=74.550, loss_interctc_layer12=60.769, loss_interctc_layer15=55.242, loss_interctc_layer21=71.611, loss=66.231, backward_time=0.223, grad_norm=62.299, clip=100.000, loss_scale=2.438e+31, optim_step_time=0.142, optim0_lr0=6.103e-05, train_time=1.165 +[gpua006:0/64] 2024-02-15 08:39:26,053 (trainer:756) INFO: 43epoch:train:14601-14700batch: iter_time=8.847e-05, forward_time=0.164, loss_ctc=58.464, loss_interctc_layer6=65.674, loss_interctc_layer12=54.222, loss_interctc_layer15=49.779, loss_interctc_layer21=60.785, loss=57.785, backward_time=0.209, grad_norm=86.618, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.102e-05, train_time=1.358 +[gpua006:0/64] 2024-02-15 08:41:37,014 (trainer:756) INFO: 43epoch:train:14701-14800batch: iter_time=8.501e-05, forward_time=0.143, loss_ctc=76.382, loss_interctc_layer6=79.511, loss_interctc_layer12=65.707, loss_interctc_layer15=60.205, loss_interctc_layer21=79.418, loss=72.245, backward_time=0.208, grad_norm=86.278, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.102e-05, train_time=1.309 +[gpua006:0/64] 2024-02-15 08:44:02,715 (trainer:756) INFO: 43epoch:train:14801-14900batch: iter_time=8.669e-05, forward_time=0.158, loss_ctc=67.448, loss_interctc_layer6=72.987, loss_interctc_layer12=60.328, loss_interctc_layer15=55.325, loss_interctc_layer21=69.460, loss=65.110, backward_time=0.218, grad_norm=78.687, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=6.101e-05, train_time=1.457 +[gpua006:0/64] 2024-02-15 08:46:40,629 (trainer:756) INFO: 43epoch:train:14901-15000batch: iter_time=8.734e-05, forward_time=0.143, loss_ctc=64.713, loss_interctc_layer6=73.743, loss_interctc_layer12=60.558, loss_interctc_layer15=55.174, loss_interctc_layer21=67.071, loss=64.252, backward_time=0.208, grad_norm=69.042, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.101e-05, train_time=1.579 +[gpua006:0/64] 2024-02-15 09:19:27,461 (trainer:355) INFO: 43epoch results: [train] iter_time=0.254, forward_time=0.159, loss_ctc=68.302, loss_interctc_layer6=78.431, loss_interctc_layer12=64.728, loss_interctc_layer15=59.217, loss_interctc_layer21=70.756, loss=68.287, backward_time=0.216, grad_norm=85.495, clip=100.000, loss_scale=1.782e+31, optim_step_time=0.139, optim0_lr0=6.137e-05, train_time=1.600, time=6 hours, 40 minutes and 25.3 seconds, total_count=645000, gpu_max_cached_mem_GB=33.436, [valid] loss_ctc=40.212, cer_ctc=0.187, 
loss_interctc_layer6=45.885, cer_interctc_layer6=0.203, loss_interctc_layer12=33.292, cer_interctc_layer12=0.139, loss_interctc_layer15=29.212, cer_interctc_layer15=0.116, loss_interctc_layer21=42.652, cer_interctc_layer21=0.198, loss=38.251, time=32 minutes and 22.75 seconds, total_count=200853, gpu_max_cached_mem_GB=33.436 +[gpua006:0/64] 2024-02-15 09:19:47,545 (trainer:410) INFO: The best model has been updated: valid.total_count +[gpua006:0/64] 2024-02-15 09:19:47,632 (trainer:289) INFO: 44/45epoch started. Estimated time to finish: 14 hours, 23 minutes and 32.17 seconds +[gpua006:0/64] 2024-02-15 09:19:47,651 (multiple_iter_factory:32) INFO: Building 0th iter-factory... +[gpua006:0/64] 2024-02-15 09:20:05,577 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 09:20:08,923 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.9", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.9", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.9", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.9", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 09:20:08,923 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.9, +[gpua006:0/64] 2024-02-15 09:20:08,926 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 09:26:34,234 (trainer:756) INFO: 44epoch:train:1-100batch: iter_time=2.708, forward_time=0.183, loss_ctc=77.634, loss_interctc_layer6=82.759, loss_interctc_layer12=68.578, loss_interctc_layer15=62.920, loss_interctc_layer21=80.777, loss=74.533, backward_time=0.220, grad_norm=77.508, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.142, optim0_lr0=6.100e-05, train_time=4.065 +[gpua006:0/64] 2024-02-15 09:28:09,873 (trainer:756) INFO: 44epoch:train:101-200batch: iter_time=9.650e-05, forward_time=0.141, loss_ctc=56.285, loss_interctc_layer6=67.481, loss_interctc_layer12=55.657, loss_interctc_layer15=50.911, loss_interctc_layer21=58.459, loss=57.758, backward_time=0.208, grad_norm=225.145, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.100e-05, train_time=0.957 +[gpua006:0/64] 2024-02-15 09:30:12,180 (trainer:756) INFO: 44epoch:train:201-300batch: iter_time=3.802e-04, forward_time=0.280, loss_ctc=63.433, loss_interctc_layer6=70.136, loss_interctc_layer12=57.925, loss_interctc_layer15=52.978, loss_interctc_layer21=65.663, loss=62.027, backward_time=0.290, grad_norm=66.814, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.147, optim0_lr0=6.100e-05, train_time=1.221 +[gpua006:0/64] 2024-02-15 09:30:21,881 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
+[gpua006:0/64] 2024-02-15 09:31:47,615 (trainer:756) INFO: 44epoch:train:301-400batch: iter_time=1.031e-04, forward_time=0.143, loss_ctc=83.750, loss_interctc_layer6=87.694, loss_interctc_layer12=72.805, loss_interctc_layer15=66.694, loss_interctc_layer21=86.749, loss=79.538, backward_time=0.209, grad_norm=86.668, clip=100.000, loss_scale=1.096e+31, optim_step_time=0.137, optim0_lr0=6.099e-05, train_time=0.956 +[gpua006:0/64] 2024-02-15 09:34:13,687 (trainer:756) INFO: 44epoch:train:401-500batch: iter_time=1.080e-04, forward_time=0.142, loss_ctc=72.036, loss_interctc_layer6=74.782, loss_interctc_layer12=61.802, loss_interctc_layer15=56.494, loss_interctc_layer21=74.877, loss=67.998, backward_time=0.207, grad_norm=86.313, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.099e-05, train_time=1.460 +[gpua006:0/64] 2024-02-15 09:36:05,010 (trainer:756) INFO: 44epoch:train:501-600batch: iter_time=1.027e-04, forward_time=0.141, loss_ctc=75.509, loss_interctc_layer6=81.772, loss_interctc_layer12=68.288, loss_interctc_layer15=62.879, loss_interctc_layer21=78.419, loss=73.373, backward_time=0.206, grad_norm=86.640, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.098e-05, train_time=1.113 +[gpua006:0/64] 2024-02-15 09:38:20,012 (trainer:756) INFO: 44epoch:train:601-700batch: iter_time=9.836e-05, forward_time=0.143, loss_ctc=84.275, loss_interctc_layer6=94.667, loss_interctc_layer12=78.727, loss_interctc_layer15=72.431, loss_interctc_layer21=87.114, loss=83.443, backward_time=0.206, grad_norm=83.936, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.098e-05, train_time=1.350 +[gpua006:0/64] 2024-02-15 09:40:44,189 (trainer:756) INFO: 44epoch:train:701-800batch: iter_time=6.955e-04, forward_time=0.242, loss_ctc=72.003, loss_interctc_layer6=88.599, loss_interctc_layer12=73.802, loss_interctc_layer15=68.325, loss_interctc_layer21=74.531, loss=75.452, backward_time=0.281, grad_norm=91.316, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.151, optim0_lr0=6.097e-05, train_time=1.440 +[gpua006:0/64] 2024-02-15 09:42:46,722 (trainer:756) INFO: 44epoch:train:801-900batch: iter_time=9.510e-05, forward_time=0.143, loss_ctc=61.342, loss_interctc_layer6=74.789, loss_interctc_layer12=61.730, loss_interctc_layer15=56.616, loss_interctc_layer21=63.514, loss=63.598, backward_time=0.206, grad_norm=77.887, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.097e-05, train_time=1.227 +[gpua006:0/64] 2024-02-15 09:44:59,746 (trainer:756) INFO: 44epoch:train:901-1000batch: iter_time=9.190e-05, forward_time=0.142, loss_ctc=81.914, loss_interctc_layer6=87.631, loss_interctc_layer12=72.955, loss_interctc_layer15=67.088, loss_interctc_layer21=84.970, loss=78.912, backward_time=0.205, grad_norm=81.784, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.096e-05, train_time=1.330 +[gpua006:0/64] 2024-02-15 09:47:06,494 (trainer:756) INFO: 44epoch:train:1001-1100batch: iter_time=8.965e-05, forward_time=0.142, loss_ctc=79.491, loss_interctc_layer6=90.534, loss_interctc_layer12=75.991, loss_interctc_layer15=70.323, loss_interctc_layer21=82.499, loss=79.767, backward_time=0.206, grad_norm=108.245, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=6.096e-05, train_time=1.268 +[gpua006:0/64] 2024-02-15 09:49:40,081 (trainer:756) INFO: 44epoch:train:1101-1200batch: iter_time=3.980e-04, forward_time=0.280, loss_ctc=71.262, loss_interctc_layer6=78.790, loss_interctc_layer12=65.577, 
loss_interctc_layer15=60.213, loss_interctc_layer21=73.972, loss=69.963, backward_time=0.257, grad_norm=80.323, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.150, optim0_lr0=6.095e-05, train_time=1.536 +[gpua006:0/64] 2024-02-15 09:50:58,757 (multiple_iter_factory:32) INFO: Building 1th iter-factory... +[gpua006:0/64] 2024-02-15 09:51:17,571 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 09:51:21,212 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.1", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.1", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.1", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.1", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 09:51:21,212 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.1, +[gpua006:0/64] 2024-02-15 09:51:21,215 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 09:56:33,500 (trainer:756) INFO: 44epoch:train:1201-1300batch: iter_time=2.941, forward_time=0.142, loss_ctc=74.335, loss_interctc_layer6=85.030, loss_interctc_layer12=70.251, loss_interctc_layer15=64.351, loss_interctc_layer21=77.000, loss=74.193, backward_time=0.206, grad_norm=103.885, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.095e-05, train_time=4.134 +[gpua006:0/64] 2024-02-15 09:58:29,049 (trainer:756) INFO: 44epoch:train:1301-1400batch: iter_time=8.761e-05, forward_time=0.142, loss_ctc=66.133, loss_interctc_layer6=72.987, loss_interctc_layer12=60.305, loss_interctc_layer15=55.279, loss_interctc_layer21=68.606, loss=64.662, backward_time=0.207, grad_norm=122.171, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.094e-05, train_time=1.155 +[gpua006:0/64] 2024-02-15 10:00:15,775 (trainer:756) INFO: 44epoch:train:1401-1500batch: iter_time=9.453e-05, forward_time=0.141, loss_ctc=62.126, loss_interctc_layer6=70.812, loss_interctc_layer12=58.540, loss_interctc_layer15=53.596, loss_interctc_layer21=64.318, loss=61.878, backward_time=0.207, grad_norm=86.717, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.094e-05, train_time=1.066 +[gpua006:0/64] 2024-02-15 10:02:08,666 (trainer:756) INFO: 44epoch:train:1501-1600batch: iter_time=9.377e-05, forward_time=0.141, loss_ctc=58.164, loss_interctc_layer6=69.414, loss_interctc_layer12=57.305, loss_interctc_layer15=52.457, loss_interctc_layer21=60.131, loss=59.494, backward_time=0.207, grad_norm=85.828, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.093e-05, train_time=1.129 +[gpua006:0/64] 2024-02-15 10:04:58,067 (trainer:756) INFO: 44epoch:train:1601-1700batch: iter_time=3.349e-04, forward_time=0.268, loss_ctc=97.147, loss_interctc_layer6=92.159, loss_interctc_layer12=76.026, loss_interctc_layer15=69.560, loss_interctc_layer21=100.989, loss=87.176, backward_time=0.235, grad_norm=79.610, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.166, optim0_lr0=6.093e-05, train_time=1.692 +[gpua006:0/64] 2024-02-15 10:07:12,258 (trainer:756) INFO: 44epoch:train:1701-1800batch: iter_time=1.160e-04, forward_time=0.144, loss_ctc=64.532, loss_interctc_layer6=71.773, 
loss_interctc_layer12=58.958, loss_interctc_layer15=53.900, loss_interctc_layer21=67.033, loss=63.239, backward_time=0.208, grad_norm=65.881, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.092e-05, train_time=1.343 +[gpua006:0/64] 2024-02-15 10:09:15,250 (trainer:756) INFO: 44epoch:train:1801-1900batch: iter_time=9.277e-05, forward_time=0.142, loss_ctc=77.346, loss_interctc_layer6=87.375, loss_interctc_layer12=72.798, loss_interctc_layer15=67.033, loss_interctc_layer21=80.395, loss=76.990, backward_time=0.205, grad_norm=79.665, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.092e-05, train_time=1.229 +[gpua006:0/64] 2024-02-15 10:11:12,192 (trainer:756) INFO: 44epoch:train:1901-2000batch: iter_time=8.839e-05, forward_time=0.143, loss_ctc=80.738, loss_interctc_layer6=96.610, loss_interctc_layer12=80.613, loss_interctc_layer15=74.359, loss_interctc_layer21=83.480, loss=83.160, backward_time=0.207, grad_norm=108.961, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.092e-05, train_time=1.170 +[gpua006:0/64] 2024-02-15 10:13:11,644 (trainer:756) INFO: 44epoch:train:2001-2100batch: iter_time=9.328e-05, forward_time=0.143, loss_ctc=61.435, loss_interctc_layer6=74.387, loss_interctc_layer12=61.334, loss_interctc_layer15=56.118, loss_interctc_layer21=63.606, loss=63.376, backward_time=0.208, grad_norm=74.889, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.091e-05, train_time=1.194 +[gpua006:0/64] 2024-02-15 10:15:26,870 (trainer:756) INFO: 44epoch:train:2101-2200batch: iter_time=9.571e-05, forward_time=0.144, loss_ctc=72.654, loss_interctc_layer6=83.287, loss_interctc_layer12=69.218, loss_interctc_layer15=63.724, loss_interctc_layer21=75.368, loss=72.850, backward_time=0.207, grad_norm=81.610, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=6.091e-05, train_time=1.352 +[gpua006:0/64] 2024-02-15 10:18:19,011 (trainer:756) INFO: 44epoch:train:2201-2300batch: iter_time=8.756e-05, forward_time=0.143, loss_ctc=79.780, loss_interctc_layer6=84.069, loss_interctc_layer12=69.821, loss_interctc_layer15=63.973, loss_interctc_layer21=82.915, loss=76.112, backward_time=0.208, grad_norm=134.447, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=6.090e-05, train_time=1.721 +[gpua006:0/64] 2024-02-15 10:21:07,978 (trainer:756) INFO: 44epoch:train:2301-2400batch: iter_time=9.003e-05, forward_time=0.215, loss_ctc=74.122, loss_interctc_layer6=83.755, loss_interctc_layer12=69.977, loss_interctc_layer15=64.478, loss_interctc_layer21=76.778, loss=73.822, backward_time=0.292, grad_norm=78.012, clip=100.000, loss_scale=1.937e+31, optim_step_time=0.142, optim0_lr0=6.090e-05, train_time=1.686 +[gpua006:0/64] 2024-02-15 10:23:28,297 (trainer:756) INFO: 44epoch:train:2401-2500batch: iter_time=8.081e-05, forward_time=0.145, loss_ctc=70.965, loss_interctc_layer6=82.973, loss_interctc_layer12=68.963, loss_interctc_layer15=63.352, loss_interctc_layer21=73.620, loss=71.975, backward_time=0.208, grad_norm=73.331, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.089e-05, train_time=1.406 +[gpua006:0/64] 2024-02-15 10:23:48,328 (multiple_iter_factory:32) INFO: Building 2th iter-factory... 
+[gpua006:0/64] 2024-02-15 10:24:07,360 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 10:24:10,802 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.11", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.11", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.11", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.11", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 10:24:10,802 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.11, +[gpua006:0/64] 2024-02-15 10:24:10,813 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 10:32:12,464 (trainer:756) INFO: 44epoch:train:2501-2600batch: iter_time=3.641, forward_time=0.144, loss_ctc=76.482, loss_interctc_layer6=81.703, loss_interctc_layer12=67.578, loss_interctc_layer15=61.873, loss_interctc_layer21=79.506, loss=73.429, backward_time=0.207, grad_norm=72.969, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.089e-05, train_time=5.240 +[gpua006:0/64] 2024-02-15 10:34:40,861 (trainer:756) INFO: 44epoch:train:2601-2700batch: iter_time=8.971e-05, forward_time=0.142, loss_ctc=55.469, loss_interctc_layer6=66.842, loss_interctc_layer12=54.931, loss_interctc_layer15=50.244, loss_interctc_layer21=57.412, loss=56.980, backward_time=0.208, grad_norm=58.430, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.088e-05, train_time=1.485 +[gpua006:0/64] 2024-02-15 10:37:02,825 (trainer:756) INFO: 44epoch:train:2701-2800batch: iter_time=1.009e-04, forward_time=0.143, loss_ctc=63.297, loss_interctc_layer6=70.216, loss_interctc_layer12=57.886, loss_interctc_layer15=52.967, loss_interctc_layer21=65.561, loss=61.985, backward_time=0.208, grad_norm=69.757, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.088e-05, train_time=1.419 +[gpua006:0/64] 2024-02-15 10:39:59,823 (trainer:756) INFO: 44epoch:train:2801-2900batch: iter_time=1.029e-04, forward_time=0.143, loss_ctc=81.267, loss_interctc_layer6=86.948, loss_interctc_layer12=71.852, loss_interctc_layer15=65.954, loss_interctc_layer21=84.325, loss=78.069, backward_time=0.208, grad_norm=82.667, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=6.087e-05, train_time=1.770 +[gpua006:0/64] 2024-02-15 10:42:29,224 (trainer:756) INFO: 44epoch:train:2901-3000batch: iter_time=1.074e-04, forward_time=0.143, loss_ctc=69.721, loss_interctc_layer6=74.033, loss_interctc_layer12=61.042, loss_interctc_layer15=55.892, loss_interctc_layer21=72.480, loss=66.634, backward_time=0.207, grad_norm=75.206, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.087e-05, train_time=1.493 +[gpua006:0/64] 2024-02-15 10:45:37,677 (trainer:756) INFO: 44epoch:train:3001-3100batch: iter_time=1.145e-04, forward_time=0.142, loss_ctc=73.336, loss_interctc_layer6=79.669, loss_interctc_layer12=66.154, loss_interctc_layer15=60.779, loss_interctc_layer21=76.257, loss=71.239, backward_time=0.227, grad_norm=69.876, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.141, optim0_lr0=6.086e-05, train_time=1.885 +[gpua006:0/64] 2024-02-15 10:48:37,450 (trainer:756) INFO: 
44epoch:train:3101-3200batch: iter_time=1.103e-04, forward_time=0.331, loss_ctc=84.247, loss_interctc_layer6=93.512, loss_interctc_layer12=77.452, loss_interctc_layer15=71.060, loss_interctc_layer21=87.382, loss=82.731, backward_time=0.320, grad_norm=71.658, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.146, optim0_lr0=6.086e-05, train_time=1.795 +[gpua006:0/64] 2024-02-15 10:51:59,411 (trainer:756) INFO: 44epoch:train:3201-3300batch: iter_time=1.062e-04, forward_time=0.143, loss_ctc=70.151, loss_interctc_layer6=86.252, loss_interctc_layer12=71.696, loss_interctc_layer15=65.897, loss_interctc_layer21=72.654, loss=73.330, backward_time=0.209, grad_norm=116.488, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.085e-05, train_time=2.022 +[gpua006:0/64] 2024-02-15 10:54:55,408 (trainer:756) INFO: 44epoch:train:3301-3400batch: iter_time=1.092e-04, forward_time=0.143, loss_ctc=59.716, loss_interctc_layer6=73.643, loss_interctc_layer12=60.640, loss_interctc_layer15=55.488, loss_interctc_layer21=61.837, loss=62.265, backward_time=0.208, grad_norm=86.081, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.085e-05, train_time=1.758 +[gpua006:0/64] 2024-02-15 10:58:15,940 (trainer:756) INFO: 44epoch:train:3401-3500batch: iter_time=1.048e-04, forward_time=0.144, loss_ctc=80.048, loss_interctc_layer6=85.667, loss_interctc_layer12=70.938, loss_interctc_layer15=65.184, loss_interctc_layer21=82.897, loss=76.947, backward_time=0.208, grad_norm=75.580, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.084e-05, train_time=2.007 +[gpua006:0/64] 2024-02-15 11:00:49,025 (trainer:756) INFO: 44epoch:train:3501-3600batch: iter_time=1.128e-04, forward_time=0.143, loss_ctc=79.114, loss_interctc_layer6=89.694, loss_interctc_layer12=75.392, loss_interctc_layer15=69.596, loss_interctc_layer21=81.909, loss=79.141, backward_time=0.209, grad_norm=83.173, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.084e-05, train_time=1.531 +[gpua006:0/64] 2024-02-15 11:03:13,795 (trainer:756) INFO: 44epoch:train:3601-3700batch: iter_time=1.067e-04, forward_time=0.145, loss_ctc=69.951, loss_interctc_layer6=78.325, loss_interctc_layer12=65.014, loss_interctc_layer15=59.592, loss_interctc_layer21=72.622, loss=69.101, backward_time=0.208, grad_norm=72.529, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.084e-05, train_time=1.447 +[gpua006:0/64] 2024-02-15 11:04:53,648 (multiple_iter_factory:32) INFO: Building 3th iter-factory... 
+[gpua006:0/64] 2024-02-15 11:05:12,294 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua006:0/64] 2024-02-15 11:05:15,691 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.5", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.5", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.5", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.5", "type": "text"} + preprocess: ) +[gpua006:0/64] 2024-02-15 11:05:15,691 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.5, +[gpua006:0/64] 2024-02-15 11:05:15,783 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua006:0/64] 2024-02-15 11:15:28,642 (trainer:756) INFO: 44epoch:train:3701-3800batch: iter_time=5.571, forward_time=0.216, loss_ctc=73.940, loss_interctc_layer6=84.330, loss_interctc_layer12=69.636, loss_interctc_layer15=63.855, loss_interctc_layer21=76.722, loss=73.697, backward_time=0.239, grad_norm=89.647, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.142, optim0_lr0=6.083e-05, train_time=7.348 +[gpua006:0/64] 2024-02-15 11:17:15,048 (trainer:756) INFO: 44epoch:train:3801-3900batch: iter_time=8.877e-05, forward_time=0.158, loss_ctc=65.901, loss_interctc_layer6=72.760, loss_interctc_layer12=59.931, loss_interctc_layer15=54.895, loss_interctc_layer21=68.386, loss=64.375, backward_time=0.219, grad_norm=66.076, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.142, optim0_lr0=6.083e-05, train_time=1.064 +[gpua006:0/64] 2024-02-15 11:18:56,845 (trainer:756) INFO: 44epoch:train:3901-4000batch: iter_time=8.731e-05, forward_time=0.144, loss_ctc=61.496, loss_interctc_layer6=69.795, loss_interctc_layer12=57.591, loss_interctc_layer15=52.750, loss_interctc_layer21=63.752, loss=61.077, backward_time=0.210, grad_norm=71.413, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.145, optim0_lr0=6.082e-05, train_time=1.018 +[gpua006:0/64] 2024-02-15 11:21:57,707 (trainer:756) INFO: 44epoch:train:4001-4100batch: iter_time=0.002, forward_time=0.293, loss_ctc=57.337, loss_interctc_layer6=68.787, loss_interctc_layer12=56.707, loss_interctc_layer15=51.853, loss_interctc_layer21=59.425, loss=58.822, backward_time=0.488, grad_norm=75.194, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.157, optim0_lr0=6.082e-05, train_time=1.807 +[gpua006:0/64] 2024-02-15 11:25:03,749 (trainer:756) INFO: 44epoch:train:4101-4200batch: iter_time=9.629e-05, forward_time=0.146, loss_ctc=97.266, loss_interctc_layer6=91.456, loss_interctc_layer12=75.342, loss_interctc_layer15=68.980, loss_interctc_layer21=101.057, loss=86.820, backward_time=0.209, grad_norm=99.745, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=6.081e-05, train_time=1.862 +[gpua006:0/64] 2024-02-15 11:29:29,995 (trainer:756) INFO: 44epoch:train:4201-4300batch: iter_time=1.016e-04, forward_time=0.142, loss_ctc=64.508, loss_interctc_layer6=71.820, loss_interctc_layer12=59.249, loss_interctc_layer15=54.136, loss_interctc_layer21=67.107, loss=63.364, backward_time=0.206, grad_norm=87.387, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=6.081e-05, train_time=2.662 +[gpua006:0/64] 2024-02-15 11:34:23,370 (trainer:756) INFO: 44epoch:train:4301-4400batch: 
iter_time=0.003, forward_time=0.295, loss_ctc=77.120, loss_interctc_layer6=86.206, loss_interctc_layer12=71.819, loss_interctc_layer15=66.123, loss_interctc_layer21=80.205, loss=76.295, backward_time=0.513, grad_norm=99.411, clip=100.000, loss_scale=3.874e+31, optim_step_time=0.152, optim0_lr0=6.080e-05, train_time=2.932 +srun: Job step aborted: Waiting up to 32 seconds for job step to finish. +slurmstepd: error: *** STEP 2984114.0 ON gpua006 CANCELLED AT 2024-02-15T11:36:38 ***