diff --git "a/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/train.17.log" "b/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/train.17.log" new file mode 100644--- /dev/null +++ "b/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/train.17.log" @@ -0,0 +1,3374 @@ +# Running on gpua007.delta.ncsa.illinois.edu +# Started at Fri Feb 2 19:11:44 CST 2024 +# SLURMD_NODENAME=gpua007 +# SLURM_CLUSTER_NAME=delta +# SLURM_CONF=/var/spool/slurmd/conf-cache/slurm.conf +# SLURM_CPUS_ON_NODE=64 +# SLURM_CPUS_PER_TASK=64 +# SLURM_EXPORT_ENV=PATH +# SLURM_GET_USER_ENV=1 +# SLURM_GPUS_ON_NODE=4 +# SLURM_GTIDS=0 +# SLURM_JOBID=2915965 +# SLURM_JOB_ACCOUNT=bbjs-delta-gpu +# SLURM_JOB_CPUS_PER_NODE='64(x16)' +# SLURM_JOB_END_TIME=1707095491 +# SLURM_JOB_GID=202 +# SLURM_JOB_GPUS=0,1,2,3 +# SLURM_JOB_ID=2915965 +# SLURM_JOB_NAME=exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/train.log +# SLURM_JOB_NODELIST='gpua[007-008,010-012,014,020,038,042,061,065,072,083-084,093,095]' +# SLURM_JOB_NUM_NODES=16 +# SLURM_JOB_PARTITION=gpuA100x4 +# SLURM_JOB_QOS=bbjs-delta-gpu +# SLURM_JOB_START_TIME=1706922691 +# SLURM_JOB_UID=68077 +# SLURM_JOB_USER=peng6 +# SLURM_LOCALID=0 +# SLURM_MEM_PER_NODE=240000 +# SLURM_MPI_TYPE=pmi2 +# SLURM_NNODES=16 +# SLURM_NODEID=0 +# SLURM_NODELIST='gpua[007-008,010-012,014,020,038,042,061,065,072,083-084,093,095]' +# SLURM_NODE_ALIASES='(null)' +# SLURM_OPEN_MODE=a +# SLURM_PRIO_PROCESS=0 +# SLURM_PROCID=0 +# SLURM_SUBMIT_DIR=/scratch/bbjs/peng6/espnet-owsm-ctc/egs2/owsm_v3.1_ctc/s2t1 +# SLURM_SUBMIT_HOST=dt-login03.delta.ncsa.illinois.edu +# SLURM_TASKS_PER_NODE='1(x16)' +# SLURM_TASK_PID=2608704 +# SLURM_TOPOLOGY_ADDR=ss00.ss05.gpua007 +# SLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.node +# SLURM_WORKING_CLUSTER=delta:dt-sched:6817:9984:109 +# srun --export=ALL python3 -m espnet2.bin.s2t_train --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file 
exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_c46798ff-4480-462f-b8d1-a1755c22635c
+/scratch/bbjs/peng6/espnet-owsm-ctc/tools/miniconda/envs/espnet/bin/python3 /scratch/bbjs/peng6/espnet-owsm-ctc/espnet2/bin/s2t_train.py --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file
exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_c46798ff-4480-462f-b8d1-a1755c22635c +ile:///scratch/bbjs/peng6/espnet-owsm-ctc/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_c46798ff-4480-462f-b8d1-a1755c22635c +ile:///scratch/bbjs/peng6/espnet-owsm-ctc/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_c46798ff-4480-462f-b8d1-a1755c22635c +[gpua007:0/64] 2024-02-02 19:14:50,169 (distributed_c10d:319) INFO: Added key: store_based_barrier_key:1 to store for rank: 0 +[gpua007:0/64] 2024-02-02 19:14:50,327 (distributed_c10d:353) INFO: Rank 0: Completed store-based barrier for key:store_based_barrier_key:1 with 64 nodes. +[gpua007:0/64] 2024-02-02 19:14:50,357 (s2t:420) INFO: Vocabulary size: 50002 +[gpua007:0/64] 2024-02-02 19:15:03,424 (abs_task:1270) INFO: pytorch.version=1.13.1, cuda.available=True, cudnn.version=8500, cudnn.benchmark=False, cudnn.deterministic=True +[gpua007:0/64] 2024-02-02 19:15:03,435 (abs_task:1271) INFO: Model structure: +ESPnetS2TCTCModel( + (frontend): DefaultFrontend( + (stft): Stft(n_fft=512, win_length=400, hop_length=160, center=True, normalized=False, onesided=True) + (frontend): Frontend() + (logmel): LogMel(sr=16000, n_fft=512, n_mels=80, fmin=0, fmax=8000.0, htk=False) + ) + (specaug): SpecAug( + (freq_mask): MaskAlongAxis(mask_width_range=[0, 27], num_mask=2, axis=freq) + (time_mask): MaskAlongAxisVariableMaxWidth(mask_width_ratio_range=[0.0, 0.05], num_mask=10, axis=time) + ) + (normalize): GlobalMVN(stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz, norm_means=True, norm_vars=True) + (encoder): EBranchformerCTCEncoder( + (embed): Conv2dSubsampling8( + (conv): Sequential( + (0): Conv2d(1, 1024, kernel_size=(3, 3), stride=(2, 2)) + (1): ReLU() + (2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(2, 2)) + (3): ReLU() + (4): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(2, 2)) + (5): ReLU() + ) + (out): Linear(in_features=9216, out_features=1024, bias=True) + (pos_enc): PositionalEncoding( + (dropout): Dropout(p=0.1, inplace=False) + ) + ) + (encoders): MultiSequential( + (0): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, 
out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (1): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (2): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, 
kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (3): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), 
groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (4): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (5): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + 
(norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (6): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (7): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + 
(dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (8): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) 
+ ) + (9): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (10): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): 
LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (11): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (12): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): 
Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (13): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (14): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, 
out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (15): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + 
(norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (16): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (17): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): 
Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (18): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (19): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): 
ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (20): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + 
(dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (21): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (22): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): 
LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (23): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (24): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, 
elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (25): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + (26): EBranchformerEncoderLayer( + (attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + 
(linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (cgmlp): ConvolutionalGatingMLP( + (channel_proj1): Sequential( + (0): Linear(in_features=1024, out_features=4096, bias=True) + (1): GELU(approximate='none') + ) + (csgu): ConvolutionalSpatialGatingUnit( + (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) + (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (act): Identity() + (dropout): Dropout(p=0.1, inplace=False) + ) + (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (feed_forward_macaron): PositionwiseFeedForward( + (w_1): Linear(in_features=1024, out_features=4096, bias=True) + (w_2): Linear(in_features=4096, out_features=1024, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): Swish() + ) + (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (cross_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=1024, out_features=1024, bias=True) + (linear_k): Linear(in_features=1024, out_features=1024, bias=True) + (linear_v): Linear(in_features=1024, out_features=1024, bias=True) + (linear_out): Linear(in_features=1024, out_features=1024, bias=True) + (dropout): Identity() + ) + (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) + (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) + ) + ) + (after_norm): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) + (conditioning_layer): Linear(in_features=50002, out_features=1024, bias=True) + ) + (prompt_encoder): TransformerEncoder( + (encoders): MultiSequential( + (0): EncoderLayer( + (self_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=512, out_features=512, bias=True) + (linear_k): Linear(in_features=512, out_features=512, bias=True) + (linear_v): Linear(in_features=512, out_features=512, bias=True) + (linear_out): Linear(in_features=512, out_features=512, bias=True) + (dropout): Identity() + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=512, out_features=2048, bias=True) + (w_2): Linear(in_features=2048, out_features=512, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): ReLU() + ) + (norm1): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (norm2): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + ) + (1): EncoderLayer( + (self_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=512, out_features=512, bias=True) + (linear_k): Linear(in_features=512, out_features=512, bias=True) + (linear_v): Linear(in_features=512, out_features=512, bias=True) + (linear_out): Linear(in_features=512, out_features=512, bias=True) + (dropout): Identity() + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=512, out_features=2048, bias=True) 
+ (w_2): Linear(in_features=2048, out_features=512, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): ReLU() + ) + (norm1): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (norm2): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + ) + (2): EncoderLayer( + (self_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=512, out_features=512, bias=True) + (linear_k): Linear(in_features=512, out_features=512, bias=True) + (linear_v): Linear(in_features=512, out_features=512, bias=True) + (linear_out): Linear(in_features=512, out_features=512, bias=True) + (dropout): Identity() + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=512, out_features=2048, bias=True) + (w_2): Linear(in_features=2048, out_features=512, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): ReLU() + ) + (norm1): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (norm2): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + ) + (3): EncoderLayer( + (self_attn): MultiHeadedAttention( + (linear_q): Linear(in_features=512, out_features=512, bias=True) + (linear_k): Linear(in_features=512, out_features=512, bias=True) + (linear_v): Linear(in_features=512, out_features=512, bias=True) + (linear_out): Linear(in_features=512, out_features=512, bias=True) + (dropout): Identity() + ) + (feed_forward): PositionwiseFeedForward( + (w_1): Linear(in_features=512, out_features=2048, bias=True) + (w_2): Linear(in_features=2048, out_features=512, bias=True) + (dropout): Dropout(p=0.1, inplace=False) + (activation): ReLU() + ) + (norm1): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (norm2): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + (dropout): Dropout(p=0.1, inplace=False) + ) + ) + (after_norm): LayerNorm((512,), eps=1e-12, elementwise_affine=True) + ) + (embed): Embedding(50002, 512) + (pos_enc): PositionalEncoding( + (dropout): Dropout(p=0.0, inplace=False) + ) + (embed_proj): Linear(in_features=512, out_features=1024, bias=True) + (prompt_proj): Linear(in_features=512, out_features=1024, bias=True) + (ctc): CTC( + (ctc_lo): Linear(in_features=1024, out_features=50002, bias=True) + (ctc_loss): CTCLoss() + ) +) + +Model summary: + Class Name: ESPnetS2TCTCModel + Total Number of model parameters: 1.01 B + Number of trainable parameters: 1.01 B (100.0%) + Size: 4.02 GB + Type: torch.float32 +[gpua007:0/64] 2024-02-02 19:15:03,435 (abs_task:1274) INFO: Optimizer: +AdamW ( +Parameter Group 0 + amsgrad: False + betas: [0.9, 0.98] + capturable: False + eps: 1e-06 + foreach: None + initial_lr: 0.0002 + lr: 1.6666666666666667e-09 + maximize: False + weight_decay: 0.0 +) +[gpua007:0/64] 2024-02-02 19:15:03,435 (abs_task:1275) INFO: Scheduler: PiecewiseLinearWarmupLR(warmup_steps_list=[0, 30000, 60000], warmup_lr_list=[0.0, 5e-05, 0.0002]) +[gpua007:0/64] 2024-02-02 19:15:03,456 (abs_task:1284) INFO: Saving the configuration in exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/config.yaml +[gpua007:0/64] 2024-02-02 19:15:09,029 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-02 19:15:09,948 (abs_task:1660) INFO: [valid] dataset: +ESPnetDataset( + speech: {"path": "dump/raw/dev_v3/wav.scp", "type": "kaldi_ark"} + text_prev: {"path": "dump/raw/dev_v3/text.prev", "type": "text"} + text_ctc: {"path": "dump/raw/dev_v3/text.ctc", "type": "text"} + text: {"path": 
"dump/raw/dev_v3/text", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-02 19:15:09,948 (abs_task:1661) INFO: [valid] Batch sampler: UnsortedBatchSampler(N-batch=4671, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/valid/speech_shape, +[gpua007:0/64] 2024-02-02 19:15:09,949 (abs_task:1662) INFO: [valid] mini-batch sizes summary: N-batch=4671, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-02 19:15:36,899 (trainer:167) INFO: The training was resumed using exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/checkpoint.pth +gpua007:2608783:2608783 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.7<0> +gpua007:2608783:2608783 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua007:2608783:2608783 [0] NCCL INFO cudaDriverVersion 12020 +NCCL version 2.14.3+cuda11.7 +[gpua007:0/64] 2024-02-02 19:15:42,964 (trainer:298) INFO: 17/45epoch started +[gpua007:0/64] 2024-02-02 19:15:43,003 (multiple_iter_factory:32) INFO: Building 0th iter-factory... +[gpua007:0/64] 2024-02-02 19:16:01,480 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-02 19:16:04,882 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.7", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.7", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.7", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.7", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-02 19:16:04,882 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.7, +[gpua007:0/64] 2024-02-02 19:16:04,885 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +gpua072:1905326:1905326 [2] NCCL INFO cudaDriverVersion 12020 +gpua072:1905326:1905326 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.72<0> +gpua072:1905326:1905326 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua072:1905326:1905381 [2] NCCL INFO NET/IB : No device found. 
+gpua072:1905326:1905381 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.72<0> [1]hsn0:141.142.145.72<0> +gpua072:1905326:1905381 [2] NCCL INFO Using network Socket +gpua072:1905326:1905381 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua072:1905326:1905381 [2] NCCL INFO Trees [0] 47/-1/-1->46->45 [1] 47/-1/-1->46->45 +gpua072:1905326:1905381 [2] NCCL INFO Channel 00/0 : 46[85000] -> 47[c7000] via P2P/IPC/read +gpua072:1905326:1905381 [2] NCCL INFO Channel 01/0 : 46[85000] -> 47[c7000] via P2P/IPC/read +gpua072:1905326:1905381 [2] NCCL INFO Connected all rings +gpua072:1905326:1905381 [2] NCCL INFO Channel 00/0 : 46[85000] -> 45[46000] via P2P/IPC/read +gpua072:1905326:1905381 [2] NCCL INFO Channel 01/0 : 46[85000] -> 45[46000] via P2P/IPC/read +gpua072:1905326:1905381 [2] NCCL INFO Connected all trees +gpua072:1905326:1905381 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua072:1905326:1905381 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua072:1905326:1905381 [2] NCCL INFO comm 0x561a37fe15d0 rank 46 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua072:1905325:1905325 [1] NCCL INFO cudaDriverVersion 12020 +gpua072:1905325:1905325 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.72<0> +gpua072:1905325:1905325 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua072:1905325:1905382 [1] NCCL INFO NET/IB : No device found. +gpua072:1905325:1905382 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.72<0> [1]hsn0:141.142.145.72<0> +gpua072:1905325:1905382 [1] NCCL INFO Using network Socket +gpua072:1905325:1905382 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua072:1905325:1905382 [1] NCCL INFO Trees [0] 46/-1/-1->45->44 [1] 46/52/-1->45->44 +gpua072:1905325:1905382 [1] NCCL INFO Channel 00/0 : 45[46000] -> 46[85000] via P2P/IPC/read +gpua072:1905325:1905382 [1] NCCL INFO Channel 01/0 : 45[46000] -> 46[85000] via P2P/IPC/read +gpua072:1905325:1905382 [1] NCCL INFO Connected all rings +gpua072:1905325:1905382 [1] NCCL INFO Channel 01/0 : 45[46000] -> 52[7000] [send] via NET/Socket/1 +gpua072:1905325:1905382 [1] NCCL INFO Channel 01/0 : 52[7000] -> 45[46000] [receive] via NET/Socket/1 +gpua072:1905325:1905382 [1] NCCL INFO Channel 00/0 : 45[46000] -> 44[7000] via P2P/IPC/read +gpua072:1905325:1905382 [1] NCCL INFO Channel 01/0 : 45[46000] -> 44[7000] via P2P/IPC/read +gpua072:1905325:1905382 [1] NCCL INFO Connected all trees +gpua072:1905325:1905382 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua072:1905325:1905382 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua072:1905325:1905382 [1] NCCL INFO comm 0x5588f702f630 rank 45 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua072:1905324:1905324 [0] NCCL INFO cudaDriverVersion 12020 +gpua072:1905324:1905324 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.72<0> +gpua072:1905324:1905324 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua072:1905324:1905380 [0] NCCL INFO NET/IB : No device found. 
+gpua072:1905324:1905380 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.72<0> [1]hsn0:141.142.145.72<0> +gpua072:1905324:1905380 [0] NCCL INFO Using network Socket +gpua072:1905324:1905380 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua072:1905324:1905380 [0] NCCL INFO Trees [0] 45/-1/-1->44->40 [1] 45/36/-1->44->29 +gpua072:1905324:1905380 [0] NCCL INFO Channel 00/0 : 43[c7000] -> 44[7000] [receive] via NET/Socket/1 +gpua072:1905324:1905380 [0] NCCL INFO Channel 01/0 : 43[c7000] -> 44[7000] [receive] via NET/Socket/1 +gpua072:1905324:1905380 [0] NCCL INFO Channel 00/0 : 44[7000] -> 45[46000] via P2P/IPC/read +gpua072:1905324:1905380 [0] NCCL INFO Channel 01/0 : 44[7000] -> 45[46000] via P2P/IPC/read +gpua072:1905324:1905380 [0] NCCL INFO Connected all rings +gpua072:1905324:1905380 [0] NCCL INFO Channel 00/0 : 40[7000] -> 44[7000] [receive] via NET/Socket/1 +gpua072:1905324:1905380 [0] NCCL INFO Channel 01/0 : 36[7000] -> 44[7000] [receive] via NET/Socket/1 +gpua072:1905324:1905380 [0] NCCL INFO Channel 01/0 : 29[46000] -> 44[7000] [receive] via NET/Socket/1 +gpua072:1905324:1905380 [0] NCCL INFO Channel 01/0 : 44[7000] -> 29[46000] [send] via NET/Socket/1 +gpua072:1905324:1905380 [0] NCCL INFO Channel 01/0 : 44[7000] -> 36[7000] [send] via NET/Socket/1 +gpua072:1905324:1905380 [0] NCCL INFO Channel 00/0 : 44[7000] -> 40[7000] [send] via NET/Socket/1 +gpua072:1905324:1905380 [0] NCCL INFO Connected all trees +gpua072:1905324:1905380 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua072:1905324:1905380 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua072:1905324:1905380 [0] NCCL INFO comm 0x5577fec8dda0 rank 44 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua072:1905327:1905327 [3] NCCL INFO cudaDriverVersion 12020 +gpua072:1905327:1905327 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.72<0> +gpua072:1905327:1905327 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua072:1905327:1905383 [3] NCCL INFO NET/IB : No device found. +gpua072:1905327:1905383 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.72<0> [1]hsn0:141.142.145.72<0> +gpua072:1905327:1905383 [3] NCCL INFO Using network Socket +gpua072:1905327:1905383 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua072:1905327:1905383 [3] NCCL INFO Trees [0] -1/-1/-1->47->46 [1] -1/-1/-1->47->46 +gpua072:1905327:1905383 [3] NCCL INFO Channel 00/0 : 47[c7000] -> 48[7000] [send] via NET/Socket/1 +gpua072:1905327:1905383 [3] NCCL INFO Channel 01/0 : 47[c7000] -> 48[7000] [send] via NET/Socket/1 +gpua072:1905327:1905383 [3] NCCL INFO Connected all rings +gpua072:1905327:1905383 [3] NCCL INFO Channel 00/0 : 47[c7000] -> 46[85000] via P2P/IPC/read +gpua072:1905327:1905383 [3] NCCL INFO Channel 01/0 : 47[c7000] -> 46[85000] via P2P/IPC/read +gpua072:1905327:1905383 [3] NCCL INFO Connected all trees +gpua072:1905327:1905383 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua072:1905327:1905383 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua072:1905327:1905383 [3] NCCL INFO comm 0x55973b55bb30 rank 47 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua095:3289150:3289150 [0] NCCL INFO cudaDriverVersion 12020 +gpua095:3289150:3289150 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.95<0> +gpua095:3289150:3289150 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua095:3289150:3289267 [0] NCCL INFO NET/IB : No device found. 
+gpua095:3289150:3289267 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.95<0> [1]hsn0:141.142.145.95<0> +gpua095:3289150:3289267 [0] NCCL INFO Using network Socket +gpua095:3289150:3289267 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua095:3289150:3289267 [0] NCCL INFO Trees [0] 61/-1/-1->60->56 [1] 61/28/-1->60->-1 +gpua095:3289150:3289267 [0] NCCL INFO Channel 00/0 : 59[c7000] -> 60[7000] [receive] via NET/Socket/1 +gpua095:3289150:3289267 [0] NCCL INFO Channel 01/0 : 59[c7000] -> 60[7000] [receive] via NET/Socket/1 +gpua095:3289150:3289267 [0] NCCL INFO Channel 00/0 : 60[7000] -> 61[46000] via P2P/IPC/read +gpua095:3289150:3289267 [0] NCCL INFO Channel 01/0 : 60[7000] -> 61[46000] via P2P/IPC/read +gpua095:3289150:3289267 [0] NCCL INFO Connected all rings +gpua095:3289150:3289267 [0] NCCL INFO Channel 00/0 : 56[7000] -> 60[7000] [receive] via NET/Socket/1 +gpua095:3289150:3289267 [0] NCCL INFO Channel 01/0 : 28[7000] -> 60[7000] [receive] via NET/Socket/1 +gpua095:3289150:3289267 [0] NCCL INFO Channel 01/0 : 60[7000] -> 28[7000] [send] via NET/Socket/1 +gpua095:3289150:3289267 [0] NCCL INFO Channel 00/0 : 60[7000] -> 56[7000] [send] via NET/Socket/1 +gpua095:3289150:3289267 [0] NCCL INFO Connected all trees +gpua095:3289150:3289267 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua095:3289150:3289267 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua095:3289150:3289267 [0] NCCL INFO comm 0x55cf59555350 rank 60 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua061:221445:221445 [2] NCCL INFO cudaDriverVersion 12020 +gpua061:221445:221445 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.61<0> +gpua061:221445:221445 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua061:221445:221502 [2] NCCL INFO NET/IB : No device found. +gpua061:221445:221502 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.61<0> [1]hsn0:141.142.145.61<0> +gpua061:221445:221502 [2] NCCL INFO Using network Socket +gpua061:221445:221502 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua061:221445:221502 [2] NCCL INFO Trees [0] 39/-1/-1->38->37 [1] 39/-1/-1->38->37 +gpua061:221445:221502 [2] NCCL INFO Channel 00/0 : 38[85000] -> 39[c7000] via P2P/IPC/read +gpua061:221445:221502 [2] NCCL INFO Channel 01/0 : 38[85000] -> 39[c7000] via P2P/IPC/read +gpua061:221445:221502 [2] NCCL INFO Connected all rings +gpua061:221445:221502 [2] NCCL INFO Channel 00/0 : 38[85000] -> 37[46000] via P2P/IPC/read +gpua061:221445:221502 [2] NCCL INFO Channel 01/0 : 38[85000] -> 37[46000] via P2P/IPC/read +gpua061:221445:221502 [2] NCCL INFO Connected all trees +gpua061:221445:221502 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua061:221445:221502 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua061:221445:221502 [2] NCCL INFO comm 0x5635fd218d90 rank 38 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua061:221446:221446 [3] NCCL INFO cudaDriverVersion 12020 +gpua061:221446:221446 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.61<0> +gpua061:221446:221446 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua061:221446:221504 [3] NCCL INFO NET/IB : No device found. 
+gpua061:221446:221504 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.61<0> [1]hsn0:141.142.145.61<0> +gpua061:221446:221504 [3] NCCL INFO Using network Socket +gpua061:221446:221504 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua095:3289153:3289153 [3] NCCL INFO cudaDriverVersion 12020 +gpua095:3289153:3289153 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.95<0> +gpua095:3289153:3289153 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua095:3289153:3289266 [3] NCCL INFO NET/IB : No device found. +gpua095:3289153:3289266 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.95<0> [1]hsn0:141.142.145.95<0> +gpua095:3289153:3289266 [3] NCCL INFO Using network Socket +gpua095:3289153:3289266 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua095:3289153:3289266 [3] NCCL INFO Trees [0] -1/-1/-1->63->62 [1] -1/-1/-1->63->62 +gpua095:3289153:3289266 [3] NCCL INFO Channel 00/0 : 63[c7000] -> 0[7000] [send] via NET/Socket/1 +gpua095:3289153:3289266 [3] NCCL INFO Channel 01/0 : 63[c7000] -> 0[7000] [send] via NET/Socket/1 +gpua095:3289153:3289266 [3] NCCL INFO Connected all rings +gpua095:3289153:3289266 [3] NCCL INFO Channel 00/0 : 63[c7000] -> 62[85000] via P2P/IPC/read +gpua061:221446:221504 [3] NCCL INFO Trees [0] -1/-1/-1->39->38 [1] -1/-1/-1->39->38 +gpua061:221446:221504 [3] NCCL INFO Channel 00/0 : 39[c7000] -> 40[7000] [send] via NET/Socket/1 +gpua061:221446:221504 [3] NCCL INFO Channel 01/0 : 39[c7000] -> 40[7000] [send] via NET/Socket/1 +gpua061:221446:221504 [3] NCCL INFO Connected all rings +gpua061:221446:221504 [3] NCCL INFO Channel 00/0 : 39[c7000] -> 38[85000] via P2P/IPC/read +gpua061:221446:221504 [3] NCCL INFO Channel 01/0 : 39[c7000] -> 38[85000] via P2P/IPC/read +gpua061:221446:221504 [3] NCCL INFO Connected all trees +gpua061:221446:221504 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua061:221446:221504 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua061:221446:221504 [3] NCCL INFO comm 0x5587d354eac0 rank 39 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua010:4003865:4003865 [1] NCCL INFO cudaDriverVersion 12020 +gpua010:4003865:4003865 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.10<0> +gpua010:4003865:4003865 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua010:4003865:4003929 [1] NCCL INFO NET/IB : No device found. +gpua010:4003865:4003929 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.10<0> [1]hsn0:141.142.145.10<0> +gpua010:4003865:4003929 [1] NCCL INFO Using network Socket +gpua010:4003865:4003929 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua010:4003865:4003929 [1] NCCL INFO Trees [0] 10/4/-1->9->8 [1] 10/-1/-1->9->8 +gpua010:4003865:4003929 [1] NCCL INFO Channel 00/0 : 9[46000] -> 10[85000] via P2P/IPC/read +gpua010:4003865:4003929 [1] NCCL INFO Channel 01/0 : 9[46000] -> 10[85000] via P2P/IPC/read +gpua010:4003865:4003929 [1] NCCL INFO Connected all rings +gpua010:4003865:4003929 [1] NCCL INFO Channel 00/0 : 4[7000] -> 9[46000] [receive] via NET/Socket/1 +gpua011:1153313:1153313 [0] NCCL INFO cudaDriverVersion 12020 +gpua011:1153313:1153313 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.11<0> +gpua011:1153313:1153313 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua011:1153313:1153374 [0] NCCL INFO NET/IB : No device found. 
+gpua011:1153313:1153374 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.11<0> [1]hsn0:141.142.145.11<0> +gpua011:1153313:1153374 [0] NCCL INFO Using network Socket +gpua011:1153313:1153374 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua011:1153313:1153374 [0] NCCL INFO Trees [0] 13/-1/-1->12->8 [1] 13/4/-1->12->28 +gpua011:1153313:1153374 [0] NCCL INFO Channel 00/0 : 11[c7000] -> 12[7000] [receive] via NET/Socket/1 +gpua011:1153313:1153374 [0] NCCL INFO Channel 01/0 : 11[c7000] -> 12[7000] [receive] via NET/Socket/1 +gpua011:1153313:1153374 [0] NCCL INFO Channel 00/0 : 12[7000] -> 13[46000] via P2P/IPC/read +gpua014:2738630:2738630 [0] NCCL INFO cudaDriverVersion 12020 +gpua014:2738630:2738630 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.14<0> +gpua014:2738630:2738630 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua014:2738630:2738696 [0] NCCL INFO NET/IB : No device found. +gpua014:2738630:2738696 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.14<0> [1]hsn0:141.142.145.14<0> +gpua014:2738630:2738696 [0] NCCL INFO Using network Socket +gpua014:2738630:2738696 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua014:2738630:2738696 [0] NCCL INFO Trees [0] 21/-1/-1->20->25 [1] 21/16/-1->20->13 +gpua014:2738630:2738696 [0] NCCL INFO Channel 00/0 : 19[c7000] -> 20[7000] [receive] via NET/Socket/1 +gpua014:2738630:2738696 [0] NCCL INFO Channel 01/0 : 19[c7000] -> 20[7000] [receive] via NET/Socket/1 +gpua014:2738630:2738696 [0] NCCL INFO Channel 00/0 : 20[7000] -> 21[46000] via P2P/IPC/read +gpua020:3893201:3893201 [3] NCCL INFO cudaDriverVersion 12020 +gpua020:3893201:3893201 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.20<0> +gpua020:3893201:3893201 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua020:3893201:3893258 [3] NCCL INFO NET/IB : No device found. +gpua020:3893201:3893258 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.20<0> [1]hsn0:141.142.145.20<0> +gpua020:3893201:3893258 [3] NCCL INFO Using network Socket +gpua020:3893201:3893258 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua020:3893201:3893258 [3] NCCL INFO Trees [0] -1/-1/-1->27->26 [1] -1/-1/-1->27->26 +gpua020:3893201:3893258 [3] NCCL INFO Channel 00/0 : 27[c7000] -> 28[7000] [send] via NET/Socket/1 +gpua020:3893201:3893258 [3] NCCL INFO Channel 01/0 : 27[c7000] -> 28[7000] [send] via NET/Socket/1 +gpua020:3893201:3893258 [3] NCCL INFO Connected all rings +gpua020:3893201:3893258 [3] NCCL INFO Channel 00/0 : 27[c7000] -> 26[85000] via P2P/IPC/read +gpua038:4038364:4038364 [1] NCCL INFO cudaDriverVersion 12020 +gpua038:4038364:4038364 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.38<0> +gpua038:4038364:4038364 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua038:4038364:4038435 [1] NCCL INFO NET/IB : No device found. 
+gpua038:4038364:4038435 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.38<0> [1]hsn0:141.142.145.38<0> +gpua038:4038364:4038435 [1] NCCL INFO Using network Socket +gpua038:4038364:4038435 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua038:4038364:4038435 [1] NCCL INFO Trees [0] 30/-1/-1->29->28 [1] 30/44/-1->29->28 +gpua038:4038364:4038435 [1] NCCL INFO Channel 00/0 : 29[46000] -> 30[85000] via P2P/IPC/read +gpua038:4038364:4038435 [1] NCCL INFO Channel 01/0 : 29[46000] -> 30[85000] via P2P/IPC/read +gpua038:4038364:4038435 [1] NCCL INFO Connected all rings +gpua038:4038364:4038435 [1] NCCL INFO Channel 01/0 : 29[46000] -> 44[7000] [send] via NET/Socket/1 +gpua065:2980960:2980960 [1] NCCL INFO cudaDriverVersion 12020 +gpua065:2980960:2980960 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.65<0> +gpua065:2980960:2980960 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua065:2980960:2981030 [1] NCCL INFO NET/IB : No device found. +gpua065:2980960:2981030 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.65<0> [1]hsn0:141.142.145.65<0> +gpua065:2980960:2981030 [1] NCCL INFO Using network Socket +gpua065:2980960:2981030 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua065:2980960:2981030 [1] NCCL INFO Trees [0] 42/36/-1->41->40 [1] 42/-1/-1->41->40 +gpua065:2980960:2981030 [1] NCCL INFO Channel 00/0 : 41[46000] -> 42[85000] via P2P/IPC/read +gpua065:2980960:2981030 [1] NCCL INFO Channel 01/0 : 41[46000] -> 42[85000] via P2P/IPC/read +gpua065:2980960:2981030 [1] NCCL INFO Connected all rings +gpua065:2980960:2981030 [1] NCCL INFO Channel 00/0 : 36[7000] -> 41[46000] [receive] via NET/Socket/1 +gpua042:696908:696908 [3] NCCL INFO cudaDriverVersion 12020 +gpua042:696908:696908 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.42<0> +gpua042:696908:696908 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua042:696908:697144 [3] NCCL INFO NET/IB : No device found. 
+gpua042:696908:697144 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.42<0> [1]hsn0:141.142.145.42<0> +gpua042:696908:697144 [3] NCCL INFO Using network Socket +gpua042:696908:697144 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua042:696908:697144 [3] NCCL INFO Trees [0] -1/-1/-1->35->34 [1] -1/-1/-1->35->34 +gpua042:696908:697144 [3] NCCL INFO Channel 00/0 : 35[c7000] -> 36[7000] [send] via NET/Socket/1 +gpua042:696908:697144 [3] NCCL INFO Channel 01/0 : 35[c7000] -> 36[7000] [send] via NET/Socket/1 +gpua042:696908:697144 [3] NCCL INFO Connected all rings +gpua042:696908:697144 [3] NCCL INFO Channel 00/0 : 35[c7000] -> 34[85000] via P2P/IPC/read +gpua095:3289153:3289266 [3] NCCL INFO Channel 01/0 : 63[c7000] -> 62[85000] via P2P/IPC/read +gpua095:3289153:3289266 [3] NCCL INFO Connected all trees +gpua095:3289153:3289266 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua095:3289153:3289266 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua095:3289153:3289266 [3] NCCL INFO comm 0x55dc45d638a0 rank 63 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua010:4003865:4003929 [1] NCCL INFO Channel 00/0 : 9[46000] -> 4[7000] [send] via NET/Socket/1 +gpua010:4003865:4003929 [1] NCCL INFO Channel 00/0 : 9[46000] -> 8[7000] via P2P/IPC/read +gpua010:4003865:4003929 [1] NCCL INFO Channel 01/0 : 9[46000] -> 8[7000] via P2P/IPC/read +gpua010:4003865:4003929 [1] NCCL INFO Connected all trees +gpua010:4003865:4003929 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua010:4003865:4003929 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua010:4003865:4003929 [1] NCCL INFO comm 0x55fe9acbe9a0 rank 9 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua010:4003864:4003864 [0] NCCL INFO cudaDriverVersion 12020 +gpua010:4003864:4003864 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.10<0> +gpua010:4003864:4003864 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua010:4003864:4003930 [0] NCCL INFO NET/IB : No device found. 
+gpua011:1153313:1153374 [0] NCCL INFO Channel 01/0 : 12[7000] -> 13[46000] via P2P/IPC/read +gpua011:1153313:1153374 [0] NCCL INFO Connected all rings +gpua011:1153313:1153374 [0] NCCL INFO Channel 00/0 : 8[7000] -> 12[7000] [receive] via NET/Socket/1 +gpua011:1153313:1153374 [0] NCCL INFO Channel 01/0 : 4[7000] -> 12[7000] [receive] via NET/Socket/1 +gpua011:1153313:1153374 [0] NCCL INFO Channel 01/0 : 12[7000] -> 28[7000] [send] via NET/Socket/1 +gpua011:1153313:1153374 [0] NCCL INFO Channel 01/0 : 28[7000] -> 12[7000] [receive] via NET/Socket/1 +gpua011:1153313:1153374 [0] NCCL INFO Channel 01/0 : 12[7000] -> 4[7000] [send] via NET/Socket/1 +gpua011:1153313:1153374 [0] NCCL INFO Channel 00/0 : 12[7000] -> 8[7000] [send] via NET/Socket/1 +gpua011:1153313:1153374 [0] NCCL INFO Connected all trees +gpua011:1153313:1153374 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua011:1153313:1153374 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua014:2738630:2738696 [0] NCCL INFO Channel 01/0 : 20[7000] -> 21[46000] via P2P/IPC/read +gpua014:2738630:2738696 [0] NCCL INFO Connected all rings +gpua014:2738630:2738696 [0] NCCL INFO Channel 01/0 : 16[7000] -> 20[7000] [receive] via NET/Socket/1 +gpua014:2738630:2738696 [0] NCCL INFO Channel 00/0 : 20[7000] -> 25[46000] [send] via NET/Socket/1 +gpua014:2738630:2738696 [0] NCCL INFO Channel 01/0 : 13[46000] -> 20[7000] [receive] via NET/Socket/1 +gpua014:2738630:2738696 [0] NCCL INFO Channel 01/0 : 20[7000] -> 13[46000] [send] via NET/Socket/1 +gpua014:2738630:2738696 [0] NCCL INFO Channel 00/0 : 25[46000] -> 20[7000] [receive] via NET/Socket/1 +gpua014:2738630:2738696 [0] NCCL INFO Channel 01/0 : 20[7000] -> 16[7000] [send] via NET/Socket/1 +gpua014:2738630:2738696 [0] NCCL INFO Connected all trees +gpua014:2738630:2738696 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua014:2738630:2738696 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua020:3893201:3893258 [3] NCCL INFO Channel 01/0 : 27[c7000] -> 26[85000] via P2P/IPC/read +gpua020:3893201:3893258 [3] NCCL INFO Connected all trees +gpua020:3893201:3893258 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua020:3893201:3893258 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua020:3893201:3893258 [3] NCCL INFO comm 0x557013716a90 rank 27 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua038:4038364:4038435 [1] NCCL INFO Channel 01/0 : 44[7000] -> 29[46000] [receive] via NET/Socket/1 +gpua038:4038364:4038435 [1] NCCL INFO Channel 00/0 : 29[46000] -> 28[7000] via P2P/IPC/read +gpua038:4038364:4038435 [1] NCCL INFO Channel 01/0 : 29[46000] -> 28[7000] via P2P/IPC/read +gpua038:4038364:4038435 [1] NCCL INFO Connected all trees +gpua038:4038364:4038435 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua038:4038364:4038435 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua038:4038364:4038435 [1] NCCL INFO comm 0x55d23d850cf0 rank 29 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua038:4038363:4038363 [0] NCCL INFO cudaDriverVersion 12020 +gpua038:4038363:4038363 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.38<0> +gpua038:4038363:4038363 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua038:4038363:4038434 [0] NCCL INFO NET/IB : No device found. 
+gpua065:2980960:2981030 [1] NCCL INFO Channel 00/0 : 41[46000] -> 36[7000] [send] via NET/Socket/1 +gpua065:2980960:2981030 [1] NCCL INFO Channel 00/0 : 41[46000] -> 40[7000] via P2P/IPC/read +gpua065:2980960:2981030 [1] NCCL INFO Channel 01/0 : 41[46000] -> 40[7000] via P2P/IPC/read +gpua065:2980960:2981030 [1] NCCL INFO Connected all trees +gpua065:2980960:2981030 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua065:2980960:2981030 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua065:2980960:2981030 [1] NCCL INFO comm 0x557b33b4b110 rank 41 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua042:696908:697144 [3] NCCL INFO Channel 01/0 : 35[c7000] -> 34[85000] via P2P/IPC/read +gpua042:696908:697144 [3] NCCL INFO Connected all trees +gpua042:696908:697144 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua042:696908:697144 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua042:696908:697144 [3] NCCL INFO comm 0x55ca5bbcd060 rank 35 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua095:3289152:3289152 [2] NCCL INFO cudaDriverVersion 12020 +gpua095:3289152:3289152 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.95<0> +gpua095:3289152:3289152 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua095:3289152:3289265 [2] NCCL INFO NET/IB : No device found. +gpua095:3289152:3289265 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.95<0> [1]hsn0:141.142.145.95<0> +gpua095:3289152:3289265 [2] NCCL INFO Using network Socket +gpua095:3289152:3289265 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua095:3289152:3289265 [2] NCCL INFO Trees [0] 63/-1/-1->62->61 [1] 63/-1/-1->62->61 +gpua095:3289152:3289265 [2] NCCL INFO Channel 00/0 : 62[85000] -> 63[c7000] via P2P/IPC/read +gpua095:3289152:3289265 [2] NCCL INFO Channel 01/0 : 62[85000] -> 63[c7000] via P2P/IPC/read +gpua095:3289152:3289265 [2] NCCL INFO Connected all rings +gpua095:3289152:3289265 [2] NCCL INFO Channel 00/0 : 62[85000] -> 61[46000] via P2P/IPC/read +gpua007:2608785:2608785 [2] NCCL INFO cudaDriverVersion 12020 +gpua007:2608785:2608785 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.7<0> +gpua007:2608785:2608785 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua007:2608785:2608850 [2] NCCL INFO NET/IB : No device found. 
+gpua007:2608785:2608850 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.7<0> [1]hsn0:141.142.145.7<0> +gpua007:2608785:2608850 [2] NCCL INFO Using network Socket +gpua007:2608785:2608850 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua007:2608785:2608850 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 +gpua007:2608785:2608850 [2] NCCL INFO Channel 00/0 : 2[85000] -> 3[c7000] via P2P/IPC/read +gpua007:2608785:2608850 [2] NCCL INFO Channel 01/0 : 2[85000] -> 3[c7000] via P2P/IPC/read +gpua007:2608785:2608850 [2] NCCL INFO Connected all rings +gpua007:2608785:2608850 [2] NCCL INFO Channel 00/0 : 2[85000] -> 1[46000] via P2P/IPC/read +gpua010:4003864:4003930 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.10<0> [1]hsn0:141.142.145.10<0> +gpua010:4003864:4003930 [0] NCCL INFO Using network Socket +gpua010:4003864:4003930 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua010:4003864:4003930 [0] NCCL INFO Trees [0] 9/12/-1->8->17 [1] 9/-1/-1->8->5 +gpua010:4003864:4003930 [0] NCCL INFO Channel 00/0 : 7[c7000] -> 8[7000] [receive] via NET/Socket/1 +gpua010:4003864:4003930 [0] NCCL INFO Channel 01/0 : 7[c7000] -> 8[7000] [receive] via NET/Socket/1 +gpua010:4003864:4003930 [0] NCCL INFO Channel 00/0 : 8[7000] -> 9[46000] via P2P/IPC/read +gpua010:4003864:4003930 [0] NCCL INFO Channel 01/0 : 8[7000] -> 9[46000] via P2P/IPC/read +gpua010:4003864:4003930 [0] NCCL INFO Connected all rings +gpua010:4003864:4003930 [0] NCCL INFO Channel 01/0 : 5[46000] -> 8[7000] [receive] via NET/Socket/1 +gpua010:4003864:4003930 [0] NCCL INFO Channel 00/0 : 8[7000] -> 12[7000] [send] via NET/Socket/1 +gpua011:1153313:1153374 [0] NCCL INFO comm 0x563b0f7a8e40 rank 12 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua014:2738630:2738696 [0] NCCL INFO comm 0x5568cfb4e360 rank 20 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua014:2738631:2738631 [1] NCCL INFO cudaDriverVersion 12020 +gpua014:2738631:2738631 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.14<0> +gpua014:2738631:2738631 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua014:2738631:2738693 [1] NCCL INFO NET/IB : No device found. +gpua014:2738631:2738693 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.14<0> [1]hsn0:141.142.145.14<0> +gpua014:2738631:2738693 [1] NCCL INFO Using network Socket +gpua014:2738631:2738693 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua014:2738631:2738693 [1] NCCL INFO Trees [0] 22/-1/-1->21->20 [1] 22/24/-1->21->20 +gpua014:2738631:2738693 [1] NCCL INFO Channel 00/0 : 21[46000] -> 22[85000] via P2P/IPC/read +gpua014:2738631:2738693 [1] NCCL INFO Channel 01/0 : 21[46000] -> 22[85000] via P2P/IPC/read +gpua014:2738631:2738693 [1] NCCL INFO Connected all rings +gpua020:3893198:3893198 [0] NCCL INFO cudaDriverVersion 12020 +gpua020:3893198:3893198 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.20<0> +gpua020:3893198:3893198 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua020:3893198:3893257 [0] NCCL INFO NET/IB : No device found. 
+gpua020:3893198:3893257 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.20<0> [1]hsn0:141.142.145.20<0> +gpua020:3893198:3893257 [0] NCCL INFO Using network Socket +gpua020:3893198:3893257 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua020:3893198:3893257 [0] NCCL INFO Trees [0] 25/28/-1->24->16 [1] 25/-1/-1->24->21 +gpua020:3893198:3893257 [0] NCCL INFO Channel 00/0 : 23[c7000] -> 24[7000] [receive] via NET/Socket/1 +gpua020:3893198:3893257 [0] NCCL INFO Channel 01/0 : 23[c7000] -> 24[7000] [receive] via NET/Socket/1 +gpua020:3893198:3893257 [0] NCCL INFO Channel 00/0 : 24[7000] -> 25[46000] via P2P/IPC/read +gpua038:4038363:4038434 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.38<0> [1]hsn0:141.142.145.38<0> +gpua038:4038363:4038434 [0] NCCL INFO Using network Socket +gpua038:4038363:4038434 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua038:4038363:4038434 [0] NCCL INFO Trees [0] 29/-1/-1->28->24 [1] 29/12/-1->28->60 +gpua038:4038363:4038434 [0] NCCL INFO Channel 00/0 : 27[c7000] -> 28[7000] [receive] via NET/Socket/1 +gpua038:4038363:4038434 [0] NCCL INFO Channel 01/0 : 27[c7000] -> 28[7000] [receive] via NET/Socket/1 +gpua038:4038363:4038434 [0] NCCL INFO Channel 00/0 : 28[7000] -> 29[46000] via P2P/IPC/read +gpua038:4038363:4038434 [0] NCCL INFO Channel 01/0 : 28[7000] -> 29[46000] via P2P/IPC/read +gpua038:4038363:4038434 [0] NCCL INFO Connected all rings +gpua038:4038363:4038434 [0] NCCL INFO Channel 00/0 : 24[7000] -> 28[7000] [receive] via NET/Socket/1 +gpua038:4038363:4038434 [0] NCCL INFO Channel 01/0 : 12[7000] -> 28[7000] [receive] via NET/Socket/1 +gpua065:2980959:2980959 [0] NCCL INFO cudaDriverVersion 12020 +gpua065:2980959:2980959 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.65<0> +gpua065:2980959:2980959 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua065:2980959:2981029 [0] NCCL INFO NET/IB : No device found. +gpua065:2980959:2981029 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.65<0> [1]hsn0:141.142.145.65<0> +gpua065:2980959:2981029 [0] NCCL INFO Using network Socket +gpua065:2980959:2981029 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua065:2980959:2981029 [0] NCCL INFO Trees [0] 41/44/-1->40->49 [1] 41/-1/-1->40->37 +gpua065:2980959:2981029 [0] NCCL INFO Channel 00/0 : 39[c7000] -> 40[7000] [receive] via NET/Socket/1 +gpua065:2980959:2981029 [0] NCCL INFO Channel 01/0 : 39[c7000] -> 40[7000] [receive] via NET/Socket/1 +gpua065:2980959:2981029 [0] NCCL INFO Channel 00/0 : 40[7000] -> 41[46000] via P2P/IPC/read +gpua042:696907:696907 [2] NCCL INFO cudaDriverVersion 12020 +gpua042:696907:696907 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.42<0> +gpua042:696907:696907 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua042:696907:697145 [2] NCCL INFO NET/IB : No device found. 
+gpua042:696907:697145 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.42<0> [1]hsn0:141.142.145.42<0> +gpua042:696907:697145 [2] NCCL INFO Using network Socket +gpua042:696907:697145 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua042:696907:697145 [2] NCCL INFO Trees [0] 35/-1/-1->34->33 [1] 35/-1/-1->34->33 +gpua042:696907:697145 [2] NCCL INFO Channel 00/0 : 34[85000] -> 35[c7000] via P2P/IPC/read +gpua042:696907:697145 [2] NCCL INFO Channel 01/0 : 34[85000] -> 35[c7000] via P2P/IPC/read +gpua042:696907:697145 [2] NCCL INFO Connected all rings +gpua042:696907:697145 [2] NCCL INFO Channel 00/0 : 34[85000] -> 33[46000] via P2P/IPC/read +gpua095:3289152:3289265 [2] NCCL INFO Channel 01/0 : 62[85000] -> 61[46000] via P2P/IPC/read +gpua095:3289152:3289265 [2] NCCL INFO Connected all trees +gpua095:3289152:3289265 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua095:3289152:3289265 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua095:3289152:3289265 [2] NCCL INFO comm 0x561ddeb0eb90 rank 62 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua093:1409358:1409358 [2] NCCL INFO cudaDriverVersion 12020 +gpua093:1409358:1409358 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.93<0> +gpua093:1409358:1409358 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua093:1409358:1409424 [2] NCCL INFO NET/IB : No device found. +gpua093:1409358:1409424 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.93<0> [1]hsn0:141.142.145.93<0> +gpua093:1409358:1409424 [2] NCCL INFO Using network Socket +gpua093:1409358:1409424 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua093:1409358:1409424 [2] NCCL INFO Trees [0] 59/-1/-1->58->57 [1] 59/-1/-1->58->57 +gpua093:1409358:1409424 [2] NCCL INFO Channel 00/0 : 58[85000] -> 59[c7000] via P2P/IPC/read +gpua093:1409358:1409424 [2] NCCL INFO Channel 01/0 : 58[85000] -> 59[c7000] via P2P/IPC/read +gpua093:1409358:1409424 [2] NCCL INFO Connected all rings +gpua093:1409358:1409424 [2] NCCL INFO Channel 00/0 : 58[85000] -> 57[46000] via P2P/IPC/read +gpua007:2608785:2608850 [2] NCCL INFO Channel 01/0 : 2[85000] -> 1[46000] via P2P/IPC/read +gpua007:2608785:2608850 [2] NCCL INFO Connected all trees +gpua007:2608785:2608850 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua007:2608785:2608850 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua007:2608785:2608850 [2] NCCL INFO comm 0x55fdb233ef90 rank 2 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua010:4003864:4003930 [0] NCCL INFO Channel 00/0 : 8[7000] -> 17[46000] [send] via NET/Socket/1 +gpua010:4003864:4003930 [0] NCCL INFO Channel 00/0 : 17[46000] -> 8[7000] [receive] via NET/Socket/1 +gpua010:4003864:4003930 [0] NCCL INFO Channel 00/0 : 12[7000] -> 8[7000] [receive] via NET/Socket/1 +gpua010:4003864:4003930 [0] NCCL INFO Channel 01/0 : 8[7000] -> 5[46000] [send] via NET/Socket/1 +gpua010:4003864:4003930 [0] NCCL INFO Connected all trees +gpua010:4003864:4003930 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua010:4003864:4003930 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua010:4003864:4003930 [0] NCCL INFO comm 0x556c1c4ff870 rank 8 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua011:1153314:1153314 [1] NCCL INFO cudaDriverVersion 12020 +gpua011:1153314:1153314 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.11<0> +gpua011:1153314:1153314 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal 
implementation +gpua011:1153314:1153373 [1] NCCL INFO NET/IB : No device found. +gpua011:1153314:1153373 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.11<0> [1]hsn0:141.142.145.11<0> +gpua011:1153314:1153373 [1] NCCL INFO Using network Socket +gpua011:1153314:1153373 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua011:1153314:1153373 [1] NCCL INFO Trees [0] 14/-1/-1->13->12 [1] 14/20/-1->13->12 +gpua011:1153314:1153373 [1] NCCL INFO Channel 00/0 : 13[46000] -> 14[85000] via P2P/IPC/read +gpua011:1153314:1153373 [1] NCCL INFO Channel 01/0 : 13[46000] -> 14[85000] via P2P/IPC/read +gpua011:1153314:1153373 [1] NCCL INFO Connected all rings +gpua011:1153314:1153373 [1] NCCL INFO Channel 01/0 : 13[46000] -> 20[7000] [send] via NET/Socket/1 +gpua014:2738631:2738693 [1] NCCL INFO Channel 01/0 : 21[46000] -> 24[7000] [send] via NET/Socket/1 +gpua014:2738631:2738693 [1] NCCL INFO Channel 01/0 : 24[7000] -> 21[46000] [receive] via NET/Socket/1 +gpua014:2738631:2738693 [1] NCCL INFO Channel 00/0 : 21[46000] -> 20[7000] via P2P/IPC/read +gpua014:2738631:2738693 [1] NCCL INFO Channel 01/0 : 21[46000] -> 20[7000] via P2P/IPC/read +gpua014:2738631:2738693 [1] NCCL INFO Connected all trees +gpua014:2738631:2738693 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua014:2738631:2738693 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua014:2738631:2738693 [1] NCCL INFO comm 0x5574242bbdc0 rank 21 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua020:3893198:3893257 [0] NCCL INFO Channel 01/0 : 24[7000] -> 25[46000] via P2P/IPC/read +gpua020:3893198:3893257 [0] NCCL INFO Connected all rings +gpua020:3893198:3893257 [0] NCCL INFO Channel 01/0 : 21[46000] -> 24[7000] [receive] via NET/Socket/1 +gpua020:3893198:3893257 [0] NCCL INFO Channel 00/0 : 24[7000] -> 28[7000] [send] via NET/Socket/1 +gpua020:3893198:3893257 [0] NCCL INFO Channel 00/0 : 16[7000] -> 24[7000] [receive] via NET/Socket/1 +gpua020:3893198:3893257 [0] NCCL INFO Channel 00/0 : 24[7000] -> 16[7000] [send] via NET/Socket/1 +gpua020:3893198:3893257 [0] NCCL INFO Channel 00/0 : 28[7000] -> 24[7000] [receive] via NET/Socket/1 +gpua020:3893198:3893257 [0] NCCL INFO Channel 01/0 : 24[7000] -> 21[46000] [send] via NET/Socket/1 +gpua020:3893198:3893257 [0] NCCL INFO Connected all trees +gpua020:3893198:3893257 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua020:3893198:3893257 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua038:4038363:4038434 [0] NCCL INFO Channel 01/0 : 60[7000] -> 28[7000] [receive] via NET/Socket/1 +gpua038:4038363:4038434 [0] NCCL INFO Channel 01/0 : 28[7000] -> 60[7000] [send] via NET/Socket/1 +gpua038:4038363:4038434 [0] NCCL INFO Channel 01/0 : 28[7000] -> 12[7000] [send] via NET/Socket/1 +gpua038:4038363:4038434 [0] NCCL INFO Channel 00/0 : 28[7000] -> 24[7000] [send] via NET/Socket/1 +gpua038:4038363:4038434 [0] NCCL INFO Connected all trees +gpua038:4038363:4038434 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua038:4038363:4038434 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua038:4038363:4038434 [0] NCCL INFO comm 0x559dafffa220 rank 28 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua065:2980959:2981029 [0] NCCL INFO Channel 01/0 : 40[7000] -> 41[46000] via P2P/IPC/read +gpua065:2980959:2981029 [0] NCCL INFO Connected all rings +gpua065:2980959:2981029 [0] NCCL INFO Channel 01/0 : 37[46000] -> 40[7000] [receive] via NET/Socket/1 +gpua065:2980959:2981029 [0] NCCL 
INFO Channel 00/0 : 40[7000] -> 44[7000] [send] via NET/Socket/1 +gpua065:2980959:2981029 [0] NCCL INFO Channel 00/0 : 40[7000] -> 49[46000] [send] via NET/Socket/1 +gpua065:2980959:2981029 [0] NCCL INFO Channel 00/0 : 49[46000] -> 40[7000] [receive] via NET/Socket/1 +gpua065:2980959:2981029 [0] NCCL INFO Channel 00/0 : 44[7000] -> 40[7000] [receive] via NET/Socket/1 +gpua065:2980959:2981029 [0] NCCL INFO Channel 01/0 : 40[7000] -> 37[46000] [send] via NET/Socket/1 +gpua065:2980959:2981029 [0] NCCL INFO Connected all trees +gpua065:2980959:2981029 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua065:2980959:2981029 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua042:696907:697145 [2] NCCL INFO Channel 01/0 : 34[85000] -> 33[46000] via P2P/IPC/read +gpua042:696907:697145 [2] NCCL INFO Connected all trees +gpua042:696907:697145 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua042:696907:697145 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua042:696907:697145 [2] NCCL INFO comm 0x558016e1eed0 rank 34 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua095:3289151:3289151 [1] NCCL INFO cudaDriverVersion 12020 +gpua095:3289151:3289151 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.95<0> +gpua095:3289151:3289151 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua095:3289151:3289268 [1] NCCL INFO NET/IB : No device found. +gpua095:3289151:3289268 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.95<0> [1]hsn0:141.142.145.95<0> +gpua095:3289151:3289268 [1] NCCL INFO Using network Socket +gpua095:3289151:3289268 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua095:3289151:3289268 [1] NCCL INFO Trees [0] 62/-1/-1->61->60 [1] 62/-1/-1->61->60 +gpua095:3289151:3289268 [1] NCCL INFO Channel 00/0 : 61[46000] -> 62[85000] via P2P/IPC/read +gpua095:3289151:3289268 [1] NCCL INFO Channel 01/0 : 61[46000] -> 62[85000] via P2P/IPC/read +gpua095:3289151:3289268 [1] NCCL INFO Connected all rings +gpua095:3289151:3289268 [1] NCCL INFO Channel 00/0 : 61[46000] -> 60[7000] via P2P/IPC/read +gpua093:1409358:1409424 [2] NCCL INFO Channel 01/0 : 58[85000] -> 57[46000] via P2P/IPC/read +gpua093:1409358:1409424 [2] NCCL INFO Connected all trees +gpua093:1409358:1409424 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua093:1409358:1409424 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua093:1409358:1409424 [2] NCCL INFO comm 0x5608497f0150 rank 58 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua093:1409359:1409359 [3] NCCL INFO cudaDriverVersion 12020 +gpua093:1409359:1409359 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.93<0> +gpua093:1409359:1409359 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua093:1409359:1409423 [3] NCCL INFO NET/IB : No device found. +gpua093:1409359:1409423 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.93<0> [1]hsn0:141.142.145.93<0> +gpua093:1409359:1409423 [3] NCCL INFO Using network Socket +gpua093:1409359:1409423 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua007:2608783:2608852 [0] NCCL INFO NET/IB : No device found. 
+gpua007:2608783:2608852 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.7<0> [1]hsn0:141.142.145.7<0> +gpua007:2608783:2608852 [0] NCCL INFO Using network Socket +gpua007:2608783:2608852 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua007:2608783:2608852 [0] NCCL INFO Channel 00/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 +gpua007:2608783:2608852 [0] NCCL INFO Channel 01/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 +gpua007:2608783:2608852 [0] NCCL INFO Trees [0] 1/32/-1->0->-1 [1] 1/-1/-1->0->4 +gpua007:2608783:2608852 [0] NCCL INFO Channel 00/0 : 63[c7000] -> 0[7000] [receive] via NET/Socket/1 +gpua007:2608783:2608852 [0] NCCL INFO Channel 01/0 : 63[c7000] -> 0[7000] [receive] via NET/Socket/1 +gpua007:2608783:2608852 [0] NCCL INFO Channel 00/0 : 0[7000] -> 1[46000] via P2P/IPC/read +gpua011:1153314:1153373 [1] NCCL INFO Channel 01/0 : 20[7000] -> 13[46000] [receive] via NET/Socket/1 +gpua011:1153314:1153373 [1] NCCL INFO Channel 00/0 : 13[46000] -> 12[7000] via P2P/IPC/read +gpua011:1153314:1153373 [1] NCCL INFO Channel 01/0 : 13[46000] -> 12[7000] via P2P/IPC/read +gpua011:1153314:1153373 [1] NCCL INFO Connected all trees +gpua011:1153314:1153373 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua011:1153314:1153373 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua011:1153314:1153373 [1] NCCL INFO comm 0x561d7d5608d0 rank 13 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua014:2738633:2738633 [3] NCCL INFO cudaDriverVersion 12020 +gpua014:2738633:2738633 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.14<0> +gpua014:2738633:2738633 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua014:2738633:2738694 [3] NCCL INFO NET/IB : No device found. +gpua014:2738633:2738694 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.14<0> [1]hsn0:141.142.145.14<0> +gpua014:2738633:2738694 [3] NCCL INFO Using network Socket +gpua014:2738633:2738694 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua014:2738633:2738694 [3] NCCL INFO Trees [0] -1/-1/-1->23->22 [1] -1/-1/-1->23->22 +gpua014:2738633:2738694 [3] NCCL INFO Channel 00/0 : 23[c7000] -> 24[7000] [send] via NET/Socket/1 +gpua014:2738633:2738694 [3] NCCL INFO Channel 01/0 : 23[c7000] -> 24[7000] [send] via NET/Socket/1 +gpua014:2738633:2738694 [3] NCCL INFO Connected all rings +gpua014:2738633:2738694 [3] NCCL INFO Channel 00/0 : 23[c7000] -> 22[85000] via P2P/IPC/read +gpua020:3893198:3893257 [0] NCCL INFO comm 0x562cffcec820 rank 24 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua038:4038365:4038365 [2] NCCL INFO cudaDriverVersion 12020 +gpua038:4038365:4038365 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.38<0> +gpua038:4038365:4038365 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua038:4038365:4038433 [2] NCCL INFO NET/IB : No device found. 
+gpua038:4038365:4038433 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.38<0> [1]hsn0:141.142.145.38<0> +gpua038:4038365:4038433 [2] NCCL INFO Using network Socket +gpua038:4038365:4038433 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua038:4038365:4038433 [2] NCCL INFO Trees [0] 31/-1/-1->30->29 [1] 31/-1/-1->30->29 +gpua038:4038365:4038433 [2] NCCL INFO Channel 00/0 : 30[85000] -> 31[c7000] via P2P/IPC/read +gpua038:4038365:4038433 [2] NCCL INFO Channel 01/0 : 30[85000] -> 31[c7000] via P2P/IPC/read +gpua038:4038365:4038433 [2] NCCL INFO Connected all rings +gpua038:4038365:4038433 [2] NCCL INFO Channel 00/0 : 30[85000] -> 29[46000] via P2P/IPC/read +gpua065:2980959:2981029 [0] NCCL INFO comm 0x55c0cac35e40 rank 40 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua042:696905:696905 [0] NCCL INFO cudaDriverVersion 12020 +gpua042:696905:696905 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.42<0> +gpua042:696905:696905 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua042:696905:697146 [0] NCCL INFO NET/IB : No device found. +gpua042:696905:697146 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.42<0> [1]hsn0:141.142.145.42<0> +gpua042:696905:697146 [0] NCCL INFO Using network Socket +gpua042:696905:697146 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua042:696905:697146 [0] NCCL INFO Trees [0] 33/48/-1->32->0 [1] 33/-1/-1->32->36 +gpua042:696905:697146 [0] NCCL INFO Channel 00/0 : 31[c7000] -> 32[7000] [receive] via NET/Socket/1 +gpua042:696905:697146 [0] NCCL INFO Channel 01/0 : 31[c7000] -> 32[7000] [receive] via NET/Socket/1 +gpua042:696905:697146 [0] NCCL INFO Channel 00/0 : 32[7000] -> 33[46000] via P2P/IPC/read +gpua042:696905:697146 [0] NCCL INFO Channel 01/0 : 32[7000] -> 33[46000] via P2P/IPC/read +gpua095:3289151:3289268 [1] NCCL INFO Channel 01/0 : 61[46000] -> 60[7000] via P2P/IPC/read +gpua095:3289151:3289268 [1] NCCL INFO Connected all trees +gpua095:3289151:3289268 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua095:3289151:3289268 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua095:3289151:3289268 [1] NCCL INFO comm 0x556ad1204ed0 rank 61 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua093:1409359:1409423 [3] NCCL INFO Trees [0] -1/-1/-1->59->58 [1] -1/-1/-1->59->58 +gpua093:1409359:1409423 [3] NCCL INFO Channel 00/0 : 59[c7000] -> 60[7000] [send] via NET/Socket/1 +gpua093:1409359:1409423 [3] NCCL INFO Channel 01/0 : 59[c7000] -> 60[7000] [send] via NET/Socket/1 +gpua093:1409359:1409423 [3] NCCL INFO Connected all rings +gpua093:1409359:1409423 [3] NCCL INFO Channel 00/0 : 59[c7000] -> 58[85000] via P2P/IPC/read +gpua093:1409359:1409423 [3] NCCL INFO Channel 01/0 : 59[c7000] -> 58[85000] via P2P/IPC/read +gpua093:1409359:1409423 [3] NCCL INFO Connected all trees +gpua093:1409359:1409423 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua093:1409359:1409423 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua093:1409359:1409423 [3] NCCL INFO comm 0x557798b80960 rank 59 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua007:2608783:2608852 [0] NCCL INFO Channel 01/0 : 0[7000] -> 1[46000] via P2P/IPC/read +gpua007:2608783:2608852 [0] NCCL INFO Connected all rings +gpua007:2608783:2608852 [0] NCCL INFO Channel 01/0 : 0[7000] -> 4[7000] [send] via NET/Socket/1 +gpua007:2608783:2608852 [0] NCCL INFO Channel 00/0 : 32[7000] -> 0[7000] [receive] via NET/Socket/1 +gpua007:2608783:2608852 [0] NCCL INFO Channel 
00/0 : 0[7000] -> 32[7000] [send] via NET/Socket/1 +gpua007:2608783:2608852 [0] NCCL INFO Channel 01/0 : 4[7000] -> 0[7000] [receive] via NET/Socket/1 +gpua007:2608783:2608852 [0] NCCL INFO Connected all trees +gpua007:2608783:2608852 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua007:2608783:2608852 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua007:2608783:2608852 [0] NCCL INFO comm 0x55bca0510910 rank 0 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua011:1153316:1153316 [3] NCCL INFO cudaDriverVersion 12020 +gpua011:1153316:1153316 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.11<0> +gpua011:1153316:1153316 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua011:1153316:1153375 [3] NCCL INFO NET/IB : No device found. +gpua011:1153316:1153375 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.11<0> [1]hsn0:141.142.145.11<0> +gpua011:1153316:1153375 [3] NCCL INFO Using network Socket +gpua011:1153316:1153375 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua011:1153316:1153375 [3] NCCL INFO Trees [0] -1/-1/-1->15->14 [1] -1/-1/-1->15->14 +gpua011:1153316:1153375 [3] NCCL INFO Channel 00/0 : 15[c7000] -> 16[7000] [send] via NET/Socket/1 +gpua011:1153316:1153375 [3] NCCL INFO Channel 01/0 : 15[c7000] -> 16[7000] [send] via NET/Socket/1 +gpua011:1153316:1153375 [3] NCCL INFO Connected all rings +gpua011:1153316:1153375 [3] NCCL INFO Channel 00/0 : 15[c7000] -> 14[85000] via P2P/IPC/read +gpua014:2738633:2738694 [3] NCCL INFO Channel 01/0 : 23[c7000] -> 22[85000] via P2P/IPC/read +gpua014:2738633:2738694 [3] NCCL INFO Connected all trees +gpua014:2738633:2738694 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua014:2738633:2738694 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua014:2738633:2738694 [3] NCCL INFO comm 0x5653bb06b2a0 rank 23 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua020:3893199:3893199 [1] NCCL INFO cudaDriverVersion 12020 +gpua020:3893199:3893199 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.20<0> +gpua020:3893199:3893199 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua020:3893199:3893256 [1] NCCL INFO NET/IB : No device found. 
+gpua020:3893199:3893256 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.20<0> [1]hsn0:141.142.145.20<0> +gpua020:3893199:3893256 [1] NCCL INFO Using network Socket +gpua020:3893199:3893256 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua020:3893199:3893256 [1] NCCL INFO Trees [0] 26/20/-1->25->24 [1] 26/-1/-1->25->24 +gpua020:3893199:3893256 [1] NCCL INFO Channel 00/0 : 25[46000] -> 26[85000] via P2P/IPC/read +gpua020:3893199:3893256 [1] NCCL INFO Channel 01/0 : 25[46000] -> 26[85000] via P2P/IPC/read +gpua020:3893199:3893256 [1] NCCL INFO Connected all rings +gpua020:3893199:3893256 [1] NCCL INFO Channel 00/0 : 20[7000] -> 25[46000] [receive] via NET/Socket/1 +gpua038:4038365:4038433 [2] NCCL INFO Channel 01/0 : 30[85000] -> 29[46000] via P2P/IPC/read +gpua038:4038365:4038433 [2] NCCL INFO Connected all trees +gpua038:4038365:4038433 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua038:4038365:4038433 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua038:4038365:4038433 [2] NCCL INFO comm 0x560d1b2e0880 rank 30 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua042:696905:697146 [0] NCCL INFO Connected all rings +gpua042:696905:697146 [0] NCCL INFO Channel 01/0 : 32[7000] -> 36[7000] [send] via NET/Socket/1 +gpua042:696905:697146 [0] NCCL INFO Channel 00/0 : 32[7000] -> 48[7000] [send] via NET/Socket/1 +gpua042:696905:697146 [0] NCCL INFO Channel 00/0 : 0[7000] -> 32[7000] [receive] via NET/Socket/1 +gpua042:696905:697146 [0] NCCL INFO Channel 00/0 : 32[7000] -> 0[7000] [send] via NET/Socket/1 +gpua042:696905:697146 [0] NCCL INFO Channel 00/0 : 48[7000] -> 32[7000] [receive] via NET/Socket/1 +gpua042:696905:697146 [0] NCCL INFO Channel 01/0 : 36[7000] -> 32[7000] [receive] via NET/Socket/1 +gpua042:696905:697146 [0] NCCL INFO Connected all trees +gpua042:696905:697146 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua042:696905:697146 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua042:696905:697146 [0] NCCL INFO comm 0x563f56db3c00 rank 32 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua093:1409356:1409356 [0] NCCL INFO cudaDriverVersion 12020 +gpua093:1409356:1409356 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.93<0> +gpua093:1409356:1409356 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua093:1409356:1409425 [0] NCCL INFO NET/IB : No device found. +gpua093:1409356:1409425 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.93<0> [1]hsn0:141.142.145.93<0> +gpua093:1409356:1409425 [0] NCCL INFO Using network Socket +gpua093:1409356:1409425 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua093:1409356:1409425 [0] NCCL INFO Trees [0] 57/60/-1->56->48 [1] 57/-1/-1->56->53 +gpua093:1409356:1409425 [0] NCCL INFO Channel 00/0 : 55[c7000] -> 56[7000] [receive] via NET/Socket/1 +gpua093:1409356:1409425 [0] NCCL INFO Channel 01/0 : 55[c7000] -> 56[7000] [receive] via NET/Socket/1 +gpua093:1409356:1409425 [0] NCCL INFO Channel 00/0 : 56[7000] -> 57[46000] via P2P/IPC/read +gpua007:2608784:2608784 [1] NCCL INFO cudaDriverVersion 12020 +gpua007:2608784:2608784 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.7<0> +gpua007:2608784:2608784 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua007:2608784:2608851 [1] NCCL INFO NET/IB : No device found. 
+gpua007:2608784:2608851 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.7<0> [1]hsn0:141.142.145.7<0> +gpua007:2608784:2608851 [1] NCCL INFO Using network Socket +gpua007:2608784:2608851 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua007:2608784:2608851 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 +gpua007:2608784:2608851 [1] NCCL INFO Channel 00/0 : 1[46000] -> 2[85000] via P2P/IPC/read +gpua007:2608784:2608851 [1] NCCL INFO Channel 01/0 : 1[46000] -> 2[85000] via P2P/IPC/read +gpua007:2608784:2608851 [1] NCCL INFO Connected all rings +gpua007:2608784:2608851 [1] NCCL INFO Channel 00/0 : 1[46000] -> 0[7000] via P2P/IPC/read +gpua011:1153316:1153375 [3] NCCL INFO Channel 01/0 : 15[c7000] -> 14[85000] via P2P/IPC/read +gpua011:1153316:1153375 [3] NCCL INFO Connected all trees +gpua011:1153316:1153375 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua011:1153316:1153375 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua011:1153316:1153375 [3] NCCL INFO comm 0x55e80c606a80 rank 15 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua020:3893199:3893256 [1] NCCL INFO Channel 00/0 : 25[46000] -> 20[7000] [send] via NET/Socket/1 +gpua020:3893199:3893256 [1] NCCL INFO Channel 00/0 : 25[46000] -> 24[7000] via P2P/IPC/read +gpua020:3893199:3893256 [1] NCCL INFO Channel 01/0 : 25[46000] -> 24[7000] via P2P/IPC/read +gpua020:3893199:3893256 [1] NCCL INFO Connected all trees +gpua020:3893199:3893256 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua020:3893199:3893256 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua020:3893199:3893256 [1] NCCL INFO comm 0x5606abafde60 rank 25 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua038:4038366:4038366 [3] NCCL INFO cudaDriverVersion 12020 +gpua038:4038366:4038366 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.38<0> +gpua038:4038366:4038366 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua038:4038366:4038432 [3] NCCL INFO NET/IB : No device found. +gpua038:4038366:4038432 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.38<0> [1]hsn0:141.142.145.38<0> +gpua038:4038366:4038432 [3] NCCL INFO Using network Socket +gpua038:4038366:4038432 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua038:4038366:4038432 [3] NCCL INFO Trees [0] -1/-1/-1->31->30 [1] -1/-1/-1->31->30 +gpua038:4038366:4038432 [3] NCCL INFO Channel 00/0 : 31[c7000] -> 32[7000] [send] via NET/Socket/1 +gpua038:4038366:4038432 [3] NCCL INFO Channel 01/0 : 31[c7000] -> 32[7000] [send] via NET/Socket/1 +gpua038:4038366:4038432 [3] NCCL INFO Connected all rings +gpua038:4038366:4038432 [3] NCCL INFO Channel 00/0 : 31[c7000] -> 30[85000] via P2P/IPC/read +gpua042:696906:696906 [1] NCCL INFO cudaDriverVersion 12020 +gpua042:696906:696906 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.42<0> +gpua042:696906:696906 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua042:696906:697143 [1] NCCL INFO NET/IB : No device found. 
+gpua042:696906:697143 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.42<0> [1]hsn0:141.142.145.42<0> +gpua042:696906:697143 [1] NCCL INFO Using network Socket +gpua042:696906:697143 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua042:696906:697143 [1] NCCL INFO Trees [0] 34/16/-1->33->32 [1] 34/-1/-1->33->32 +gpua042:696906:697143 [1] NCCL INFO Channel 00/0 : 33[46000] -> 34[85000] via P2P/IPC/read +gpua042:696906:697143 [1] NCCL INFO Channel 01/0 : 33[46000] -> 34[85000] via P2P/IPC/read +gpua042:696906:697143 [1] NCCL INFO Connected all rings +gpua042:696906:697143 [1] NCCL INFO Channel 00/0 : 16[7000] -> 33[46000] [receive] via NET/Socket/1 +gpua093:1409356:1409425 [0] NCCL INFO Channel 01/0 : 56[7000] -> 57[46000] via P2P/IPC/read +gpua093:1409356:1409425 [0] NCCL INFO Connected all rings +gpua093:1409356:1409425 [0] NCCL INFO Channel 01/0 : 53[46000] -> 56[7000] [receive] via NET/Socket/1 +gpua093:1409356:1409425 [0] NCCL INFO Channel 00/0 : 56[7000] -> 60[7000] [send] via NET/Socket/1 +gpua093:1409356:1409425 [0] NCCL INFO Channel 00/0 : 48[7000] -> 56[7000] [receive] via NET/Socket/1 +gpua093:1409356:1409425 [0] NCCL INFO Channel 00/0 : 56[7000] -> 48[7000] [send] via NET/Socket/1 +gpua093:1409356:1409425 [0] NCCL INFO Channel 00/0 : 60[7000] -> 56[7000] [receive] via NET/Socket/1 +gpua093:1409356:1409425 [0] NCCL INFO Channel 01/0 : 56[7000] -> 53[46000] [send] via NET/Socket/1 +gpua093:1409356:1409425 [0] NCCL INFO Connected all trees +gpua093:1409356:1409425 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua093:1409356:1409425 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua007:2608784:2608851 [1] NCCL INFO Channel 01/0 : 1[46000] -> 0[7000] via P2P/IPC/read +gpua007:2608784:2608851 [1] NCCL INFO Connected all trees +gpua007:2608784:2608851 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua007:2608784:2608851 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua007:2608784:2608851 [1] NCCL INFO comm 0x5624ff560fd0 rank 1 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua038:4038366:4038432 [3] NCCL INFO Channel 01/0 : 31[c7000] -> 30[85000] via P2P/IPC/read +gpua038:4038366:4038432 [3] NCCL INFO Connected all trees +gpua038:4038366:4038432 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua038:4038366:4038432 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua038:4038366:4038432 [3] NCCL INFO comm 0x55ac99922f50 rank 31 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua065:2980961:2980961 [2] NCCL INFO cudaDriverVersion 12020 +gpua065:2980961:2980961 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.65<0> +gpua065:2980961:2980961 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua065:2980961:2981031 [2] NCCL INFO NET/IB : No device found. 
+gpua065:2980961:2981031 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.65<0> [1]hsn0:141.142.145.65<0> +gpua065:2980961:2981031 [2] NCCL INFO Using network Socket +gpua065:2980961:2981031 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua065:2980961:2981031 [2] NCCL INFO Trees [0] 43/-1/-1->42->41 [1] 43/-1/-1->42->41 +gpua065:2980961:2981031 [2] NCCL INFO Channel 00/0 : 42[85000] -> 43[c7000] via P2P/IPC/read +gpua065:2980961:2981031 [2] NCCL INFO Channel 01/0 : 42[85000] -> 43[c7000] via P2P/IPC/read +gpua065:2980961:2981031 [2] NCCL INFO Connected all rings +gpua065:2980961:2981031 [2] NCCL INFO Channel 00/0 : 42[85000] -> 41[46000] via P2P/IPC/read +gpua042:696906:697143 [1] NCCL INFO Channel 00/0 : 33[46000] -> 16[7000] [send] via NET/Socket/1 +gpua042:696906:697143 [1] NCCL INFO Channel 00/0 : 33[46000] -> 32[7000] via P2P/IPC/read +gpua042:696906:697143 [1] NCCL INFO Channel 01/0 : 33[46000] -> 32[7000] via P2P/IPC/read +gpua042:696906:697143 [1] NCCL INFO Connected all trees +gpua042:696906:697143 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua042:696906:697143 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua042:696906:697143 [1] NCCL INFO comm 0x55df1b589050 rank 33 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua093:1409356:1409425 [0] NCCL INFO comm 0x55cd0b2fb510 rank 56 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua093:1409357:1409357 [1] NCCL INFO cudaDriverVersion 12020 +gpua093:1409357:1409357 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.93<0> +gpua093:1409357:1409357 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua093:1409357:1409422 [1] NCCL INFO NET/IB : No device found. +gpua093:1409357:1409422 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.93<0> [1]hsn0:141.142.145.93<0> +gpua093:1409357:1409422 [1] NCCL INFO Using network Socket +gpua093:1409357:1409422 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua093:1409357:1409422 [1] NCCL INFO Trees [0] 58/52/-1->57->56 [1] 58/-1/-1->57->56 +gpua093:1409357:1409422 [1] NCCL INFO Channel 00/0 : 57[46000] -> 58[85000] via P2P/IPC/read +gpua093:1409357:1409422 [1] NCCL INFO Channel 01/0 : 57[46000] -> 58[85000] via P2P/IPC/read +gpua093:1409357:1409422 [1] NCCL INFO Connected all rings +gpua012:2375596:2375596 [3] NCCL INFO cudaDriverVersion 12020 +gpua012:2375596:2375596 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.12<0> +gpua012:2375596:2375596 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua012:2375596:2375661 [3] NCCL INFO NET/IB : No device found. 
+gpua012:2375596:2375661 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.12<0> [1]hsn0:141.142.145.12<0> +gpua012:2375596:2375661 [3] NCCL INFO Using network Socket +gpua012:2375596:2375661 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua012:2375596:2375661 [3] NCCL INFO Trees [0] -1/-1/-1->19->18 [1] -1/-1/-1->19->18 +gpua012:2375596:2375661 [3] NCCL INFO Channel 00/0 : 19[c7000] -> 20[7000] [send] via NET/Socket/1 +gpua012:2375596:2375661 [3] NCCL INFO Channel 01/0 : 19[c7000] -> 20[7000] [send] via NET/Socket/1 +gpua012:2375596:2375661 [3] NCCL INFO Connected all rings +gpua012:2375596:2375661 [3] NCCL INFO Channel 00/0 : 19[c7000] -> 18[85000] via P2P/IPC/read +gpua065:2980961:2981031 [2] NCCL INFO Channel 01/0 : 42[85000] -> 41[46000] via P2P/IPC/read +gpua065:2980961:2981031 [2] NCCL INFO Connected all trees +gpua065:2980961:2981031 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua065:2980961:2981031 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua065:2980961:2981031 [2] NCCL INFO comm 0x561fe27cb980 rank 42 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua093:1409357:1409422 [1] NCCL INFO Channel 00/0 : 52[7000] -> 57[46000] [receive] via NET/Socket/1 +gpua093:1409357:1409422 [1] NCCL INFO Channel 00/0 : 57[46000] -> 52[7000] [send] via NET/Socket/1 +gpua093:1409357:1409422 [1] NCCL INFO Channel 00/0 : 57[46000] -> 56[7000] via P2P/IPC/read +gpua093:1409357:1409422 [1] NCCL INFO Channel 01/0 : 57[46000] -> 56[7000] via P2P/IPC/read +gpua093:1409357:1409422 [1] NCCL INFO Connected all trees +gpua093:1409357:1409422 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua093:1409357:1409422 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua093:1409357:1409422 [1] NCCL INFO comm 0x55eb2dfa0010 rank 57 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua012:2375596:2375661 [3] NCCL INFO Channel 01/0 : 19[c7000] -> 18[85000] via P2P/IPC/read +gpua012:2375596:2375661 [3] NCCL INFO Connected all trees +gpua012:2375596:2375661 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua012:2375596:2375661 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua012:2375596:2375661 [3] NCCL INFO comm 0x5575efab1170 rank 19 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua012:2375594:2375594 [1] NCCL INFO cudaDriverVersion 12020 +gpua012:2375594:2375594 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.12<0> +gpua012:2375594:2375594 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua012:2375594:2375659 [1] NCCL INFO NET/IB : No device found. 
+gpua012:2375594:2375659 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.12<0> [1]hsn0:141.142.145.12<0> +gpua012:2375594:2375659 [1] NCCL INFO Using network Socket +gpua012:2375594:2375659 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua012:2375594:2375659 [1] NCCL INFO Trees [0] 18/8/-1->17->16 [1] 18/-1/-1->17->16 +gpua012:2375594:2375659 [1] NCCL INFO Channel 00/0 : 17[46000] -> 18[85000] via P2P/IPC/read +gpua012:2375594:2375659 [1] NCCL INFO Channel 01/0 : 17[46000] -> 18[85000] via P2P/IPC/read +gpua012:2375594:2375659 [1] NCCL INFO Connected all rings +gpua012:2375594:2375659 [1] NCCL INFO Channel 00/0 : 8[7000] -> 17[46000] [receive] via NET/Socket/1 +gpua065:2980962:2980962 [3] NCCL INFO cudaDriverVersion 12020 +gpua065:2980962:2980962 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.65<0> +gpua065:2980962:2980962 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua065:2980962:2981028 [3] NCCL INFO NET/IB : No device found. +gpua065:2980962:2981028 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.65<0> [1]hsn0:141.142.145.65<0> +gpua065:2980962:2981028 [3] NCCL INFO Using network Socket +gpua065:2980962:2981028 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua065:2980962:2981028 [3] NCCL INFO Trees [0] -1/-1/-1->43->42 [1] -1/-1/-1->43->42 +gpua065:2980962:2981028 [3] NCCL INFO Channel 00/0 : 43[c7000] -> 44[7000] [send] via NET/Socket/1 +gpua065:2980962:2981028 [3] NCCL INFO Channel 01/0 : 43[c7000] -> 44[7000] [send] via NET/Socket/1 +gpua065:2980962:2981028 [3] NCCL INFO Connected all rings +gpua065:2980962:2981028 [3] NCCL INFO Channel 00/0 : 43[c7000] -> 42[85000] via P2P/IPC/read +gpua011:1153315:1153315 [2] NCCL INFO cudaDriverVersion 12020 +gpua011:1153315:1153315 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.11<0> +gpua011:1153315:1153315 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua011:1153315:1153372 [2] NCCL INFO NET/IB : No device found. 
+gpua011:1153315:1153372 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.11<0> [1]hsn0:141.142.145.11<0> +gpua011:1153315:1153372 [2] NCCL INFO Using network Socket +gpua011:1153315:1153372 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua011:1153315:1153372 [2] NCCL INFO Trees [0] 15/-1/-1->14->13 [1] 15/-1/-1->14->13 +gpua011:1153315:1153372 [2] NCCL INFO Channel 00/0 : 14[85000] -> 15[c7000] via P2P/IPC/read +gpua011:1153315:1153372 [2] NCCL INFO Channel 01/0 : 14[85000] -> 15[c7000] via P2P/IPC/read +gpua011:1153315:1153372 [2] NCCL INFO Connected all rings +gpua011:1153315:1153372 [2] NCCL INFO Channel 00/0 : 14[85000] -> 13[46000] via P2P/IPC/read +gpua012:2375594:2375659 [1] NCCL INFO Channel 00/0 : 17[46000] -> 8[7000] [send] via NET/Socket/1 +gpua012:2375594:2375659 [1] NCCL INFO Channel 00/0 : 17[46000] -> 16[7000] via P2P/IPC/read +gpua012:2375594:2375659 [1] NCCL INFO Channel 01/0 : 17[46000] -> 16[7000] via P2P/IPC/read +gpua012:2375594:2375659 [1] NCCL INFO Connected all trees +gpua012:2375594:2375659 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua012:2375594:2375659 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua012:2375594:2375659 [1] NCCL INFO comm 0x555a5645fdf0 rank 17 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua065:2980962:2981028 [3] NCCL INFO Channel 01/0 : 43[c7000] -> 42[85000] via P2P/IPC/read +gpua065:2980962:2981028 [3] NCCL INFO Connected all trees +gpua065:2980962:2981028 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua065:2980962:2981028 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua065:2980962:2981028 [3] NCCL INFO comm 0x560fe399ee20 rank 43 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua011:1153315:1153372 [2] NCCL INFO Channel 01/0 : 14[85000] -> 13[46000] via P2P/IPC/read +gpua011:1153315:1153372 [2] NCCL INFO Connected all trees +gpua011:1153315:1153372 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua011:1153315:1153372 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua011:1153315:1153372 [2] NCCL INFO comm 0x55588bc372c0 rank 14 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua020:3893200:3893200 [2] NCCL INFO cudaDriverVersion 12020 +gpua020:3893200:3893200 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.20<0> +gpua020:3893200:3893200 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua020:3893200:3893259 [2] NCCL INFO NET/IB : No device found. 
+gpua020:3893200:3893259 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.20<0> [1]hsn0:141.142.145.20<0> +gpua020:3893200:3893259 [2] NCCL INFO Using network Socket +gpua020:3893200:3893259 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua020:3893200:3893259 [2] NCCL INFO Trees [0] 27/-1/-1->26->25 [1] 27/-1/-1->26->25 +gpua020:3893200:3893259 [2] NCCL INFO Channel 00/0 : 26[85000] -> 27[c7000] via P2P/IPC/read +gpua020:3893200:3893259 [2] NCCL INFO Channel 01/0 : 26[85000] -> 27[c7000] via P2P/IPC/read +gpua020:3893200:3893259 [2] NCCL INFO Connected all rings +gpua020:3893200:3893259 [2] NCCL INFO Channel 00/0 : 26[85000] -> 25[46000] via P2P/IPC/read +gpua020:3893200:3893259 [2] NCCL INFO Channel 01/0 : 26[85000] -> 25[46000] via P2P/IPC/read +gpua020:3893200:3893259 [2] NCCL INFO Connected all trees +gpua020:3893200:3893259 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua020:3893200:3893259 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua020:3893200:3893259 [2] NCCL INFO comm 0x56176f5709c0 rank 26 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua012:2375593:2375593 [0] NCCL INFO cudaDriverVersion 12020 +gpua012:2375593:2375593 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.12<0> +gpua012:2375593:2375593 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua012:2375593:2375660 [0] NCCL INFO NET/IB : No device found. +gpua012:2375593:2375660 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.12<0> [1]hsn0:141.142.145.12<0> +gpua012:2375593:2375660 [0] NCCL INFO Using network Socket +gpua012:2375593:2375660 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua012:2375593:2375660 [0] NCCL INFO Trees [0] 17/24/-1->16->33 [1] 17/-1/-1->16->20 +gpua012:2375593:2375660 [0] NCCL INFO Channel 00/0 : 15[c7000] -> 16[7000] [receive] via NET/Socket/1 +gpua012:2375593:2375660 [0] NCCL INFO Channel 01/0 : 15[c7000] -> 16[7000] [receive] via NET/Socket/1 +gpua012:2375593:2375660 [0] NCCL INFO Channel 00/0 : 16[7000] -> 17[46000] via P2P/IPC/read +gpua012:2375593:2375660 [0] NCCL INFO Channel 01/0 : 16[7000] -> 17[46000] via P2P/IPC/read +gpua012:2375593:2375660 [0] NCCL INFO Connected all rings +gpua012:2375593:2375660 [0] NCCL INFO Channel 01/0 : 16[7000] -> 20[7000] [send] via NET/Socket/1 +gpua012:2375593:2375660 [0] NCCL INFO Channel 00/0 : 16[7000] -> 24[7000] [send] via NET/Socket/1 +gpua012:2375593:2375660 [0] NCCL INFO Channel 00/0 : 16[7000] -> 33[46000] [send] via NET/Socket/1 +gpua012:2375593:2375660 [0] NCCL INFO Channel 00/0 : 33[46000] -> 16[7000] [receive] via NET/Socket/1 +gpua012:2375593:2375660 [0] NCCL INFO Channel 00/0 : 24[7000] -> 16[7000] [receive] via NET/Socket/1 +gpua012:2375593:2375660 [0] NCCL INFO Channel 01/0 : 20[7000] -> 16[7000] [receive] via NET/Socket/1 +gpua012:2375593:2375660 [0] NCCL INFO Connected all trees +gpua012:2375593:2375660 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua012:2375593:2375660 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua061:221443:221443 [0] NCCL INFO cudaDriverVersion 12020 +gpua061:221443:221443 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.61<0> +gpua061:221443:221443 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua061:221443:221501 [0] NCCL INFO NET/IB : No device found. 
+gpua061:221443:221501 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.61<0> [1]hsn0:141.142.145.61<0> +gpua061:221443:221501 [0] NCCL INFO Using network Socket +gpua061:221443:221501 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua061:221443:221501 [0] NCCL INFO Trees [0] 37/-1/-1->36->41 [1] 37/32/-1->36->44 +gpua061:221443:221501 [0] NCCL INFO Channel 00/0 : 35[c7000] -> 36[7000] [receive] via NET/Socket/1 +gpua061:221443:221501 [0] NCCL INFO Channel 01/0 : 35[c7000] -> 36[7000] [receive] via NET/Socket/1 +gpua061:221443:221501 [0] NCCL INFO Channel 00/0 : 36[7000] -> 37[46000] via P2P/IPC/read +gpua061:221443:221501 [0] NCCL INFO Channel 01/0 : 36[7000] -> 37[46000] via P2P/IPC/read +gpua012:2375593:2375660 [0] NCCL INFO comm 0x558c9a234b50 rank 16 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua061:221443:221501 [0] NCCL INFO Connected all rings +gpua061:221443:221501 [0] NCCL INFO Channel 01/0 : 32[7000] -> 36[7000] [receive] via NET/Socket/1 +gpua061:221443:221501 [0] NCCL INFO Channel 00/0 : 36[7000] -> 41[46000] [send] via NET/Socket/1 +gpua061:221443:221501 [0] NCCL INFO Channel 01/0 : 36[7000] -> 44[7000] [send] via NET/Socket/1 +gpua061:221443:221501 [0] NCCL INFO Channel 01/0 : 44[7000] -> 36[7000] [receive] via NET/Socket/1 +gpua061:221443:221501 [0] NCCL INFO Channel 00/0 : 41[46000] -> 36[7000] [receive] via NET/Socket/1 +gpua061:221443:221501 [0] NCCL INFO Channel 01/0 : 36[7000] -> 32[7000] [send] via NET/Socket/1 +gpua061:221443:221501 [0] NCCL INFO Connected all trees +gpua061:221443:221501 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua061:221443:221501 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua061:221443:221501 [0] NCCL INFO comm 0x55c2b9e24e10 rank 36 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua014:2738632:2738632 [2] NCCL INFO cudaDriverVersion 12020 +gpua014:2738632:2738632 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.14<0> +gpua014:2738632:2738632 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua014:2738632:2738695 [2] NCCL INFO NET/IB : No device found. +gpua014:2738632:2738695 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.14<0> [1]hsn0:141.142.145.14<0> +gpua014:2738632:2738695 [2] NCCL INFO Using network Socket +gpua014:2738632:2738695 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua014:2738632:2738695 [2] NCCL INFO Trees [0] 23/-1/-1->22->21 [1] 23/-1/-1->22->21 +gpua014:2738632:2738695 [2] NCCL INFO Channel 00/0 : 22[85000] -> 23[c7000] via P2P/IPC/read +gpua014:2738632:2738695 [2] NCCL INFO Channel 01/0 : 22[85000] -> 23[c7000] via P2P/IPC/read +gpua014:2738632:2738695 [2] NCCL INFO Connected all rings +gpua014:2738632:2738695 [2] NCCL INFO Channel 00/0 : 22[85000] -> 21[46000] via P2P/IPC/read +gpua061:221444:221444 [1] NCCL INFO cudaDriverVersion 12020 +gpua061:221444:221444 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.61<0> +gpua061:221444:221444 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua061:221444:221503 [1] NCCL INFO NET/IB : No device found. 
+gpua061:221444:221503 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.61<0> [1]hsn0:141.142.145.61<0> +gpua061:221444:221503 [1] NCCL INFO Using network Socket +gpua061:221444:221503 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua061:221444:221503 [1] NCCL INFO Trees [0] 38/-1/-1->37->36 [1] 38/40/-1->37->36 +gpua061:221444:221503 [1] NCCL INFO Channel 00/0 : 37[46000] -> 38[85000] via P2P/IPC/read +gpua061:221444:221503 [1] NCCL INFO Channel 01/0 : 37[46000] -> 38[85000] via P2P/IPC/read +gpua061:221444:221503 [1] NCCL INFO Connected all rings +gpua061:221444:221503 [1] NCCL INFO Channel 01/0 : 37[46000] -> 40[7000] [send] via NET/Socket/1 +gpua007:2608786:2608786 [3] NCCL INFO cudaDriverVersion 12020 +gpua007:2608786:2608786 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.7<0> +gpua007:2608786:2608786 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua007:2608786:2608853 [3] NCCL INFO NET/IB : No device found. +gpua007:2608786:2608853 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.7<0> [1]hsn0:141.142.145.7<0> +gpua007:2608786:2608853 [3] NCCL INFO Using network Socket +gpua007:2608786:2608853 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua007:2608786:2608853 [3] NCCL INFO Trees [0] -1/-1/-1->3->2 [1] -1/-1/-1->3->2 +gpua007:2608786:2608853 [3] NCCL INFO Channel 00/0 : 3[c7000] -> 4[7000] [send] via NET/Socket/1 +gpua007:2608786:2608853 [3] NCCL INFO Channel 01/0 : 3[c7000] -> 4[7000] [send] via NET/Socket/1 +gpua007:2608786:2608853 [3] NCCL INFO Connected all rings +gpua007:2608786:2608853 [3] NCCL INFO Channel 00/0 : 3[c7000] -> 2[85000] via P2P/IPC/read +gpua014:2738632:2738695 [2] NCCL INFO Channel 01/0 : 22[85000] -> 21[46000] via P2P/IPC/read +gpua014:2738632:2738695 [2] NCCL INFO Connected all trees +gpua014:2738632:2738695 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua014:2738632:2738695 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua014:2738632:2738695 [2] NCCL INFO comm 0x56100344fdd0 rank 22 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua061:221444:221503 [1] NCCL INFO Channel 01/0 : 40[7000] -> 37[46000] [receive] via NET/Socket/1 +gpua061:221444:221503 [1] NCCL INFO Channel 00/0 : 37[46000] -> 36[7000] via P2P/IPC/read +gpua061:221444:221503 [1] NCCL INFO Channel 01/0 : 37[46000] -> 36[7000] via P2P/IPC/read +gpua061:221444:221503 [1] NCCL INFO Connected all trees +gpua061:221444:221503 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua061:221444:221503 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua061:221444:221503 [1] NCCL INFO comm 0x5630d8554a90 rank 37 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua007:2608786:2608853 [3] NCCL INFO Channel 01/0 : 3[c7000] -> 2[85000] via P2P/IPC/read +gpua007:2608786:2608853 [3] NCCL INFO Connected all trees +gpua007:2608786:2608853 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua007:2608786:2608853 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua007:2608786:2608853 [3] NCCL INFO comm 0x55fe6c7195e0 rank 3 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua010:4003867:4003867 [3] NCCL INFO cudaDriverVersion 12020 +gpua010:4003867:4003867 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.10<0> +gpua010:4003867:4003867 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua010:4003867:4003931 [3] NCCL INFO NET/IB : No device found. 
+gpua010:4003867:4003931 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.10<0> [1]hsn0:141.142.145.10<0> +gpua010:4003867:4003931 [3] NCCL INFO Using network Socket +gpua010:4003867:4003931 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua010:4003867:4003931 [3] NCCL INFO Trees [0] -1/-1/-1->11->10 [1] -1/-1/-1->11->10 +gpua010:4003867:4003931 [3] NCCL INFO Channel 00/0 : 11[c7000] -> 12[7000] [send] via NET/Socket/1 +gpua010:4003867:4003931 [3] NCCL INFO Channel 01/0 : 11[c7000] -> 12[7000] [send] via NET/Socket/1 +gpua010:4003867:4003931 [3] NCCL INFO Connected all rings +gpua010:4003867:4003931 [3] NCCL INFO Channel 00/0 : 11[c7000] -> 10[85000] via P2P/IPC/read +gpua010:4003867:4003931 [3] NCCL INFO Channel 01/0 : 11[c7000] -> 10[85000] via P2P/IPC/read +gpua010:4003867:4003931 [3] NCCL INFO Connected all trees +gpua010:4003867:4003931 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua010:4003867:4003931 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua010:4003867:4003931 [3] NCCL INFO comm 0x55fe3d4795d0 rank 11 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua010:4003866:4003866 [2] NCCL INFO cudaDriverVersion 12020 +gpua010:4003866:4003866 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.10<0> +gpua010:4003866:4003866 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua010:4003866:4003928 [2] NCCL INFO NET/IB : No device found. +gpua010:4003866:4003928 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.10<0> [1]hsn0:141.142.145.10<0> +gpua010:4003866:4003928 [2] NCCL INFO Using network Socket +gpua010:4003866:4003928 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua010:4003866:4003928 [2] NCCL INFO Trees [0] 11/-1/-1->10->9 [1] 11/-1/-1->10->9 +gpua010:4003866:4003928 [2] NCCL INFO Channel 00/0 : 10[85000] -> 11[c7000] via P2P/IPC/read +gpua010:4003866:4003928 [2] NCCL INFO Channel 01/0 : 10[85000] -> 11[c7000] via P2P/IPC/read +gpua010:4003866:4003928 [2] NCCL INFO Connected all rings +gpua010:4003866:4003928 [2] NCCL INFO Channel 00/0 : 10[85000] -> 9[46000] via P2P/IPC/read +gpua010:4003866:4003928 [2] NCCL INFO Channel 01/0 : 10[85000] -> 9[46000] via P2P/IPC/read +gpua010:4003866:4003928 [2] NCCL INFO Connected all trees +gpua010:4003866:4003928 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua010:4003866:4003928 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua010:4003866:4003928 [2] NCCL INFO comm 0x5555ad295740 rank 10 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua012:2375595:2375595 [2] NCCL INFO cudaDriverVersion 12020 +gpua012:2375595:2375595 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.12<0> +gpua012:2375595:2375595 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua012:2375595:2375658 [2] NCCL INFO NET/IB : No device found. 
+gpua012:2375595:2375658 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.12<0> [1]hsn0:141.142.145.12<0> +gpua012:2375595:2375658 [2] NCCL INFO Using network Socket +gpua012:2375595:2375658 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua012:2375595:2375658 [2] NCCL INFO Trees [0] 19/-1/-1->18->17 [1] 19/-1/-1->18->17 +gpua012:2375595:2375658 [2] NCCL INFO Channel 00/0 : 18[85000] -> 19[c7000] via P2P/IPC/read +gpua012:2375595:2375658 [2] NCCL INFO Channel 01/0 : 18[85000] -> 19[c7000] via P2P/IPC/read +gpua012:2375595:2375658 [2] NCCL INFO Connected all rings +gpua012:2375595:2375658 [2] NCCL INFO Channel 00/0 : 18[85000] -> 17[46000] via P2P/IPC/read +gpua012:2375595:2375658 [2] NCCL INFO Channel 01/0 : 18[85000] -> 17[46000] via P2P/IPC/read +gpua012:2375595:2375658 [2] NCCL INFO Connected all trees +gpua012:2375595:2375658 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua012:2375595:2375658 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua012:2375595:2375658 [2] NCCL INFO comm 0x555c0543c9f0 rank 18 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua008:3023446:3023446 [1] NCCL INFO cudaDriverVersion 12020 +gpua008:3023446:3023446 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.8<0> +gpua008:3023446:3023446 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua008:3023446:3023509 [1] NCCL INFO NET/IB : No device found. +gpua008:3023446:3023509 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.8<0> [1]hsn0:141.142.145.8<0> +gpua008:3023446:3023509 [1] NCCL INFO Using network Socket +gpua008:3023446:3023509 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua008:3023446:3023509 [1] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/8/-1->5->4 +gpua008:3023446:3023509 [1] NCCL INFO Channel 00/0 : 5[46000] -> 6[85000] via P2P/IPC/read +gpua008:3023446:3023509 [1] NCCL INFO Channel 01/0 : 5[46000] -> 6[85000] via P2P/IPC/read +gpua008:3023446:3023509 [1] NCCL INFO Connected all rings +gpua008:3023446:3023509 [1] NCCL INFO Channel 01/0 : 5[46000] -> 8[7000] [send] via NET/Socket/1 +gpua008:3023446:3023509 [1] NCCL INFO Channel 01/0 : 8[7000] -> 5[46000] [receive] via NET/Socket/1 +gpua008:3023446:3023509 [1] NCCL INFO Channel 00/0 : 5[46000] -> 4[7000] via P2P/IPC/read +gpua008:3023446:3023509 [1] NCCL INFO Channel 01/0 : 5[46000] -> 4[7000] via P2P/IPC/read +gpua008:3023446:3023509 [1] NCCL INFO Connected all trees +gpua008:3023446:3023509 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua008:3023446:3023509 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua008:3023446:3023509 [1] NCCL INFO comm 0x55a86c0fa1e0 rank 5 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua083:459829:459829 [2] NCCL INFO cudaDriverVersion 12020 +gpua083:459829:459829 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.83<0> +gpua083:459829:459829 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua083:459829:459882 [2] NCCL INFO NET/IB : No device found. 
+gpua083:459829:459882 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.83<0> [1]hsn0:141.142.145.83<0> +gpua083:459829:459882 [2] NCCL INFO Using network Socket +gpua083:459829:459882 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua083:459829:459882 [2] NCCL INFO Trees [0] 51/-1/-1->50->49 [1] 51/-1/-1->50->49 +gpua083:459829:459882 [2] NCCL INFO Channel 00/0 : 50[85000] -> 51[c7000] via P2P/IPC/read +gpua083:459829:459882 [2] NCCL INFO Channel 01/0 : 50[85000] -> 51[c7000] via P2P/IPC/read +gpua083:459829:459882 [2] NCCL INFO Connected all rings +gpua083:459829:459882 [2] NCCL INFO Channel 00/0 : 50[85000] -> 49[46000] via P2P/IPC/read +gpua084:3892530:3892530 [2] NCCL INFO cudaDriverVersion 12020 +gpua084:3892530:3892530 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.84<0> +gpua084:3892530:3892530 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua084:3892530:3892587 [2] NCCL INFO NET/IB : No device found. +gpua084:3892530:3892587 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.84<0> [1]hsn0:141.142.145.84<0> +gpua084:3892530:3892587 [2] NCCL INFO Using network Socket +gpua084:3892530:3892587 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua084:3892530:3892587 [2] NCCL INFO Trees [0] 55/-1/-1->54->53 [1] 55/-1/-1->54->53 +gpua084:3892530:3892587 [2] NCCL INFO Channel 00/0 : 54[85000] -> 55[c7000] via P2P/IPC/read +gpua084:3892530:3892587 [2] NCCL INFO Channel 01/0 : 54[85000] -> 55[c7000] via P2P/IPC/read +gpua084:3892530:3892587 [2] NCCL INFO Connected all rings +gpua084:3892530:3892587 [2] NCCL INFO Channel 00/0 : 54[85000] -> 53[46000] via P2P/IPC/read +gpua008:3023448:3023448 [3] NCCL INFO cudaDriverVersion 12020 +gpua008:3023448:3023448 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.8<0> +gpua008:3023448:3023448 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua008:3023448:3023507 [3] NCCL INFO NET/IB : No device found. 
+gpua008:3023448:3023507 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.8<0> [1]hsn0:141.142.145.8<0> +gpua008:3023448:3023507 [3] NCCL INFO Using network Socket +gpua008:3023448:3023507 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua008:3023448:3023507 [3] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 +gpua008:3023448:3023507 [3] NCCL INFO Channel 00/0 : 7[c7000] -> 8[7000] [send] via NET/Socket/1 +gpua008:3023448:3023507 [3] NCCL INFO Channel 01/0 : 7[c7000] -> 8[7000] [send] via NET/Socket/1 +gpua008:3023448:3023507 [3] NCCL INFO Connected all rings +gpua008:3023448:3023507 [3] NCCL INFO Channel 00/0 : 7[c7000] -> 6[85000] via P2P/IPC/read +gpua083:459829:459882 [2] NCCL INFO Channel 01/0 : 50[85000] -> 49[46000] via P2P/IPC/read +gpua083:459829:459882 [2] NCCL INFO Connected all trees +gpua083:459829:459882 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua083:459829:459882 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua083:459829:459882 [2] NCCL INFO comm 0x55bf3e4719e0 rank 50 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua084:3892530:3892587 [2] NCCL INFO Channel 01/0 : 54[85000] -> 53[46000] via P2P/IPC/read +gpua084:3892530:3892587 [2] NCCL INFO Connected all trees +gpua084:3892530:3892587 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua084:3892530:3892587 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua084:3892530:3892587 [2] NCCL INFO comm 0x55e68d0cfc10 rank 54 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua008:3023448:3023507 [3] NCCL INFO Channel 01/0 : 7[c7000] -> 6[85000] via P2P/IPC/read +gpua008:3023448:3023507 [3] NCCL INFO Connected all trees +gpua008:3023448:3023507 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua008:3023448:3023507 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua008:3023448:3023507 [3] NCCL INFO comm 0x5634dc1d2a00 rank 7 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua083:459830:459830 [3] NCCL INFO cudaDriverVersion 12020 +gpua083:459830:459830 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.83<0> +gpua083:459830:459830 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua083:459830:459883 [3] NCCL INFO NET/IB : No device found. +gpua083:459830:459883 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.83<0> [1]hsn0:141.142.145.83<0> +gpua083:459830:459883 [3] NCCL INFO Using network Socket +gpua083:459830:459883 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua083:459830:459883 [3] NCCL INFO Trees [0] -1/-1/-1->51->50 [1] -1/-1/-1->51->50 +gpua083:459830:459883 [3] NCCL INFO Channel 00/0 : 51[c7000] -> 52[7000] [send] via NET/Socket/1 +gpua083:459830:459883 [3] NCCL INFO Channel 01/0 : 51[c7000] -> 52[7000] [send] via NET/Socket/1 +gpua083:459830:459883 [3] NCCL INFO Connected all rings +gpua083:459830:459883 [3] NCCL INFO Channel 00/0 : 51[c7000] -> 50[85000] via P2P/IPC/read +gpua084:3892529:3892529 [1] NCCL INFO cudaDriverVersion 12020 +gpua084:3892529:3892529 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.84<0> +gpua084:3892529:3892529 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua084:3892529:3892589 [1] NCCL INFO NET/IB : No device found. 
+gpua084:3892529:3892589 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.84<0> [1]hsn0:141.142.145.84<0> +gpua084:3892529:3892589 [1] NCCL INFO Using network Socket +gpua084:3892529:3892589 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua084:3892529:3892589 [1] NCCL INFO Trees [0] 54/-1/-1->53->52 [1] 54/56/-1->53->52 +gpua084:3892529:3892589 [1] NCCL INFO Channel 00/0 : 53[46000] -> 54[85000] via P2P/IPC/read +gpua084:3892529:3892589 [1] NCCL INFO Channel 01/0 : 53[46000] -> 54[85000] via P2P/IPC/read +gpua084:3892529:3892589 [1] NCCL INFO Connected all rings +gpua084:3892529:3892589 [1] NCCL INFO Channel 01/0 : 53[46000] -> 56[7000] [send] via NET/Socket/1 +gpua008:3023447:3023447 [2] NCCL INFO cudaDriverVersion 12020 +gpua008:3023447:3023447 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.8<0> +gpua008:3023447:3023447 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua008:3023447:3023508 [2] NCCL INFO NET/IB : No device found. +gpua008:3023447:3023508 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.8<0> [1]hsn0:141.142.145.8<0> +gpua008:3023447:3023508 [2] NCCL INFO Using network Socket +gpua008:3023447:3023508 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 +gpua008:3023447:3023508 [2] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 +gpua008:3023447:3023508 [2] NCCL INFO Channel 00/0 : 6[85000] -> 7[c7000] via P2P/IPC/read +gpua008:3023447:3023508 [2] NCCL INFO Channel 01/0 : 6[85000] -> 7[c7000] via P2P/IPC/read +gpua008:3023447:3023508 [2] NCCL INFO Connected all rings +gpua008:3023447:3023508 [2] NCCL INFO Channel 00/0 : 6[85000] -> 5[46000] via P2P/IPC/read +gpua083:459830:459883 [3] NCCL INFO Channel 01/0 : 51[c7000] -> 50[85000] via P2P/IPC/read +gpua083:459830:459883 [3] NCCL INFO Connected all trees +gpua083:459830:459883 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua083:459830:459883 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua083:459830:459883 [3] NCCL INFO comm 0x5599896f9410 rank 51 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua084:3892529:3892589 [1] NCCL INFO Channel 01/0 : 56[7000] -> 53[46000] [receive] via NET/Socket/1 +gpua084:3892529:3892589 [1] NCCL INFO Channel 00/0 : 53[46000] -> 52[7000] via P2P/IPC/read +gpua084:3892529:3892589 [1] NCCL INFO Channel 01/0 : 53[46000] -> 52[7000] via P2P/IPC/read +gpua084:3892529:3892589 [1] NCCL INFO Connected all trees +gpua084:3892529:3892589 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua084:3892529:3892589 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua084:3892529:3892589 [1] NCCL INFO comm 0x5607281165a0 rank 53 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua008:3023447:3023508 [2] NCCL INFO Channel 01/0 : 6[85000] -> 5[46000] via P2P/IPC/read +gpua008:3023447:3023508 [2] NCCL INFO Connected all trees +gpua008:3023447:3023508 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua008:3023447:3023508 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua008:3023447:3023508 [2] NCCL INFO comm 0x55b396691720 rank 6 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE +gpua083:459828:459828 [1] NCCL INFO cudaDriverVersion 12020 +gpua083:459828:459828 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.83<0> +gpua083:459828:459828 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua083:459828:459884 [1] NCCL INFO NET/IB : No device found. 
+gpua083:459828:459884 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.83<0> [1]hsn0:141.142.145.83<0> +gpua083:459828:459884 [1] NCCL INFO Using network Socket +gpua083:459828:459884 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 +gpua083:459828:459884 [1] NCCL INFO Trees [0] 50/40/-1->49->48 [1] 50/-1/-1->49->48 +gpua083:459828:459884 [1] NCCL INFO Channel 00/0 : 49[46000] -> 50[85000] via P2P/IPC/read +gpua083:459828:459884 [1] NCCL INFO Channel 01/0 : 49[46000] -> 50[85000] via P2P/IPC/read +gpua083:459828:459884 [1] NCCL INFO Connected all rings +gpua083:459828:459884 [1] NCCL INFO Channel 00/0 : 40[7000] -> 49[46000] [receive] via NET/Socket/1 +gpua084:3892528:3892528 [0] NCCL INFO cudaDriverVersion 12020 +gpua084:3892528:3892528 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.84<0> +gpua084:3892528:3892528 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua084:3892528:3892588 [0] NCCL INFO NET/IB : No device found. +gpua084:3892528:3892588 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.84<0> [1]hsn0:141.142.145.84<0> +gpua084:3892528:3892588 [0] NCCL INFO Using network Socket +gpua084:3892528:3892588 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua084:3892528:3892588 [0] NCCL INFO Trees [0] 53/-1/-1->52->57 [1] 53/48/-1->52->45 +gpua084:3892528:3892588 [0] NCCL INFO Channel 00/0 : 51[c7000] -> 52[7000] [receive] via NET/Socket/1 +gpua084:3892528:3892588 [0] NCCL INFO Channel 01/0 : 51[c7000] -> 52[7000] [receive] via NET/Socket/1 +gpua084:3892528:3892588 [0] NCCL INFO Channel 00/0 : 52[7000] -> 53[46000] via P2P/IPC/read +gpua008:3023445:3023445 [0] NCCL INFO cudaDriverVersion 12020 +gpua008:3023445:3023445 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.8<0> +gpua008:3023445:3023445 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua008:3023445:3023506 [0] NCCL INFO NET/IB : No device found. 
+gpua008:3023445:3023506 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.8<0> [1]hsn0:141.142.145.8<0> +gpua008:3023445:3023506 [0] NCCL INFO Using network Socket +gpua008:3023445:3023506 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua008:3023445:3023506 [0] NCCL INFO Trees [0] 5/-1/-1->4->9 [1] 5/0/-1->4->12 +gpua008:3023445:3023506 [0] NCCL INFO Channel 00/0 : 3[c7000] -> 4[7000] [receive] via NET/Socket/1 +gpua008:3023445:3023506 [0] NCCL INFO Channel 01/0 : 3[c7000] -> 4[7000] [receive] via NET/Socket/1 +gpua008:3023445:3023506 [0] NCCL INFO Channel 00/0 : 4[7000] -> 5[46000] via P2P/IPC/read +gpua083:459828:459884 [1] NCCL INFO Channel 00/0 : 49[46000] -> 40[7000] [send] via NET/Socket/1 +gpua083:459828:459884 [1] NCCL INFO Channel 00/0 : 49[46000] -> 48[7000] via P2P/IPC/read +gpua083:459828:459884 [1] NCCL INFO Channel 01/0 : 49[46000] -> 48[7000] via P2P/IPC/read +gpua083:459828:459884 [1] NCCL INFO Connected all trees +gpua083:459828:459884 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua083:459828:459884 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua083:459828:459884 [1] NCCL INFO comm 0x564895f76430 rank 49 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE +gpua084:3892528:3892588 [0] NCCL INFO Channel 01/0 : 52[7000] -> 53[46000] via P2P/IPC/read +gpua084:3892528:3892588 [0] NCCL INFO Connected all rings +gpua084:3892528:3892588 [0] NCCL INFO Channel 01/0 : 48[7000] -> 52[7000] [receive] via NET/Socket/1 +gpua084:3892528:3892588 [0] NCCL INFO Channel 00/0 : 52[7000] -> 57[46000] [send] via NET/Socket/1 +gpua084:3892528:3892588 [0] NCCL INFO Channel 01/0 : 45[46000] -> 52[7000] [receive] via NET/Socket/1 +gpua084:3892528:3892588 [0] NCCL INFO Channel 01/0 : 52[7000] -> 45[46000] [send] via NET/Socket/1 +gpua084:3892528:3892588 [0] NCCL INFO Channel 00/0 : 57[46000] -> 52[7000] [receive] via NET/Socket/1 +gpua084:3892528:3892588 [0] NCCL INFO Channel 01/0 : 52[7000] -> 48[7000] [send] via NET/Socket/1 +gpua084:3892528:3892588 [0] NCCL INFO Connected all trees +gpua084:3892528:3892588 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua084:3892528:3892588 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua008:3023445:3023506 [0] NCCL INFO Channel 01/0 : 4[7000] -> 5[46000] via P2P/IPC/read +gpua008:3023445:3023506 [0] NCCL INFO Connected all rings +gpua008:3023445:3023506 [0] NCCL INFO Channel 01/0 : 0[7000] -> 4[7000] [receive] via NET/Socket/1 +gpua008:3023445:3023506 [0] NCCL INFO Channel 00/0 : 4[7000] -> 9[46000] [send] via NET/Socket/1 +gpua008:3023445:3023506 [0] NCCL INFO Channel 01/0 : 4[7000] -> 12[7000] [send] via NET/Socket/1 +gpua008:3023445:3023506 [0] NCCL INFO Channel 01/0 : 12[7000] -> 4[7000] [receive] via NET/Socket/1 +gpua008:3023445:3023506 [0] NCCL INFO Channel 00/0 : 9[46000] -> 4[7000] [receive] via NET/Socket/1 +gpua008:3023445:3023506 [0] NCCL INFO Channel 01/0 : 4[7000] -> 0[7000] [send] via NET/Socket/1 +gpua008:3023445:3023506 [0] NCCL INFO Connected all trees +gpua008:3023445:3023506 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua008:3023445:3023506 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua084:3892528:3892588 [0] NCCL INFO comm 0x56381636b570 rank 52 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua008:3023445:3023506 [0] NCCL INFO comm 0x55ac1f9a2620 rank 4 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +gpua084:3892531:3892531 [3] NCCL INFO cudaDriverVersion 12020 
+gpua084:3892531:3892531 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.84<0> +gpua084:3892531:3892531 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua084:3892531:3892586 [3] NCCL INFO NET/IB : No device found. +gpua084:3892531:3892586 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.84<0> [1]hsn0:141.142.145.84<0> +gpua084:3892531:3892586 [3] NCCL INFO Using network Socket +gpua084:3892531:3892586 [3] NCCL INFO Setting affinity for GPU 3 to ffff +gpua084:3892531:3892586 [3] NCCL INFO Trees [0] -1/-1/-1->55->54 [1] -1/-1/-1->55->54 +gpua084:3892531:3892586 [3] NCCL INFO Channel 00/0 : 55[c7000] -> 56[7000] [send] via NET/Socket/1 +gpua084:3892531:3892586 [3] NCCL INFO Channel 01/0 : 55[c7000] -> 56[7000] [send] via NET/Socket/1 +gpua084:3892531:3892586 [3] NCCL INFO Connected all rings +gpua084:3892531:3892586 [3] NCCL INFO Channel 00/0 : 55[c7000] -> 54[85000] via P2P/IPC/read +gpua083:459827:459827 [0] NCCL INFO cudaDriverVersion 12020 +gpua083:459827:459827 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.83<0> +gpua083:459827:459827 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation +gpua083:459827:459885 [0] NCCL INFO NET/IB : No device found. +gpua083:459827:459885 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.83<0> [1]hsn0:141.142.145.83<0> +gpua083:459827:459885 [0] NCCL INFO Using network Socket +gpua083:459827:459885 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 +gpua083:459827:459885 [0] NCCL INFO Trees [0] 49/56/-1->48->32 [1] 49/-1/-1->48->52 +gpua083:459827:459885 [0] NCCL INFO Channel 00/0 : 47[c7000] -> 48[7000] [receive] via NET/Socket/1 +gpua083:459827:459885 [0] NCCL INFO Channel 01/0 : 47[c7000] -> 48[7000] [receive] via NET/Socket/1 +gpua083:459827:459885 [0] NCCL INFO Channel 00/0 : 48[7000] -> 49[46000] via P2P/IPC/read +gpua083:459827:459885 [0] NCCL INFO Channel 01/0 : 48[7000] -> 49[46000] via P2P/IPC/read +gpua084:3892531:3892586 [3] NCCL INFO Channel 01/0 : 55[c7000] -> 54[85000] via P2P/IPC/read +gpua084:3892531:3892586 [3] NCCL INFO Connected all trees +gpua084:3892531:3892586 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua084:3892531:3892586 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua084:3892531:3892586 [3] NCCL INFO comm 0x5574523e6310 rank 55 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE +gpua083:459827:459885 [0] NCCL INFO Connected all rings +gpua083:459827:459885 [0] NCCL INFO Channel 01/0 : 48[7000] -> 52[7000] [send] via NET/Socket/1 +gpua083:459827:459885 [0] NCCL INFO Channel 00/0 : 48[7000] -> 56[7000] [send] via NET/Socket/1 +gpua083:459827:459885 [0] NCCL INFO Channel 00/0 : 32[7000] -> 48[7000] [receive] via NET/Socket/1 +gpua083:459827:459885 [0] NCCL INFO Channel 00/0 : 48[7000] -> 32[7000] [send] via NET/Socket/1 +gpua083:459827:459885 [0] NCCL INFO Channel 00/0 : 56[7000] -> 48[7000] [receive] via NET/Socket/1 +gpua083:459827:459885 [0] NCCL INFO Channel 01/0 : 52[7000] -> 48[7000] [receive] via NET/Socket/1 +gpua083:459827:459885 [0] NCCL INFO Connected all trees +gpua083:459827:459885 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 +gpua083:459827:459885 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer +gpua083:459827:459885 [0] NCCL INFO comm 0x563eb1461310 rank 48 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE +[gpua007:0/64] 2024-02-02 19:23:16,653 (distributed:1027) INFO: Reducer buckets have been rebuilt in this iteration. 
+[gpua007:0/64] 2024-02-02 19:28:05,502 (trainer:753) INFO: 17epoch:train:1-100batch: iter_time=3.457, forward_time=0.259, loss_ctc=70.186, loss_interctc_layer6=80.927, loss_interctc_layer12=67.659, loss_interctc_layer15=62.477, loss_interctc_layer21=71.989, loss=70.647, backward_time=0.510, grad_norm=82.473, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=9.999e-05, train_time=7.424 +[gpua007:0/64] 2024-02-02 19:33:34,700 (trainer:753) INFO: 17epoch:train:101-200batch: iter_time=9.144e-05, forward_time=0.141, loss_ctc=66.516, loss_interctc_layer6=77.663, loss_interctc_layer12=64.889, loss_interctc_layer15=59.425, loss_interctc_layer21=68.452, loss=67.389, backward_time=0.530, grad_norm=79.255, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.136, optim0_lr0=9.997e-05, train_time=3.292 +[gpua007:0/64] 2024-02-02 19:38:57,797 (trainer:753) INFO: 17epoch:train:201-300batch: iter_time=9.482e-05, forward_time=0.139, loss_ctc=69.576, loss_interctc_layer6=77.480, loss_interctc_layer12=65.158, loss_interctc_layer15=60.038, loss_interctc_layer21=71.719, loss=68.794, backward_time=0.480, grad_norm=76.260, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.136, optim0_lr0=9.995e-05, train_time=3.231 +[gpua007:0/64] 2024-02-02 19:45:15,888 (trainer:753) INFO: 17epoch:train:301-400batch: iter_time=1.061e-04, forward_time=0.212, loss_ctc=95.955, loss_interctc_layer6=101.214, loss_interctc_layer12=84.729, loss_interctc_layer15=78.247, loss_interctc_layer21=98.546, loss=91.738, backward_time=0.630, grad_norm=84.928, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.143, optim0_lr0=9.993e-05, train_time=3.780 +[gpua007:0/64] 2024-02-02 19:50:51,592 (trainer:753) INFO: 17epoch:train:401-500batch: iter_time=9.363e-05, forward_time=0.140, loss_ctc=74.062, loss_interctc_layer6=83.868, loss_interctc_layer12=71.012, loss_interctc_layer15=65.838, loss_interctc_layer21=76.372, loss=74.230, backward_time=0.515, grad_norm=64.713, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=9.991e-05, train_time=3.357 +[gpua007:0/64] 2024-02-02 19:56:36,812 (trainer:753) INFO: 17epoch:train:501-600batch: iter_time=9.604e-05, forward_time=0.140, loss_ctc=82.577, loss_interctc_layer6=88.641, loss_interctc_layer12=75.692, loss_interctc_layer15=69.524, loss_interctc_layer21=85.310, loss=80.349, backward_time=0.533, grad_norm=81.175, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=9.989e-05, train_time=3.452 +[gpua007:0/64] 2024-02-02 20:02:16,932 (trainer:753) INFO: 17epoch:train:601-700batch: iter_time=1.010e-04, forward_time=0.140, loss_ctc=104.394, loss_interctc_layer6=106.344, loss_interctc_layer12=89.821, loss_interctc_layer15=83.321, loss_interctc_layer21=107.360, loss=98.248, backward_time=0.580, grad_norm=85.431, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=9.987e-05, train_time=3.401 +[gpua007:0/64] 2024-02-02 20:07:27,290 (trainer:753) INFO: 17epoch:train:701-800batch: iter_time=1.071e-04, forward_time=0.140, loss_ctc=74.928, loss_interctc_layer6=82.455, loss_interctc_layer12=69.291, loss_interctc_layer15=63.997, loss_interctc_layer21=76.975, loss=73.529, backward_time=0.484, grad_norm=100.769, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=9.985e-05, train_time=3.103 +[gpua007:0/64] 2024-02-02 20:13:03,617 (trainer:753) INFO: 17epoch:train:801-900batch: iter_time=1.060e-04, forward_time=0.181, loss_ctc=95.036, loss_interctc_layer6=103.632, loss_interctc_layer12=86.364, loss_interctc_layer15=79.531, 
loss_interctc_layer21=97.771, loss=92.467, backward_time=0.529, grad_norm=82.449, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.139, optim0_lr0=9.983e-05, train_time=3.363 +[gpua007:0/64] 2024-02-02 20:19:06,189 (trainer:753) INFO: 17epoch:train:901-1000batch: iter_time=1.070e-04, forward_time=0.177, loss_ctc=85.716, loss_interctc_layer6=97.114, loss_interctc_layer12=81.989, loss_interctc_layer15=75.936, loss_interctc_layer21=87.921, loss=85.735, backward_time=0.591, grad_norm=80.851, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.137, optim0_lr0=9.981e-05, train_time=3.624 +[gpua007:0/64] 2024-02-02 20:24:32,353 (trainer:753) INFO: 17epoch:train:1001-1100batch: iter_time=9.171e-05, forward_time=0.141, loss_ctc=97.607, loss_interctc_layer6=103.176, loss_interctc_layer12=87.408, loss_interctc_layer15=81.352, loss_interctc_layer21=100.076, loss=93.924, backward_time=0.499, grad_norm=90.953, clip=100.000, loss_scale=5.476e+30, optim_step_time=0.136, optim0_lr0=9.979e-05, train_time=3.263 +[gpua007:0/64] 2024-02-02 20:29:58,224 (trainer:753) INFO: 17epoch:train:1101-1200batch: iter_time=9.369e-05, forward_time=0.140, loss_ctc=72.241, loss_interctc_layer6=83.008, loss_interctc_layer12=69.466, loss_interctc_layer15=64.060, loss_interctc_layer21=74.403, loss=72.636, backward_time=0.508, grad_norm=64.181, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.977e-05, train_time=3.258 +[gpua007:0/64] 2024-02-02 20:33:01,482 (multiple_iter_factory:32) INFO: Building 1th iter-factory... +[gpua007:0/64] 2024-02-02 20:33:20,113 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-02 20:33:23,498 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.1", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.1", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.1", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.1", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-02 20:33:23,498 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.1, +[gpua007:0/64] 2024-02-02 20:33:23,501 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-02 20:45:34,644 (trainer:753) INFO: 17epoch:train:1201-1300batch: iter_time=2.303, forward_time=0.142, loss_ctc=80.356, loss_interctc_layer6=94.028, loss_interctc_layer12=78.406, loss_interctc_layer15=72.226, loss_interctc_layer21=82.539, loss=81.511, backward_time=0.931, grad_norm=64.644, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.975e-05, train_time=9.364 +[gpua007:0/64] 2024-02-02 21:04:57,059 (trainer:753) INFO: 17epoch:train:1301-1400batch: iter_time=9.918e-05, forward_time=0.142, loss_ctc=65.838, loss_interctc_layer6=75.724, loss_interctc_layer12=63.277, loss_interctc_layer15=58.304, loss_interctc_layer21=67.822, loss=66.193, backward_time=2.172, grad_norm=77.361, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.972e-05, train_time=11.624 +[gpua007:0/64] 2024-02-02 21:10:03,611 (trainer:753) INFO: 17epoch:train:1401-1500batch: iter_time=9.345e-05, forward_time=0.141, loss_ctc=62.980, loss_interctc_layer6=75.843, loss_interctc_layer12=63.333, 
loss_interctc_layer15=58.237, loss_interctc_layer21=64.698, loss=65.018, backward_time=0.447, grad_norm=72.221, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.970e-05, train_time=3.065 +[gpua007:0/64] 2024-02-02 21:14:48,585 (trainer:753) INFO: 17epoch:train:1501-1600batch: iter_time=9.077e-05, forward_time=0.142, loss_ctc=83.613, loss_interctc_layer6=87.353, loss_interctc_layer12=72.990, loss_interctc_layer15=67.367, loss_interctc_layer21=86.365, loss=79.538, backward_time=0.403, grad_norm=68.606, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.968e-05, train_time=2.850 +[gpua007:0/64] 2024-02-02 21:20:17,076 (trainer:753) INFO: 17epoch:train:1601-1700batch: iter_time=9.445e-05, forward_time=0.142, loss_ctc=84.193, loss_interctc_layer6=92.112, loss_interctc_layer12=76.714, loss_interctc_layer15=70.680, loss_interctc_layer21=86.471, loss=82.034, backward_time=0.515, grad_norm=70.405, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.966e-05, train_time=3.285 +[gpua007:0/64] 2024-02-02 21:26:20,368 (trainer:753) INFO: 17epoch:train:1701-1800batch: iter_time=9.398e-05, forward_time=0.142, loss_ctc=84.280, loss_interctc_layer6=89.918, loss_interctc_layer12=75.555, loss_interctc_layer15=69.838, loss_interctc_layer21=86.988, loss=81.316, backward_time=0.548, grad_norm=86.538, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.964e-05, train_time=3.633 +[gpua007:0/64] 2024-02-02 21:31:40,797 (trainer:753) INFO: 17epoch:train:1801-1900batch: iter_time=8.937e-05, forward_time=0.201, loss_ctc=88.877, loss_interctc_layer6=97.692, loss_interctc_layer12=80.832, loss_interctc_layer15=74.538, loss_interctc_layer21=91.238, loss=86.635, backward_time=0.453, grad_norm=99.094, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.962e-05, train_time=3.204 +[gpua007:0/64] 2024-02-02 21:37:18,956 (trainer:753) INFO: 17epoch:train:1901-2000batch: iter_time=8.886e-05, forward_time=0.191, loss_ctc=81.092, loss_interctc_layer6=89.008, loss_interctc_layer12=74.546, loss_interctc_layer15=68.776, loss_interctc_layer21=83.493, loss=79.383, backward_time=0.485, grad_norm=70.130, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=9.960e-05, train_time=3.381 +[gpua007:0/64] 2024-02-02 21:42:36,697 (trainer:753) INFO: 17epoch:train:2001-2100batch: iter_time=9.423e-05, forward_time=0.142, loss_ctc=77.768, loss_interctc_layer6=85.932, loss_interctc_layer12=71.399, loss_interctc_layer15=65.748, loss_interctc_layer21=80.058, loss=76.181, backward_time=0.496, grad_norm=65.657, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.958e-05, train_time=3.178 +[gpua007:0/64] 2024-02-02 21:47:40,891 (trainer:753) INFO: 17epoch:train:2101-2200batch: iter_time=9.191e-05, forward_time=0.143, loss_ctc=101.050, loss_interctc_layer6=107.738, loss_interctc_layer12=90.439, loss_interctc_layer15=83.752, loss_interctc_layer21=104.064, loss=97.409, backward_time=0.405, grad_norm=81.113, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.956e-05, train_time=3.042 +[gpua007:0/64] 2024-02-02 21:52:40,627 (trainer:753) INFO: 17epoch:train:2201-2300batch: iter_time=9.636e-05, forward_time=0.143, loss_ctc=84.381, loss_interctc_layer6=93.474, loss_interctc_layer12=78.660, loss_interctc_layer15=72.586, loss_interctc_layer21=86.622, loss=83.145, backward_time=0.435, grad_norm=77.966, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.954e-05, train_time=2.997 
+[gpua007:0/64] 2024-02-02 21:57:49,903 (trainer:753) INFO: 17epoch:train:2301-2400batch: iter_time=9.414e-05, forward_time=0.143, loss_ctc=90.331, loss_interctc_layer6=97.870, loss_interctc_layer12=83.174, loss_interctc_layer15=77.399, loss_interctc_layer21=92.909, loss=88.337, backward_time=0.445, grad_norm=107.188, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.952e-05, train_time=3.093 +[gpua007:0/64] 2024-02-02 22:02:36,312 (trainer:753) INFO: 17epoch:train:2401-2500batch: iter_time=8.988e-05, forward_time=0.141, loss_ctc=75.597, loss_interctc_layer6=90.848, loss_interctc_layer12=76.115, loss_interctc_layer15=69.905, loss_interctc_layer21=77.587, loss=78.011, backward_time=0.415, grad_norm=73.304, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.950e-05, train_time=2.864 +[gpua007:0/64] 2024-02-02 22:02:56,337 (multiple_iter_factory:32) INFO: Building 2th iter-factory... +[gpua007:0/64] 2024-02-02 22:03:15,031 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-02 22:03:18,475 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.5", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.5", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.5", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.5", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-02 22:03:18,476 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.5, +[gpua007:0/64] 2024-02-02 22:03:18,479 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-02 22:12:19,150 (trainer:753) INFO: 17epoch:train:2501-2600batch: iter_time=2.792, forward_time=0.142, loss_ctc=68.794, loss_interctc_layer6=79.900, loss_interctc_layer12=66.802, loss_interctc_layer15=61.623, loss_interctc_layer21=70.740, loss=69.571, backward_time=0.407, grad_norm=63.850, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.948e-05, train_time=5.828 +[gpua007:0/64] 2024-02-02 22:17:32,250 (trainer:753) INFO: 17epoch:train:2601-2700batch: iter_time=8.660e-05, forward_time=0.141, loss_ctc=65.547, loss_interctc_layer6=75.631, loss_interctc_layer12=62.625, loss_interctc_layer15=57.461, loss_interctc_layer21=67.570, loss=65.767, backward_time=0.504, grad_norm=58.764, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.946e-05, train_time=3.131 +[gpua007:0/64] 2024-02-02 22:22:34,689 (trainer:753) INFO: 17epoch:train:2701-2800batch: iter_time=8.609e-05, forward_time=0.141, loss_ctc=67.491, loss_interctc_layer6=75.931, loss_interctc_layer12=63.370, loss_interctc_layer15=58.247, loss_interctc_layer21=69.616, loss=66.931, backward_time=0.479, grad_norm=70.703, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.944e-05, train_time=3.024 +[gpua007:0/64] 2024-02-02 22:28:01,691 (trainer:753) INFO: 17epoch:train:2801-2900batch: iter_time=8.898e-05, forward_time=0.142, loss_ctc=95.183, loss_interctc_layer6=99.948, loss_interctc_layer12=83.684, loss_interctc_layer15=77.151, loss_interctc_layer21=97.649, loss=90.723, backward_time=0.472, grad_norm=78.256, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.942e-05, 
train_time=3.270 +[gpua007:0/64] 2024-02-02 22:33:11,062 (trainer:753) INFO: 17epoch:train:2901-3000batch: iter_time=1.006e-04, forward_time=0.141, loss_ctc=72.741, loss_interctc_layer6=82.068, loss_interctc_layer12=69.131, loss_interctc_layer15=63.831, loss_interctc_layer21=75.039, loss=72.562, backward_time=0.421, grad_norm=87.066, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.940e-05, train_time=3.093 +[gpua007:0/64] 2024-02-02 22:38:20,197 (trainer:753) INFO: 17epoch:train:3001-3100batch: iter_time=1.003e-04, forward_time=0.142, loss_ctc=77.940, loss_interctc_layer6=86.745, loss_interctc_layer12=72.398, loss_interctc_layer15=66.532, loss_interctc_layer21=80.560, loss=76.835, backward_time=0.455, grad_norm=75.578, clip=100.000, loss_scale=1.095e+31, optim_step_time=0.136, optim0_lr0=9.938e-05, train_time=3.091 +[gpua007:0/64] 2024-02-02 22:40:51,147 (trainer:684) WARNING: The grad norm is nan. Skipping updating the model. +[gpua007:0/64] 2024-02-02 22:43:36,077 (trainer:753) INFO: 17epoch:train:3101-3200batch: iter_time=9.297e-05, forward_time=0.142, loss_ctc=100.194, loss_interctc_layer6=103.362, loss_interctc_layer12=86.306, loss_interctc_layer15=79.545, loss_interctc_layer21=102.901, loss=94.461, backward_time=0.449, grad_norm=84.570, clip=100.000, loss_scale=1.465e+31, optim_step_time=0.136, optim0_lr0=9.936e-05, train_time=3.159 +[gpua007:0/64] 2024-02-02 22:48:49,452 (trainer:753) INFO: 17epoch:train:3201-3300batch: iter_time=9.851e-05, forward_time=0.142, loss_ctc=73.521, loss_interctc_layer6=81.353, loss_interctc_layer12=68.010, loss_interctc_layer15=62.851, loss_interctc_layer21=75.537, loss=72.255, backward_time=0.466, grad_norm=64.580, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.933e-05, train_time=3.134 +[gpua007:0/64] 2024-02-02 22:54:08,075 (trainer:753) INFO: 17epoch:train:3301-3400batch: iter_time=1.037e-04, forward_time=0.143, loss_ctc=95.095, loss_interctc_layer6=102.657, loss_interctc_layer12=85.458, loss_interctc_layer15=78.704, loss_interctc_layer21=97.814, loss=91.946, backward_time=0.473, grad_norm=71.672, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.931e-05, train_time=3.186 +[gpua007:0/64] 2024-02-02 22:59:44,939 (trainer:753) INFO: 17epoch:train:3401-3500batch: iter_time=1.005e-04, forward_time=0.143, loss_ctc=83.450, loss_interctc_layer6=96.240, loss_interctc_layer12=80.974, loss_interctc_layer15=74.851, loss_interctc_layer21=85.846, loss=84.272, backward_time=0.453, grad_norm=65.468, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.929e-05, train_time=3.368 +[gpua007:0/64] 2024-02-02 23:04:40,514 (trainer:753) INFO: 17epoch:train:3501-3600batch: iter_time=9.218e-05, forward_time=0.143, loss_ctc=96.113, loss_interctc_layer6=101.265, loss_interctc_layer12=85.312, loss_interctc_layer15=79.401, loss_interctc_layer21=98.126, loss=92.044, backward_time=0.436, grad_norm=103.159, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.927e-05, train_time=2.956 +[gpua007:0/64] 2024-02-02 23:10:07,897 (trainer:753) INFO: 17epoch:train:3601-3700batch: iter_time=8.834e-05, forward_time=0.143, loss_ctc=70.715, loss_interctc_layer6=81.957, loss_interctc_layer12=68.258, loss_interctc_layer15=63.034, loss_interctc_layer21=72.475, loss=71.288, backward_time=0.483, grad_norm=72.413, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.925e-05, train_time=3.274 +[gpua007:0/64] 2024-02-02 23:13:22,958 (multiple_iter_factory:32) INFO: Building 
3th iter-factory... +[gpua007:0/64] 2024-02-02 23:13:41,621 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-02 23:13:45,010 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.3", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.3", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.3", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.3", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-02 23:13:45,010 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.3, +[gpua007:0/64] 2024-02-02 23:13:45,014 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-02 23:24:37,979 (trainer:753) INFO: 17epoch:train:3701-3800batch: iter_time=2.678, forward_time=0.250, loss_ctc=80.109, loss_interctc_layer6=93.245, loss_interctc_layer12=77.793, loss_interctc_layer15=71.541, loss_interctc_layer21=82.475, loss=81.033, backward_time=0.642, grad_norm=72.399, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.144, optim0_lr0=9.923e-05, train_time=8.701 +[gpua007:0/64] 2024-02-02 23:33:04,555 (trainer:753) INFO: 17epoch:train:3801-3900batch: iter_time=9.457e-05, forward_time=0.141, loss_ctc=64.281, loss_interctc_layer6=75.401, loss_interctc_layer12=62.724, loss_interctc_layer15=57.559, loss_interctc_layer21=66.277, loss=65.248, backward_time=0.786, grad_norm=56.135, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.921e-05, train_time=5.066 +[gpua007:0/64] 2024-02-02 23:40:59,581 (trainer:753) INFO: 17epoch:train:3901-4000batch: iter_time=9.889e-05, forward_time=0.142, loss_ctc=60.819, loss_interctc_layer6=73.853, loss_interctc_layer12=61.268, loss_interctc_layer15=56.356, loss_interctc_layer21=62.585, loss=62.976, backward_time=0.940, grad_norm=57.024, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.919e-05, train_time=4.750 +[gpua007:0/64] 2024-02-02 23:47:52,409 (trainer:753) INFO: 17epoch:train:4001-4100batch: iter_time=2.939e-04, forward_time=0.197, loss_ctc=83.557, loss_interctc_layer6=87.400, loss_interctc_layer12=73.234, loss_interctc_layer15=67.268, loss_interctc_layer21=85.831, loss=79.458, backward_time=0.699, grad_norm=68.337, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.142, optim0_lr0=9.917e-05, train_time=4.127 +[gpua007:0/64] 2024-02-02 23:55:08,005 (trainer:753) INFO: 17epoch:train:4101-4200batch: iter_time=1.116e-04, forward_time=0.144, loss_ctc=83.130, loss_interctc_layer6=91.399, loss_interctc_layer12=76.134, loss_interctc_layer15=69.853, loss_interctc_layer21=85.396, loss=81.182, backward_time=0.692, grad_norm=63.994, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.915e-05, train_time=4.356 +[gpua007:0/64] 2024-02-03 00:01:09,415 (trainer:753) INFO: 17epoch:train:4201-4300batch: iter_time=1.086e-04, forward_time=0.142, loss_ctc=84.464, loss_interctc_layer6=90.132, loss_interctc_layer12=76.356, loss_interctc_layer15=70.884, loss_interctc_layer21=87.183, loss=81.804, backward_time=0.556, grad_norm=74.783, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.913e-05, train_time=3.614 +[gpua007:0/64] 2024-02-03 00:08:00,897 (trainer:753) INFO: 
17epoch:train:4301-4400batch: iter_time=1.043e-04, forward_time=0.175, loss_ctc=88.429, loss_interctc_layer6=95.450, loss_interctc_layer12=79.425, loss_interctc_layer15=72.866, loss_interctc_layer21=91.092, loss=85.452, backward_time=0.743, grad_norm=76.441, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=9.911e-05, train_time=4.114 +[gpua007:0/64] 2024-02-03 00:14:10,046 (trainer:753) INFO: 17epoch:train:4401-4500batch: iter_time=9.639e-05, forward_time=0.207, loss_ctc=80.282, loss_interctc_layer6=88.019, loss_interctc_layer12=73.570, loss_interctc_layer15=67.623, loss_interctc_layer21=82.710, loss=78.441, backward_time=0.743, grad_norm=63.959, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.909e-05, train_time=3.691 +[gpua007:0/64] 2024-02-03 00:19:58,324 (trainer:753) INFO: 17epoch:train:4501-4600batch: iter_time=1.027e-04, forward_time=0.142, loss_ctc=77.541, loss_interctc_layer6=85.232, loss_interctc_layer12=71.161, loss_interctc_layer15=65.405, loss_interctc_layer21=79.839, loss=75.836, backward_time=0.564, grad_norm=73.374, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.907e-05, train_time=3.483 +[gpua007:0/64] 2024-02-03 00:26:33,238 (trainer:753) INFO: 17epoch:train:4601-4700batch: iter_time=1.028e-04, forward_time=0.143, loss_ctc=100.119, loss_interctc_layer6=107.108, loss_interctc_layer12=89.795, loss_interctc_layer15=82.915, loss_interctc_layer21=102.851, loss=96.557, backward_time=0.693, grad_norm=73.366, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.905e-05, train_time=3.949 +[gpua007:0/64] 2024-02-03 00:32:43,192 (trainer:753) INFO: 17epoch:train:4701-4800batch: iter_time=1.052e-04, forward_time=0.166, loss_ctc=83.392, loss_interctc_layer6=92.679, loss_interctc_layer12=77.603, loss_interctc_layer15=71.641, loss_interctc_layer21=85.735, loss=82.210, backward_time=0.632, grad_norm=70.252, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=9.903e-05, train_time=3.699 +[gpua007:0/64] 2024-02-03 00:38:56,115 (trainer:753) INFO: 17epoch:train:4801-4900batch: iter_time=6.127e-04, forward_time=0.224, loss_ctc=88.934, loss_interctc_layer6=96.974, loss_interctc_layer12=82.390, loss_interctc_layer15=75.958, loss_interctc_layer21=91.791, loss=87.209, backward_time=0.575, grad_norm=83.828, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=9.901e-05, train_time=3.728 +[gpua007:0/64] 2024-02-03 00:45:18,658 (trainer:753) INFO: 17epoch:train:4901-5000batch: iter_time=1.018e-04, forward_time=0.143, loss_ctc=75.361, loss_interctc_layer6=90.305, loss_interctc_layer12=75.199, loss_interctc_layer15=68.954, loss_interctc_layer21=77.488, loss=77.461, backward_time=0.717, grad_norm=69.787, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.899e-05, train_time=3.826 +[gpua007:0/64] 2024-02-03 00:45:33,544 (multiple_iter_factory:32) INFO: Building 4th iter-factory... 
+[gpua007:0/64] 2024-02-03 00:45:51,884 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 00:45:55,508 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.4", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.4", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.4", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.4", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 00:45:55,508 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.4, +[gpua007:0/64] 2024-02-03 00:45:55,512 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 00:58:50,681 (trainer:753) INFO: 17epoch:train:5001-5100batch: iter_time=3.286, forward_time=0.260, loss_ctc=73.148, loss_interctc_layer6=79.510, loss_interctc_layer12=66.509, loss_interctc_layer15=61.095, loss_interctc_layer21=75.704, loss=71.193, backward_time=0.409, grad_norm=57.649, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.149, optim0_lr0=9.897e-05, train_time=8.119 +[gpua007:0/64] 2024-02-03 01:04:14,556 (trainer:753) INFO: 17epoch:train:5101-5200batch: iter_time=8.846e-05, forward_time=0.267, loss_ctc=68.512, loss_interctc_layer6=75.703, loss_interctc_layer12=62.717, loss_interctc_layer15=57.478, loss_interctc_layer21=70.570, loss=66.996, backward_time=0.432, grad_norm=69.120, clip=100.000, loss_scale=1.572e+31, optim_step_time=0.140, optim0_lr0=9.895e-05, train_time=3.239 +[gpua007:0/64] 2024-02-03 01:10:09,777 (trainer:753) INFO: 17epoch:train:5201-5300batch: iter_time=9.028e-05, forward_time=0.280, loss_ctc=74.228, loss_interctc_layer6=74.987, loss_interctc_layer12=62.542, loss_interctc_layer15=57.378, loss_interctc_layer21=76.916, loss=69.210, backward_time=0.524, grad_norm=61.702, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.143, optim0_lr0=9.893e-05, train_time=3.552 +[gpua007:0/64] 2024-02-03 01:15:18,185 (trainer:753) INFO: 17epoch:train:5301-5400batch: iter_time=1.014e-04, forward_time=0.249, loss_ctc=97.166, loss_interctc_layer6=98.290, loss_interctc_layer12=81.741, loss_interctc_layer15=75.026, loss_interctc_layer21=100.216, loss=90.488, backward_time=0.445, grad_norm=94.141, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.141, optim0_lr0=9.891e-05, train_time=3.083 +[gpua007:0/64] 2024-02-03 01:21:52,411 (trainer:753) INFO: 17epoch:train:5401-5500batch: iter_time=1.103e-04, forward_time=0.242, loss_ctc=74.867, loss_interctc_layer6=81.889, loss_interctc_layer12=68.694, loss_interctc_layer15=63.482, loss_interctc_layer21=77.267, loss=73.240, backward_time=0.721, grad_norm=67.802, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.147, optim0_lr0=9.889e-05, train_time=3.942 +[gpua007:0/64] 2024-02-03 01:27:30,121 (trainer:753) INFO: 17epoch:train:5501-5600batch: iter_time=7.000e-04, forward_time=0.260, loss_ctc=83.841, loss_interctc_layer6=86.824, loss_interctc_layer12=72.506, loss_interctc_layer15=66.388, loss_interctc_layer21=86.406, loss=79.193, backward_time=0.570, grad_norm=89.408, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.143, optim0_lr0=9.887e-05, train_time=3.375 +[gpua007:0/64] 2024-02-03 01:32:56,915 (trainer:753) INFO: 17epoch:train:5601-5700batch: 
iter_time=9.416e-05, forward_time=0.310, loss_ctc=103.561, loss_interctc_layer6=101.401, loss_interctc_layer12=85.071, loss_interctc_layer15=78.282, loss_interctc_layer21=106.899, loss=95.043, backward_time=0.489, grad_norm=98.924, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.150, optim0_lr0=9.885e-05, train_time=3.270 +[gpua007:0/64] 2024-02-03 01:39:18,164 (trainer:753) INFO: 17epoch:train:5701-5800batch: iter_time=2.541e-04, forward_time=0.300, loss_ctc=78.684, loss_interctc_layer6=80.682, loss_interctc_layer12=67.112, loss_interctc_layer15=61.824, loss_interctc_layer21=80.995, loss=73.859, backward_time=0.608, grad_norm=69.078, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.146, optim0_lr0=9.883e-05, train_time=3.812 +[gpua007:0/64] 2024-02-03 01:45:08,549 (trainer:753) INFO: 17epoch:train:5801-5900batch: iter_time=0.002, forward_time=0.280, loss_ctc=97.408, loss_interctc_layer6=101.289, loss_interctc_layer12=84.045, loss_interctc_layer15=77.236, loss_interctc_layer21=100.458, loss=92.087, backward_time=0.594, grad_norm=79.906, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.148, optim0_lr0=9.881e-05, train_time=3.502 +[gpua007:0/64] 2024-02-03 01:50:44,881 (trainer:753) INFO: 17epoch:train:5901-6000batch: iter_time=0.001, forward_time=0.352, loss_ctc=85.826, loss_interctc_layer6=95.453, loss_interctc_layer12=80.248, loss_interctc_layer15=74.216, loss_interctc_layer21=88.302, loss=84.809, backward_time=0.571, grad_norm=117.925, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.157, optim0_lr0=9.879e-05, train_time=3.365 +[gpua007:0/64] 2024-02-03 01:56:41,143 (trainer:753) INFO: 17epoch:train:6001-6100batch: iter_time=0.001, forward_time=0.357, loss_ctc=100.001, loss_interctc_layer6=101.033, loss_interctc_layer12=85.166, loss_interctc_layer15=78.857, loss_interctc_layer21=103.176, loss=93.647, backward_time=0.562, grad_norm=105.020, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.152, optim0_lr0=9.877e-05, train_time=3.562 +[gpua007:0/64] 2024-02-03 02:03:18,388 (trainer:753) INFO: 17epoch:train:6101-6200batch: iter_time=0.001, forward_time=0.325, loss_ctc=73.498, loss_interctc_layer6=81.466, loss_interctc_layer12=68.054, loss_interctc_layer15=62.542, loss_interctc_layer21=75.786, loss=72.269, backward_time=0.629, grad_norm=59.377, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.150, optim0_lr0=9.875e-05, train_time=3.974 +[gpua007:0/64] 2024-02-03 02:06:06,375 (multiple_iter_factory:32) INFO: Building 5th iter-factory... 
+[gpua007:0/64] 2024-02-03 02:06:25,478 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 02:06:29,009 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.11", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.11", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.11", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.11", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 02:06:29,009 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.11, +[gpua007:0/64] 2024-02-03 02:06:29,012 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 02:15:56,067 (trainer:753) INFO: 17epoch:train:6201-6300batch: iter_time=2.687, forward_time=0.259, loss_ctc=82.435, loss_interctc_layer6=92.691, loss_interctc_layer12=77.345, loss_interctc_layer15=70.986, loss_interctc_layer21=84.983, loss=81.688, backward_time=0.429, grad_norm=76.465, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.143, optim0_lr0=9.873e-05, train_time=7.576 +[gpua007:0/64] 2024-02-03 02:20:50,126 (trainer:753) INFO: 17epoch:train:6301-6400batch: iter_time=9.269e-05, forward_time=0.141, loss_ctc=64.499, loss_interctc_layer6=75.227, loss_interctc_layer12=62.456, loss_interctc_layer15=57.384, loss_interctc_layer21=66.575, loss=65.228, backward_time=0.420, grad_norm=102.994, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.871e-05, train_time=2.941 +[gpua007:0/64] 2024-02-03 02:26:05,318 (trainer:753) INFO: 17epoch:train:6401-6500batch: iter_time=9.273e-05, forward_time=0.141, loss_ctc=61.241, loss_interctc_layer6=74.214, loss_interctc_layer12=61.787, loss_interctc_layer15=56.755, loss_interctc_layer21=62.999, loss=63.399, backward_time=0.458, grad_norm=60.654, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.869e-05, train_time=3.152 +[gpua007:0/64] 2024-02-03 02:31:43,127 (trainer:753) INFO: 17epoch:train:6501-6600batch: iter_time=9.414e-05, forward_time=0.224, loss_ctc=82.316, loss_interctc_layer6=86.530, loss_interctc_layer12=72.179, loss_interctc_layer15=66.441, loss_interctc_layer21=85.176, loss=78.528, backward_time=0.511, grad_norm=128.207, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.152, optim0_lr0=9.867e-05, train_time=3.377 +[gpua007:0/64] 2024-02-03 02:37:04,393 (trainer:753) INFO: 17epoch:train:6601-6700batch: iter_time=1.038e-04, forward_time=0.142, loss_ctc=82.515, loss_interctc_layer6=91.074, loss_interctc_layer12=75.631, loss_interctc_layer15=69.466, loss_interctc_layer21=85.494, loss=80.836, backward_time=0.438, grad_norm=79.319, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.865e-05, train_time=3.213 +[gpua007:0/64] 2024-02-03 02:42:26,919 (trainer:753) INFO: 17epoch:train:6701-6800batch: iter_time=1.044e-04, forward_time=0.198, loss_ctc=84.625, loss_interctc_layer6=90.125, loss_interctc_layer12=76.446, loss_interctc_layer15=70.196, loss_interctc_layer21=87.717, loss=81.822, backward_time=0.486, grad_norm=82.974, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.145, optim0_lr0=9.863e-05, train_time=3.225 +[gpua007:0/64] 2024-02-03 02:48:04,947 (trainer:753) INFO: 
17epoch:train:6801-6900batch: iter_time=9.794e-05, forward_time=0.142, loss_ctc=87.261, loss_interctc_layer6=94.358, loss_interctc_layer12=78.294, loss_interctc_layer15=71.692, loss_interctc_layer21=89.864, loss=84.294, backward_time=0.493, grad_norm=74.654, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.861e-05, train_time=3.379 +[gpua007:0/64] 2024-02-03 02:54:03,529 (trainer:753) INFO: 17epoch:train:6901-7000batch: iter_time=9.365e-05, forward_time=0.225, loss_ctc=80.339, loss_interctc_layer6=87.736, loss_interctc_layer12=73.322, loss_interctc_layer15=67.503, loss_interctc_layer21=82.748, loss=78.329, backward_time=0.576, grad_norm=65.180, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.141, optim0_lr0=9.859e-05, train_time=3.586 +[gpua007:0/64] 2024-02-03 02:59:07,239 (trainer:753) INFO: 17epoch:train:7001-7100batch: iter_time=9.015e-05, forward_time=0.141, loss_ctc=77.293, loss_interctc_layer6=84.687, loss_interctc_layer12=70.418, loss_interctc_layer15=64.735, loss_interctc_layer21=79.680, loss=75.363, backward_time=0.456, grad_norm=61.704, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.857e-05, train_time=3.037 +[gpua007:0/64] 2024-02-03 03:04:28,670 (trainer:753) INFO: 17epoch:train:7101-7200batch: iter_time=9.734e-05, forward_time=0.190, loss_ctc=99.994, loss_interctc_layer6=106.616, loss_interctc_layer12=89.654, loss_interctc_layer15=82.958, loss_interctc_layer21=102.740, loss=96.392, backward_time=0.556, grad_norm=102.992, clip=100.000, loss_scale=3.144e+31, optim_step_time=0.143, optim0_lr0=9.855e-05, train_time=3.214 +[gpua007:0/64] 2024-02-03 03:10:29,497 (trainer:753) INFO: 17epoch:train:7201-7300batch: iter_time=9.771e-05, forward_time=0.224, loss_ctc=83.217, loss_interctc_layer6=92.478, loss_interctc_layer12=77.735, loss_interctc_layer15=71.455, loss_interctc_layer21=85.506, loss=82.078, backward_time=0.510, grad_norm=64.519, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.853e-05, train_time=3.609 +[gpua007:0/64] 2024-02-03 03:16:11,060 (trainer:753) INFO: 17epoch:train:7301-7400batch: iter_time=9.787e-05, forward_time=0.172, loss_ctc=87.748, loss_interctc_layer6=95.365, loss_interctc_layer12=80.926, loss_interctc_layer15=75.380, loss_interctc_layer21=90.127, loss=85.909, backward_time=0.469, grad_norm=75.371, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.851e-05, train_time=3.415 +[gpua007:0/64] 2024-02-03 03:21:53,710 (trainer:753) INFO: 17epoch:train:7401-7500batch: iter_time=4.857e-04, forward_time=0.201, loss_ctc=74.684, loss_interctc_layer6=89.880, loss_interctc_layer12=74.857, loss_interctc_layer15=68.767, loss_interctc_layer21=76.938, loss=77.025, backward_time=0.536, grad_norm=85.148, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.849e-05, train_time=3.425 +[gpua007:0/64] 2024-02-03 03:22:07,065 (multiple_iter_factory:32) INFO: Building 6th iter-factory... 
+[gpua007:0/64] 2024-02-03 03:22:25,594 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 03:22:29,045 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.0", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.0", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.0", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.0", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 03:22:29,046 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.0, +[gpua007:0/64] 2024-02-03 03:22:29,049 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 03:37:37,465 (trainer:753) INFO: 17epoch:train:7501-7600batch: iter_time=2.787, forward_time=0.142, loss_ctc=72.565, loss_interctc_layer6=79.384, loss_interctc_layer12=66.289, loss_interctc_layer15=61.003, loss_interctc_layer21=74.829, loss=70.814, backward_time=0.778, grad_norm=61.714, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.847e-05, train_time=9.439 +[gpua007:0/64] 2024-02-03 03:46:58,224 (trainer:753) INFO: 17epoch:train:7601-7700batch: iter_time=8.960e-05, forward_time=0.142, loss_ctc=67.489, loss_interctc_layer6=75.073, loss_interctc_layer12=62.017, loss_interctc_layer15=56.740, loss_interctc_layer21=69.688, loss=66.201, backward_time=1.118, grad_norm=60.603, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.845e-05, train_time=5.607 +[gpua007:0/64] 2024-02-03 03:54:51,932 (trainer:753) INFO: 17epoch:train:7701-7800batch: iter_time=9.925e-05, forward_time=0.212, loss_ctc=73.456, loss_interctc_layer6=75.126, loss_interctc_layer12=62.500, loss_interctc_layer15=57.352, loss_interctc_layer21=75.985, loss=68.884, backward_time=0.883, grad_norm=65.698, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.142, optim0_lr0=9.843e-05, train_time=4.736 +[gpua007:0/64] 2024-02-03 04:01:42,739 (trainer:753) INFO: 17epoch:train:7801-7900batch: iter_time=5.935e-04, forward_time=0.194, loss_ctc=97.534, loss_interctc_layer6=98.455, loss_interctc_layer12=82.256, loss_interctc_layer15=75.738, loss_interctc_layer21=100.505, loss=90.898, backward_time=0.692, grad_norm=70.428, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.841e-05, train_time=4.109 +[gpua007:0/64] 2024-02-03 04:07:53,624 (trainer:753) INFO: 17epoch:train:7901-8000batch: iter_time=1.010e-04, forward_time=0.142, loss_ctc=74.108, loss_interctc_layer6=81.060, loss_interctc_layer12=68.195, loss_interctc_layer15=62.834, loss_interctc_layer21=76.253, loss=72.490, backward_time=0.543, grad_norm=67.248, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.839e-05, train_time=3.708 +[gpua007:0/64] 2024-02-03 04:13:54,369 (trainer:753) INFO: 17epoch:train:8001-8100batch: iter_time=9.537e-05, forward_time=0.142, loss_ctc=82.702, loss_interctc_layer6=85.625, loss_interctc_layer12=71.290, loss_interctc_layer15=65.572, loss_interctc_layer21=85.224, loss=78.083, backward_time=0.547, grad_norm=77.059, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.837e-05, train_time=3.608 +[gpua007:0/64] 2024-02-03 04:19:56,760 (trainer:753) INFO: 17epoch:train:8101-8200batch: 
iter_time=9.230e-05, forward_time=0.142, loss_ctc=103.139, loss_interctc_layer6=100.871, loss_interctc_layer12=84.012, loss_interctc_layer15=77.326, loss_interctc_layer21=105.550, loss=94.180, backward_time=0.586, grad_norm=79.420, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.835e-05, train_time=3.624 +[gpua007:0/64] 2024-02-03 04:25:54,597 (trainer:753) INFO: 17epoch:train:8201-8300batch: iter_time=4.284e-04, forward_time=0.216, loss_ctc=77.672, loss_interctc_layer6=80.191, loss_interctc_layer12=66.939, loss_interctc_layer15=61.524, loss_interctc_layer21=80.116, loss=73.288, backward_time=0.562, grad_norm=68.098, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=9.833e-05, train_time=3.577 +[gpua007:0/64] 2024-02-03 04:31:57,301 (trainer:753) INFO: 17epoch:train:8301-8400batch: iter_time=9.859e-05, forward_time=0.160, loss_ctc=98.142, loss_interctc_layer6=102.132, loss_interctc_layer12=84.947, loss_interctc_layer15=77.901, loss_interctc_layer21=101.269, loss=92.878, backward_time=0.544, grad_norm=87.422, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.831e-05, train_time=3.627 +[gpua007:0/64] 2024-02-03 04:38:06,123 (trainer:753) INFO: 17epoch:train:8401-8500batch: iter_time=9.247e-05, forward_time=0.143, loss_ctc=86.721, loss_interctc_layer6=95.711, loss_interctc_layer12=80.522, loss_interctc_layer15=74.419, loss_interctc_layer21=89.457, loss=85.366, backward_time=0.558, grad_norm=74.325, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.829e-05, train_time=3.688 +[gpua007:0/64] 2024-02-03 04:44:03,459 (trainer:753) INFO: 17epoch:train:8501-8600batch: iter_time=1.046e-04, forward_time=0.143, loss_ctc=101.014, loss_interctc_layer6=101.640, loss_interctc_layer12=85.352, loss_interctc_layer15=78.875, loss_interctc_layer21=103.620, loss=94.100, backward_time=0.600, grad_norm=112.513, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.827e-05, train_time=3.572 +[gpua007:0/64] 2024-02-03 04:49:45,886 (trainer:753) INFO: 17epoch:train:8601-8700batch: iter_time=1.072e-04, forward_time=0.142, loss_ctc=73.579, loss_interctc_layer6=81.137, loss_interctc_layer12=67.713, loss_interctc_layer15=62.196, loss_interctc_layer21=75.524, loss=72.030, backward_time=0.580, grad_norm=63.995, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.825e-05, train_time=3.425 +[gpua007:0/64] 2024-02-03 04:52:45,718 (multiple_iter_factory:32) INFO: Building 7th iter-factory... 
+[gpua007:0/64] 2024-02-03 04:53:04,042 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 04:53:07,414 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.10", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.10", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.10", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.10", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 04:53:07,414 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.10, +[gpua007:0/64] 2024-02-03 04:53:07,417 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 05:01:24,211 (trainer:753) INFO: 17epoch:train:8701-8800batch: iter_time=2.368, forward_time=0.153, loss_ctc=83.554, loss_interctc_layer6=93.169, loss_interctc_layer12=77.692, loss_interctc_layer15=71.402, loss_interctc_layer21=86.031, loss=82.370, backward_time=0.466, grad_norm=62.989, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.823e-05, train_time=6.983 +[gpua007:0/64] 2024-02-03 05:06:40,045 (trainer:753) INFO: 17epoch:train:8801-8900batch: iter_time=9.179e-05, forward_time=0.144, loss_ctc=68.194, loss_interctc_layer6=74.576, loss_interctc_layer12=61.794, loss_interctc_layer15=56.550, loss_interctc_layer21=70.317, loss=66.286, backward_time=0.418, grad_norm=62.322, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.821e-05, train_time=3.158 +[gpua007:0/64] 2024-02-03 05:12:07,812 (trainer:753) INFO: 17epoch:train:8901-9000batch: iter_time=9.074e-05, forward_time=0.187, loss_ctc=64.011, loss_interctc_layer6=74.659, loss_interctc_layer12=62.097, loss_interctc_layer15=57.015, loss_interctc_layer21=65.819, loss=64.720, backward_time=0.501, grad_norm=59.765, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.145, optim0_lr0=9.819e-05, train_time=3.277 +[gpua007:0/64] 2024-02-03 05:17:35,953 (trainer:753) INFO: 17epoch:train:9001-9100batch: iter_time=9.074e-05, forward_time=0.160, loss_ctc=89.945, loss_interctc_layer6=86.453, loss_interctc_layer12=72.269, loss_interctc_layer15=66.415, loss_interctc_layer21=93.133, loss=81.643, backward_time=0.469, grad_norm=90.802, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.817e-05, train_time=3.281 +[gpua007:0/64] 2024-02-03 05:22:42,457 (trainer:753) INFO: 17epoch:train:9101-9200batch: iter_time=8.879e-05, forward_time=0.144, loss_ctc=85.078, loss_interctc_layer6=90.614, loss_interctc_layer12=75.445, loss_interctc_layer15=69.161, loss_interctc_layer21=87.460, loss=81.552, backward_time=0.420, grad_norm=76.041, clip=100.000, loss_scale=6.288e+31, optim_step_time=0.136, optim0_lr0=9.815e-05, train_time=3.065 +[gpua007:0/64] 2024-02-03 05:28:35,098 (trainer:753) INFO: 17epoch:train:9201-9300batch: iter_time=8.950e-05, forward_time=0.141, loss_ctc=91.138, loss_interctc_layer6=89.203, loss_interctc_layer12=75.053, loss_interctc_layer15=69.130, loss_interctc_layer21=94.095, loss=83.724, backward_time=0.529, grad_norm=96.027, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.136, optim0_lr0=9.813e-05, train_time=3.526 +[gpua007:0/64] 2024-02-03 05:33:49,776 (trainer:753) INFO: 
17epoch:train:9301-9400batch: iter_time=9.114e-05, forward_time=0.142, loss_ctc=86.787, loss_interctc_layer6=94.504, loss_interctc_layer12=78.205, loss_interctc_layer15=71.865, loss_interctc_layer21=88.872, loss=84.047, backward_time=0.429, grad_norm=83.350, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.136, optim0_lr0=9.811e-05, train_time=3.145 +[gpua007:0/64] 2024-02-03 05:39:18,308 (trainer:753) INFO: 17epoch:train:9401-9500batch: iter_time=9.265e-05, forward_time=0.147, loss_ctc=85.141, loss_interctc_layer6=86.510, loss_interctc_layer12=72.056, loss_interctc_layer15=66.223, loss_interctc_layer21=87.746, loss=79.535, backward_time=0.515, grad_norm=78.791, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.809e-05, train_time=3.286 +[gpua007:0/64] 2024-02-03 05:44:48,477 (trainer:753) INFO: 17epoch:train:9501-9600batch: iter_time=8.882e-05, forward_time=0.152, loss_ctc=80.102, loss_interctc_layer6=84.940, loss_interctc_layer12=70.609, loss_interctc_layer15=64.834, loss_interctc_layer21=82.563, loss=76.610, backward_time=0.487, grad_norm=85.150, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.807e-05, train_time=3.301 +[gpua007:0/64] 2024-02-03 05:50:34,218 (trainer:753) INFO: 17epoch:train:9601-9700batch: iter_time=9.359e-05, forward_time=0.176, loss_ctc=103.990, loss_interctc_layer6=107.010, loss_interctc_layer12=89.801, loss_interctc_layer15=82.961, loss_interctc_layer21=107.132, loss=98.179, backward_time=0.519, grad_norm=71.748, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.805e-05, train_time=3.457 +[gpua007:0/64] 2024-02-03 05:55:48,575 (trainer:753) INFO: 17epoch:train:9701-9800batch: iter_time=3.431e-04, forward_time=0.177, loss_ctc=86.669, loss_interctc_layer6=92.515, loss_interctc_layer12=77.343, loss_interctc_layer15=71.640, loss_interctc_layer21=89.404, loss=83.514, backward_time=0.445, grad_norm=69.417, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.139, optim0_lr0=9.803e-05, train_time=3.143 +[gpua007:0/64] 2024-02-03 06:01:27,986 (trainer:753) INFO: 17epoch:train:9801-9900batch: iter_time=9.291e-05, forward_time=0.161, loss_ctc=93.501, loss_interctc_layer6=95.049, loss_interctc_layer12=79.929, loss_interctc_layer15=74.046, loss_interctc_layer21=95.798, loss=87.665, backward_time=0.493, grad_norm=89.018, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.136, optim0_lr0=9.801e-05, train_time=3.393 +[gpua007:0/64] 2024-02-03 06:07:01,229 (trainer:753) INFO: 17epoch:train:9901-10000batch: iter_time=8.605e-05, forward_time=0.142, loss_ctc=77.983, loss_interctc_layer6=90.112, loss_interctc_layer12=74.868, loss_interctc_layer15=68.662, loss_interctc_layer21=80.425, loss=78.410, backward_time=0.481, grad_norm=70.439, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.136, optim0_lr0=9.799e-05, train_time=3.333 +[gpua007:0/64] 2024-02-03 06:07:21,229 (multiple_iter_factory:32) INFO: Building 8th iter-factory... 
+[gpua007:0/64] 2024-02-03 06:07:39,729 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4')
+[gpua007:0/64] 2024-02-03 06:07:43,168 (abs_task:1660) INFO: [train] dataset:
+ESPnetDataset(
+ speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.8", "type": "kaldi_ark"}
+ text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.8", "type": "text"}
+ text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.8", "type": "text"}
+ text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.8", "type": "text"}
+ preprocess: )
+[gpua007:0/64] 2024-02-03 06:07:43,168 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.8,
+[gpua007:0/64] 2024-02-03 06:07:43,171 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257
+[gpua007:0/64] 2024-02-03 06:21:24,701 (trainer:753) INFO: 17epoch:train:10001-10100batch: iter_time=2.700, forward_time=0.142, loss_ctc=72.150, loss_interctc_layer6=79.828, loss_interctc_layer12=66.588, loss_interctc_layer15=61.258, loss_interctc_layer21=74.339, loss=70.833, backward_time=0.436, grad_norm=58.390, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.797e-05, train_time=8.634
+[gpua007:0/64] 2024-02-03 06:27:11,525 (trainer:753) INFO: 17epoch:train:10101-10200batch: iter_time=9.194e-05, forward_time=0.142, loss_ctc=66.540, loss_interctc_layer6=74.313, loss_interctc_layer12=61.384, loss_interctc_layer15=56.112, loss_interctc_layer21=68.501, loss=65.370, backward_time=0.501, grad_norm=58.612, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.796e-05, train_time=3.468
+[gpua007:0/64] 2024-02-03 06:28:13,910 (trainer:684) WARNING: The grad norm is nan. Skipping updating the model.
+[gpua007:0/64] 2024-02-03 06:32:38,464 (trainer:753) INFO: 17epoch:train:10201-10300batch: iter_time=8.827e-05, forward_time=0.141, loss_ctc=72.722, loss_interctc_layer6=74.818, loss_interctc_layer12=62.252, loss_interctc_layer15=57.208, loss_interctc_layer21=75.103, loss=68.421, backward_time=0.532, grad_norm=75.348, clip=100.000, loss_scale=4.876e+31, optim_step_time=0.137, optim0_lr0=9.794e-05, train_time=3.269 +[gpua007:0/64] 2024-02-03 06:38:22,600 (trainer:753) INFO: 17epoch:train:10301-10400batch: iter_time=9.591e-05, forward_time=0.145, loss_ctc=96.809, loss_interctc_layer6=98.328, loss_interctc_layer12=81.960, loss_interctc_layer15=75.287, loss_interctc_layer21=99.940, loss=90.465, backward_time=0.484, grad_norm=85.941, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.792e-05, train_time=3.441 +[gpua007:0/64] 2024-02-03 06:43:46,131 (trainer:753) INFO: 17epoch:train:10401-10500batch: iter_time=9.059e-05, forward_time=0.151, loss_ctc=73.554, loss_interctc_layer6=80.939, loss_interctc_layer12=67.948, loss_interctc_layer15=62.539, loss_interctc_layer21=75.755, loss=72.147, backward_time=0.523, grad_norm=112.698, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=9.790e-05, train_time=3.235 +[gpua007:0/64] 2024-02-03 06:49:34,806 (trainer:753) INFO: 17epoch:train:10501-10600batch: iter_time=9.515e-05, forward_time=0.155, loss_ctc=83.176, loss_interctc_layer6=86.320, loss_interctc_layer12=72.120, loss_interctc_layer15=66.265, loss_interctc_layer21=85.599, loss=78.696, backward_time=0.548, grad_norm=72.750, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.788e-05, train_time=3.487 +[gpua007:0/64] 2024-02-03 06:54:43,015 (trainer:753) INFO: 17epoch:train:10601-10700batch: iter_time=9.218e-05, forward_time=0.196, loss_ctc=102.166, loss_interctc_layer6=99.815, loss_interctc_layer12=83.184, loss_interctc_layer15=76.469, loss_interctc_layer21=105.139, loss=93.354, backward_time=0.432, grad_norm=73.730, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.143, optim0_lr0=9.786e-05, train_time=3.081 +[gpua007:0/64] 2024-02-03 07:00:43,710 (trainer:753) INFO: 17epoch:train:10701-10800batch: iter_time=9.784e-05, forward_time=0.146, loss_ctc=77.316, loss_interctc_layer6=79.759, loss_interctc_layer12=66.450, loss_interctc_layer15=61.256, loss_interctc_layer21=79.762, loss=72.909, backward_time=0.569, grad_norm=61.801, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.784e-05, train_time=3.608 +[gpua007:0/64] 2024-02-03 07:06:45,881 (trainer:753) INFO: 17epoch:train:10801-10900batch: iter_time=9.647e-05, forward_time=0.173, loss_ctc=97.086, loss_interctc_layer6=100.801, loss_interctc_layer12=83.697, loss_interctc_layer15=76.979, loss_interctc_layer21=100.096, loss=91.732, backward_time=0.556, grad_norm=71.811, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.782e-05, train_time=3.621 +[gpua007:0/64] 2024-02-03 07:12:15,838 (trainer:753) INFO: 17epoch:train:10901-11000batch: iter_time=9.128e-05, forward_time=0.143, loss_ctc=85.552, loss_interctc_layer6=95.131, loss_interctc_layer12=80.212, loss_interctc_layer15=73.849, loss_interctc_layer21=87.570, loss=84.463, backward_time=0.465, grad_norm=79.147, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.780e-05, train_time=3.300 +[gpua007:0/64] 2024-02-03 07:17:47,679 (trainer:753) INFO: 17epoch:train:11001-11100batch: iter_time=9.088e-05, forward_time=0.142, loss_ctc=100.136, loss_interctc_layer6=101.288, 
loss_interctc_layer12=85.535, loss_interctc_layer15=79.736, loss_interctc_layer21=103.096, loss=93.958, backward_time=0.503, grad_norm=72.992, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.778e-05, train_time=3.318 +[gpua007:0/64] 2024-02-03 07:22:59,498 (trainer:753) INFO: 17epoch:train:11101-11200batch: iter_time=1.013e-04, forward_time=0.141, loss_ctc=72.990, loss_interctc_layer6=81.315, loss_interctc_layer12=67.830, loss_interctc_layer15=62.287, loss_interctc_layer21=75.260, loss=71.936, backward_time=0.431, grad_norm=93.795, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.776e-05, train_time=3.118 +[gpua007:0/64] 2024-02-03 07:26:06,039 (multiple_iter_factory:32) INFO: Building 9th iter-factory... +[gpua007:0/64] 2024-02-03 07:26:24,882 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 07:26:28,688 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.2", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.2", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.2", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.2", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 07:26:28,688 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.2, +[gpua007:0/64] 2024-02-03 07:26:28,691 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 07:33:31,464 (trainer:753) INFO: 17epoch:train:11201-11300batch: iter_time=3.164, forward_time=0.167, loss_ctc=82.777, loss_interctc_layer6=92.482, loss_interctc_layer12=77.062, loss_interctc_layer15=70.760, loss_interctc_layer21=85.259, loss=81.668, backward_time=0.450, grad_norm=61.406, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=9.774e-05, train_time=6.319 +[gpua007:0/64] 2024-02-03 07:38:48,801 (trainer:753) INFO: 17epoch:train:11301-11400batch: iter_time=8.982e-05, forward_time=0.172, loss_ctc=68.207, loss_interctc_layer6=74.674, loss_interctc_layer12=61.883, loss_interctc_layer15=56.732, loss_interctc_layer21=70.325, loss=66.364, backward_time=0.471, grad_norm=72.850, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.772e-05, train_time=3.173 +[gpua007:0/64] 2024-02-03 07:43:56,755 (trainer:753) INFO: 17epoch:train:11401-11500batch: iter_time=8.400e-05, forward_time=0.153, loss_ctc=62.600, loss_interctc_layer6=73.280, loss_interctc_layer12=60.722, loss_interctc_layer15=55.705, loss_interctc_layer21=64.469, loss=63.356, backward_time=0.419, grad_norm=56.553, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.770e-05, train_time=3.080 +[gpua007:0/64] 2024-02-03 07:49:22,008 (trainer:753) INFO: 17epoch:train:11501-11600batch: iter_time=8.990e-05, forward_time=0.142, loss_ctc=89.134, loss_interctc_layer6=86.616, loss_interctc_layer12=72.179, loss_interctc_layer15=66.249, loss_interctc_layer21=92.362, loss=81.308, backward_time=0.545, grad_norm=63.491, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.768e-05, train_time=3.251 +[gpua007:0/64] 2024-02-03 07:54:03,089 (trainer:753) INFO: 17epoch:train:11601-11700batch: iter_time=8.926e-05, forward_time=0.142, loss_ctc=84.813, 
loss_interctc_layer6=90.405, loss_interctc_layer12=75.124, loss_interctc_layer15=68.911, loss_interctc_layer21=87.395, loss=81.329, backward_time=0.388, grad_norm=85.535, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.766e-05, train_time=2.811 +[gpua007:0/64] 2024-02-03 07:59:12,250 (trainer:753) INFO: 17epoch:train:11701-11800batch: iter_time=8.691e-05, forward_time=0.142, loss_ctc=90.075, loss_interctc_layer6=88.282, loss_interctc_layer12=74.439, loss_interctc_layer15=69.057, loss_interctc_layer21=93.135, loss=82.998, backward_time=0.443, grad_norm=80.502, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.764e-05, train_time=3.092 +[gpua007:0/64] 2024-02-03 08:04:40,601 (trainer:753) INFO: 17epoch:train:11801-11900batch: iter_time=9.204e-05, forward_time=0.144, loss_ctc=85.754, loss_interctc_layer6=93.276, loss_interctc_layer12=77.504, loss_interctc_layer15=71.123, loss_interctc_layer21=88.074, loss=83.146, backward_time=0.488, grad_norm=76.112, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.762e-05, train_time=3.283 +[gpua007:0/64] 2024-02-03 08:09:56,763 (trainer:753) INFO: 17epoch:train:11901-12000batch: iter_time=9.278e-05, forward_time=0.142, loss_ctc=84.828, loss_interctc_layer6=86.942, loss_interctc_layer12=72.353, loss_interctc_layer15=66.501, loss_interctc_layer21=87.543, loss=79.634, backward_time=0.457, grad_norm=69.769, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.760e-05, train_time=3.161 +[gpua007:0/64] 2024-02-03 08:15:34,177 (trainer:753) INFO: 17epoch:train:12001-12100batch: iter_time=1.034e-04, forward_time=0.141, loss_ctc=78.907, loss_interctc_layer6=84.022, loss_interctc_layer12=69.707, loss_interctc_layer15=64.046, loss_interctc_layer21=81.169, loss=75.570, backward_time=0.462, grad_norm=64.540, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.759e-05, train_time=3.374 +[gpua007:0/64] 2024-02-03 08:20:53,857 (trainer:753) INFO: 17epoch:train:12101-12200batch: iter_time=9.407e-05, forward_time=0.142, loss_ctc=102.969, loss_interctc_layer6=106.087, loss_interctc_layer12=88.889, loss_interctc_layer15=82.077, loss_interctc_layer21=105.997, loss=97.204, backward_time=0.440, grad_norm=83.423, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.757e-05, train_time=3.195 +[gpua007:0/64] 2024-02-03 08:25:58,851 (trainer:753) INFO: 17epoch:train:12201-12300batch: iter_time=9.759e-05, forward_time=0.156, loss_ctc=85.909, loss_interctc_layer6=91.813, loss_interctc_layer12=76.723, loss_interctc_layer15=70.848, loss_interctc_layer21=88.402, loss=82.739, backward_time=0.432, grad_norm=68.370, clip=100.000, loss_scale=7.261e+31, optim_step_time=0.139, optim0_lr0=9.755e-05, train_time=3.051 +[gpua007:0/64] 2024-02-03 08:31:03,236 (trainer:753) INFO: 17epoch:train:12301-12400batch: iter_time=5.917e-04, forward_time=0.189, loss_ctc=92.175, loss_interctc_layer6=95.027, loss_interctc_layer12=79.747, loss_interctc_layer15=73.597, loss_interctc_layer21=94.986, loss=87.106, backward_time=0.442, grad_norm=108.458, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.140, optim0_lr0=9.753e-05, train_time=3.043 +[gpua007:0/64] 2024-02-03 08:35:57,131 (trainer:753) INFO: 17epoch:train:12401-12500batch: iter_time=8.863e-05, forward_time=0.169, loss_ctc=77.363, loss_interctc_layer6=89.309, loss_interctc_layer12=74.154, loss_interctc_layer15=68.065, loss_interctc_layer21=79.697, loss=77.717, backward_time=0.442, grad_norm=66.992, clip=100.000, 
loss_scale=8.113e+31, optim_step_time=0.136, optim0_lr0=9.751e-05, train_time=2.939 +[gpua007:0/64] 2024-02-03 08:36:17,183 (multiple_iter_factory:32) INFO: Building 10th iter-factory... +[gpua007:0/64] 2024-02-03 08:36:35,835 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 08:36:39,305 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.6", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.6", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.6", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.6", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 08:36:39,305 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.6, +[gpua007:0/64] 2024-02-03 08:36:39,308 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 08:51:00,456 (trainer:753) INFO: 17epoch:train:12501-12600batch: iter_time=2.947, forward_time=0.141, loss_ctc=71.024, loss_interctc_layer6=78.951, loss_interctc_layer12=65.609, loss_interctc_layer15=60.353, loss_interctc_layer21=73.171, loss=69.821, backward_time=0.410, grad_norm=75.084, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.749e-05, train_time=9.033 +[gpua007:0/64] 2024-02-03 08:57:29,846 (trainer:753) INFO: 17epoch:train:12601-12700batch: iter_time=8.864e-05, forward_time=0.141, loss_ctc=67.173, loss_interctc_layer6=75.065, loss_interctc_layer12=61.892, loss_interctc_layer15=56.604, loss_interctc_layer21=69.331, loss=66.013, backward_time=0.606, grad_norm=65.804, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.747e-05, train_time=3.894 +[gpua007:0/64] 2024-02-03 09:03:54,965 (trainer:753) INFO: 17epoch:train:12701-12800batch: iter_time=9.208e-05, forward_time=0.177, loss_ctc=72.171, loss_interctc_layer6=74.245, loss_interctc_layer12=61.760, loss_interctc_layer15=56.769, loss_interctc_layer21=74.480, loss=67.885, backward_time=0.562, grad_norm=68.188, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.745e-05, train_time=3.851 +[gpua007:0/64] 2024-02-03 09:10:18,429 (trainer:753) INFO: 17epoch:train:12801-12900batch: iter_time=9.020e-05, forward_time=0.183, loss_ctc=96.804, loss_interctc_layer6=98.608, loss_interctc_layer12=82.129, loss_interctc_layer15=75.489, loss_interctc_layer21=99.796, loss=90.565, backward_time=0.568, grad_norm=77.453, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.743e-05, train_time=3.833 +[gpua007:0/64] 2024-02-03 09:15:52,776 (trainer:753) INFO: 17epoch:train:12901-13000batch: iter_time=8.562e-05, forward_time=0.173, loss_ctc=74.048, loss_interctc_layer6=81.136, loss_interctc_layer12=68.156, loss_interctc_layer15=62.831, loss_interctc_layer21=76.349, loss=72.504, backward_time=0.524, grad_norm=76.629, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.741e-05, train_time=3.344 +[gpua007:0/64] 2024-02-03 09:21:11,197 (trainer:753) INFO: 17epoch:train:13001-13100batch: iter_time=9.198e-05, forward_time=0.142, loss_ctc=83.948, loss_interctc_layer6=86.295, loss_interctc_layer12=72.173, loss_interctc_layer15=66.194, loss_interctc_layer21=86.960, loss=79.114, backward_time=0.472, 
grad_norm=80.211, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.739e-05, train_time=3.184 +[gpua007:0/64] 2024-02-03 09:27:27,029 (trainer:753) INFO: 17epoch:train:13101-13200batch: iter_time=9.152e-05, forward_time=0.142, loss_ctc=100.530, loss_interctc_layer6=100.714, loss_interctc_layer12=84.134, loss_interctc_layer15=77.340, loss_interctc_layer21=103.582, loss=93.260, backward_time=0.611, grad_norm=80.835, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.737e-05, train_time=3.758 +[gpua007:0/64] 2024-02-03 09:33:06,476 (trainer:753) INFO: 17epoch:train:13201-13300batch: iter_time=8.817e-05, forward_time=0.141, loss_ctc=75.262, loss_interctc_layer6=79.878, loss_interctc_layer12=66.428, loss_interctc_layer15=61.068, loss_interctc_layer21=77.494, loss=72.026, backward_time=0.511, grad_norm=64.711, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.735e-05, train_time=3.394 +[gpua007:0/64] 2024-02-03 09:38:49,304 (trainer:753) INFO: 17epoch:train:13301-13400batch: iter_time=8.726e-05, forward_time=0.142, loss_ctc=97.576, loss_interctc_layer6=100.940, loss_interctc_layer12=83.894, loss_interctc_layer15=76.923, loss_interctc_layer21=100.764, loss=92.020, backward_time=0.524, grad_norm=71.122, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.733e-05, train_time=3.429 +[gpua007:0/64] 2024-02-03 09:44:41,496 (trainer:753) INFO: 17epoch:train:13401-13500batch: iter_time=9.019e-05, forward_time=0.142, loss_ctc=85.871, loss_interctc_layer6=95.074, loss_interctc_layer12=79.973, loss_interctc_layer15=73.904, loss_interctc_layer21=88.451, loss=84.655, backward_time=0.530, grad_norm=72.012, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.732e-05, train_time=3.521 +[gpua007:0/64] 2024-02-03 09:50:15,413 (trainer:753) INFO: 17epoch:train:13501-13600batch: iter_time=9.086e-05, forward_time=0.142, loss_ctc=99.589, loss_interctc_layer6=100.262, loss_interctc_layer12=84.553, loss_interctc_layer15=78.254, loss_interctc_layer21=102.996, loss=93.131, backward_time=0.498, grad_norm=88.455, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.730e-05, train_time=3.339 +[gpua007:0/64] 2024-02-03 09:55:49,281 (trainer:753) INFO: 17epoch:train:13601-13700batch: iter_time=8.743e-05, forward_time=0.175, loss_ctc=72.153, loss_interctc_layer6=80.709, loss_interctc_layer12=67.257, loss_interctc_layer15=61.756, loss_interctc_layer21=74.228, loss=71.221, backward_time=0.509, grad_norm=59.866, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.141, optim0_lr0=9.728e-05, train_time=3.338 +[gpua007:0/64] 2024-02-03 09:58:49,461 (multiple_iter_factory:32) INFO: Building 11th iter-factory... 
+[gpua007:0/64] 2024-02-03 09:59:08,447 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 09:59:12,032 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.9", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.9", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.9", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.9", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 09:59:12,032 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.9, +[gpua007:0/64] 2024-02-03 09:59:12,036 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 10:09:54,911 (trainer:753) INFO: 17epoch:train:13701-13800batch: iter_time=4.658, forward_time=0.182, loss_ctc=82.293, loss_interctc_layer6=92.469, loss_interctc_layer12=76.805, loss_interctc_layer15=70.452, loss_interctc_layer21=84.887, loss=81.381, backward_time=0.474, grad_norm=57.253, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.726e-05, train_time=8.456 +[gpua007:0/64] 2024-02-03 10:15:00,451 (trainer:753) INFO: 17epoch:train:13801-13900batch: iter_time=8.622e-05, forward_time=0.141, loss_ctc=64.223, loss_interctc_layer6=73.886, loss_interctc_layer12=61.121, loss_interctc_layer15=56.091, loss_interctc_layer21=66.332, loss=64.331, backward_time=0.426, grad_norm=62.935, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.136, optim0_lr0=9.724e-05, train_time=3.055 +[gpua007:0/64] 2024-02-03 10:20:25,707 (trainer:753) INFO: 17epoch:train:13901-14000batch: iter_time=3.506e-04, forward_time=0.260, loss_ctc=61.802, loss_interctc_layer6=74.447, loss_interctc_layer12=61.909, loss_interctc_layer15=56.724, loss_interctc_layer21=63.668, loss=63.710, backward_time=0.465, grad_norm=70.938, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.140, optim0_lr0=9.722e-05, train_time=3.252 +[gpua007:0/64] 2024-02-03 10:26:07,554 (trainer:753) INFO: 17epoch:train:14001-14100batch: iter_time=8.360e-05, forward_time=0.142, loss_ctc=82.747, loss_interctc_layer6=85.844, loss_interctc_layer12=71.558, loss_interctc_layer15=65.378, loss_interctc_layer21=85.855, loss=78.277, backward_time=0.509, grad_norm=73.517, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.720e-05, train_time=3.419 +[gpua007:0/64] 2024-02-03 10:31:38,993 (trainer:753) INFO: 17epoch:train:14101-14200batch: iter_time=8.855e-05, forward_time=0.238, loss_ctc=83.123, loss_interctc_layer6=90.461, loss_interctc_layer12=75.084, loss_interctc_layer15=68.890, loss_interctc_layer21=85.558, loss=80.623, backward_time=0.481, grad_norm=71.832, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.718e-05, train_time=3.314 +[gpua007:0/64] 2024-02-03 10:36:44,325 (trainer:753) INFO: 17epoch:train:14201-14300batch: iter_time=9.150e-05, forward_time=0.153, loss_ctc=83.870, loss_interctc_layer6=88.754, loss_interctc_layer12=74.540, loss_interctc_layer15=68.659, loss_interctc_layer21=86.682, loss=80.501, backward_time=0.427, grad_norm=97.399, clip=100.000, loss_scale=1.452e+32, optim_step_time=0.135, optim0_lr0=9.716e-05, train_time=3.053 +[gpua007:0/64] 2024-02-03 10:41:57,369 (trainer:753) INFO: 
17epoch:train:14301-14400batch: iter_time=8.855e-05, forward_time=0.142, loss_ctc=84.744, loss_interctc_layer6=92.585, loss_interctc_layer12=76.522, loss_interctc_layer15=70.046, loss_interctc_layer21=87.365, loss=82.252, backward_time=0.438, grad_norm=66.633, clip=100.000, loss_scale=1.623e+32, optim_step_time=0.135, optim0_lr0=9.714e-05, train_time=3.130 +[gpua007:0/64] 2024-02-03 10:44:38,623 (trainer:684) WARNING: The grad norm is nan. Skipping updating the model. +[gpua007:0/64] 2024-02-03 10:47:44,864 (trainer:753) INFO: 17epoch:train:14401-14500batch: iter_time=8.648e-05, forward_time=0.243, loss_ctc=78.334, loss_interctc_layer6=85.831, loss_interctc_layer12=71.210, loss_interctc_layer15=65.449, loss_interctc_layer21=80.989, loss=76.363, backward_time=0.536, grad_norm=65.739, clip=100.000, loss_scale=1.155e+32, optim_step_time=0.142, optim0_lr0=9.712e-05, train_time=3.474 +[gpua007:0/64] 2024-02-03 10:53:23,324 (trainer:753) INFO: 17epoch:train:14501-14600batch: iter_time=8.650e-05, forward_time=0.141, loss_ctc=76.032, loss_interctc_layer6=83.529, loss_interctc_layer12=69.305, loss_interctc_layer15=63.648, loss_interctc_layer21=78.433, loss=74.189, backward_time=0.484, grad_norm=62.062, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.711e-05, train_time=3.384 +[gpua007:0/64] 2024-02-03 10:58:50,286 (trainer:753) INFO: 17epoch:train:14601-14700batch: iter_time=8.913e-05, forward_time=0.248, loss_ctc=99.646, loss_interctc_layer6=105.561, loss_interctc_layer12=88.398, loss_interctc_layer15=81.676, loss_interctc_layer21=102.506, loss=95.558, backward_time=0.492, grad_norm=68.941, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.139, optim0_lr0=9.709e-05, train_time=3.270 +[gpua007:0/64] 2024-02-03 11:04:08,552 (trainer:753) INFO: 17epoch:train:14701-14800batch: iter_time=8.576e-05, forward_time=0.146, loss_ctc=82.198, loss_interctc_layer6=91.542, loss_interctc_layer12=76.662, loss_interctc_layer15=70.436, loss_interctc_layer21=84.655, loss=81.098, backward_time=0.461, grad_norm=77.263, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.707e-05, train_time=3.182 +[gpua007:0/64] 2024-02-03 11:09:29,203 (trainer:753) INFO: 17epoch:train:14801-14900batch: iter_time=1.814e-04, forward_time=0.250, loss_ctc=87.009, loss_interctc_layer6=94.357, loss_interctc_layer12=79.785, loss_interctc_layer15=73.486, loss_interctc_layer21=89.749, loss=84.877, backward_time=0.468, grad_norm=81.351, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.140, optim0_lr0=9.705e-05, train_time=3.206 +[gpua007:0/64] 2024-02-03 11:14:42,888 (trainer:753) INFO: 17epoch:train:14901-15000batch: iter_time=8.038e-05, forward_time=0.149, loss_ctc=74.379, loss_interctc_layer6=89.459, loss_interctc_layer12=74.375, loss_interctc_layer15=68.302, loss_interctc_layer21=76.425, loss=76.588, backward_time=0.448, grad_norm=66.298, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.136, optim0_lr0=9.703e-05, train_time=3.138 +[gpua007:0/64] 2024-02-03 11:44:49,372 (trainer:352) INFO: 17epoch results: [train] iter_time=0.239, forward_time=0.172, loss_ctc=82.345, loss_interctc_layer6=88.920, loss_interctc_layer12=74.316, loss_interctc_layer15=68.425, loss_interctc_layer21=84.817, loss=79.765, backward_time=0.536, grad_norm=75.864, clip=100.000, loss_scale=3.871e+31, optim_step_time=0.138, optim0_lr0=9.849e-05, train_time=3.836, time=15 hours, 59 minutes and 23.71 seconds, total_count=255000, gpu_max_cached_mem_GB=34.398, [valid] loss_ctc=49.202, cer_ctc=0.225, 
loss_interctc_layer6=53.350, cer_interctc_layer6=0.242, loss_interctc_layer12=40.847, cer_interctc_layer12=0.173, loss_interctc_layer15=36.425, cer_interctc_layer15=0.148, loss_interctc_layer21=51.345, cer_interctc_layer21=0.238, loss=46.234, time=29 minutes and 42.62 seconds, total_count=79407, gpu_max_cached_mem_GB=34.398
+[gpua007:0/64] 2024-02-03 11:45:21,834 (trainer:407) INFO: The best model has been updated: valid.cer_ctc, valid.loss_ctc, valid.total_count
+[gpua007:0/64] 2024-02-03 11:45:22,469 (trainer:461) INFO: The model files were removed: exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/11epoch.pth, exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/12epoch.pth
+[gpua007:0/64] 2024-02-03 11:45:22,818 (trainer:286) INFO: 18/45epoch started. Estimated time to finish: 2 weeks, 5 days and 5 hours
+[gpua007:0/64] 2024-02-03 11:45:24,020 (multiple_iter_factory:32) INFO: Building 0th iter-factory...
+[gpua007:0/64] 2024-02-03 11:45:42,572 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4')
+[gpua007:0/64] 2024-02-03 11:45:46,209 (abs_task:1660) INFO: [train] dataset:
+ESPnetDataset(
+ speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.11", "type": "kaldi_ark"}
+ text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.11", "type": "text"}
+ text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.11", "type": "text"}
+ text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.11", "type": "text"}
+ preprocess: )
+[gpua007:0/64] 2024-02-03 11:45:46,209 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.11,
+[gpua007:0/64] 2024-02-03 11:45:46,258 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257
+[gpua007:0/64] 2024-02-03 11:57:40,956 (trainer:753) INFO: 18epoch:train:1-100batch: iter_time=4.547, forward_time=0.174, loss_ctc=72.959, loss_interctc_layer6=81.949, loss_interctc_layer12=68.948, loss_interctc_layer15=63.756, loss_interctc_layer21=75.350, loss=72.592, backward_time=0.394, grad_norm=94.141, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.701e-05, train_time=7.374
+[gpua007:0/64] 2024-02-03 12:02:56,601 (trainer:753) INFO: 18epoch:train:101-200batch: iter_time=9.175e-05, forward_time=0.140, loss_ctc=74.258, loss_interctc_layer6=80.356, loss_interctc_layer12=67.277, loss_interctc_layer15=61.951, loss_interctc_layer21=76.463, loss=72.061, backward_time=0.452, grad_norm=57.894, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.699e-05, train_time=3.156
+[gpua007:0/64] 2024-02-03 12:08:05,711 (trainer:753) INFO: 18epoch:train:201-300batch: iter_time=9.787e-05, forward_time=0.143, loss_ctc=94.517, loss_interctc_layer6=94.031, loss_interctc_layer12=79.472, loss_interctc_layer15=73.473, loss_interctc_layer21=97.102, loss=87.719, backward_time=0.424, grad_norm=81.744, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.697e-05, train_time=3.091
+[gpua007:0/64] 2024-02-03 12:13:07,012 (trainer:753) INFO: 18epoch:train:301-400batch: iter_time=1.005e-04, forward_time=0.141, loss_ctc=76.545, loss_interctc_layer6=82.537, loss_interctc_layer12=69.160, loss_interctc_layer15=63.610, loss_interctc_layer21=78.915, loss=74.153, backward_time=0.403, grad_norm=70.756, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137,
optim0_lr0=9.695e-05, train_time=3.013 +[gpua007:0/64] 2024-02-03 12:18:57,880 (trainer:753) INFO: 18epoch:train:401-500batch: iter_time=6.299e-04, forward_time=0.212, loss_ctc=87.614, loss_interctc_layer6=90.137, loss_interctc_layer12=75.542, loss_interctc_layer15=69.704, loss_interctc_layer21=90.285, loss=82.656, backward_time=0.532, grad_norm=70.036, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.147, optim0_lr0=9.693e-05, train_time=3.506 +[gpua007:0/64] 2024-02-03 12:24:24,960 (trainer:753) INFO: 18epoch:train:501-600batch: iter_time=3.719e-04, forward_time=0.211, loss_ctc=103.978, loss_interctc_layer6=108.958, loss_interctc_layer12=92.139, loss_interctc_layer15=85.527, loss_interctc_layer21=106.933, loss=99.507, backward_time=0.482, grad_norm=82.783, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.145, optim0_lr0=9.691e-05, train_time=3.272 +[gpua007:0/64] 2024-02-03 12:27:37,450 (trainer:684) WARNING: The grad norm is nan. Skipping updating the model. +[gpua007:0/64] 2024-02-03 12:29:53,395 (trainer:753) INFO: 18epoch:train:601-700batch: iter_time=9.311e-05, forward_time=0.143, loss_ctc=81.026, loss_interctc_layer6=94.134, loss_interctc_layer12=79.886, loss_interctc_layer15=74.019, loss_interctc_layer21=83.063, loss=82.426, backward_time=0.445, grad_norm=84.679, clip=100.000, loss_scale=6.679e+31, optim_step_time=0.137, optim0_lr0=9.690e-05, train_time=3.284 +[gpua007:0/64] 2024-02-03 12:35:28,996 (trainer:684) WARNING: The grad norm is nan. Skipping updating the model. +[gpua007:0/64] 2024-02-03 12:35:43,777 (trainer:753) INFO: 18epoch:train:701-800batch: iter_time=9.064e-05, forward_time=0.245, loss_ctc=87.202, loss_interctc_layer6=96.853, loss_interctc_layer12=81.006, loss_interctc_layer15=74.826, loss_interctc_layer21=89.362, loss=85.850, backward_time=0.456, grad_norm=82.027, clip=100.000, loss_scale=3.954e+31, optim_step_time=0.179, optim0_lr0=9.688e-05, train_time=3.504 +[gpua007:0/64] 2024-02-03 12:40:55,555 (trainer:753) INFO: 18epoch:train:801-900batch: iter_time=8.813e-05, forward_time=0.195, loss_ctc=96.386, loss_interctc_layer6=96.517, loss_interctc_layer12=80.264, loss_interctc_layer15=74.086, loss_interctc_layer21=99.058, loss=89.262, backward_time=0.507, grad_norm=95.046, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.145, optim0_lr0=9.686e-05, train_time=3.118 +[gpua007:0/64] 2024-02-03 12:46:31,185 (trainer:753) INFO: 18epoch:train:901-1000batch: iter_time=9.159e-05, forward_time=0.142, loss_ctc=83.203, loss_interctc_layer6=90.847, loss_interctc_layer12=76.828, loss_interctc_layer15=71.303, loss_interctc_layer21=85.626, loss=81.561, backward_time=0.517, grad_norm=71.966, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.684e-05, train_time=3.356 +[gpua007:0/64] 2024-02-03 12:52:09,686 (trainer:753) INFO: 18epoch:train:1001-1100batch: iter_time=2.986e-04, forward_time=0.210, loss_ctc=78.885, loss_interctc_layer6=86.472, loss_interctc_layer12=73.355, loss_interctc_layer15=68.273, loss_interctc_layer21=81.073, loss=77.612, backward_time=0.559, grad_norm=65.651, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.146, optim0_lr0=9.682e-05, train_time=3.384 +[gpua007:0/64] 2024-02-03 12:57:59,751 (trainer:753) INFO: 18epoch:train:1101-1200batch: iter_time=9.430e-05, forward_time=0.204, loss_ctc=77.522, loss_interctc_layer6=94.326, loss_interctc_layer12=79.998, loss_interctc_layer15=74.050, loss_interctc_layer21=79.546, loss=81.088, backward_time=0.501, grad_norm=67.301, clip=100.000, loss_scale=2.028e+31, 
optim_step_time=0.149, optim0_lr0=9.680e-05, train_time=3.501 +[gpua007:0/64] 2024-02-03 13:01:03,429 (multiple_iter_factory:32) INFO: Building 1th iter-factory... +[gpua007:0/64] 2024-02-03 13:01:22,189 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 13:01:26,045 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.2", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.2", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.2", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.2", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 13:01:26,045 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.2, +[gpua007:0/64] 2024-02-03 13:01:26,048 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 13:13:19,432 (trainer:753) INFO: 18epoch:train:1201-1300batch: iter_time=2.881, forward_time=0.143, loss_ctc=82.948, loss_interctc_layer6=93.442, loss_interctc_layer12=79.703, loss_interctc_layer15=74.258, loss_interctc_layer21=85.563, loss=83.183, backward_time=0.418, grad_norm=126.414, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.678e-05, train_time=9.197 +[gpua007:0/64] 2024-02-03 13:18:12,375 (trainer:753) INFO: 18epoch:train:1301-1400batch: iter_time=9.897e-05, forward_time=0.142, loss_ctc=77.838, loss_interctc_layer6=79.733, loss_interctc_layer12=66.445, loss_interctc_layer15=61.112, loss_interctc_layer21=80.344, loss=73.094, backward_time=0.433, grad_norm=83.184, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.676e-05, train_time=2.929 +[gpua007:0/64] 2024-02-03 13:23:12,612 (trainer:753) INFO: 18epoch:train:1401-1500batch: iter_time=9.946e-05, forward_time=0.152, loss_ctc=97.695, loss_interctc_layer6=91.188, loss_interctc_layer12=76.198, loss_interctc_layer15=69.915, loss_interctc_layer21=100.855, loss=87.170, backward_time=0.428, grad_norm=69.734, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=9.674e-05, train_time=3.002 +[gpua007:0/64] 2024-02-03 13:28:22,816 (trainer:753) INFO: 18epoch:train:1501-1600batch: iter_time=9.953e-05, forward_time=0.206, loss_ctc=79.458, loss_interctc_layer6=83.553, loss_interctc_layer12=70.147, loss_interctc_layer15=64.739, loss_interctc_layer21=81.766, loss=75.933, backward_time=0.506, grad_norm=61.782, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.145, optim0_lr0=9.673e-05, train_time=3.101 +[gpua007:0/64] 2024-02-03 13:33:48,123 (trainer:753) INFO: 18epoch:train:1601-1700batch: iter_time=9.777e-05, forward_time=0.142, loss_ctc=89.583, loss_interctc_layer6=88.371, loss_interctc_layer12=73.990, loss_interctc_layer15=68.221, loss_interctc_layer21=92.207, loss=82.474, backward_time=0.501, grad_norm=58.877, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.671e-05, train_time=3.252 +[gpua007:0/64] 2024-02-03 13:39:16,939 (trainer:753) INFO: 18epoch:train:1701-1800batch: iter_time=1.009e-04, forward_time=0.142, loss_ctc=100.432, loss_interctc_layer6=97.966, loss_interctc_layer12=81.533, loss_interctc_layer15=75.077, loss_interctc_layer21=103.443, loss=91.690, backward_time=0.492, grad_norm=88.652, clip=100.000, 
loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.669e-05, train_time=3.289 +[gpua007:0/64] 2024-02-03 13:44:46,772 (trainer:753) INFO: 18epoch:train:1801-1900batch: iter_time=9.505e-05, forward_time=0.143, loss_ctc=89.088, loss_interctc_layer6=98.321, loss_interctc_layer12=83.010, loss_interctc_layer15=76.803, loss_interctc_layer21=91.236, loss=87.692, backward_time=0.478, grad_norm=86.379, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.667e-05, train_time=3.298 +[gpua007:0/64] 2024-02-03 13:49:58,679 (trainer:753) INFO: 18epoch:train:1901-2000batch: iter_time=2.155e-04, forward_time=0.180, loss_ctc=92.347, loss_interctc_layer6=98.416, loss_interctc_layer12=82.856, loss_interctc_layer15=76.639, loss_interctc_layer21=94.789, loss=89.009, backward_time=0.463, grad_norm=88.739, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.148, optim0_lr0=9.665e-05, train_time=3.119 +[gpua007:0/64] 2024-02-03 13:54:59,354 (trainer:753) INFO: 18epoch:train:2001-2100batch: iter_time=1.415e-04, forward_time=0.179, loss_ctc=92.856, loss_interctc_layer6=92.005, loss_interctc_layer12=76.333, loss_interctc_layer15=70.500, loss_interctc_layer21=95.345, loss=85.408, backward_time=0.422, grad_norm=87.607, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.663e-05, train_time=3.006 +[gpua007:0/64] 2024-02-03 14:00:27,083 (trainer:753) INFO: 18epoch:train:2101-2200batch: iter_time=9.092e-05, forward_time=0.145, loss_ctc=89.924, loss_interctc_layer6=91.186, loss_interctc_layer12=76.116, loss_interctc_layer15=70.045, loss_interctc_layer21=92.647, loss=83.984, backward_time=0.489, grad_norm=67.996, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.661e-05, train_time=3.276 +[gpua007:0/64] 2024-02-03 14:05:13,726 (trainer:753) INFO: 18epoch:train:2201-2300batch: iter_time=9.345e-05, forward_time=0.141, loss_ctc=78.754, loss_interctc_layer6=85.406, loss_interctc_layer12=71.865, loss_interctc_layer15=66.352, loss_interctc_layer21=81.175, loss=76.710, backward_time=0.421, grad_norm=84.592, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.659e-05, train_time=2.868 +[gpua007:0/64] 2024-02-03 14:10:04,864 (trainer:753) INFO: 18epoch:train:2301-2400batch: iter_time=9.566e-05, forward_time=0.151, loss_ctc=82.960, loss_interctc_layer6=93.947, loss_interctc_layer12=79.570, loss_interctc_layer15=73.806, loss_interctc_layer21=85.266, loss=83.110, backward_time=0.403, grad_norm=91.665, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.658e-05, train_time=2.912 +[gpua007:0/64] 2024-02-03 14:15:29,191 (trainer:753) INFO: 18epoch:train:2401-2500batch: iter_time=9.532e-05, forward_time=0.213, loss_ctc=90.423, loss_interctc_layer6=98.422, loss_interctc_layer12=83.309, loss_interctc_layer15=77.388, loss_interctc_layer21=93.111, loss=88.531, backward_time=0.483, grad_norm=84.272, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.147, optim0_lr0=9.656e-05, train_time=3.243 +[gpua007:0/64] 2024-02-03 14:15:48,676 (multiple_iter_factory:32) INFO: Building 2th iter-factory... 
+[gpua007:0/64] 2024-02-03 14:16:07,107 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 14:16:10,651 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.8", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.8", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.8", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.8", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 14:16:10,651 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.8, +[gpua007:0/64] 2024-02-03 14:16:10,654 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 14:26:34,648 (trainer:753) INFO: 18epoch:train:2501-2600batch: iter_time=2.765, forward_time=0.145, loss_ctc=76.594, loss_interctc_layer6=80.597, loss_interctc_layer12=67.563, loss_interctc_layer15=62.334, loss_interctc_layer21=79.108, loss=73.239, backward_time=0.458, grad_norm=84.526, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.654e-05, train_time=6.655 +[gpua007:0/64] 2024-02-03 14:31:46,573 (trainer:753) INFO: 18epoch:train:2601-2700batch: iter_time=9.175e-05, forward_time=0.142, loss_ctc=79.298, loss_interctc_layer6=79.757, loss_interctc_layer12=66.568, loss_interctc_layer15=61.215, loss_interctc_layer21=81.808, loss=73.729, backward_time=0.414, grad_norm=64.389, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.652e-05, train_time=3.118 +[gpua007:0/64] 2024-02-03 14:38:05,500 (trainer:753) INFO: 18epoch:train:2701-2800batch: iter_time=9.787e-05, forward_time=0.143, loss_ctc=93.425, loss_interctc_layer6=92.146, loss_interctc_layer12=77.331, loss_interctc_layer15=71.161, loss_interctc_layer21=96.293, loss=86.071, backward_time=0.764, grad_norm=90.455, clip=100.000, loss_scale=2.130e+31, optim_step_time=0.137, optim0_lr0=9.650e-05, train_time=3.789 +[gpua007:0/64] 2024-02-03 14:43:08,985 (trainer:753) INFO: 18epoch:train:2801-2900batch: iter_time=9.967e-05, forward_time=0.142, loss_ctc=78.928, loss_interctc_layer6=82.071, loss_interctc_layer12=68.385, loss_interctc_layer15=62.951, loss_interctc_layer21=81.585, loss=74.784, backward_time=0.421, grad_norm=58.260, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.648e-05, train_time=3.035 +[gpua007:0/64] 2024-02-03 14:49:13,331 (trainer:753) INFO: 18epoch:train:2901-3000batch: iter_time=9.814e-05, forward_time=0.171, loss_ctc=90.119, loss_interctc_layer6=89.424, loss_interctc_layer12=74.758, loss_interctc_layer15=68.878, loss_interctc_layer21=92.440, loss=83.124, backward_time=0.484, grad_norm=63.059, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.143, optim0_lr0=9.646e-05, train_time=3.644 +[gpua007:0/64] 2024-02-03 14:54:45,497 (trainer:753) INFO: 18epoch:train:3001-3100batch: iter_time=9.671e-05, forward_time=0.233, loss_ctc=103.757, loss_interctc_layer6=105.252, loss_interctc_layer12=87.838, loss_interctc_layer15=81.010, loss_interctc_layer21=106.453, loss=96.862, backward_time=0.490, grad_norm=74.038, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.142, optim0_lr0=9.644e-05, train_time=3.321 +[gpua007:0/64] 2024-02-03 15:00:05,986 (trainer:753) INFO: 
18epoch:train:3101-3200batch: iter_time=9.262e-05, forward_time=0.142, loss_ctc=83.901, loss_interctc_layer6=91.004, loss_interctc_layer12=76.637, loss_interctc_layer15=70.955, loss_interctc_layer21=86.358, loss=81.771, backward_time=0.440, grad_norm=73.558, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.643e-05, train_time=3.205 +[gpua007:0/64] 2024-02-03 15:04:56,431 (trainer:753) INFO: 18epoch:train:3201-3300batch: iter_time=9.952e-05, forward_time=0.144, loss_ctc=86.860, loss_interctc_layer6=95.632, loss_interctc_layer12=80.007, loss_interctc_layer15=73.881, loss_interctc_layer21=89.200, loss=85.116, backward_time=0.407, grad_norm=70.227, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.641e-05, train_time=2.904 +[gpua007:0/64] 2024-02-03 15:10:20,727 (trainer:753) INFO: 18epoch:train:3301-3400batch: iter_time=9.965e-05, forward_time=0.143, loss_ctc=99.675, loss_interctc_layer6=93.680, loss_interctc_layer12=77.654, loss_interctc_layer15=71.422, loss_interctc_layer21=103.313, loss=89.149, backward_time=0.496, grad_norm=70.543, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.639e-05, train_time=3.241 +[gpua007:0/64] 2024-02-03 15:15:17,918 (trainer:753) INFO: 18epoch:train:3401-3500batch: iter_time=9.463e-05, forward_time=0.142, loss_ctc=85.740, loss_interctc_layer6=89.837, loss_interctc_layer12=75.736, loss_interctc_layer15=70.059, loss_interctc_layer21=88.356, loss=81.945, backward_time=0.427, grad_norm=68.858, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.637e-05, train_time=2.972 +[gpua007:0/64] 2024-02-03 15:20:22,585 (trainer:753) INFO: 18epoch:train:3501-3600batch: iter_time=1.023e-04, forward_time=0.153, loss_ctc=78.902, loss_interctc_layer6=86.461, loss_interctc_layer12=73.153, loss_interctc_layer15=67.764, loss_interctc_layer21=81.131, loss=77.482, backward_time=0.420, grad_norm=65.176, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=9.635e-05, train_time=3.048 +[gpua007:0/64] 2024-02-03 15:25:42,129 (trainer:753) INFO: 18epoch:train:3601-3700batch: iter_time=1.004e-04, forward_time=0.223, loss_ctc=78.810, loss_interctc_layer6=92.932, loss_interctc_layer12=78.343, loss_interctc_layer15=72.483, loss_interctc_layer21=81.070, loss=80.728, backward_time=0.490, grad_norm=73.114, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=9.633e-05, train_time=3.196 +[gpua007:0/64] 2024-02-03 15:28:35,866 (multiple_iter_factory:32) INFO: Building 3th iter-factory... 
+[gpua007:0/64] 2024-02-03 15:28:54,649 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 15:28:58,173 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.10", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.10", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.10", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.10", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 15:28:58,173 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.10, +[gpua007:0/64] 2024-02-03 15:28:58,177 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 15:41:49,933 (trainer:753) INFO: 18epoch:train:3701-3800batch: iter_time=5.231, forward_time=0.217, loss_ctc=83.220, loss_interctc_layer6=91.680, loss_interctc_layer12=77.430, loss_interctc_layer15=71.865, loss_interctc_layer21=85.497, loss=81.938, backward_time=0.406, grad_norm=86.571, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=9.631e-05, train_time=9.678 +[gpua007:0/64] 2024-02-03 15:46:43,005 (trainer:753) INFO: 18epoch:train:3801-3900batch: iter_time=8.715e-05, forward_time=0.143, loss_ctc=76.485, loss_interctc_layer6=79.322, loss_interctc_layer12=66.097, loss_interctc_layer15=60.717, loss_interctc_layer21=78.927, loss=72.309, backward_time=0.423, grad_norm=71.704, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.630e-05, train_time=2.930 +[gpua007:0/64] 2024-02-03 15:52:01,108 (trainer:753) INFO: 18epoch:train:3901-4000batch: iter_time=8.340e-05, forward_time=0.229, loss_ctc=96.560, loss_interctc_layer6=90.335, loss_interctc_layer12=75.388, loss_interctc_layer15=69.216, loss_interctc_layer21=99.607, loss=86.221, backward_time=0.485, grad_norm=62.590, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.146, optim0_lr0=9.628e-05, train_time=3.180 +[gpua007:0/64] 2024-02-03 15:57:00,735 (trainer:753) INFO: 18epoch:train:4001-4100batch: iter_time=8.920e-05, forward_time=0.164, loss_ctc=78.678, loss_interctc_layer6=82.577, loss_interctc_layer12=69.057, loss_interctc_layer15=63.545, loss_interctc_layer21=80.923, loss=74.956, backward_time=0.432, grad_norm=70.964, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.626e-05, train_time=2.997 +[gpua007:0/64] 2024-02-03 16:02:51,592 (trainer:753) INFO: 18epoch:train:4101-4200batch: iter_time=3.342e-04, forward_time=0.236, loss_ctc=89.644, loss_interctc_layer6=88.252, loss_interctc_layer12=73.795, loss_interctc_layer15=67.921, loss_interctc_layer21=92.467, loss=82.416, backward_time=0.539, grad_norm=67.954, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.141, optim0_lr0=9.624e-05, train_time=3.506 +[gpua007:0/64] 2024-02-03 16:08:15,814 (trainer:753) INFO: 18epoch:train:4201-4300batch: iter_time=9.362e-05, forward_time=0.142, loss_ctc=97.151, loss_interctc_layer6=95.302, loss_interctc_layer12=79.240, loss_interctc_layer15=73.078, loss_interctc_layer21=100.368, loss=89.028, backward_time=0.491, grad_norm=83.317, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.622e-05, train_time=3.244 +[gpua007:0/64] 2024-02-03 16:13:09,837 (trainer:753) INFO: 
18epoch:train:4301-4400batch: iter_time=9.263e-05, forward_time=0.143, loss_ctc=88.131, loss_interctc_layer6=97.552, loss_interctc_layer12=82.100, loss_interctc_layer15=75.872, loss_interctc_layer21=90.503, loss=86.832, backward_time=0.418, grad_norm=77.457, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.620e-05, train_time=2.941 +[gpua007:0/64] 2024-02-03 16:18:26,356 (trainer:753) INFO: 18epoch:train:4401-4500batch: iter_time=3.951e-04, forward_time=0.245, loss_ctc=91.865, loss_interctc_layer6=97.701, loss_interctc_layer12=82.080, loss_interctc_layer15=75.741, loss_interctc_layer21=94.287, loss=88.335, backward_time=0.435, grad_norm=82.888, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.141, optim0_lr0=9.618e-05, train_time=3.163 +[gpua007:0/64] 2024-02-03 16:23:45,925 (trainer:753) INFO: 18epoch:train:4501-4600batch: iter_time=8.743e-05, forward_time=0.144, loss_ctc=91.481, loss_interctc_layer6=90.715, loss_interctc_layer12=75.851, loss_interctc_layer15=69.994, loss_interctc_layer21=94.202, loss=84.449, backward_time=0.472, grad_norm=69.670, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.617e-05, train_time=3.196 +[gpua007:0/64] 2024-02-03 16:29:03,699 (trainer:753) INFO: 18epoch:train:4601-4700batch: iter_time=2.890e-04, forward_time=0.235, loss_ctc=88.814, loss_interctc_layer6=90.568, loss_interctc_layer12=75.551, loss_interctc_layer15=69.501, loss_interctc_layer21=91.542, loss=83.195, backward_time=0.481, grad_norm=66.308, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.142, optim0_lr0=9.615e-05, train_time=3.178 +[gpua007:0/64] 2024-02-03 16:34:40,839 (trainer:753) INFO: 18epoch:train:4701-4800batch: iter_time=9.274e-05, forward_time=0.142, loss_ctc=78.160, loss_interctc_layer6=84.933, loss_interctc_layer12=71.303, loss_interctc_layer15=65.786, loss_interctc_layer21=80.684, loss=76.173, backward_time=0.499, grad_norm=79.147, clip=100.000, loss_scale=4.259e+31, optim_step_time=0.137, optim0_lr0=9.613e-05, train_time=3.370 +[gpua007:0/64] 2024-02-03 16:39:58,995 (trainer:753) INFO: 18epoch:train:4801-4900batch: iter_time=8.805e-05, forward_time=0.142, loss_ctc=81.271, loss_interctc_layer6=92.505, loss_interctc_layer12=78.255, loss_interctc_layer15=72.204, loss_interctc_layer21=83.567, loss=81.560, backward_time=0.452, grad_norm=79.130, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.611e-05, train_time=3.182 +[gpua007:0/64] 2024-02-03 16:45:16,726 (trainer:753) INFO: 18epoch:train:4901-5000batch: iter_time=4.337e-04, forward_time=0.237, loss_ctc=88.837, loss_interctc_layer6=97.451, loss_interctc_layer12=82.481, loss_interctc_layer15=76.506, loss_interctc_layer21=91.336, loss=87.322, backward_time=0.477, grad_norm=72.675, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.142, optim0_lr0=9.609e-05, train_time=3.178 +[gpua007:0/64] 2024-02-03 16:45:36,782 (multiple_iter_factory:32) INFO: Building 4th iter-factory... 
+[gpua007:0/64] 2024-02-03 16:45:55,745 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 16:45:59,311 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.6", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.6", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.6", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.6", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 16:45:59,311 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.6, +[gpua007:0/64] 2024-02-03 16:45:59,314 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 16:56:22,426 (trainer:753) INFO: 18epoch:train:5001-5100batch: iter_time=3.226, forward_time=0.142, loss_ctc=76.496, loss_interctc_layer6=80.078, loss_interctc_layer12=67.160, loss_interctc_layer15=62.023, loss_interctc_layer21=78.957, loss=72.943, backward_time=0.397, grad_norm=76.525, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.607e-05, train_time=6.657 +[gpua007:0/64] 2024-02-03 17:01:18,485 (trainer:753) INFO: 18epoch:train:5101-5200batch: iter_time=9.699e-05, forward_time=0.141, loss_ctc=78.734, loss_interctc_layer6=79.227, loss_interctc_layer12=65.992, loss_interctc_layer15=60.626, loss_interctc_layer21=81.207, loss=73.157, backward_time=0.419, grad_norm=51.698, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.605e-05, train_time=2.960 +[gpua007:0/64] 2024-02-03 17:06:09,486 (trainer:753) INFO: 18epoch:train:5201-5300batch: iter_time=9.891e-05, forward_time=0.142, loss_ctc=93.976, loss_interctc_layer6=91.896, loss_interctc_layer12=77.020, loss_interctc_layer15=71.017, loss_interctc_layer21=96.802, loss=86.142, backward_time=0.413, grad_norm=65.684, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.604e-05, train_time=2.910 +[gpua007:0/64] 2024-02-03 17:11:42,074 (trainer:753) INFO: 18epoch:train:5301-5400batch: iter_time=1.053e-04, forward_time=0.142, loss_ctc=78.441, loss_interctc_layer6=81.706, loss_interctc_layer12=68.056, loss_interctc_layer15=62.550, loss_interctc_layer21=81.018, loss=74.354, backward_time=0.483, grad_norm=66.237, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.602e-05, train_time=3.326 +[gpua007:0/64] 2024-02-03 17:16:51,979 (trainer:753) INFO: 18epoch:train:5401-5500batch: iter_time=9.811e-05, forward_time=0.142, loss_ctc=88.707, loss_interctc_layer6=88.867, loss_interctc_layer12=74.102, loss_interctc_layer15=68.184, loss_interctc_layer21=91.431, loss=82.258, backward_time=0.456, grad_norm=58.685, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.600e-05, train_time=3.099 +[gpua007:0/64] 2024-02-03 17:22:04,283 (trainer:753) INFO: 18epoch:train:5501-5600batch: iter_time=1.018e-04, forward_time=0.143, loss_ctc=102.141, loss_interctc_layer6=103.333, loss_interctc_layer12=86.533, loss_interctc_layer15=79.685, loss_interctc_layer21=105.432, loss=95.425, backward_time=0.434, grad_norm=75.462, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.598e-05, train_time=3.123 +[gpua007:0/64] 2024-02-03 17:27:17,509 (trainer:753) INFO: 
18epoch:train:5601-5700batch: iter_time=1.019e-04, forward_time=0.142, loss_ctc=83.892, loss_interctc_layer6=91.093, loss_interctc_layer12=76.456, loss_interctc_layer15=70.682, loss_interctc_layer21=86.034, loss=81.631, backward_time=0.441, grad_norm=75.586, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.596e-05, train_time=3.132 +[gpua007:0/64] 2024-02-03 17:32:21,777 (trainer:753) INFO: 18epoch:train:5701-5800batch: iter_time=1.085e-04, forward_time=0.144, loss_ctc=86.825, loss_interctc_layer6=95.105, loss_interctc_layer12=79.360, loss_interctc_layer15=73.083, loss_interctc_layer21=89.051, loss=84.685, backward_time=0.416, grad_norm=66.945, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.594e-05, train_time=3.042 +[gpua007:0/64] 2024-02-03 17:37:32,029 (trainer:753) INFO: 18epoch:train:5801-5900batch: iter_time=1.061e-04, forward_time=0.143, loss_ctc=97.968, loss_interctc_layer6=92.607, loss_interctc_layer12=77.025, loss_interctc_layer15=70.492, loss_interctc_layer21=100.783, loss=87.775, backward_time=0.464, grad_norm=70.657, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.593e-05, train_time=3.102 +[gpua007:0/64] 2024-02-03 17:42:35,969 (trainer:753) INFO: 18epoch:train:5901-6000batch: iter_time=1.082e-04, forward_time=0.143, loss_ctc=85.265, loss_interctc_layer6=89.061, loss_interctc_layer12=74.680, loss_interctc_layer15=69.010, loss_interctc_layer21=87.952, loss=81.194, backward_time=0.431, grad_norm=90.421, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.591e-05, train_time=3.037 +[gpua007:0/64] 2024-02-03 17:48:25,759 (trainer:753) INFO: 18epoch:train:6001-6100batch: iter_time=1.055e-04, forward_time=0.272, loss_ctc=78.382, loss_interctc_layer6=85.268, loss_interctc_layer12=72.006, loss_interctc_layer15=66.716, loss_interctc_layer21=80.686, loss=76.612, backward_time=0.528, grad_norm=60.054, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.148, optim0_lr0=9.589e-05, train_time=3.500 +[gpua007:0/64] 2024-02-03 17:53:34,363 (trainer:753) INFO: 18epoch:train:6101-6200batch: iter_time=9.624e-05, forward_time=0.143, loss_ctc=78.521, loss_interctc_layer6=92.721, loss_interctc_layer12=78.077, loss_interctc_layer15=72.221, loss_interctc_layer21=80.798, loss=80.468, backward_time=0.464, grad_norm=72.903, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.587e-05, train_time=3.086 +[gpua007:0/64] 2024-02-03 17:56:25,567 (multiple_iter_factory:32) INFO: Building 5th iter-factory... 
+[gpua007:0/64] 2024-02-03 17:56:44,536 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 17:56:48,307 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.1", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.1", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.1", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.1", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 17:56:48,308 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.1, +[gpua007:0/64] 2024-02-03 17:56:48,311 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 18:03:54,920 (trainer:753) INFO: 18epoch:train:6201-6300batch: iter_time=3.152, forward_time=0.143, loss_ctc=82.581, loss_interctc_layer6=91.764, loss_interctc_layer12=77.284, loss_interctc_layer15=71.615, loss_interctc_layer21=85.109, loss=81.671, backward_time=0.415, grad_norm=74.587, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.585e-05, train_time=6.205 +[gpua007:0/64] 2024-02-03 18:05:37,361 (trainer:684) WARNING: The grad norm is nan. Skipping updating the model. +[gpua007:0/64] 2024-02-03 18:08:39,360 (trainer:753) INFO: 18epoch:train:6301-6400batch: iter_time=9.357e-05, forward_time=0.142, loss_ctc=70.970, loss_interctc_layer6=78.861, loss_interctc_layer12=65.610, loss_interctc_layer15=60.334, loss_interctc_layer21=73.650, loss=69.885, backward_time=0.390, grad_norm=66.679, clip=100.000, loss_scale=5.532e+31, optim_step_time=0.138, optim0_lr0=9.583e-05, train_time=2.844 +[gpua007:0/64] 2024-02-03 18:13:45,841 (trainer:753) INFO: 18epoch:train:6401-6500batch: iter_time=9.269e-05, forward_time=0.142, loss_ctc=93.089, loss_interctc_layer6=90.152, loss_interctc_layer12=75.267, loss_interctc_layer15=69.143, loss_interctc_layer21=96.158, loss=84.762, backward_time=0.436, grad_norm=84.794, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.582e-05, train_time=3.065 +[gpua007:0/64] 2024-02-03 18:18:58,518 (trainer:753) INFO: 18epoch:train:6501-6600batch: iter_time=9.814e-05, forward_time=0.141, loss_ctc=75.865, loss_interctc_layer6=82.152, loss_interctc_layer12=68.471, loss_interctc_layer15=62.956, loss_interctc_layer21=78.050, loss=73.499, backward_time=0.432, grad_norm=59.939, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.580e-05, train_time=3.127 +[gpua007:0/64] 2024-02-03 18:24:03,190 (trainer:753) INFO: 18epoch:train:6601-6700batch: iter_time=9.124e-05, forward_time=0.141, loss_ctc=84.977, loss_interctc_layer6=87.963, loss_interctc_layer12=73.384, loss_interctc_layer15=67.552, loss_interctc_layer21=87.587, loss=80.293, backward_time=0.452, grad_norm=62.441, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.578e-05, train_time=3.046 +[gpua007:0/64] 2024-02-03 18:29:04,225 (trainer:753) INFO: 18epoch:train:6701-6800batch: iter_time=9.600e-05, forward_time=0.144, loss_ctc=93.973, loss_interctc_layer6=94.673, loss_interctc_layer12=78.992, loss_interctc_layer15=72.297, loss_interctc_layer21=97.548, loss=87.497, backward_time=0.449, grad_norm=69.623, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, 
optim0_lr0=9.576e-05, train_time=3.010 +[gpua007:0/64] 2024-02-03 18:33:50,346 (trainer:753) INFO: 18epoch:train:6801-6900batch: iter_time=9.775e-05, forward_time=0.143, loss_ctc=84.337, loss_interctc_layer6=96.705, loss_interctc_layer12=81.076, loss_interctc_layer15=74.872, loss_interctc_layer21=86.682, loss=84.735, backward_time=0.397, grad_norm=68.965, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.574e-05, train_time=2.861 +[gpua007:0/64] 2024-02-03 18:39:29,542 (trainer:753) INFO: 18epoch:train:6901-7000batch: iter_time=9.946e-05, forward_time=0.143, loss_ctc=86.109, loss_interctc_layer6=98.055, loss_interctc_layer12=82.131, loss_interctc_layer15=75.929, loss_interctc_layer21=88.331, loss=86.111, backward_time=0.469, grad_norm=98.665, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.572e-05, train_time=3.392 +[gpua007:0/64] 2024-02-03 18:44:52,576 (trainer:753) INFO: 18epoch:train:7001-7100batch: iter_time=9.642e-05, forward_time=0.142, loss_ctc=87.179, loss_interctc_layer6=89.548, loss_interctc_layer12=74.596, loss_interctc_layer15=68.701, loss_interctc_layer21=89.801, loss=81.965, backward_time=0.454, grad_norm=87.715, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.571e-05, train_time=3.230 +[gpua007:0/64] 2024-02-03 18:50:02,585 (trainer:753) INFO: 18epoch:train:7101-7200batch: iter_time=9.394e-05, forward_time=0.223, loss_ctc=86.084, loss_interctc_layer6=90.540, loss_interctc_layer12=75.379, loss_interctc_layer15=69.151, loss_interctc_layer21=88.799, loss=81.991, backward_time=0.457, grad_norm=59.865, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.142, optim0_lr0=9.569e-05, train_time=3.100 +[gpua007:0/64] 2024-02-03 18:54:59,966 (trainer:684) WARNING: The grad norm is nan. Skipping updating the model. +[gpua007:0/64] 2024-02-03 18:55:25,081 (trainer:753) INFO: 18epoch:train:7201-7300batch: iter_time=9.993e-05, forward_time=0.181, loss_ctc=74.869, loss_interctc_layer6=83.996, loss_interctc_layer12=70.283, loss_interctc_layer15=64.837, loss_interctc_layer21=77.127, loss=74.222, backward_time=0.470, grad_norm=67.412, clip=100.000, loss_scale=3.872e+31, optim_step_time=0.138, optim0_lr0=9.567e-05, train_time=3.223 +[gpua007:0/64] 2024-02-03 19:00:28,530 (trainer:753) INFO: 18epoch:train:7301-7400batch: iter_time=9.719e-05, forward_time=0.142, loss_ctc=79.052, loss_interctc_layer6=92.388, loss_interctc_layer12=77.698, loss_interctc_layer15=71.735, loss_interctc_layer21=81.354, loss=80.446, backward_time=0.448, grad_norm=65.011, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.565e-05, train_time=3.036 +[gpua007:0/64] 2024-02-03 19:05:59,286 (trainer:753) INFO: 18epoch:train:7401-7500batch: iter_time=1.001e-04, forward_time=0.144, loss_ctc=83.818, loss_interctc_layer6=96.991, loss_interctc_layer12=81.679, loss_interctc_layer15=75.886, loss_interctc_layer21=86.285, loss=84.932, backward_time=0.525, grad_norm=81.242, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.563e-05, train_time=3.307 +[gpua007:0/64] 2024-02-03 19:06:19,330 (multiple_iter_factory:32) INFO: Building 6th iter-factory... 
+[gpua007:0/64] 2024-02-03 19:06:38,583 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 19:06:42,292 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.9", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.9", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.9", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.9", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 19:06:42,292 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.9, +[gpua007:0/64] 2024-02-03 19:06:42,296 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 19:19:46,274 (trainer:753) INFO: 18epoch:train:7501-7600batch: iter_time=3.240, forward_time=0.143, loss_ctc=70.796, loss_interctc_layer6=79.473, loss_interctc_layer12=66.539, loss_interctc_layer15=61.266, loss_interctc_layer21=73.170, loss=70.249, backward_time=0.438, grad_norm=59.462, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=9.561e-05, train_time=8.270 +[gpua007:0/64] 2024-02-03 19:25:10,211 (trainer:753) INFO: 18epoch:train:7601-7700batch: iter_time=9.120e-05, forward_time=0.142, loss_ctc=73.391, loss_interctc_layer6=79.243, loss_interctc_layer12=65.953, loss_interctc_layer15=60.586, loss_interctc_layer21=75.875, loss=71.010, backward_time=0.479, grad_norm=58.105, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.560e-05, train_time=3.239 +[gpua007:0/64] 2024-02-03 19:30:41,111 (trainer:753) INFO: 18epoch:train:7701-7800batch: iter_time=9.603e-05, forward_time=0.142, loss_ctc=91.994, loss_interctc_layer6=91.115, loss_interctc_layer12=76.118, loss_interctc_layer15=70.094, loss_interctc_layer21=94.800, loss=84.824, backward_time=0.493, grad_norm=71.351, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.558e-05, train_time=3.309 +[gpua007:0/64] 2024-02-03 19:35:55,158 (trainer:753) INFO: 18epoch:train:7801-7900batch: iter_time=1.017e-04, forward_time=0.142, loss_ctc=73.985, loss_interctc_layer6=81.406, loss_interctc_layer12=67.723, loss_interctc_layer15=61.962, loss_interctc_layer21=76.329, loss=72.281, backward_time=0.419, grad_norm=59.076, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.556e-05, train_time=3.140 +[gpua007:0/64] 2024-02-03 19:41:43,223 (trainer:753) INFO: 18epoch:train:7901-8000batch: iter_time=1.092e-04, forward_time=0.142, loss_ctc=85.176, loss_interctc_layer6=88.608, loss_interctc_layer12=73.820, loss_interctc_layer15=67.764, loss_interctc_layer21=87.735, loss=80.621, backward_time=0.523, grad_norm=71.991, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.554e-05, train_time=3.480 +[gpua007:0/64] 2024-02-03 19:47:29,166 (trainer:753) INFO: 18epoch:train:8001-8100batch: iter_time=1.108e-04, forward_time=0.143, loss_ctc=98.090, loss_interctc_layer6=102.836, loss_interctc_layer12=85.796, loss_interctc_layer15=79.054, loss_interctc_layer21=100.970, loss=93.349, backward_time=0.494, grad_norm=103.440, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.552e-05, train_time=3.459 +[gpua007:0/64] 2024-02-03 19:53:05,384 (trainer:753) INFO: 
18epoch:train:8101-8200batch: iter_time=1.103e-04, forward_time=0.142, loss_ctc=77.551, loss_interctc_layer6=91.194, loss_interctc_layer12=76.576, loss_interctc_layer15=70.682, loss_interctc_layer21=79.834, loss=79.167, backward_time=0.488, grad_norm=75.411, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.551e-05, train_time=3.362 +[gpua007:0/64] 2024-02-03 19:58:35,500 (trainer:753) INFO: 18epoch:train:8201-8300batch: iter_time=1.089e-04, forward_time=0.202, loss_ctc=84.997, loss_interctc_layer6=94.356, loss_interctc_layer12=78.485, loss_interctc_layer15=72.188, loss_interctc_layer21=87.178, loss=83.441, backward_time=0.478, grad_norm=76.135, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.143, optim0_lr0=9.549e-05, train_time=3.301 +[gpua007:0/64] 2024-02-03 20:04:14,492 (trainer:753) INFO: 18epoch:train:8301-8400batch: iter_time=5.740e-04, forward_time=0.189, loss_ctc=93.002, loss_interctc_layer6=92.343, loss_interctc_layer12=76.336, loss_interctc_layer15=70.203, loss_interctc_layer21=95.970, loss=85.571, backward_time=0.530, grad_norm=81.757, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=9.547e-05, train_time=3.390 +[gpua007:0/64] 2024-02-03 20:09:19,156 (trainer:753) INFO: 18epoch:train:8401-8500batch: iter_time=1.085e-04, forward_time=0.143, loss_ctc=81.425, loss_interctc_layer6=89.304, loss_interctc_layer12=74.945, loss_interctc_layer15=69.198, loss_interctc_layer21=83.975, loss=79.769, backward_time=0.424, grad_norm=65.758, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.545e-05, train_time=3.046 +[gpua007:0/64] 2024-02-03 20:15:23,584 (trainer:753) INFO: 18epoch:train:8501-8600batch: iter_time=1.052e-04, forward_time=0.142, loss_ctc=77.507, loss_interctc_layer6=84.430, loss_interctc_layer12=71.085, loss_interctc_layer15=65.712, loss_interctc_layer21=79.592, loss=75.665, backward_time=0.555, grad_norm=55.979, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.543e-05, train_time=3.645 +[gpua007:0/64] 2024-02-03 20:20:55,524 (trainer:753) INFO: 18epoch:train:8601-8700batch: iter_time=1.098e-04, forward_time=0.143, loss_ctc=74.853, loss_interctc_layer6=92.097, loss_interctc_layer12=77.634, loss_interctc_layer15=71.548, loss_interctc_layer21=76.800, loss=78.586, backward_time=0.462, grad_norm=65.454, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.542e-05, train_time=3.319 +[gpua007:0/64] 2024-02-03 20:23:44,690 (multiple_iter_factory:32) INFO: Building 7th iter-factory... 
+[gpua007:0/64] 2024-02-03 20:24:03,869 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 20:24:07,415 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.7", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.7", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.7", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.7", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 20:24:07,416 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.7, +[gpua007:0/64] 2024-02-03 20:24:07,419 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 20:34:25,817 (trainer:753) INFO: 18epoch:train:8701-8800batch: iter_time=3.271, forward_time=0.143, loss_ctc=78.828, loss_interctc_layer6=90.993, loss_interctc_layer12=76.529, loss_interctc_layer15=70.842, loss_interctc_layer21=81.016, loss=79.642, backward_time=0.408, grad_norm=112.288, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.540e-05, train_time=8.103 +[gpua007:0/64] 2024-02-03 20:39:16,474 (trainer:753) INFO: 18epoch:train:8801-8900batch: iter_time=8.911e-05, forward_time=0.142, loss_ctc=70.526, loss_interctc_layer6=78.993, loss_interctc_layer12=65.711, loss_interctc_layer15=60.404, loss_interctc_layer21=72.792, loss=69.685, backward_time=0.405, grad_norm=55.375, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=9.538e-05, train_time=2.906 +[gpua007:0/64] 2024-02-03 20:44:16,714 (trainer:753) INFO: 18epoch:train:8901-9000batch: iter_time=9.012e-05, forward_time=0.142, loss_ctc=92.064, loss_interctc_layer6=90.505, loss_interctc_layer12=75.371, loss_interctc_layer15=69.252, loss_interctc_layer21=94.916, loss=84.422, backward_time=0.423, grad_norm=63.093, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.536e-05, train_time=3.002 +[gpua007:0/64] 2024-02-03 20:49:24,207 (trainer:753) INFO: 18epoch:train:9001-9100batch: iter_time=9.558e-05, forward_time=0.141, loss_ctc=75.072, loss_interctc_layer6=81.300, loss_interctc_layer12=67.550, loss_interctc_layer15=62.130, loss_interctc_layer21=77.595, loss=72.729, backward_time=0.443, grad_norm=57.895, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.534e-05, train_time=3.075 +[gpua007:0/64] 2024-02-03 20:54:17,667 (trainer:753) INFO: 18epoch:train:9101-9200batch: iter_time=9.939e-05, forward_time=0.142, loss_ctc=84.922, loss_interctc_layer6=87.588, loss_interctc_layer12=73.204, loss_interctc_layer15=67.321, loss_interctc_layer21=87.594, loss=80.126, backward_time=0.424, grad_norm=55.193, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.532e-05, train_time=2.934 +[gpua007:0/64] 2024-02-03 21:00:01,586 (trainer:753) INFO: 18epoch:train:9201-9300batch: iter_time=1.018e-04, forward_time=0.142, loss_ctc=93.194, loss_interctc_layer6=94.421, loss_interctc_layer12=78.408, loss_interctc_layer15=72.078, loss_interctc_layer21=95.989, loss=86.818, backward_time=0.526, grad_norm=62.941, clip=100.000, loss_scale=2.211e+31, optim_step_time=0.138, optim0_lr0=9.531e-05, train_time=3.439 +[gpua007:0/64] 2024-02-03 21:06:06,317 (trainer:753) INFO: 18epoch:train:9301-9400batch: 
iter_time=3.374e-04, forward_time=0.215, loss_ctc=83.273, loss_interctc_layer6=95.751, loss_interctc_layer12=80.160, loss_interctc_layer15=74.004, loss_interctc_layer21=85.659, loss=83.770, backward_time=0.589, grad_norm=70.041, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.148, optim0_lr0=9.529e-05, train_time=3.646 +[gpua007:0/64] 2024-02-03 21:11:15,267 (trainer:753) INFO: 18epoch:train:9401-9500batch: iter_time=9.648e-05, forward_time=0.143, loss_ctc=84.958, loss_interctc_layer6=97.329, loss_interctc_layer12=81.486, loss_interctc_layer15=74.999, loss_interctc_layer21=87.399, loss=85.234, backward_time=0.448, grad_norm=118.998, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.527e-05, train_time=3.090 +[gpua007:0/64] 2024-02-03 21:16:26,858 (trainer:753) INFO: 18epoch:train:9501-9600batch: iter_time=9.799e-05, forward_time=0.144, loss_ctc=86.643, loss_interctc_layer6=89.808, loss_interctc_layer12=74.775, loss_interctc_layer15=68.849, loss_interctc_layer21=89.257, loss=81.866, backward_time=0.430, grad_norm=86.694, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.525e-05, train_time=3.114 +[gpua007:0/64] 2024-02-03 21:21:19,224 (trainer:753) INFO: 18epoch:train:9601-9700batch: iter_time=9.112e-05, forward_time=0.182, loss_ctc=85.643, loss_interctc_layer6=89.298, loss_interctc_layer12=74.149, loss_interctc_layer15=67.975, loss_interctc_layer21=88.348, loss=81.083, backward_time=0.395, grad_norm=92.341, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.523e-05, train_time=2.925 +[gpua007:0/64] 2024-02-03 21:26:07,536 (trainer:753) INFO: 18epoch:train:9701-9800batch: iter_time=9.981e-05, forward_time=0.141, loss_ctc=74.025, loss_interctc_layer6=83.687, loss_interctc_layer12=70.057, loss_interctc_layer15=64.743, loss_interctc_layer21=76.267, loss=73.756, backward_time=0.418, grad_norm=61.058, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.522e-05, train_time=2.883 +[gpua007:0/64] 2024-02-03 21:31:39,884 (trainer:753) INFO: 18epoch:train:9801-9900batch: iter_time=9.639e-05, forward_time=0.142, loss_ctc=78.550, loss_interctc_layer6=91.650, loss_interctc_layer12=77.198, loss_interctc_layer15=71.178, loss_interctc_layer21=80.559, loss=79.827, backward_time=0.480, grad_norm=65.184, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.520e-05, train_time=3.323 +[gpua007:0/64] 2024-02-03 21:37:45,130 (trainer:753) INFO: 18epoch:train:9901-10000batch: iter_time=9.382e-05, forward_time=0.143, loss_ctc=83.944, loss_interctc_layer6=97.195, loss_interctc_layer12=81.838, loss_interctc_layer15=75.749, loss_interctc_layer21=86.191, loss=84.984, backward_time=0.513, grad_norm=77.158, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.518e-05, train_time=3.652 +[gpua007:0/64] 2024-02-03 21:38:05,204 (multiple_iter_factory:32) INFO: Building 8th iter-factory... 
+[gpua007:0/64] 2024-02-03 21:38:24,287 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 21:38:27,790 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.5", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.5", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.5", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.5", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 21:38:27,790 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.5, +[gpua007:0/64] 2024-02-03 21:38:27,798 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 21:56:16,969 (trainer:753) INFO: 18epoch:train:10001-10100batch: iter_time=3.233, forward_time=0.142, loss_ctc=71.001, loss_interctc_layer6=80.162, loss_interctc_layer12=67.020, loss_interctc_layer15=61.736, loss_interctc_layer21=73.429, loss=70.670, backward_time=0.969, grad_norm=118.469, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.516e-05, train_time=11.118 +[gpua007:0/64] 2024-02-03 22:07:06,250 (trainer:753) INFO: 18epoch:train:10101-10200batch: iter_time=9.871e-05, forward_time=0.144, loss_ctc=72.839, loss_interctc_layer6=78.985, loss_interctc_layer12=65.766, loss_interctc_layer15=60.424, loss_interctc_layer21=75.267, loss=70.656, backward_time=0.814, grad_norm=63.225, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.514e-05, train_time=6.493 +[gpua007:0/64] 2024-02-03 22:17:04,439 (trainer:753) INFO: 18epoch:train:10201-10300batch: iter_time=8.884e-05, forward_time=0.143, loss_ctc=91.343, loss_interctc_layer6=91.300, loss_interctc_layer12=76.152, loss_interctc_layer15=70.101, loss_interctc_layer21=94.075, loss=84.594, backward_time=0.911, grad_norm=80.352, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.513e-05, train_time=5.982 +[gpua007:0/64] 2024-02-03 22:25:28,276 (trainer:753) INFO: 18epoch:train:10301-10400batch: iter_time=1.002e-04, forward_time=0.142, loss_ctc=73.189, loss_interctc_layer6=80.540, loss_interctc_layer12=66.978, loss_interctc_layer15=61.436, loss_interctc_layer21=75.574, loss=71.543, backward_time=0.722, grad_norm=55.625, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.511e-05, train_time=5.038 +[gpua007:0/64] 2024-02-03 22:31:04,353 (trainer:753) INFO: 18epoch:train:10401-10500batch: iter_time=1.022e-04, forward_time=0.142, loss_ctc=85.141, loss_interctc_layer6=88.765, loss_interctc_layer12=73.916, loss_interctc_layer15=67.951, loss_interctc_layer21=87.660, loss=80.686, backward_time=0.502, grad_norm=97.198, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.509e-05, train_time=3.361 +[gpua007:0/64] 2024-02-03 22:38:14,763 (trainer:753) INFO: 18epoch:train:10501-10600batch: iter_time=9.770e-05, forward_time=0.143, loss_ctc=96.912, loss_interctc_layer6=102.288, loss_interctc_layer12=85.255, loss_interctc_layer15=78.516, loss_interctc_layer21=100.021, loss=92.598, backward_time=0.649, grad_norm=66.769, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.507e-05, train_time=4.304 +[gpua007:0/64] 2024-02-03 22:47:34,253 (trainer:753) INFO: 
18epoch:train:10601-10700batch: iter_time=1.724e-04, forward_time=0.143, loss_ctc=77.027, loss_interctc_layer6=90.365, loss_interctc_layer12=76.314, loss_interctc_layer15=70.145, loss_interctc_layer21=79.209, loss=78.612, backward_time=1.064, grad_norm=81.414, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.506e-05, train_time=5.595 +[gpua007:0/64] 2024-02-03 22:57:31,282 (trainer:753) INFO: 18epoch:train:10701-10800batch: iter_time=9.409e-05, forward_time=0.163, loss_ctc=84.977, loss_interctc_layer6=94.623, loss_interctc_layer12=78.889, loss_interctc_layer15=72.689, loss_interctc_layer21=87.258, loss=83.687, backward_time=1.254, grad_norm=128.862, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.148, optim0_lr0=9.504e-05, train_time=5.969 +[gpua007:0/64] 2024-02-03 23:05:19,094 (trainer:753) INFO: 18epoch:train:10801-10900batch: iter_time=9.736e-05, forward_time=0.143, loss_ctc=91.135, loss_interctc_layer6=91.008, loss_interctc_layer12=75.179, loss_interctc_layer15=68.782, loss_interctc_layer21=94.190, loss=84.059, backward_time=0.878, grad_norm=71.687, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.502e-05, train_time=4.679 +[gpua007:0/64] 2024-02-03 23:13:44,314 (trainer:753) INFO: 18epoch:train:10901-11000batch: iter_time=1.020e-04, forward_time=0.143, loss_ctc=80.729, loss_interctc_layer6=88.610, loss_interctc_layer12=74.234, loss_interctc_layer15=68.464, loss_interctc_layer21=83.123, loss=79.032, backward_time=0.906, grad_norm=66.100, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.500e-05, train_time=5.052 +[gpua007:0/64] 2024-02-03 23:20:28,180 (trainer:753) INFO: 18epoch:train:11001-11100batch: iter_time=9.785e-05, forward_time=0.142, loss_ctc=76.339, loss_interctc_layer6=84.501, loss_interctc_layer12=71.088, loss_interctc_layer15=65.626, loss_interctc_layer21=78.675, loss=75.246, backward_time=0.619, grad_norm=59.340, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.498e-05, train_time=4.038 +[gpua007:0/64] 2024-02-03 23:25:55,719 (trainer:753) INFO: 18epoch:train:11101-11200batch: iter_time=8.961e-05, forward_time=0.202, loss_ctc=74.892, loss_interctc_layer6=92.448, loss_interctc_layer12=77.775, loss_interctc_layer15=71.929, loss_interctc_layer21=77.112, loss=78.831, backward_time=0.485, grad_norm=69.999, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.141, optim0_lr0=9.497e-05, train_time=3.275 +[gpua007:0/64] 2024-02-03 23:28:44,234 (multiple_iter_factory:32) INFO: Building 9th iter-factory... 
+[gpua007:0/64] 2024-02-03 23:29:02,807 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-03 23:29:06,313 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.0", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.0", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.0", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.0", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-03 23:29:06,313 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.0, +[gpua007:0/64] 2024-02-03 23:29:06,316 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-03 23:39:57,807 (trainer:753) INFO: 18epoch:train:11201-11300batch: iter_time=2.105, forward_time=0.143, loss_ctc=80.762, loss_interctc_layer6=90.707, loss_interctc_layer12=76.294, loss_interctc_layer15=70.563, loss_interctc_layer21=83.235, loss=80.312, backward_time=0.466, grad_norm=68.943, clip=100.000, loss_scale=4.422e+31, optim_step_time=0.137, optim0_lr0=9.495e-05, train_time=8.421 +[gpua007:0/64] 2024-02-03 23:44:53,311 (trainer:753) INFO: 18epoch:train:11301-11400batch: iter_time=8.383e-05, forward_time=0.144, loss_ctc=76.022, loss_interctc_layer6=78.403, loss_interctc_layer12=65.078, loss_interctc_layer15=59.708, loss_interctc_layer21=78.685, loss=71.579, backward_time=0.438, grad_norm=88.351, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.493e-05, train_time=2.955 +[gpua007:0/64] 2024-02-03 23:50:29,200 (trainer:753) INFO: 18epoch:train:11401-11500batch: iter_time=9.379e-05, forward_time=0.142, loss_ctc=95.329, loss_interctc_layer6=89.595, loss_interctc_layer12=74.389, loss_interctc_layer15=68.537, loss_interctc_layer21=98.296, loss=85.229, backward_time=0.484, grad_norm=63.697, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.491e-05, train_time=3.359 +[gpua007:0/64] 2024-02-03 23:55:31,886 (trainer:753) INFO: 18epoch:train:11501-11600batch: iter_time=9.542e-05, forward_time=0.141, loss_ctc=78.826, loss_interctc_layer6=81.832, loss_interctc_layer12=68.820, loss_interctc_layer15=63.154, loss_interctc_layer21=80.965, loss=74.719, backward_time=0.432, grad_norm=68.509, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.489e-05, train_time=3.027 +[gpua007:0/64] 2024-02-04 00:00:33,290 (trainer:753) INFO: 18epoch:train:11601-11700batch: iter_time=9.624e-05, forward_time=0.142, loss_ctc=87.437, loss_interctc_layer6=86.790, loss_interctc_layer12=72.473, loss_interctc_layer15=66.557, loss_interctc_layer21=90.278, loss=80.707, backward_time=0.436, grad_norm=69.254, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.488e-05, train_time=3.014 +[gpua007:0/64] 2024-02-04 00:06:04,819 (trainer:753) INFO: 18epoch:train:11701-11800batch: iter_time=1.056e-04, forward_time=0.142, loss_ctc=98.368, loss_interctc_layer6=93.931, loss_interctc_layer12=78.260, loss_interctc_layer15=71.801, loss_interctc_layer21=101.571, loss=88.786, backward_time=0.510, grad_norm=81.862, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.486e-05, train_time=3.315 +[gpua007:0/64] 2024-02-04 00:11:59,505 (trainer:753) INFO: 
18epoch:train:11801-11900batch: iter_time=1.016e-04, forward_time=0.143, loss_ctc=86.564, loss_interctc_layer6=95.639, loss_interctc_layer12=80.253, loss_interctc_layer15=74.057, loss_interctc_layer21=89.162, loss=85.135, backward_time=0.524, grad_norm=73.964, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.484e-05, train_time=3.547 +[gpua007:0/64] 2024-02-04 00:17:13,431 (trainer:753) INFO: 18epoch:train:11901-12000batch: iter_time=9.031e-05, forward_time=0.143, loss_ctc=90.964, loss_interctc_layer6=96.988, loss_interctc_layer12=81.244, loss_interctc_layer15=74.858, loss_interctc_layer21=93.532, loss=87.517, backward_time=0.422, grad_norm=82.015, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.482e-05, train_time=3.139 +[gpua007:0/64] 2024-02-04 00:23:21,481 (trainer:753) INFO: 18epoch:train:12001-12100batch: iter_time=1.033e-04, forward_time=0.162, loss_ctc=89.768, loss_interctc_layer6=89.019, loss_interctc_layer12=74.136, loss_interctc_layer15=68.323, loss_interctc_layer21=92.584, loss=82.766, backward_time=0.581, grad_norm=65.879, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.481e-05, train_time=3.679 +[gpua007:0/64] 2024-02-04 00:28:25,801 (trainer:753) INFO: 18epoch:train:12101-12200batch: iter_time=9.161e-05, forward_time=0.170, loss_ctc=88.231, loss_interctc_layer6=89.268, loss_interctc_layer12=74.057, loss_interctc_layer15=67.859, loss_interctc_layer21=91.079, loss=82.099, backward_time=0.426, grad_norm=63.385, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.140, optim0_lr0=9.479e-05, train_time=3.043 +[gpua007:0/64] 2024-02-04 00:33:58,715 (trainer:753) INFO: 18epoch:train:12201-12300batch: iter_time=9.210e-05, forward_time=0.141, loss_ctc=76.523, loss_interctc_layer6=83.783, loss_interctc_layer12=70.040, loss_interctc_layer15=64.490, loss_interctc_layer21=78.679, loss=74.703, backward_time=0.473, grad_norm=53.549, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.477e-05, train_time=3.330 +[gpua007:0/64] 2024-02-04 00:39:21,760 (trainer:753) INFO: 18epoch:train:12301-12400batch: iter_time=9.865e-05, forward_time=0.204, loss_ctc=79.868, loss_interctc_layer6=91.654, loss_interctc_layer12=77.291, loss_interctc_layer15=71.285, loss_interctc_layer21=81.812, loss=80.382, backward_time=0.480, grad_norm=63.479, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.143, optim0_lr0=9.475e-05, train_time=3.230 +[gpua007:0/64] 2024-02-04 00:45:13,424 (trainer:753) INFO: 18epoch:train:12401-12500batch: iter_time=9.416e-05, forward_time=0.158, loss_ctc=87.190, loss_interctc_layer6=95.670, loss_interctc_layer12=80.716, loss_interctc_layer15=74.776, loss_interctc_layer21=89.814, loss=85.633, backward_time=0.497, grad_norm=78.590, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.141, optim0_lr0=9.473e-05, train_time=3.516 +[gpua007:0/64] 2024-02-04 00:45:33,488 (multiple_iter_factory:32) INFO: Building 10th iter-factory... 
+[gpua007:0/64] 2024-02-04 00:45:52,039 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-04 00:45:55,538 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.4", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.4", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.4", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.4", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-04 00:45:55,538 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.4, +[gpua007:0/64] 2024-02-04 00:45:55,541 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-04 00:56:10,618 (trainer:753) INFO: 18epoch:train:12501-12600batch: iter_time=3.370, forward_time=0.146, loss_ctc=75.392, loss_interctc_layer6=79.581, loss_interctc_layer12=66.338, loss_interctc_layer15=61.158, loss_interctc_layer21=77.870, loss=72.068, backward_time=0.395, grad_norm=82.535, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.139, optim0_lr0=9.472e-05, train_time=6.572 +[gpua007:0/64] 2024-02-04 01:01:38,142 (trainer:753) INFO: 18epoch:train:12601-12700batch: iter_time=9.059e-05, forward_time=0.142, loss_ctc=78.261, loss_interctc_layer6=79.027, loss_interctc_layer12=65.674, loss_interctc_layer15=60.283, loss_interctc_layer21=80.643, loss=72.778, backward_time=0.441, grad_norm=70.582, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.139, optim0_lr0=9.470e-05, train_time=3.275 +[gpua007:0/64] 2024-02-04 01:06:18,436 (trainer:753) INFO: 18epoch:train:12701-12800batch: iter_time=9.297e-05, forward_time=0.143, loss_ctc=92.366, loss_interctc_layer6=91.659, loss_interctc_layer12=76.295, loss_interctc_layer15=70.237, loss_interctc_layer21=94.945, loss=85.100, backward_time=0.390, grad_norm=67.354, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.139, optim0_lr0=9.468e-05, train_time=2.803 +[gpua007:0/64] 2024-02-04 01:11:30,699 (trainer:753) INFO: 18epoch:train:12801-12900batch: iter_time=9.962e-05, forward_time=0.142, loss_ctc=78.405, loss_interctc_layer6=81.060, loss_interctc_layer12=67.255, loss_interctc_layer15=61.635, loss_interctc_layer21=81.019, loss=73.875, backward_time=0.467, grad_norm=56.929, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.139, optim0_lr0=9.466e-05, train_time=3.122 +[gpua007:0/64] 2024-02-04 01:16:47,090 (trainer:753) INFO: 18epoch:train:12901-13000batch: iter_time=1.042e-04, forward_time=0.142, loss_ctc=88.407, loss_interctc_layer6=88.323, loss_interctc_layer12=73.529, loss_interctc_layer15=67.597, loss_interctc_layer21=91.171, loss=81.805, backward_time=0.508, grad_norm=55.891, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.139, optim0_lr0=9.465e-05, train_time=3.164 +[gpua007:0/64] 2024-02-04 01:18:22,096 (trainer:684) WARNING: The grad norm is nan. Skipping updating the model. 
+[gpua007:0/64] 2024-02-04 01:22:11,031 (trainer:753) INFO: 18epoch:train:13001-13100batch: iter_time=1.005e-04, forward_time=0.144, loss_ctc=100.561, loss_interctc_layer6=101.979, loss_interctc_layer12=85.167, loss_interctc_layer15=78.643, loss_interctc_layer21=103.910, loss=94.052, backward_time=0.420, grad_norm=78.950, clip=100.000, loss_scale=5.204e+31, optim_step_time=0.139, optim0_lr0=9.463e-05, train_time=3.239 +[gpua007:0/64] 2024-02-04 01:27:28,335 (trainer:753) INFO: 18epoch:train:13101-13200batch: iter_time=9.761e-05, forward_time=0.202, loss_ctc=82.543, loss_interctc_layer6=90.286, loss_interctc_layer12=75.666, loss_interctc_layer15=69.652, loss_interctc_layer21=84.942, loss=80.618, backward_time=0.478, grad_norm=76.070, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=9.461e-05, train_time=3.172 +[gpua007:0/64] 2024-02-04 01:33:52,893 (trainer:753) INFO: 18epoch:train:13201-13300batch: iter_time=9.853e-05, forward_time=0.144, loss_ctc=85.835, loss_interctc_layer6=94.239, loss_interctc_layer12=78.599, loss_interctc_layer15=72.228, loss_interctc_layer21=88.136, loss=83.807, backward_time=0.554, grad_norm=76.714, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=9.459e-05, train_time=3.845 +[gpua007:0/64] 2024-02-04 01:39:26,760 (trainer:753) INFO: 18epoch:train:13301-13400batch: iter_time=9.420e-05, forward_time=0.221, loss_ctc=96.979, loss_interctc_layer6=91.073, loss_interctc_layer12=75.184, loss_interctc_layer15=68.763, loss_interctc_layer21=100.144, loss=86.429, backward_time=0.510, grad_norm=69.196, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.143, optim0_lr0=9.458e-05, train_time=3.339 +[gpua007:0/64] 2024-02-04 01:44:55,010 (trainer:753) INFO: 18epoch:train:13401-13500batch: iter_time=9.491e-05, forward_time=0.143, loss_ctc=84.666, loss_interctc_layer6=88.828, loss_interctc_layer12=74.381, loss_interctc_layer15=68.590, loss_interctc_layer21=87.211, loss=80.735, backward_time=0.485, grad_norm=63.908, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.456e-05, train_time=3.282 +[gpua007:0/64] 2024-02-04 01:50:07,602 (trainer:753) INFO: 18epoch:train:13501-13600batch: iter_time=9.297e-05, forward_time=0.142, loss_ctc=76.319, loss_interctc_layer6=83.934, loss_interctc_layer12=70.701, loss_interctc_layer15=65.451, loss_interctc_layer21=78.495, loss=74.980, backward_time=0.467, grad_norm=68.228, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.454e-05, train_time=3.126 +[gpua007:0/64] 2024-02-04 01:55:17,506 (trainer:753) INFO: 18epoch:train:13601-13700batch: iter_time=9.510e-05, forward_time=0.144, loss_ctc=77.428, loss_interctc_layer6=92.070, loss_interctc_layer12=77.600, loss_interctc_layer15=71.493, loss_interctc_layer21=79.595, loss=79.637, backward_time=0.415, grad_norm=85.281, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.452e-05, train_time=3.099 +[gpua007:0/64] 2024-02-04 01:58:02,371 (multiple_iter_factory:32) INFO: Building 11th iter-factory... 
+[gpua007:0/64] 2024-02-04 01:58:21,559 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-04 01:58:25,100 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.3", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.3", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.3", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.3", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-04 01:58:25,100 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.3, +[gpua007:0/64] 2024-02-04 01:58:25,103 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-04 02:10:10,879 (trainer:753) INFO: 18epoch:train:13701-13800batch: iter_time=3.758, forward_time=0.143, loss_ctc=80.392, loss_interctc_layer6=90.443, loss_interctc_layer12=75.975, loss_interctc_layer15=70.373, loss_interctc_layer21=82.987, loss=80.034, backward_time=0.395, grad_norm=67.316, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.451e-05, train_time=8.934 +[gpua007:0/64] 2024-02-04 02:15:17,905 (trainer:753) INFO: 18epoch:train:13801-13900batch: iter_time=8.348e-05, forward_time=0.142, loss_ctc=70.291, loss_interctc_layer6=78.518, loss_interctc_layer12=65.188, loss_interctc_layer15=59.988, loss_interctc_layer21=72.807, loss=69.358, backward_time=0.443, grad_norm=53.948, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.449e-05, train_time=3.070 +[gpua007:0/64] 2024-02-04 02:20:32,053 (trainer:753) INFO: 18epoch:train:13901-14000batch: iter_time=8.661e-05, forward_time=0.142, loss_ctc=91.681, loss_interctc_layer6=89.858, loss_interctc_layer12=74.760, loss_interctc_layer15=68.689, loss_interctc_layer21=94.659, loss=83.929, backward_time=0.492, grad_norm=73.062, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.447e-05, train_time=3.141 +[gpua007:0/64] 2024-02-04 02:25:50,122 (trainer:753) INFO: 18epoch:train:14001-14100batch: iter_time=9.090e-05, forward_time=0.160, loss_ctc=74.458, loss_interctc_layer6=81.265, loss_interctc_layer12=67.587, loss_interctc_layer15=62.084, loss_interctc_layer21=76.800, loss=72.439, backward_time=0.442, grad_norm=58.304, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.141, optim0_lr0=9.445e-05, train_time=3.180 +[gpua007:0/64] 2024-02-04 02:31:02,153 (trainer:753) INFO: 18epoch:train:14101-14200batch: iter_time=8.949e-05, forward_time=0.158, loss_ctc=84.143, loss_interctc_layer6=86.695, loss_interctc_layer12=72.302, loss_interctc_layer15=66.379, loss_interctc_layer21=86.895, loss=79.283, backward_time=0.457, grad_norm=63.405, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=9.444e-05, train_time=3.119 +[gpua007:0/64] 2024-02-04 02:36:40,060 (trainer:753) INFO: 18epoch:train:14201-14300batch: iter_time=9.430e-05, forward_time=0.171, loss_ctc=93.353, loss_interctc_layer6=93.755, loss_interctc_layer12=77.876, loss_interctc_layer15=71.549, loss_interctc_layer21=96.286, loss=86.564, backward_time=0.460, grad_norm=76.511, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=9.442e-05, train_time=3.380 +[gpua007:0/64] 2024-02-04 02:42:47,321 (trainer:753) INFO: 
18epoch:train:14301-14400batch: iter_time=1.016e-04, forward_time=0.173, loss_ctc=83.071, loss_interctc_layer6=95.182, loss_interctc_layer12=79.686, loss_interctc_layer15=73.385, loss_interctc_layer21=85.484, loss=83.362, backward_time=0.531, grad_norm=74.355, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.143, optim0_lr0=9.440e-05, train_time=3.673 +[gpua007:0/64] 2024-02-04 02:48:31,623 (trainer:753) INFO: 18epoch:train:14401-14500batch: iter_time=1.016e-04, forward_time=0.143, loss_ctc=85.038, loss_interctc_layer6=96.778, loss_interctc_layer12=80.929, loss_interctc_layer15=74.610, loss_interctc_layer21=87.352, loss=84.941, backward_time=0.558, grad_norm=79.251, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.438e-05, train_time=3.443 +[gpua007:0/64] 2024-02-04 02:53:59,027 (trainer:753) INFO: 18epoch:train:14501-14600batch: iter_time=1.050e-04, forward_time=0.143, loss_ctc=86.698, loss_interctc_layer6=89.456, loss_interctc_layer12=74.333, loss_interctc_layer15=68.334, loss_interctc_layer21=88.930, loss=81.550, backward_time=0.505, grad_norm=66.794, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.437e-05, train_time=3.274 +[gpua007:0/64] 2024-02-04 02:59:21,161 (trainer:753) INFO: 18epoch:train:14601-14700batch: iter_time=9.761e-05, forward_time=0.145, loss_ctc=84.895, loss_interctc_layer6=88.780, loss_interctc_layer12=73.700, loss_interctc_layer15=67.532, loss_interctc_layer21=87.642, loss=80.510, backward_time=0.494, grad_norm=63.941, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.435e-05, train_time=3.221 +[gpua007:0/64] 2024-02-04 03:04:36,290 (trainer:753) INFO: 18epoch:train:14701-14800batch: iter_time=9.655e-05, forward_time=0.142, loss_ctc=72.861, loss_interctc_layer6=83.215, loss_interctc_layer12=69.695, loss_interctc_layer15=64.043, loss_interctc_layer21=74.975, loss=72.958, backward_time=0.485, grad_norm=64.677, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.433e-05, train_time=3.151 +[gpua007:0/64] 2024-02-04 03:05:09,714 (trainer:684) WARNING: The grad norm is nan. Skipping updating the model. 
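
The warning above marks a skipped optimizer step: when a gradient is non-finite, the update is dropped and the dynamic loss scale is reduced, which is consistent with loss_scale falling from 4.056e+31 to roughly 2.0e+31 in the entries that follow. Below is a generic PyTorch sketch of that dynamic loss-scaling mechanism, for illustration only; it is not the ESPnet trainer code that produced this log.

# Illustrative sketch only: with a PyTorch GradScaler, a non-finite gradient makes
# scaler.step() skip the parameter update and scaler.update() shrink the scale,
# matching the loss_scale drop seen after the "grad norm is nan" warning above.
import torch

model = torch.nn.Linear(8, 4)
optim = torch.optim.AdamW(model.parameters(), lr=1e-3)
scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())

x, y = torch.randn(16, 8), torch.randn(16, 4)
loss = torch.nn.functional.mse_loss(model(x), y)
scaler.scale(loss).backward()
scaler.step(optim)    # skipped internally if any grad is inf/nan
scaler.update()       # reduces the scale after a skipped step, may grow it otherwise
optim.zero_grad()
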
+[gpua007:0/64] 2024-02-04 03:10:22,547 (trainer:753) INFO: 18epoch:train:14801-14900batch: iter_time=1.000e-04, forward_time=0.142, loss_ctc=77.752, loss_interctc_layer6=91.185, loss_interctc_layer12=76.579, loss_interctc_layer15=70.702, loss_interctc_layer21=80.062, loss=79.256, backward_time=0.547, grad_norm=93.961, clip=100.000, loss_scale=2.151e+31, optim_step_time=0.138, optim0_lr0=9.431e-05, train_time=3.462 +[gpua007:0/64] 2024-02-04 03:15:41,039 (trainer:753) INFO: 18epoch:train:14901-15000batch: iter_time=9.425e-05, forward_time=0.142, loss_ctc=82.992, loss_interctc_layer6=96.807, loss_interctc_layer12=81.486, loss_interctc_layer15=75.652, loss_interctc_layer21=85.536, loss=84.495, backward_time=0.459, grad_norm=83.418, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.430e-05, train_time=3.185 +[gpua007:0/64] 2024-02-04 03:47:56,520 (trainer:352) INFO: 18epoch results: [train] iter_time=0.272, forward_time=0.158, loss_ctc=84.450, loss_interctc_layer6=89.849, loss_interctc_layer12=75.209, loss_interctc_layer15=69.329, loss_interctc_layer21=86.985, loss=81.165, backward_time=0.493, grad_norm=73.875, clip=100.000, loss_scale=4.556e+31, optim_step_time=0.139, optim0_lr0=9.563e-05, train_time=3.721, time=15 hours, 30 minutes and 43.87 seconds, total_count=270000, gpu_max_cached_mem_GB=34.398, [valid] loss_ctc=47.570, cer_ctc=0.221, loss_interctc_layer6=52.186, cer_interctc_layer6=0.236, loss_interctc_layer12=39.442, cer_interctc_layer12=0.169, loss_interctc_layer15=34.954, cer_interctc_layer15=0.142, loss_interctc_layer21=50.028, cer_interctc_layer21=0.233, loss=44.836, time=31 minutes and 49.47 seconds, total_count=84078, gpu_max_cached_mem_GB=34.398 +[gpua007:0/64] 2024-02-04 03:48:16,568 (trainer:407) INFO: The best model has been updated: valid.cer_ctc, valid.loss_ctc, valid.total_count +[gpua007:0/64] 2024-02-04 03:48:16,614 (trainer:461) INFO: The model files were removed: exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/13epoch.pth +[gpua007:0/64] 2024-02-04 03:48:16,720 (trainer:286) INFO: 19/45epoch started. Estimated time to finish: 2 weeks, 4 days and 7 hours +[gpua007:0/64] 2024-02-04 03:48:16,983 (multiple_iter_factory:32) INFO: Building 0th iter-factory... 
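
The "18epoch results" entry above summarizes averaged train statistics and the per-layer validation losses/CERs, and the next entries show that checkpoint selection is driven by valid.cer_ctc and valid.loss_ctc, with older epoch files pruned. A small post-processing sketch follows, assuming a log file shaped like this one; the default path "train.17.log" and the script itself are hypothetical, not part of ESPnet.

# Hypothetical post-processing: collect the [valid] cer_ctc reported in each
# "NNepoch results:" entry of a train.log and print the epoch with the lowest
# value, mirroring the "best model has been updated: valid.cer_ctc" criterion above.
import re
import sys

pattern = re.compile(r"(\d+)epoch results:.*?\[valid\].*?cer_ctc=([0-9.]+)")
best = None
with open(sys.argv[1] if len(sys.argv) > 1 else "train.17.log") as f:
    for line in f:
        m = pattern.search(line)
        if m:
            epoch, cer = int(m.group(1)), float(m.group(2))
            if best is None or cer < best[1]:
                best = (epoch, cer)
print("best epoch by valid cer_ctc:", best)
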
+[gpua007:0/64] 2024-02-04 03:48:35,946 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-04 03:48:39,686 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.7", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.7", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.7", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.7", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-04 03:48:39,686 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.7, +[gpua007:0/64] 2024-02-04 03:48:39,690 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-04 03:58:43,828 (trainer:753) INFO: 19epoch:train:1-100batch: iter_time=3.120, forward_time=0.209, loss_ctc=84.763, loss_interctc_layer6=90.337, loss_interctc_layer12=75.997, loss_interctc_layer15=71.179, loss_interctc_layer21=87.336, loss=81.922, backward_time=0.400, grad_norm=104.797, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.428e-05, train_time=6.269 +[gpua007:0/64] 2024-02-04 04:03:22,522 (trainer:753) INFO: 19epoch:train:101-200batch: iter_time=9.783e-05, forward_time=0.140, loss_ctc=79.074, loss_interctc_layer6=83.666, loss_interctc_layer12=70.052, loss_interctc_layer15=64.836, loss_interctc_layer21=81.554, loss=75.836, backward_time=0.389, grad_norm=64.904, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.426e-05, train_time=2.787 +[gpua007:0/64] 2024-02-04 04:08:21,008 (trainer:753) INFO: 19epoch:train:201-300batch: iter_time=9.917e-05, forward_time=0.141, loss_ctc=76.626, loss_interctc_layer6=82.877, loss_interctc_layer12=69.989, loss_interctc_layer15=64.948, loss_interctc_layer21=78.790, loss=74.646, backward_time=0.436, grad_norm=69.449, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.424e-05, train_time=2.985 +[gpua007:0/64] 2024-02-04 04:13:15,600 (trainer:753) INFO: 19epoch:train:301-400batch: iter_time=1.067e-04, forward_time=0.143, loss_ctc=86.440, loss_interctc_layer6=85.851, loss_interctc_layer12=71.487, loss_interctc_layer15=65.642, loss_interctc_layer21=89.213, loss=79.727, backward_time=0.422, grad_norm=57.948, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.423e-05, train_time=2.946 +[gpua007:0/64] 2024-02-04 04:18:32,079 (trainer:753) INFO: 19epoch:train:401-500batch: iter_time=1.055e-04, forward_time=0.142, loss_ctc=89.350, loss_interctc_layer6=96.343, loss_interctc_layer12=80.337, loss_interctc_layer15=73.903, loss_interctc_layer21=92.328, loss=86.452, backward_time=0.461, grad_norm=80.793, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.421e-05, train_time=3.165 +[gpua007:0/64] 2024-02-04 04:23:49,852 (trainer:753) INFO: 19epoch:train:501-600batch: iter_time=1.034e-04, forward_time=0.142, loss_ctc=78.651, loss_interctc_layer6=90.692, loss_interctc_layer12=76.419, loss_interctc_layer15=70.571, loss_interctc_layer21=80.680, loss=79.403, backward_time=0.452, grad_norm=86.206, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.419e-05, train_time=3.178 +[gpua007:0/64] 2024-02-04 04:29:14,484 (trainer:753) INFO: 19epoch:train:601-700batch: 
iter_time=1.044e-04, forward_time=0.142, loss_ctc=92.749, loss_interctc_layer6=97.403, loss_interctc_layer12=81.860, loss_interctc_layer15=75.709, loss_interctc_layer21=95.372, loss=88.619, backward_time=0.481, grad_norm=99.681, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.417e-05, train_time=3.246 +[gpua007:0/64] 2024-02-04 04:34:28,989 (trainer:753) INFO: 19epoch:train:701-800batch: iter_time=1.032e-04, forward_time=0.142, loss_ctc=79.340, loss_interctc_layer6=90.165, loss_interctc_layer12=75.496, loss_interctc_layer15=69.695, loss_interctc_layer21=81.461, loss=79.232, backward_time=0.467, grad_norm=150.530, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.416e-05, train_time=3.145 +[gpua007:0/64] 2024-02-04 04:40:11,772 (trainer:753) INFO: 19epoch:train:801-900batch: iter_time=9.978e-05, forward_time=0.144, loss_ctc=88.741, loss_interctc_layer6=96.524, loss_interctc_layer12=81.112, loss_interctc_layer15=75.011, loss_interctc_layer21=91.424, loss=86.563, backward_time=0.542, grad_norm=65.975, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.414e-05, train_time=3.428 +[gpua007:0/64] 2024-02-04 04:45:27,103 (trainer:753) INFO: 19epoch:train:901-1000batch: iter_time=1.003e-04, forward_time=0.142, loss_ctc=77.841, loss_interctc_layer6=87.297, loss_interctc_layer12=73.138, loss_interctc_layer15=67.500, loss_interctc_layer21=80.016, loss=77.158, backward_time=0.446, grad_norm=63.760, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.412e-05, train_time=3.153 +[gpua007:0/64] 2024-02-04 04:50:49,068 (trainer:753) INFO: 19epoch:train:1001-1100batch: iter_time=9.638e-05, forward_time=0.142, loss_ctc=85.860, loss_interctc_layer6=87.603, loss_interctc_layer12=73.642, loss_interctc_layer15=68.019, loss_interctc_layer21=88.375, loss=80.700, backward_time=0.462, grad_norm=64.507, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.410e-05, train_time=3.219 +[gpua007:0/64] 2024-02-04 04:56:25,310 (trainer:753) INFO: 19epoch:train:1101-1200batch: iter_time=1.010e-04, forward_time=0.141, loss_ctc=62.726, loss_interctc_layer6=71.783, loss_interctc_layer12=59.783, loss_interctc_layer15=55.011, loss_interctc_layer21=64.575, loss=62.776, backward_time=0.462, grad_norm=55.105, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.409e-05, train_time=3.362 +[gpua007:0/64] 2024-02-04 04:59:35,970 (multiple_iter_factory:32) INFO: Building 1th iter-factory... 
+[gpua007:0/64] 2024-02-04 04:59:55,198 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-04 04:59:58,703 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.5", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.5", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.5", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.5", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-04 04:59:58,703 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.5, +[gpua007:0/64] 2024-02-04 04:59:58,707 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-04 05:11:06,000 (trainer:753) INFO: 19epoch:train:1201-1300batch: iter_time=2.945, forward_time=0.272, loss_ctc=71.633, loss_interctc_layer6=84.980, loss_interctc_layer12=71.240, loss_interctc_layer15=65.769, loss_interctc_layer21=73.812, loss=73.487, backward_time=0.467, grad_norm=63.510, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.153, optim0_lr0=9.407e-05, train_time=8.805 +[gpua007:0/64] 2024-02-04 05:15:52,418 (trainer:753) INFO: 19epoch:train:1301-1400batch: iter_time=8.470e-05, forward_time=0.141, loss_ctc=86.219, loss_interctc_layer6=88.980, loss_interctc_layer12=74.257, loss_interctc_layer15=69.204, loss_interctc_layer21=88.647, loss=81.461, backward_time=0.407, grad_norm=67.943, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.405e-05, train_time=2.865 +[gpua007:0/64] 2024-02-04 05:21:11,069 (trainer:753) INFO: 19epoch:train:1401-1500batch: iter_time=8.443e-05, forward_time=0.142, loss_ctc=86.919, loss_interctc_layer6=88.450, loss_interctc_layer12=74.210, loss_interctc_layer15=68.481, loss_interctc_layer21=89.572, loss=81.526, backward_time=0.474, grad_norm=61.153, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.403e-05, train_time=3.186 +[gpua007:0/64] 2024-02-04 05:26:14,533 (trainer:753) INFO: 19epoch:train:1501-1600batch: iter_time=8.799e-05, forward_time=0.141, loss_ctc=66.750, loss_interctc_layer6=69.796, loss_interctc_layer12=58.003, loss_interctc_layer15=53.263, loss_interctc_layer21=69.045, loss=63.371, backward_time=0.424, grad_norm=55.535, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.402e-05, train_time=3.034 +[gpua007:0/64] 2024-02-04 05:31:21,796 (trainer:753) INFO: 19epoch:train:1601-1700batch: iter_time=8.774e-05, forward_time=0.142, loss_ctc=92.321, loss_interctc_layer6=97.445, loss_interctc_layer12=81.409, loss_interctc_layer15=74.898, loss_interctc_layer21=95.548, loss=88.324, backward_time=0.445, grad_norm=73.126, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.400e-05, train_time=3.072 +[gpua007:0/64] 2024-02-04 05:36:47,984 (trainer:753) INFO: 19epoch:train:1701-1800batch: iter_time=9.347e-05, forward_time=0.144, loss_ctc=85.929, loss_interctc_layer6=92.879, loss_interctc_layer12=77.865, loss_interctc_layer15=71.750, loss_interctc_layer21=88.428, loss=83.370, backward_time=0.468, grad_norm=72.001, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.398e-05, train_time=3.262 +[gpua007:0/64] 2024-02-04 05:42:18,846 (trainer:753) INFO: 19epoch:train:1801-1900batch: 
iter_time=8.910e-05, forward_time=0.142, loss_ctc=80.447, loss_interctc_layer6=91.470, loss_interctc_layer12=76.262, loss_interctc_layer15=70.097, loss_interctc_layer21=82.700, loss=80.195, backward_time=0.478, grad_norm=57.907, clip=100.000, loss_scale=3.915e+31, optim_step_time=0.137, optim0_lr0=9.397e-05, train_time=3.308 +[gpua007:0/64] 2024-02-04 05:47:53,573 (trainer:753) INFO: 19epoch:train:1901-2000batch: iter_time=8.804e-05, forward_time=0.143, loss_ctc=77.878, loss_interctc_layer6=89.689, loss_interctc_layer12=75.288, loss_interctc_layer15=69.668, loss_interctc_layer21=80.106, loss=78.526, backward_time=0.497, grad_norm=76.244, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.395e-05, train_time=3.347 +[gpua007:0/64] 2024-02-04 05:53:02,880 (trainer:753) INFO: 19epoch:train:2001-2100batch: iter_time=9.709e-05, forward_time=0.143, loss_ctc=87.077, loss_interctc_layer6=95.740, loss_interctc_layer12=80.279, loss_interctc_layer15=74.098, loss_interctc_layer21=89.481, loss=85.335, backward_time=0.480, grad_norm=70.235, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.393e-05, train_time=3.093 +[gpua007:0/64] 2024-02-04 05:58:06,828 (trainer:753) INFO: 19epoch:train:2101-2200batch: iter_time=9.087e-05, forward_time=0.142, loss_ctc=85.401, loss_interctc_layer6=94.221, loss_interctc_layer12=78.859, loss_interctc_layer15=72.744, loss_interctc_layer21=87.921, loss=83.829, backward_time=0.436, grad_norm=65.306, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.391e-05, train_time=3.039 +[gpua007:0/64] 2024-02-04 06:03:28,033 (trainer:753) INFO: 19epoch:train:2201-2300batch: iter_time=9.258e-05, forward_time=0.142, loss_ctc=78.781, loss_interctc_layer6=81.963, loss_interctc_layer12=68.602, loss_interctc_layer15=63.320, loss_interctc_layer21=81.063, loss=74.746, backward_time=0.509, grad_norm=59.688, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.390e-05, train_time=3.212 +[gpua007:0/64] 2024-02-04 06:08:30,240 (trainer:753) INFO: 19epoch:train:2301-2400batch: iter_time=9.230e-05, forward_time=0.141, loss_ctc=70.313, loss_interctc_layer6=77.335, loss_interctc_layer12=64.622, loss_interctc_layer15=59.512, loss_interctc_layer21=72.340, loss=68.824, backward_time=0.416, grad_norm=63.560, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.388e-05, train_time=3.022 +[gpua007:0/64] 2024-02-04 06:13:42,621 (trainer:753) INFO: 19epoch:train:2401-2500batch: iter_time=8.882e-05, forward_time=0.173, loss_ctc=75.134, loss_interctc_layer6=82.526, loss_interctc_layer12=68.956, loss_interctc_layer15=63.446, loss_interctc_layer21=77.514, loss=73.515, backward_time=0.418, grad_norm=61.839, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.386e-05, train_time=3.124 +[gpua007:0/64] 2024-02-04 06:13:52,543 (multiple_iter_factory:32) INFO: Building 2th iter-factory... 
+[gpua007:0/64] 2024-02-04 06:14:11,351 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-04 06:14:15,084 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.6", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.6", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.6", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.6", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-04 06:14:15,084 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.6, +[gpua007:0/64] 2024-02-04 06:14:15,087 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-04 06:24:21,003 (trainer:753) INFO: 19epoch:train:2501-2600batch: iter_time=1.813, forward_time=0.174, loss_ctc=87.338, loss_interctc_layer6=88.422, loss_interctc_layer12=73.843, loss_interctc_layer15=68.039, loss_interctc_layer21=90.357, loss=81.600, backward_time=0.423, grad_norm=81.736, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=9.384e-05, train_time=6.383 +[gpua007:0/64] 2024-02-04 06:29:09,127 (trainer:753) INFO: 19epoch:train:2601-2700batch: iter_time=8.302e-05, forward_time=0.141, loss_ctc=77.995, loss_interctc_layer6=80.314, loss_interctc_layer12=67.020, loss_interctc_layer15=61.549, loss_interctc_layer21=80.479, loss=73.471, backward_time=0.401, grad_norm=98.858, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.383e-05, train_time=2.881 +[gpua007:0/64] 2024-02-04 06:34:20,460 (trainer:753) INFO: 19epoch:train:2701-2800batch: iter_time=8.631e-05, forward_time=0.142, loss_ctc=77.938, loss_interctc_layer6=81.297, loss_interctc_layer12=68.330, loss_interctc_layer15=63.168, loss_interctc_layer21=80.538, loss=74.254, backward_time=0.438, grad_norm=71.542, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.381e-05, train_time=3.113 +[gpua007:0/64] 2024-02-04 06:37:38,507 (trainer:684) WARNING: The grad norm is nan. Skipping updating the model. 
+[gpua007:0/64] 2024-02-04 06:40:07,703 (trainer:753) INFO: 19epoch:train:2801-2900batch: iter_time=9.088e-05, forward_time=0.142, loss_ctc=90.848, loss_interctc_layer6=85.552, loss_interctc_layer12=71.229, loss_interctc_layer15=65.456, loss_interctc_layer21=93.848, loss=81.387, backward_time=0.509, grad_norm=65.997, clip=100.000, loss_scale=3.278e+31, optim_step_time=0.138, optim0_lr0=9.379e-05, train_time=3.472 +[gpua007:0/64] 2024-02-04 06:45:42,395 (trainer:753) INFO: 19epoch:train:2901-3000batch: iter_time=9.401e-05, forward_time=0.143, loss_ctc=101.013, loss_interctc_layer6=94.531, loss_interctc_layer12=78.818, loss_interctc_layer15=72.391, loss_interctc_layer21=104.806, loss=90.312, backward_time=0.481, grad_norm=76.166, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.378e-05, train_time=3.347 +[gpua007:0/64] 2024-02-04 06:51:16,709 (trainer:753) INFO: 19epoch:train:3001-3100batch: iter_time=8.818e-05, forward_time=0.165, loss_ctc=81.529, loss_interctc_layer6=89.623, loss_interctc_layer12=75.181, loss_interctc_layer15=69.514, loss_interctc_layer21=83.905, loss=79.950, backward_time=0.462, grad_norm=83.759, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=9.376e-05, train_time=3.343 +[gpua007:0/64] 2024-02-04 06:56:13,726 (trainer:753) INFO: 19epoch:train:3101-3200batch: iter_time=8.821e-05, forward_time=0.145, loss_ctc=92.213, loss_interctc_layer6=95.701, loss_interctc_layer12=80.183, loss_interctc_layer15=74.061, loss_interctc_layer21=94.917, loss=87.415, backward_time=0.417, grad_norm=71.166, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.374e-05, train_time=2.970 +[gpua007:0/64] 2024-02-04 07:01:22,034 (trainer:753) INFO: 19epoch:train:3201-3300batch: iter_time=9.225e-05, forward_time=0.159, loss_ctc=84.197, loss_interctc_layer6=89.102, loss_interctc_layer12=74.309, loss_interctc_layer15=68.464, loss_interctc_layer21=86.697, loss=80.554, backward_time=0.447, grad_norm=61.785, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.143, optim0_lr0=9.372e-05, train_time=3.082 +[gpua007:0/64] 2024-02-04 07:06:31,589 (trainer:753) INFO: 19epoch:train:3301-3400batch: iter_time=9.733e-05, forward_time=0.218, loss_ctc=91.241, loss_interctc_layer6=95.465, loss_interctc_layer12=79.912, loss_interctc_layer15=73.660, loss_interctc_layer21=94.262, loss=86.908, backward_time=0.472, grad_norm=62.257, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=9.371e-05, train_time=3.096 +[gpua007:0/64] 2024-02-04 07:11:47,533 (trainer:753) INFO: 19epoch:train:3401-3500batch: iter_time=8.875e-05, forward_time=0.142, loss_ctc=78.401, loss_interctc_layer6=85.951, loss_interctc_layer12=71.876, loss_interctc_layer15=66.349, loss_interctc_layer21=80.722, loss=76.660, backward_time=0.443, grad_norm=57.162, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.369e-05, train_time=3.157 +[gpua007:0/64] 2024-02-04 07:17:37,212 (trainer:753) INFO: 19epoch:train:3501-3600batch: iter_time=8.430e-05, forward_time=0.141, loss_ctc=90.479, loss_interctc_layer6=86.883, loss_interctc_layer12=72.638, loss_interctc_layer15=66.978, loss_interctc_layer21=93.082, loss=82.012, backward_time=0.635, grad_norm=71.719, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.367e-05, train_time=3.499 +[gpua007:0/64] 2024-02-04 07:23:01,680 (trainer:753) INFO: 19epoch:train:3601-3700batch: iter_time=8.637e-05, forward_time=0.141, loss_ctc=63.938, loss_interctc_layer6=71.286, loss_interctc_layer12=59.129, 
loss_interctc_layer15=54.341, loss_interctc_layer21=65.930, loss=62.925, backward_time=0.454, grad_norm=60.273, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.366e-05, train_time=3.244 +[gpua007:0/64] 2024-02-04 07:25:55,289 (multiple_iter_factory:32) INFO: Building 3th iter-factory... +[gpua007:0/64] 2024-02-04 07:26:14,451 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-04 07:26:18,254 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.11", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.11", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.11", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.11", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-04 07:26:18,254 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.11, +[gpua007:0/64] 2024-02-04 07:26:18,258 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-04 07:33:56,504 (trainer:753) INFO: 19epoch:train:3701-3800batch: iter_time=3.422, forward_time=0.142, loss_ctc=73.520, loss_interctc_layer6=84.070, loss_interctc_layer12=70.298, loss_interctc_layer15=64.794, loss_interctc_layer21=75.790, loss=73.694, backward_time=0.409, grad_norm=57.180, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.364e-05, train_time=6.548 +[gpua007:0/64] 2024-02-04 07:38:45,808 (trainer:753) INFO: 19epoch:train:3801-3900batch: iter_time=8.910e-05, forward_time=0.167, loss_ctc=85.500, loss_interctc_layer6=88.707, loss_interctc_layer12=74.537, loss_interctc_layer15=68.294, loss_interctc_layer21=89.282, loss=81.264, backward_time=0.401, grad_norm=75.221, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.362e-05, train_time=2.893 +[gpua007:0/64] 2024-02-04 07:43:40,969 (trainer:753) INFO: 19epoch:train:3901-4000batch: iter_time=9.478e-05, forward_time=0.172, loss_ctc=87.000, loss_interctc_layer6=87.523, loss_interctc_layer12=73.459, loss_interctc_layer15=67.886, loss_interctc_layer21=89.850, loss=81.144, backward_time=0.400, grad_norm=78.007, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.360e-05, train_time=2.951 +[gpua007:0/64] 2024-02-04 07:48:32,339 (trainer:753) INFO: 19epoch:train:4001-4100batch: iter_time=8.686e-05, forward_time=0.177, loss_ctc=66.242, loss_interctc_layer6=69.241, loss_interctc_layer12=57.394, loss_interctc_layer15=52.700, loss_interctc_layer21=68.458, loss=62.807, backward_time=0.398, grad_norm=50.720, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.142, optim0_lr0=9.359e-05, train_time=2.912 +[gpua007:0/64] 2024-02-04 07:53:39,904 (trainer:753) INFO: 19epoch:train:4101-4200batch: iter_time=8.411e-05, forward_time=0.181, loss_ctc=92.200, loss_interctc_layer6=98.081, loss_interctc_layer12=81.851, loss_interctc_layer15=75.202, loss_interctc_layer21=95.399, loss=88.547, backward_time=0.466, grad_norm=72.104, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.357e-05, train_time=3.077 +[gpua007:0/64] 2024-02-04 07:58:58,813 (trainer:753) INFO: 19epoch:train:4201-4300batch: iter_time=8.531e-05, forward_time=0.187, loss_ctc=86.517, loss_interctc_layer6=92.845, 
loss_interctc_layer12=77.751, loss_interctc_layer15=71.690, loss_interctc_layer21=89.184, loss=83.598, backward_time=0.492, grad_norm=83.550, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=9.355e-05, train_time=3.189 +[gpua007:0/64] 2024-02-04 08:04:15,049 (trainer:753) INFO: 19epoch:train:4301-4400batch: iter_time=8.859e-05, forward_time=0.142, loss_ctc=80.608, loss_interctc_layer6=90.960, loss_interctc_layer12=75.825, loss_interctc_layer15=69.701, loss_interctc_layer21=83.048, loss=80.028, backward_time=0.515, grad_norm=93.262, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.354e-05, train_time=3.161 +[gpua007:0/64] 2024-02-04 08:09:12,784 (trainer:753) INFO: 19epoch:train:4401-4500batch: iter_time=8.991e-05, forward_time=0.142, loss_ctc=77.126, loss_interctc_layer6=89.241, loss_interctc_layer12=74.663, loss_interctc_layer15=68.988, loss_interctc_layer21=79.287, loss=77.861, backward_time=0.411, grad_norm=67.774, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.352e-05, train_time=2.979 +[gpua007:0/64] 2024-02-04 08:14:31,384 (trainer:753) INFO: 19epoch:train:4501-4600batch: iter_time=9.269e-05, forward_time=0.143, loss_ctc=85.696, loss_interctc_layer6=95.018, loss_interctc_layer12=79.329, loss_interctc_layer15=73.072, loss_interctc_layer21=88.204, loss=84.264, backward_time=0.436, grad_norm=75.508, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.350e-05, train_time=3.186 +[gpua007:0/64] 2024-02-04 08:19:47,829 (trainer:753) INFO: 19epoch:train:4601-4700batch: iter_time=8.899e-05, forward_time=0.142, loss_ctc=83.853, loss_interctc_layer6=93.355, loss_interctc_layer12=78.153, loss_interctc_layer15=71.889, loss_interctc_layer21=86.365, loss=82.723, backward_time=0.454, grad_norm=68.163, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.349e-05, train_time=3.164 +[gpua007:0/64] 2024-02-04 08:25:19,491 (trainer:753) INFO: 19epoch:train:4701-4800batch: iter_time=9.142e-05, forward_time=0.143, loss_ctc=77.863, loss_interctc_layer6=81.335, loss_interctc_layer12=68.114, loss_interctc_layer15=62.716, loss_interctc_layer21=80.262, loss=74.058, backward_time=0.475, grad_norm=88.485, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.347e-05, train_time=3.316 +[gpua007:0/64] 2024-02-04 08:30:37,615 (trainer:753) INFO: 19epoch:train:4801-4900batch: iter_time=9.340e-05, forward_time=0.141, loss_ctc=68.377, loss_interctc_layer6=76.261, loss_interctc_layer12=63.451, loss_interctc_layer15=58.320, loss_interctc_layer21=70.382, loss=67.358, backward_time=0.491, grad_norm=56.246, clip=100.000, loss_scale=2.799e+31, optim_step_time=0.137, optim0_lr0=9.345e-05, train_time=3.181 +[gpua007:0/64] 2024-02-04 08:35:31,752 (trainer:753) INFO: 19epoch:train:4901-5000batch: iter_time=9.120e-05, forward_time=0.158, loss_ctc=73.781, loss_interctc_layer6=81.851, loss_interctc_layer12=68.304, loss_interctc_layer15=62.724, loss_interctc_layer21=75.940, loss=72.520, backward_time=0.406, grad_norm=64.663, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.343e-05, train_time=2.941 +[gpua007:0/64] 2024-02-04 08:35:51,783 (multiple_iter_factory:32) INFO: Building 4th iter-factory... 
+[gpua007:0/64] 2024-02-04 08:36:10,825 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-04 08:36:14,288 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.0", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.0", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.0", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.0", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-04 08:36:14,288 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.0, +[gpua007:0/64] 2024-02-04 08:36:14,500 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-04 08:50:16,779 (trainer:753) INFO: 19epoch:train:5001-5100batch: iter_time=2.988, forward_time=0.181, loss_ctc=87.736, loss_interctc_layer6=88.661, loss_interctc_layer12=74.339, loss_interctc_layer15=68.753, loss_interctc_layer21=90.506, loss=81.999, backward_time=0.462, grad_norm=61.830, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.342e-05, train_time=8.849 +[gpua007:0/64] 2024-02-04 08:56:12,988 (trainer:753) INFO: 19epoch:train:5101-5200batch: iter_time=8.479e-05, forward_time=0.143, loss_ctc=77.509, loss_interctc_layer6=79.792, loss_interctc_layer12=66.561, loss_interctc_layer15=61.136, loss_interctc_layer21=80.052, loss=73.010, backward_time=0.586, grad_norm=54.917, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.340e-05, train_time=3.563 +[gpua007:0/64] 2024-02-04 09:03:07,408 (trainer:753) INFO: 19epoch:train:5201-5300batch: iter_time=8.728e-05, forward_time=0.142, loss_ctc=79.580, loss_interctc_layer6=81.782, loss_interctc_layer12=68.705, loss_interctc_layer15=63.595, loss_interctc_layer21=82.079, loss=75.148, backward_time=0.554, grad_norm=59.151, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.338e-05, train_time=4.144 +[gpua007:0/64] 2024-02-04 09:10:06,831 (trainer:753) INFO: 19epoch:train:5301-5400batch: iter_time=9.074e-05, forward_time=0.142, loss_ctc=89.548, loss_interctc_layer6=84.170, loss_interctc_layer12=69.911, loss_interctc_layer15=64.005, loss_interctc_layer21=92.598, loss=80.046, backward_time=0.582, grad_norm=64.424, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.337e-05, train_time=4.194 +[gpua007:0/64] 2024-02-04 09:17:28,767 (trainer:753) INFO: 19epoch:train:5401-5500batch: iter_time=9.030e-05, forward_time=0.142, loss_ctc=99.900, loss_interctc_layer6=94.853, loss_interctc_layer12=78.874, loss_interctc_layer15=72.396, loss_interctc_layer21=103.136, loss=89.832, backward_time=0.958, grad_norm=99.910, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.335e-05, train_time=4.419 +[gpua007:0/64] 2024-02-04 09:23:19,625 (trainer:753) INFO: 19epoch:train:5501-5600batch: iter_time=8.885e-05, forward_time=0.143, loss_ctc=81.122, loss_interctc_layer6=89.805, loss_interctc_layer12=75.376, loss_interctc_layer15=69.583, loss_interctc_layer21=83.427, loss=79.863, backward_time=0.494, grad_norm=64.916, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.333e-05, train_time=3.508 +[gpua007:0/64] 2024-02-04 09:30:11,116 (trainer:753) INFO: 19epoch:train:5601-5700batch: 
iter_time=0.001, forward_time=0.264, loss_ctc=93.190, loss_interctc_layer6=95.465, loss_interctc_layer12=79.961, loss_interctc_layer15=73.743, loss_interctc_layer21=96.139, loss=87.700, backward_time=0.824, grad_norm=87.554, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.195, optim0_lr0=9.332e-05, train_time=4.113 +[gpua007:0/64] 2024-02-04 09:37:03,774 (trainer:753) INFO: 19epoch:train:5701-5800batch: iter_time=9.641e-05, forward_time=0.144, loss_ctc=83.673, loss_interctc_layer6=89.825, loss_interctc_layer12=74.920, loss_interctc_layer15=69.097, loss_interctc_layer21=86.202, loss=80.743, backward_time=0.630, grad_norm=62.953, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.330e-05, train_time=4.128 +[gpua007:0/64] 2024-02-04 09:44:04,736 (trainer:753) INFO: 19epoch:train:5801-5900batch: iter_time=1.005e-04, forward_time=0.144, loss_ctc=89.830, loss_interctc_layer6=95.254, loss_interctc_layer12=79.624, loss_interctc_layer15=73.355, loss_interctc_layer21=92.747, loss=86.162, backward_time=0.723, grad_norm=119.676, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.328e-05, train_time=4.209 +[gpua007:0/64] 2024-02-04 09:50:26,639 (trainer:753) INFO: 19epoch:train:5901-6000batch: iter_time=9.850e-05, forward_time=0.143, loss_ctc=77.969, loss_interctc_layer6=85.942, loss_interctc_layer12=71.786, loss_interctc_layer15=65.974, loss_interctc_layer21=80.112, loss=76.357, backward_time=0.599, grad_norm=87.305, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=9.326e-05, train_time=3.820 +[gpua007:0/64] 2024-02-04 09:57:32,079 (trainer:753) INFO: 19epoch:train:6001-6100batch: iter_time=9.759e-05, forward_time=0.175, loss_ctc=89.700, loss_interctc_layer6=86.924, loss_interctc_layer12=72.658, loss_interctc_layer15=66.988, loss_interctc_layer21=92.344, loss=81.723, backward_time=0.705, grad_norm=58.646, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.148, optim0_lr0=9.325e-05, train_time=4.254 +[gpua007:0/64] 2024-02-04 10:03:46,889 (trainer:753) INFO: 19epoch:train:6101-6200batch: iter_time=9.252e-05, forward_time=0.233, loss_ctc=62.021, loss_interctc_layer6=70.673, loss_interctc_layer12=58.529, loss_interctc_layer15=53.675, loss_interctc_layer21=63.910, loss=61.762, backward_time=0.629, grad_norm=59.396, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.323e-05, train_time=3.746 +[gpua007:0/64] 2024-02-04 10:07:09,400 (multiple_iter_factory:32) INFO: Building 5th iter-factory... 
+[gpua007:0/64] 2024-02-04 10:07:28,796 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-04 10:07:32,374 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.3", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.3", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.3", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.3", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-04 10:07:32,374 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.3, +[gpua007:0/64] 2024-02-04 10:07:32,377 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-04 10:15:01,903 (trainer:753) INFO: 19epoch:train:6201-6300batch: iter_time=3.363, forward_time=0.142, loss_ctc=73.787, loss_interctc_layer6=84.346, loss_interctc_layer12=70.466, loss_interctc_layer15=64.842, loss_interctc_layer21=76.150, loss=73.918, backward_time=0.525, grad_norm=61.831, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.321e-05, train_time=6.752 +[gpua007:0/64] 2024-02-04 10:19:41,556 (trainer:753) INFO: 19epoch:train:6301-6400batch: iter_time=9.614e-05, forward_time=0.142, loss_ctc=85.854, loss_interctc_layer6=89.067, loss_interctc_layer12=74.260, loss_interctc_layer15=69.402, loss_interctc_layer21=88.420, loss=81.401, backward_time=0.395, grad_norm=66.958, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.320e-05, train_time=2.796 +[gpua007:0/64] 2024-02-04 10:24:46,927 (trainer:753) INFO: 19epoch:train:6401-6500batch: iter_time=9.197e-05, forward_time=0.141, loss_ctc=85.407, loss_interctc_layer6=86.038, loss_interctc_layer12=72.150, loss_interctc_layer15=66.362, loss_interctc_layer21=88.402, loss=79.672, backward_time=0.428, grad_norm=60.398, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.318e-05, train_time=3.054 +[gpua007:0/64] 2024-02-04 10:29:44,761 (trainer:753) INFO: 19epoch:train:6501-6600batch: iter_time=9.469e-05, forward_time=0.141, loss_ctc=66.314, loss_interctc_layer6=68.941, loss_interctc_layer12=57.047, loss_interctc_layer15=52.279, loss_interctc_layer21=68.797, loss=62.676, backward_time=0.444, grad_norm=51.394, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.316e-05, train_time=2.978 +[gpua007:0/64] 2024-02-04 10:34:55,641 (trainer:753) INFO: 19epoch:train:6601-6700batch: iter_time=1.046e-04, forward_time=0.150, loss_ctc=90.586, loss_interctc_layer6=96.675, loss_interctc_layer12=80.679, loss_interctc_layer15=74.100, loss_interctc_layer21=93.606, loss=87.129, backward_time=0.481, grad_norm=72.969, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.315e-05, train_time=3.109 +[gpua007:0/64] 2024-02-04 10:41:11,408 (trainer:753) INFO: 19epoch:train:6701-6800batch: iter_time=1.035e-04, forward_time=0.241, loss_ctc=85.111, loss_interctc_layer6=91.945, loss_interctc_layer12=76.865, loss_interctc_layer15=70.842, loss_interctc_layer21=87.807, loss=82.514, backward_time=0.599, grad_norm=94.981, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.142, optim0_lr0=9.313e-05, train_time=3.757 +[gpua007:0/64] 2024-02-04 10:44:20,239 (trainer:684) WARNING: The grad norm is nan. 
Skipping updating the model. +[gpua007:0/64] 2024-02-04 10:46:09,627 (trainer:753) INFO: 19epoch:train:6801-6900batch: iter_time=9.871e-05, forward_time=0.145, loss_ctc=80.396, loss_interctc_layer6=91.000, loss_interctc_layer12=75.790, loss_interctc_layer15=69.627, loss_interctc_layer21=82.591, loss=79.881, backward_time=0.468, grad_norm=61.428, clip=100.000, loss_scale=4.179e+31, optim_step_time=0.137, optim0_lr0=9.311e-05, train_time=2.980 +[gpua007:0/64] 2024-02-04 10:51:51,226 (trainer:753) INFO: 19epoch:train:6901-7000batch: iter_time=1.032e-04, forward_time=0.143, loss_ctc=76.505, loss_interctc_layer6=88.245, loss_interctc_layer12=73.867, loss_interctc_layer15=68.259, loss_interctc_layer21=78.613, loss=77.098, backward_time=0.497, grad_norm=94.346, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.310e-05, train_time=3.418 +[gpua007:0/64] 2024-02-04 10:56:55,000 (trainer:753) INFO: 19epoch:train:7001-7100batch: iter_time=9.643e-05, forward_time=0.143, loss_ctc=85.996, loss_interctc_layer6=95.396, loss_interctc_layer12=79.547, loss_interctc_layer15=73.320, loss_interctc_layer21=88.635, loss=84.579, backward_time=0.424, grad_norm=65.590, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.308e-05, train_time=3.038 +[gpua007:0/64] 2024-02-04 11:01:46,418 (trainer:753) INFO: 19epoch:train:7101-7200batch: iter_time=8.594e-05, forward_time=0.142, loss_ctc=83.674, loss_interctc_layer6=93.584, loss_interctc_layer12=78.109, loss_interctc_layer15=71.836, loss_interctc_layer21=86.502, loss=82.741, backward_time=0.420, grad_norm=71.834, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.306e-05, train_time=2.914 +[gpua007:0/64] 2024-02-04 11:06:48,328 (trainer:753) INFO: 19epoch:train:7201-7300batch: iter_time=8.544e-05, forward_time=0.141, loss_ctc=77.512, loss_interctc_layer6=81.247, loss_interctc_layer12=67.947, loss_interctc_layer15=62.614, loss_interctc_layer21=79.810, loss=73.826, backward_time=0.449, grad_norm=57.011, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.305e-05, train_time=3.019 +[gpua007:0/64] 2024-02-04 11:11:49,163 (trainer:753) INFO: 19epoch:train:7301-7400batch: iter_time=8.564e-05, forward_time=0.140, loss_ctc=68.038, loss_interctc_layer6=75.470, loss_interctc_layer12=62.793, loss_interctc_layer15=57.600, loss_interctc_layer21=70.081, loss=66.796, backward_time=0.424, grad_norm=63.468, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.303e-05, train_time=3.008 +[gpua007:0/64] 2024-02-04 11:17:15,259 (trainer:753) INFO: 19epoch:train:7401-7500batch: iter_time=8.772e-05, forward_time=0.143, loss_ctc=73.471, loss_interctc_layer6=82.044, loss_interctc_layer12=68.230, loss_interctc_layer15=62.631, loss_interctc_layer21=75.707, loss=72.417, backward_time=0.469, grad_norm=67.679, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.301e-05, train_time=3.261 +[gpua007:0/64] 2024-02-04 11:17:34,706 (multiple_iter_factory:32) INFO: Building 6th iter-factory... 
+[gpua007:0/64] 2024-02-04 11:17:53,123 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') +[gpua007:0/64] 2024-02-04 11:17:56,656 (abs_task:1660) INFO: [train] dataset: +ESPnetDataset( + speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.4", "type": "kaldi_ark"} + text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.4", "type": "text"} + text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.4", "type": "text"} + text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.4", "type": "text"} + preprocess: ) +[gpua007:0/64] 2024-02-04 11:17:56,656 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.4, +[gpua007:0/64] 2024-02-04 11:17:56,667 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 +[gpua007:0/64] 2024-02-04 11:31:35,871 (trainer:753) INFO: 19epoch:train:7501-7600batch: iter_time=3.296, forward_time=0.174, loss_ctc=86.192, loss_interctc_layer6=87.420, loss_interctc_layer12=72.884, loss_interctc_layer15=67.203, loss_interctc_layer21=88.620, loss=80.464, backward_time=0.401, grad_norm=102.802, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.141, optim0_lr0=9.300e-05, train_time=8.606 +srun: Job step aborted: Waiting up to 32 seconds for job step to finish. +slurmstepd: error: *** STEP 2915965.0 ON gpua007 CANCELLED AT 2024-02-04T11:32:35 ***