# Running on gpua003.delta.ncsa.illinois.edu
# Started at Mon Feb 5 00:18:52 CST 2024
# SLURMD_NODENAME=gpua003
# SLURM_CLUSTER_NAME=delta
# SLURM_CONF=/var/spool/slurmd/conf-cache/slurm.conf
# SLURM_CPUS_ON_NODE=64
# SLURM_CPUS_PER_TASK=64
# SLURM_EXPORT_ENV=PATH
# SLURM_GET_USER_ENV=1
# SLURM_GPUS_ON_NODE=4
# SLURM_GTIDS=0
# SLURM_JOBID=2937932
# SLURM_JOB_ACCOUNT=bbjs-delta-gpu
# SLURM_JOB_CPUS_PER_NODE='64(x16)'
# SLURM_JOB_END_TIME=1707286713
# SLURM_JOB_GID=202
# SLURM_JOB_GPUS=0,1,2,3
# SLURM_JOB_ID=2937932
# SLURM_JOB_NAME=exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/train.log
# SLURM_JOB_NODELIST='gpua[003,014,025,027,029,036,048,054,057,068,075,077-080,096]'
# SLURM_JOB_NUM_NODES=16
# SLURM_JOB_PARTITION=gpuA100x4
# SLURM_JOB_QOS=bbjs-delta-gpu
# SLURM_JOB_START_TIME=1707113913
# SLURM_JOB_UID=68077
# SLURM_JOB_USER=peng6
# SLURM_LOCALID=0
# SLURM_MEM_PER_NODE=240000
# SLURM_MPI_TYPE=pmi2
# SLURM_NNODES=16
# SLURM_NODEID=0
# SLURM_NODELIST='gpua[003,014,025,027,029,036,048,054,057,068,075,077-080,096]'
# SLURM_NODE_ALIASES='(null)'
# SLURM_OPEN_MODE=a
# SLURM_PRIO_PROCESS=0
# SLURM_PROCID=0
# SLURM_SUBMIT_DIR=/scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1
# SLURM_SUBMIT_HOST=dt-login03.delta.ncsa.illinois.edu
# SLURM_TASKS_PER_NODE='1(x16)'
# SLURM_TASK_PID=274205
# SLURM_TOPOLOGY_ADDR=ss00.ss05.gpua003
# SLURM_TOPOLOGY_ADDR_PATTERN=switch.switch.node
# SLURM_WORKING_CLUSTER=delta:dt-sched:6817:9984:109
# srun --export=ALL python3 -m espnet2.bin.s2t_train --use_preprocessor true --bpemodel data/token_list/bpe_unigram50000/bpe.model --token_type bpe --token_list data/token_list/bpe_unigram50000/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/dev_v3/wav.scp,speech,kaldi_ark --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/speech_shape --resume true --fold_length 80000 --output_dir exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000 --config conf/train_s2t_multitask-ctc_ebf27_conv2d8_size1024.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/wav.scp,speech,kaldi_ark --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/speech_shape --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.prev,text_prev,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_prev_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text.ctc,text_ctc,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_ctc_shape.bpe --fold_length 150 --train_data_path_and_name_and_type exp/s2t_stats_raw_bpe50000/splits12/text,text,text --train_shape_file exp/s2t_stats_raw_bpe50000/splits12/text_shape.bpe --multiple_iterator true --valid_data_path_and_name_and_type dump/raw/dev_v3/text.prev,text_prev,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_prev_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text.ctc,text_ctc,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_ctc_shape.bpe --valid_data_path_and_name_and_type dump/raw/dev_v3/text,text,text --valid_shape_file exp/s2t_stats_raw_bpe50000/valid/text_shape.bpe --ngpu 4 --multiprocessing_distributed true --dist_launcher slurm --dist_init_method file:///scratch/bbjs/peng6/espnet-owsm-ctc-2/egs2/owsm_v3.1_ctc/s2t1/exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/.dist_init_a23ef2e3-2c6c-4302-955b-dadd6d16d6c9
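The job launches one srun task per node and lets ESPnet spawn 4 worker processes per node (--ngpu 4 --multiprocessing_distributed true), giving 16 x 4 = 64 ranks that rendezvous through the file:// path passed to --dist_init_method. The sketch below is not ESPnet's launcher; it only illustrates how such a file-based rendezvous initializes torch.distributed. RANK, WORLD_SIZE and LOCAL_RANK are assumed to be set by the surrounding launcher (with --dist_launcher slurm, ESPnet derives them from SLURM variables such as SLURM_PROCID shown above).

import os
import torch
import torch.distributed as dist

def init_distributed(init_file: str) -> int:
    # Environment variables assumed to be provided by the launcher (illustrative only).
    rank = int(os.environ["RANK"])              # 0..63 for 16 nodes x 4 GPUs
    world_size = int(os.environ["WORLD_SIZE"])  # 64 in this job
    local_rank = int(os.environ.get("LOCAL_RANK", rank % 4))
    torch.cuda.set_device(local_rank)
    dist.init_process_group(
        backend="nccl",
        init_method=f"file://{init_file}",      # rendezvous file on the shared filesystem
        rank=rank,
        world_size=world_size,
    )
    return local_rank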
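The command requests a 16 kHz waveform frontend (--frontend_conf fs=16k); the model structure printed further below shows the exact STFT/log-mel settings (n_fft=512, win_length=400, hop_length=160, 80 mel bins, fmax 8 kHz). A rough torchaudio equivalent for illustration only; ESPnet's DefaultFrontend and its mel filterbank conventions differ in detail.

import torch
import torchaudio

mel = torchaudio.transforms.MelSpectrogram(
    sample_rate=16000,   # fs=16k
    n_fft=512,
    win_length=400,      # 25 ms window
    hop_length=160,      # 10 ms hop
    f_min=0.0,
    f_max=8000.0,
    n_mels=80,
    center=True,
)
wav = torch.randn(1, 16000)            # one second of dummy 16 kHz audio
feats = torch.log(mel(wav) + 1e-10)    # (1, 80, 101) log-mel frames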
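The encoder front end, Conv2dSubsampling8 in the structure below, reduces the log-mel sequence by roughly 8x in time with three stride-2 3x3 convolutions and projects the flattened channel-frequency axis (1024 x 9 = 9216) to the 1024-dimensional model width. A minimal stand-in with illustrative names, not ESPnet's implementation:

import torch
import torch.nn as nn

class Subsample8(nn.Module):
    def __init__(self, n_mels: int = 80, d_model: int = 1024):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(1, d_model, 3, 2), nn.ReLU(),
            nn.Conv2d(d_model, d_model, 3, 2), nn.ReLU(),
            nn.Conv2d(d_model, d_model, 3, 2), nn.ReLU(),
        )
        freq = n_mels
        for _ in range(3):                # each stride-2 conv: (freq - 3)//2 + 1
            freq = (freq - 3) // 2 + 1    # 80 -> 39 -> 19 -> 9
        self.out = nn.Linear(d_model * freq, d_model)  # 1024 * 9 = 9216 -> 1024

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, time, n_mels) log-mel features
        x = self.conv(x.unsqueeze(1))     # (B, d_model, T', F')
        b, c, t, f = x.size()
        return self.out(x.transpose(1, 2).reshape(b, t, c * f))  # (B, T', d_model)

feats = torch.randn(2, 1000, 80)
print(Subsample8()(feats).shape)          # torch.Size([2, 124, 1024]), i.e. ~1000/8 frames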
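Each EBranchformerEncoderLayer printed below runs a self-attention branch and a convolutional gating MLP (cgmlp) branch in parallel, concatenates them, applies a depthwise fusion convolution (depthwise_conv_fusion), and projects back to 1024 dims (merge_proj). The sketch below is a simplified illustration of that merge pattern, not ESPnet's code: the macaron feed-forward blocks, masking, and the cross-attention present in some layers are omitted, the head count is an assumption, and a plain MLP stands in for the gating MLP.

import torch
import torch.nn as nn

class TwoBranchMerge(nn.Module):
    def __init__(self, d_model: int = 1024, n_heads: int = 16, kernel: int = 31):
        super().__init__()
        self.norm_mha = nn.LayerNorm(d_model)
        self.norm_mlp = nn.LayerNorm(d_model)
        self.attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)
        # Stand-in for ConvolutionalGatingMLP; the real block uses a spatial gating unit.
        self.mlp = nn.Sequential(nn.Linear(d_model, 4 * d_model), nn.GELU(),
                                 nn.Linear(4 * d_model, d_model))
        self.fusion = nn.Conv1d(2 * d_model, 2 * d_model, kernel,
                                padding=kernel // 2, groups=2 * d_model)  # depthwise
        self.merge_proj = nn.Linear(2 * d_model, d_model)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        q = self.norm_mha(x)
        a, _ = self.attn(q, q, q)                              # attention branch
        g = self.mlp(self.norm_mlp(x))                         # gating-MLP branch
        cat = torch.cat([a, g], dim=-1)                        # (B, T, 2*d)
        fused = self.fusion(cat.transpose(1, 2)).transpose(1, 2)
        return x + self.merge_proj(cat + fused)                # residual merge

x = torch.randn(2, 50, 1024)
print(TwoBranchMerge()(x).shape)  # torch.Size([2, 50, 1024])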
[gpua003:0/64] 2024-02-05 00:24:28,240 (distributed_c10d:319) INFO: Added key: store_based_barrier_key:1 to store for rank: 0
[gpua003:0/64] 2024-02-05 00:24:29,224 (distributed_c10d:353) INFO: Rank 0: Completed store-based barrier for key:store_based_barrier_key:1 with 64 nodes.
[gpua003:0/64] 2024-02-05 00:24:29,253 (s2t:420) INFO: Vocabulary size: 50002
[gpua003:0/64] 2024-02-05 00:24:45,463 (abs_task:1270) INFO: pytorch.version=1.13.1, cuda.available=True, cudnn.version=8500, cudnn.benchmark=False, cudnn.deterministic=True
[gpua003:0/64] 2024-02-05 00:24:45,473 (abs_task:1271) INFO: Model structure:
ESPnetS2TCTCModel(
  (frontend): DefaultFrontend(
    (stft): Stft(n_fft=512, win_length=400, hop_length=160, center=True, normalized=False, onesided=True)
    (frontend): Frontend()
    (logmel): LogMel(sr=16000, n_fft=512, n_mels=80, fmin=0, fmax=8000.0, htk=False)
  )
  (specaug): SpecAug(
    (freq_mask): MaskAlongAxis(mask_width_range=[0, 27], num_mask=2, axis=freq)
    (time_mask): MaskAlongAxisVariableMaxWidth(mask_width_ratio_range=[0.0, 0.05], num_mask=10, axis=time)
  )
  (normalize): GlobalMVN(stats_file=exp/s2t_stats_raw_bpe50000/train/feats_stats.npz, norm_means=True, norm_vars=True)
  (encoder): EBranchformerCTCEncoder(
    (embed): Conv2dSubsampling8(
      (conv): Sequential(
        (0): Conv2d(1, 1024, kernel_size=(3, 3), stride=(2, 2))
        (1): ReLU()
        (2): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(2, 2))
        (3): ReLU()
        (4): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(2, 2))
        (5): ReLU()
      )
      (out): Linear(in_features=9216, out_features=1024, bias=True)
      (pos_enc): PositionalEncoding(
        (dropout): Dropout(p=0.1, inplace=False)
      )
    )
    (encoders): MultiSequential(
      (0): EBranchformerEncoderLayer(
        (attn): MultiHeadedAttention(
          (linear_q): Linear(in_features=1024, out_features=1024, bias=True)
          (linear_k): Linear(in_features=1024, out_features=1024, bias=True)
          (linear_v): Linear(in_features=1024, out_features=1024, bias=True)
          (linear_out): Linear(in_features=1024, out_features=1024, bias=True)
          (dropout): Identity()
        )
        (cgmlp): ConvolutionalGatingMLP(
          (channel_proj1): Sequential(
            (0): Linear(in_features=1024, out_features=4096, bias=True)
            (1): GELU(approximate='none')
          )
          (csgu): ConvolutionalSpatialGatingUnit(
            (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True)
            (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048)
            (act): Identity()
            (dropout): Dropout(p=0.1, inplace=False)
          )
          (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True)
        )
        (feed_forward): PositionwiseFeedForward(
          (w_1): Linear(in_features=1024, out_features=4096, bias=True)
          (w_2): Linear(in_features=4096, out_features=1024, bias=True)
          (dropout): Dropout(p=0.1, inplace=False)
          (activation): Swish()
        )
        (feed_forward_macaron): PositionwiseFeedForward(
          (w_1): Linear(in_features=1024, out_features=4096, bias=True)
          (w_2): Linear(in_features=4096, out_features=1024, bias=True)
          (dropout): Dropout(p=0.1, inplace=False)
          (activation): Swish()
        )
        (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True)
        (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True)
        (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True)
        (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True)
        (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True)
        (dropout): Dropout(p=0.1, inplace=False)
        (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048)
        (merge_proj): Linear(in_features=2048, out_features=1024, bias=True)
      )
      (1)-(12): EBranchformerEncoderLayer( ... identical structure to (0); layers (2), (5), (8), (11) additionally contain (cross_attn): MultiHeadedAttention with the same four 1024x1024 linear projections and (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) ... )
      (13): EBranchformerEncoderLayer( (attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024,
out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (cgmlp): ConvolutionalGatingMLP( (channel_proj1): Sequential( (0): Linear(in_features=1024, out_features=4096, bias=True) (1): GELU(approximate='none') ) (csgu): ConvolutionalSpatialGatingUnit( (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (act): Identity() (dropout): Dropout(p=0.1, inplace=False) ) (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (feed_forward_macaron): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) ) (14): EBranchformerEncoderLayer( (attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (cgmlp): ConvolutionalGatingMLP( (channel_proj1): Sequential( (0): Linear(in_features=1024, out_features=4096, bias=True) (1): GELU(approximate='none') ) (csgu): ConvolutionalSpatialGatingUnit( (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (act): Identity() (dropout): Dropout(p=0.1, inplace=False) ) (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (feed_forward_macaron): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (cross_attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, 
out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) ) (15): EBranchformerEncoderLayer( (attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (cgmlp): ConvolutionalGatingMLP( (channel_proj1): Sequential( (0): Linear(in_features=1024, out_features=4096, bias=True) (1): GELU(approximate='none') ) (csgu): ConvolutionalSpatialGatingUnit( (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (act): Identity() (dropout): Dropout(p=0.1, inplace=False) ) (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (feed_forward_macaron): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) ) (16): EBranchformerEncoderLayer( (attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (cgmlp): ConvolutionalGatingMLP( (channel_proj1): Sequential( (0): Linear(in_features=1024, out_features=4096, bias=True) (1): GELU(approximate='none') ) (csgu): ConvolutionalSpatialGatingUnit( (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (act): Identity() (dropout): Dropout(p=0.1, inplace=False) ) (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (feed_forward_macaron): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, 
inplace=False) (activation): Swish() ) (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) ) (17): EBranchformerEncoderLayer( (attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (cgmlp): ConvolutionalGatingMLP( (channel_proj1): Sequential( (0): Linear(in_features=1024, out_features=4096, bias=True) (1): GELU(approximate='none') ) (csgu): ConvolutionalSpatialGatingUnit( (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (act): Identity() (dropout): Dropout(p=0.1, inplace=False) ) (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (feed_forward_macaron): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (cross_attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) ) (18): EBranchformerEncoderLayer( (attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (cgmlp): ConvolutionalGatingMLP( (channel_proj1): Sequential( (0): Linear(in_features=1024, out_features=4096, bias=True) (1): GELU(approximate='none') ) (csgu): ConvolutionalSpatialGatingUnit( (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), 
groups=2048) (act): Identity() (dropout): Dropout(p=0.1, inplace=False) ) (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (feed_forward_macaron): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) ) (19): EBranchformerEncoderLayer( (attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (cgmlp): ConvolutionalGatingMLP( (channel_proj1): Sequential( (0): Linear(in_features=1024, out_features=4096, bias=True) (1): GELU(approximate='none') ) (csgu): ConvolutionalSpatialGatingUnit( (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (act): Identity() (dropout): Dropout(p=0.1, inplace=False) ) (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (feed_forward_macaron): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) ) (20): EBranchformerEncoderLayer( (attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (cgmlp): ConvolutionalGatingMLP( (channel_proj1): Sequential( (0): Linear(in_features=1024, out_features=4096, bias=True) (1): GELU(approximate='none') ) 
(csgu): ConvolutionalSpatialGatingUnit( (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (act): Identity() (dropout): Dropout(p=0.1, inplace=False) ) (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (feed_forward_macaron): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (cross_attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) ) (21): EBranchformerEncoderLayer( (attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (cgmlp): ConvolutionalGatingMLP( (channel_proj1): Sequential( (0): Linear(in_features=1024, out_features=4096, bias=True) (1): GELU(approximate='none') ) (csgu): ConvolutionalSpatialGatingUnit( (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (act): Identity() (dropout): Dropout(p=0.1, inplace=False) ) (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (feed_forward_macaron): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) 
(merge_proj): Linear(in_features=2048, out_features=1024, bias=True) ) (22): EBranchformerEncoderLayer( (attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (cgmlp): ConvolutionalGatingMLP( (channel_proj1): Sequential( (0): Linear(in_features=1024, out_features=4096, bias=True) (1): GELU(approximate='none') ) (csgu): ConvolutionalSpatialGatingUnit( (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (act): Identity() (dropout): Dropout(p=0.1, inplace=False) ) (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (feed_forward_macaron): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) ) (23): EBranchformerEncoderLayer( (attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (cgmlp): ConvolutionalGatingMLP( (channel_proj1): Sequential( (0): Linear(in_features=1024, out_features=4096, bias=True) (1): GELU(approximate='none') ) (csgu): ConvolutionalSpatialGatingUnit( (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (act): Identity() (dropout): Dropout(p=0.1, inplace=False) ) (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (feed_forward_macaron): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_final): LayerNorm((1024,), 
eps=1e-12, elementwise_affine=True) (cross_attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) ) (24): EBranchformerEncoderLayer( (attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (cgmlp): ConvolutionalGatingMLP( (channel_proj1): Sequential( (0): Linear(in_features=1024, out_features=4096, bias=True) (1): GELU(approximate='none') ) (csgu): ConvolutionalSpatialGatingUnit( (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (act): Identity() (dropout): Dropout(p=0.1, inplace=False) ) (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (feed_forward_macaron): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) ) (25): EBranchformerEncoderLayer( (attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (cgmlp): ConvolutionalGatingMLP( (channel_proj1): Sequential( (0): Linear(in_features=1024, out_features=4096, bias=True) (1): GELU(approximate='none') ) (csgu): ConvolutionalSpatialGatingUnit( (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (act): Identity() (dropout): Dropout(p=0.1, inplace=False) ) (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, 
inplace=False) (activation): Swish() ) (feed_forward_macaron): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) ) (26): EBranchformerEncoderLayer( (attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (cgmlp): ConvolutionalGatingMLP( (channel_proj1): Sequential( (0): Linear(in_features=1024, out_features=4096, bias=True) (1): GELU(approximate='none') ) (csgu): ConvolutionalSpatialGatingUnit( (norm): LayerNorm((2048,), eps=1e-12, elementwise_affine=True) (conv): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (act): Identity() (dropout): Dropout(p=0.1, inplace=False) ) (channel_proj2): Linear(in_features=2048, out_features=1024, bias=True) ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (feed_forward_macaron): PositionwiseFeedForward( (w_1): Linear(in_features=1024, out_features=4096, bias=True) (w_2): Linear(in_features=4096, out_features=1024, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): Swish() ) (norm_ff): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_ff_macaron): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mha): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_mlp): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (norm_final): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (cross_attn): MultiHeadedAttention( (linear_q): Linear(in_features=1024, out_features=1024, bias=True) (linear_k): Linear(in_features=1024, out_features=1024, bias=True) (linear_v): Linear(in_features=1024, out_features=1024, bias=True) (linear_out): Linear(in_features=1024, out_features=1024, bias=True) (dropout): Identity() ) (norm_cross_attn): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) (depthwise_conv_fusion): Conv1d(2048, 2048, kernel_size=(31,), stride=(1,), padding=(15,), groups=2048) (merge_proj): Linear(in_features=2048, out_features=1024, bias=True) ) ) (after_norm): LayerNorm((1024,), eps=1e-12, elementwise_affine=True) (conditioning_layer): Linear(in_features=50002, out_features=1024, bias=True) ) (prompt_encoder): TransformerEncoder( (encoders): MultiSequential( (0): EncoderLayer( (self_attn): MultiHeadedAttention( (linear_q): Linear(in_features=512, out_features=512, bias=True) (linear_k): Linear(in_features=512, out_features=512, bias=True) (linear_v): Linear(in_features=512, 
out_features=512, bias=True) (linear_out): Linear(in_features=512, out_features=512, bias=True) (dropout): Identity() ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=512, out_features=2048, bias=True) (w_2): Linear(in_features=2048, out_features=512, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): ReLU() ) (norm1): LayerNorm((512,), eps=1e-12, elementwise_affine=True) (norm2): LayerNorm((512,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) ) (1): EncoderLayer( (self_attn): MultiHeadedAttention( (linear_q): Linear(in_features=512, out_features=512, bias=True) (linear_k): Linear(in_features=512, out_features=512, bias=True) (linear_v): Linear(in_features=512, out_features=512, bias=True) (linear_out): Linear(in_features=512, out_features=512, bias=True) (dropout): Identity() ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=512, out_features=2048, bias=True) (w_2): Linear(in_features=2048, out_features=512, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): ReLU() ) (norm1): LayerNorm((512,), eps=1e-12, elementwise_affine=True) (norm2): LayerNorm((512,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) ) (2): EncoderLayer( (self_attn): MultiHeadedAttention( (linear_q): Linear(in_features=512, out_features=512, bias=True) (linear_k): Linear(in_features=512, out_features=512, bias=True) (linear_v): Linear(in_features=512, out_features=512, bias=True) (linear_out): Linear(in_features=512, out_features=512, bias=True) (dropout): Identity() ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=512, out_features=2048, bias=True) (w_2): Linear(in_features=2048, out_features=512, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): ReLU() ) (norm1): LayerNorm((512,), eps=1e-12, elementwise_affine=True) (norm2): LayerNorm((512,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) ) (3): EncoderLayer( (self_attn): MultiHeadedAttention( (linear_q): Linear(in_features=512, out_features=512, bias=True) (linear_k): Linear(in_features=512, out_features=512, bias=True) (linear_v): Linear(in_features=512, out_features=512, bias=True) (linear_out): Linear(in_features=512, out_features=512, bias=True) (dropout): Identity() ) (feed_forward): PositionwiseFeedForward( (w_1): Linear(in_features=512, out_features=2048, bias=True) (w_2): Linear(in_features=2048, out_features=512, bias=True) (dropout): Dropout(p=0.1, inplace=False) (activation): ReLU() ) (norm1): LayerNorm((512,), eps=1e-12, elementwise_affine=True) (norm2): LayerNorm((512,), eps=1e-12, elementwise_affine=True) (dropout): Dropout(p=0.1, inplace=False) ) ) (after_norm): LayerNorm((512,), eps=1e-12, elementwise_affine=True) ) (embed): Embedding(50002, 512) (pos_enc): PositionalEncoding( (dropout): Dropout(p=0.0, inplace=False) ) (embed_proj): Linear(in_features=512, out_features=1024, bias=True) (prompt_proj): Linear(in_features=512, out_features=1024, bias=True) (ctc): CTC( (ctc_lo): Linear(in_features=1024, out_features=50002, bias=True) (ctc_loss): CTCLoss() ) ) Model summary: Class Name: ESPnetS2TCTCModel Total Number of model parameters: 1.01 B Number of trainable parameters: 1.01 B (100.0%) Size: 4.02 GB Type: torch.float32 [gpua003:0/64] 2024-02-05 00:24:45,474 (abs_task:1274) INFO: Optimizer: AdamW ( Parameter Group 0 amsgrad: False betas: [0.9, 0.98] capturable: False eps: 1e-06 foreach: None initial_lr: 0.0002 lr: 1.6666666666666667e-09 maximize: False 
weight_decay: 0.0 ) [gpua003:0/64] 2024-02-05 00:24:45,474 (abs_task:1275) INFO: Scheduler: PiecewiseLinearWarmupLR(warmup_steps_list=[0, 30000, 60000], warmup_lr_list=[0.0, 5e-05, 0.0002]) [gpua003:0/64] 2024-02-05 00:24:45,475 (abs_task:1284) INFO: Saving the configuration in exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/config.yaml [gpua003:0/64] 2024-02-05 00:24:51,051 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 00:24:52,061 (abs_task:1660) INFO: [valid] dataset: ESPnetDataset( speech: {"path": "dump/raw/dev_v3/wav.scp", "type": "kaldi_ark"} text_prev: {"path": "dump/raw/dev_v3/text.prev", "type": "text"} text_ctc: {"path": "dump/raw/dev_v3/text.ctc", "type": "text"} text: {"path": "dump/raw/dev_v3/text", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 00:24:52,061 (abs_task:1661) INFO: [valid] Batch sampler: UnsortedBatchSampler(N-batch=4671, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/valid/speech_shape, [gpua003:0/64] 2024-02-05 00:24:52,062 (abs_task:1662) INFO: [valid] mini-batch sizes summary: N-batch=4671, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 00:25:38,445 (trainer:167) INFO: The training was resumed using exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/checkpoint.pth gpua003:274356:274356 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.3<0> gpua003:274356:274356 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua003:274356:274356 [0] NCCL INFO cudaDriverVersion 12020 NCCL version 2.14.3+cuda11.7 [gpua003:0/64] 2024-02-05 00:25:48,156 (trainer:301) INFO: 19/45epoch started [gpua003:0/64] 2024-02-05 00:25:48,198 (multiple_iter_factory:32) INFO: Building 0th iter-factory... [gpua003:0/64] 2024-02-05 00:26:06,271 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 00:26:09,657 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.7", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.7", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.7", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.7", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 00:26:09,657 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.7, [gpua003:0/64] 2024-02-05 00:26:09,661 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 gpua025:3663865:3663865 [1] NCCL INFO cudaDriverVersion 12020 gpua025:3663865:3663865 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.25<0> gpua025:3663865:3663865 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua025:3663865:3663932 [1] NCCL INFO NET/IB : No device found. 
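
[editor's note] The optimizer and scheduler lines above fully determine the learning-rate trajectory: AdamW with betas [0.9, 0.98], eps 1e-06 and no weight decay, driven by PiecewiseLinearWarmupLR(warmup_steps_list=[0, 30000, 60000], warmup_lr_list=[0.0, 5e-05, 0.0002]), i.e. a ramp from 0 to 5e-5 over the first 30k steps and then to 2e-4 by step 60k, constant afterwards. The lr printed in the parameter group, 1.6666666666666667e-09, is exactly 5e-5 / 30000, consistent with one step along the first warmup segment. The snippet below is a minimal, illustrative stand-in for that interpolation, not ESPnet's PiecewiseLinearWarmupLR class.

import torch

def piecewise_linear_warmup_lr(step, steps=(0, 30000, 60000), lrs=(0.0, 5e-5, 2e-4)):
    # Linear interpolation between (step, lr) breakpoints; constant after the last one.
    if step >= steps[-1]:
        return lrs[-1]
    for (s0, l0), (s1, l1) in zip(zip(steps, lrs), zip(steps[1:], lrs[1:])):
        if s0 <= step < s1:
            return l0 + (l1 - l0) * (step - s0) / (s1 - s0)
    return lrs[0]

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.AdamW([param], lr=2e-4, betas=(0.9, 0.98), eps=1e-6, weight_decay=0.0)
for step in (1, 30000, 60000):
    opt.param_groups[0]["lr"] = piecewise_linear_warmup_lr(step)
    print(step, opt.param_groups[0]["lr"])   # ~1.67e-09, 5e-05, 2e-04
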
gpua025:3663865:3663932 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.25<0> [1]hsn0:141.142.145.25<0> gpua025:3663865:3663932 [1] NCCL INFO Using network Socket gpua025:3663865:3663932 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua025:3663865:3663932 [1] NCCL INFO Trees [0] 10/4/-1->9->8 [1] 10/-1/-1->9->8 gpua025:3663865:3663932 [1] NCCL INFO Channel 00/0 : 9[46000] -> 10[85000] via P2P/IPC/read gpua025:3663865:3663932 [1] NCCL INFO Channel 01/0 : 9[46000] -> 10[85000] via P2P/IPC/read gpua025:3663865:3663932 [1] NCCL INFO Connected all rings gpua025:3663865:3663932 [1] NCCL INFO Channel 00/0 : 4[7000] -> 9[46000] [receive] via NET/Socket/1 gpua025:3663865:3663932 [1] NCCL INFO Channel 00/0 : 9[46000] -> 4[7000] [send] via NET/Socket/1 gpua025:3663865:3663932 [1] NCCL INFO Channel 00/0 : 9[46000] -> 8[7000] via P2P/IPC/read gpua025:3663865:3663932 [1] NCCL INFO Channel 01/0 : 9[46000] -> 8[7000] via P2P/IPC/read gpua025:3663865:3663932 [1] NCCL INFO Connected all trees gpua025:3663865:3663932 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua025:3663865:3663932 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua025:3663865:3663932 [1] NCCL INFO comm 0x564969fd1600 rank 9 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua025:3663867:3663867 [3] NCCL INFO cudaDriverVersion 12020 gpua025:3663867:3663867 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.25<0> gpua025:3663867:3663867 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua025:3663867:3663935 [3] NCCL INFO NET/IB : No device found. gpua025:3663867:3663935 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.25<0> [1]hsn0:141.142.145.25<0> gpua025:3663867:3663935 [3] NCCL INFO Using network Socket gpua025:3663867:3663935 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua025:3663867:3663935 [3] NCCL INFO Trees [0] -1/-1/-1->11->10 [1] -1/-1/-1->11->10 gpua025:3663867:3663935 [3] NCCL INFO Channel 00/0 : 11[c7000] -> 12[7000] [send] via NET/Socket/1 gpua025:3663867:3663935 [3] NCCL INFO Channel 01/0 : 11[c7000] -> 12[7000] [send] via NET/Socket/1 gpua025:3663867:3663935 [3] NCCL INFO Connected all rings gpua025:3663867:3663935 [3] NCCL INFO Channel 00/0 : 11[c7000] -> 10[85000] via P2P/IPC/read gpua054:47127:47127 [0] NCCL INFO cudaDriverVersion 12020 gpua054:47127:47127 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.54<0> gpua054:47127:47127 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua054:47127:47191 [0] NCCL INFO NET/IB : No device found. 
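
[editor's note] Layers 6 through 26 in the model dump above all share the same two-branch structure: a global MultiHeadedAttention branch and a local ConvolutionalGatingMLP branch, each operating on the 1024-dim stream and sandwiched between macaron-style PositionwiseFeedForward blocks (1024 -> 4096 -> 1024, Swish). Layers 8, 11, 14, 17, 20, 23 and 26 additionally carry a cross_attn / norm_cross_attn pair, presumably attending over the prompt representations projected by the prompt_proj Linear(512 -> 1024) in the same dump. Every layer closes with depthwise_conv_fusion Conv1d(2048, 2048, kernel 31, groups=2048) and merge_proj Linear(2048 -> 1024): the two 1024-dim branch outputs are concatenated to 2048 channels and merged back down. The sketch below covers just that merge step; it is a minimal stand-in consistent with the printed shapes, not ESPnet's EBranchformerEncoderLayer.

import torch
import torch.nn as nn

class BranchMerge(nn.Module):
    # concat(1024 + 1024) -> depthwise Conv1d(2048, k=31) fusion -> Linear(2048 -> 1024),
    # omitting the per-branch LayerNorms, dropout, macaron FFNs and residuals of the real layer.
    def __init__(self, size=1024, kernel=31):
        super().__init__()
        self.depthwise_conv_fusion = nn.Conv1d(
            2 * size, 2 * size, kernel_size=kernel, padding=kernel // 2, groups=2 * size
        )
        self.merge_proj = nn.Linear(2 * size, size)

    def forward(self, x_att, x_mlp):                          # both (batch, time, size)
        x_concat = torch.cat([x_att, x_mlp], dim=-1)          # (B, T, 2*size)
        fused = self.depthwise_conv_fusion(x_concat.transpose(1, 2)).transpose(1, 2)
        return self.merge_proj(x_concat + fused)              # back to (B, T, size)

x = torch.randn(2, 50, 1024)
print(BranchMerge()(x, x).shape)                              # torch.Size([2, 50, 1024])
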
gpua054:47127:47191 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.54<0> [1]hsn0:141.142.145.54<0> gpua054:47127:47191 [0] NCCL INFO Using network Socket gpua054:47127:47191 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua054:47127:47191 [0] NCCL INFO Trees [0] 29/-1/-1->28->24 [1] 29/12/-1->28->60 gpua054:47127:47191 [0] NCCL INFO Channel 00/0 : 27[c7000] -> 28[7000] [receive] via NET/Socket/1 gpua054:47127:47191 [0] NCCL INFO Channel 01/0 : 27[c7000] -> 28[7000] [receive] via NET/Socket/1 gpua054:47127:47191 [0] NCCL INFO Channel 00/0 : 28[7000] -> 29[46000] via P2P/IPC/read gpua054:47127:47191 [0] NCCL INFO Channel 01/0 : 28[7000] -> 29[46000] via P2P/IPC/read gpua025:3663867:3663935 [3] NCCL INFO Channel 01/0 : 11[c7000] -> 10[85000] via P2P/IPC/read gpua025:3663867:3663935 [3] NCCL INFO Connected all trees gpua025:3663867:3663935 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua025:3663867:3663935 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua025:3663867:3663935 [3] NCCL INFO comm 0x55fd8d42f120 rank 11 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua054:47127:47191 [0] NCCL INFO Connected all rings gpua054:47127:47191 [0] NCCL INFO Channel 00/0 : 24[7000] -> 28[7000] [receive] via NET/Socket/1 gpua054:47127:47191 [0] NCCL INFO Channel 01/0 : 12[7000] -> 28[7000] [receive] via NET/Socket/1 gpua054:47127:47191 [0] NCCL INFO Channel 01/0 : 60[7000] -> 28[7000] [receive] via NET/Socket/1 gpua054:47127:47191 [0] NCCL INFO Channel 01/0 : 28[7000] -> 60[7000] [send] via NET/Socket/1 gpua054:47127:47191 [0] NCCL INFO Channel 01/0 : 28[7000] -> 12[7000] [send] via NET/Socket/1 gpua054:47127:47191 [0] NCCL INFO Channel 00/0 : 28[7000] -> 24[7000] [send] via NET/Socket/1 gpua054:47127:47191 [0] NCCL INFO Connected all trees gpua054:47127:47191 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua054:47127:47191 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua054:47127:47191 [0] NCCL INFO comm 0x564f6963c6e0 rank 28 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua025:3663864:3663864 [0] NCCL INFO cudaDriverVersion 12020 gpua025:3663864:3663864 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.25<0> gpua025:3663864:3663864 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua025:3663864:3663933 [0] NCCL INFO NET/IB : No device found. gpua025:3663864:3663933 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.25<0> [1]hsn0:141.142.145.25<0> gpua025:3663864:3663933 [0] NCCL INFO Using network Socket gpua025:3663864:3663933 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua025:3663864:3663933 [0] NCCL INFO Trees [0] 9/12/-1->8->17 [1] 9/-1/-1->8->5 gpua025:3663864:3663933 [0] NCCL INFO Channel 00/0 : 7[c7000] -> 8[7000] [receive] via NET/Socket/1 gpua025:3663864:3663933 [0] NCCL INFO Channel 01/0 : 7[c7000] -> 8[7000] [receive] via NET/Socket/1 gpua025:3663864:3663933 [0] NCCL INFO Channel 00/0 : 8[7000] -> 9[46000] via P2P/IPC/read gpua054:47129:47129 [2] NCCL INFO cudaDriverVersion 12020 gpua054:47129:47129 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.54<0> gpua054:47129:47129 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua054:47129:47194 [2] NCCL INFO NET/IB : No device found. 
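
[editor's note] Inside each layer, the ConvolutionalGatingMLP shapes explain themselves once the gating is spelled out: channel_proj1 expands 1024 -> 4096 with GELU, the ConvolutionalSpatialGatingUnit splits those 4096 channels into two 2048-channel halves and uses one half (LayerNorm over 2048 channels plus the depthwise Conv1d(2048, k=31)) to gate the other half elementwise, and channel_proj2 maps the gated 2048 channels back to 1024. A minimal sketch assuming that standard cgMLP gating, not ESPnet's exact module:

import torch
import torch.nn as nn

class CgMLPSketch(nn.Module):
    # Why 1024 -> 4096 -> 2048 -> 1024: the expanded channels are split in half,
    # and the normalized, depthwise-convolved half gates the other.
    def __init__(self, size=1024, hidden=4096, kernel=31):
        super().__init__()
        self.channel_proj1 = nn.Sequential(nn.Linear(size, hidden), nn.GELU())
        self.norm = nn.LayerNorm(hidden // 2)
        self.conv = nn.Conv1d(hidden // 2, hidden // 2, kernel, padding=kernel // 2,
                              groups=hidden // 2)
        self.channel_proj2 = nn.Linear(hidden // 2, size)

    def forward(self, x):                                     # (B, T, 1024)
        x = self.channel_proj1(x)                             # (B, T, 4096)
        a, b = x.chunk(2, dim=-1)                             # two (B, T, 2048) halves
        b = self.conv(self.norm(b).transpose(1, 2)).transpose(1, 2)
        return self.channel_proj2(a * b)                      # gate, then back to 1024

print(CgMLPSketch()(torch.randn(2, 50, 1024)).shape)          # torch.Size([2, 50, 1024])
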
gpua054:47129:47194 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.54<0> [1]hsn0:141.142.145.54<0> gpua054:47129:47194 [2] NCCL INFO Using network Socket gpua054:47129:47194 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua054:47129:47194 [2] NCCL INFO Trees [0] 31/-1/-1->30->29 [1] 31/-1/-1->30->29 gpua054:47129:47194 [2] NCCL INFO Channel 00/0 : 30[85000] -> 31[c7000] via P2P/IPC/read gpua054:47129:47194 [2] NCCL INFO Channel 01/0 : 30[85000] -> 31[c7000] via P2P/IPC/read gpua054:47129:47194 [2] NCCL INFO Connected all rings gpua054:47129:47194 [2] NCCL INFO Channel 00/0 : 30[85000] -> 29[46000] via P2P/IPC/read gpua025:3663864:3663933 [0] NCCL INFO Channel 01/0 : 8[7000] -> 9[46000] via P2P/IPC/read gpua025:3663864:3663933 [0] NCCL INFO Connected all rings gpua025:3663864:3663933 [0] NCCL INFO Channel 01/0 : 5[46000] -> 8[7000] [receive] via NET/Socket/1 gpua025:3663864:3663933 [0] NCCL INFO Channel 00/0 : 8[7000] -> 12[7000] [send] via NET/Socket/1 gpua025:3663864:3663933 [0] NCCL INFO Channel 00/0 : 8[7000] -> 17[46000] [send] via NET/Socket/1 gpua025:3663864:3663933 [0] NCCL INFO Channel 00/0 : 17[46000] -> 8[7000] [receive] via NET/Socket/1 gpua025:3663864:3663933 [0] NCCL INFO Channel 00/0 : 12[7000] -> 8[7000] [receive] via NET/Socket/1 gpua025:3663864:3663933 [0] NCCL INFO Channel 01/0 : 8[7000] -> 5[46000] [send] via NET/Socket/1 gpua025:3663864:3663933 [0] NCCL INFO Connected all trees gpua025:3663864:3663933 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua025:3663864:3663933 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua054:47129:47194 [2] NCCL INFO Channel 01/0 : 30[85000] -> 29[46000] via P2P/IPC/read gpua054:47129:47194 [2] NCCL INFO Connected all trees gpua054:47129:47194 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua054:47129:47194 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua054:47129:47194 [2] NCCL INFO comm 0x55d1a318a720 rank 30 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua025:3663864:3663933 [0] NCCL INFO comm 0x563af91dae20 rank 8 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua054:47128:47128 [1] NCCL INFO cudaDriverVersion 12020 gpua054:47128:47128 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.54<0> gpua054:47128:47128 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua054:47128:47192 [1] NCCL INFO NET/IB : No device found. gpua054:47128:47192 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.54<0> [1]hsn0:141.142.145.54<0> gpua054:47128:47192 [1] NCCL INFO Using network Socket gpua054:47128:47192 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua054:47128:47192 [1] NCCL INFO Trees [0] 30/-1/-1->29->28 [1] 30/44/-1->29->28 gpua054:47128:47192 [1] NCCL INFO Channel 00/0 : 29[46000] -> 30[85000] via P2P/IPC/read gpua054:47128:47192 [1] NCCL INFO Channel 01/0 : 29[46000] -> 30[85000] via P2P/IPC/read gpua054:47128:47192 [1] NCCL INFO Connected all rings gpua054:47128:47192 [1] NCCL INFO Channel 01/0 : 29[46000] -> 44[7000] [send] via NET/Socket/1 gpua025:3663866:3663866 [2] NCCL INFO cudaDriverVersion 12020 gpua025:3663866:3663866 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.25<0> gpua025:3663866:3663866 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua025:3663866:3663934 [2] NCCL INFO NET/IB : No device found. 
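
[editor's note] At the end of the model dump, the CTC head is a single ctc_lo Linear(1024 -> 50002) followed by CTCLoss over the 50002-entry token list, and the encoder also carries a conditioning_layer Linear(50002 -> 1024), which suggests that intermediate CTC posteriors are projected back into the 1024-dim encoder stream (self-conditioned-CTC style). The sketch below shows both pieces with generic torch calls; the blank id, the sequence lengths and the exact point where conditioning is applied are assumptions for illustration, not read from this log.

import torch
import torch.nn as nn
import torch.nn.functional as F

vocab, size = 50002, 1024
ctc_lo = nn.Linear(size, vocab)                      # same shape as (ctc_lo) above
conditioning_layer = nn.Linear(vocab, size)          # same shape as (conditioning_layer) above
ctc_loss = nn.CTCLoss(blank=0, zero_infinity=True)   # blank id 0 is an assumption

h = torch.randn(4, 120, size)                        # (batch, frames, dim) encoder output
log_probs = F.log_softmax(ctc_lo(h), dim=-1)         # (B, T, vocab)

targets = torch.randint(1, vocab, (4, 30))           # made-up token targets, no blanks
loss = ctc_loss(log_probs.transpose(0, 1),           # CTCLoss expects (T, B, vocab)
                targets,
                torch.full((4,), 120),               # input (frame) lengths
                torch.full((4,), 30))                # target (token) lengths

# Feeding the posterior back through the 50002 -> 1024 projection, as the
# conditioning_layer's shape implies:
h = h + conditioning_layer(log_probs.exp())
print(loss.item(), h.shape)
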
gpua025:3663866:3663934 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.25<0> [1]hsn0:141.142.145.25<0> gpua025:3663866:3663934 [2] NCCL INFO Using network Socket gpua025:3663866:3663934 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua025:3663866:3663934 [2] NCCL INFO Trees [0] 11/-1/-1->10->9 [1] 11/-1/-1->10->9 gpua025:3663866:3663934 [2] NCCL INFO Channel 00/0 : 10[85000] -> 11[c7000] via P2P/IPC/read gpua025:3663866:3663934 [2] NCCL INFO Channel 01/0 : 10[85000] -> 11[c7000] via P2P/IPC/read gpua025:3663866:3663934 [2] NCCL INFO Connected all rings gpua025:3663866:3663934 [2] NCCL INFO Channel 00/0 : 10[85000] -> 9[46000] via P2P/IPC/read gpua054:47128:47192 [1] NCCL INFO Channel 01/0 : 44[7000] -> 29[46000] [receive] via NET/Socket/1 gpua054:47128:47192 [1] NCCL INFO Channel 00/0 : 29[46000] -> 28[7000] via P2P/IPC/read gpua054:47128:47192 [1] NCCL INFO Channel 01/0 : 29[46000] -> 28[7000] via P2P/IPC/read gpua054:47128:47192 [1] NCCL INFO Connected all trees gpua054:47128:47192 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua054:47128:47192 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua054:47128:47192 [1] NCCL INFO comm 0x55561a0e1130 rank 29 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua025:3663866:3663934 [2] NCCL INFO Channel 01/0 : 10[85000] -> 9[46000] via P2P/IPC/read gpua025:3663866:3663934 [2] NCCL INFO Connected all trees gpua025:3663866:3663934 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua025:3663866:3663934 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua025:3663866:3663934 [2] NCCL INFO comm 0x562a963270f0 rank 10 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua054:47130:47130 [3] NCCL INFO cudaDriverVersion 12020 gpua054:47130:47130 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.54<0> gpua054:47130:47130 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua054:47130:47193 [3] NCCL INFO NET/IB : No device found. gpua054:47130:47193 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.54<0> [1]hsn0:141.142.145.54<0> gpua054:47130:47193 [3] NCCL INFO Using network Socket gpua054:47130:47193 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua054:47130:47193 [3] NCCL INFO Trees [0] -1/-1/-1->31->30 [1] -1/-1/-1->31->30 gpua054:47130:47193 [3] NCCL INFO Channel 00/0 : 31[c7000] -> 32[7000] [send] via NET/Socket/1 gpua054:47130:47193 [3] NCCL INFO Channel 01/0 : 31[c7000] -> 32[7000] [send] via NET/Socket/1 gpua054:47130:47193 [3] NCCL INFO Connected all rings gpua054:47130:47193 [3] NCCL INFO Channel 00/0 : 31[c7000] -> 30[85000] via P2P/IPC/read gpua054:47130:47193 [3] NCCL INFO Channel 01/0 : 31[c7000] -> 30[85000] via P2P/IPC/read gpua054:47130:47193 [3] NCCL INFO Connected all trees gpua054:47130:47193 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua054:47130:47193 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua054:47130:47193 [3] NCCL INFO comm 0x5556e0d4b370 rank 31 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua048:2699225:2699225 [2] NCCL INFO cudaDriverVersion 12020 gpua048:2699225:2699225 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.48<0> gpua048:2699225:2699225 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua048:2699225:2699287 [2] NCCL INFO NET/IB : No device found. 
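
[editor's note] The model summary's "Size: 4.02 GB" for "Type: torch.float32" is simply the parameter count times 4 bytes (about 1.01 B x 4 B, with 100.0% of the parameters trainable). For any torch module the same numbers can be recomputed with a small helper (generic torch, not ESPnet's summary code):

import torch.nn as nn

def summarize(model: nn.Module):
    n_params = sum(p.numel() for p in model.parameters())
    n_train = sum(p.numel() for p in model.parameters() if p.requires_grad)
    size_gb = n_params * 4 / 1e9          # 4 bytes per float32 parameter
    return n_params, n_train, size_gb

# e.g. the ctc_lo layer alone: ~51.3 M parameters, ~0.21 GB in float32
print(summarize(nn.Linear(1024, 50002)))
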
gpua048:2699225:2699287 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.48<0> [1]hsn0:141.142.145.48<0> gpua048:2699225:2699287 [2] NCCL INFO Using network Socket gpua048:2699225:2699287 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua048:2699225:2699287 [2] NCCL INFO Trees [0] 27/-1/-1->26->25 [1] 27/-1/-1->26->25 gpua048:2699225:2699287 [2] NCCL INFO Channel 00/0 : 26[85000] -> 27[c7000] via P2P/IPC/read gpua048:2699225:2699287 [2] NCCL INFO Channel 01/0 : 26[85000] -> 27[c7000] via P2P/IPC/read gpua048:2699225:2699287 [2] NCCL INFO Connected all rings gpua048:2699225:2699287 [2] NCCL INFO Channel 00/0 : 26[85000] -> 25[46000] via P2P/IPC/read gpua048:2699225:2699287 [2] NCCL INFO Channel 01/0 : 26[85000] -> 25[46000] via P2P/IPC/read gpua048:2699225:2699287 [2] NCCL INFO Connected all trees gpua048:2699225:2699287 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua048:2699225:2699287 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua048:2699225:2699287 [2] NCCL INFO comm 0x56540a478000 rank 26 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua027:572261:572261 [3] NCCL INFO cudaDriverVersion 12020 gpua027:572261:572261 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.27<0> gpua027:572261:572261 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua027:572261:572320 [3] NCCL INFO NET/IB : No device found. gpua027:572261:572320 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.27<0> [1]hsn0:141.142.145.27<0> gpua027:572261:572320 [3] NCCL INFO Using network Socket gpua027:572261:572320 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua027:572261:572320 [3] NCCL INFO Trees [0] -1/-1/-1->15->14 [1] -1/-1/-1->15->14 gpua027:572261:572320 [3] NCCL INFO Channel 00/0 : 15[c7000] -> 16[7000] [send] via NET/Socket/1 gpua027:572261:572320 [3] NCCL INFO Channel 01/0 : 15[c7000] -> 16[7000] [send] via NET/Socket/1 gpua027:572261:572320 [3] NCCL INFO Connected all rings gpua027:572261:572320 [3] NCCL INFO Channel 00/0 : 15[c7000] -> 14[85000] via P2P/IPC/read gpua027:572261:572320 [3] NCCL INFO Channel 01/0 : 15[c7000] -> 14[85000] via P2P/IPC/read gpua027:572261:572320 [3] NCCL INFO Connected all trees gpua027:572261:572320 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua027:572261:572320 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua027:572261:572320 [3] NCCL INFO comm 0x561790bd85e0 rank 15 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua027:572260:572260 [2] NCCL INFO cudaDriverVersion 12020 gpua027:572260:572260 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.27<0> gpua027:572260:572260 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua027:572260:572322 [2] NCCL INFO NET/IB : No device found. 
gpua027:572260:572322 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.27<0> [1]hsn0:141.142.145.27<0> gpua027:572260:572322 [2] NCCL INFO Using network Socket gpua027:572260:572322 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua027:572260:572322 [2] NCCL INFO Trees [0] 15/-1/-1->14->13 [1] 15/-1/-1->14->13 gpua027:572260:572322 [2] NCCL INFO Channel 00/0 : 14[85000] -> 15[c7000] via P2P/IPC/read gpua027:572260:572322 [2] NCCL INFO Channel 01/0 : 14[85000] -> 15[c7000] via P2P/IPC/read gpua027:572260:572322 [2] NCCL INFO Connected all rings gpua027:572260:572322 [2] NCCL INFO Channel 00/0 : 14[85000] -> 13[46000] via P2P/IPC/read gpua048:2699224:2699224 [1] NCCL INFO cudaDriverVersion 12020 gpua048:2699224:2699224 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.48<0> gpua048:2699224:2699224 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua048:2699224:2699286 [1] NCCL INFO NET/IB : No device found. gpua048:2699224:2699286 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.48<0> [1]hsn0:141.142.145.48<0> gpua048:2699224:2699286 [1] NCCL INFO Using network Socket gpua048:2699224:2699286 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua048:2699224:2699286 [1] NCCL INFO Trees [0] 26/20/-1->25->24 [1] 26/-1/-1->25->24 gpua048:2699224:2699286 [1] NCCL INFO Channel 00/0 : 25[46000] -> 26[85000] via P2P/IPC/read gpua048:2699224:2699286 [1] NCCL INFO Channel 01/0 : 25[46000] -> 26[85000] via P2P/IPC/read gpua048:2699224:2699286 [1] NCCL INFO Connected all rings gpua048:2699224:2699286 [1] NCCL INFO Channel 00/0 : 20[7000] -> 25[46000] [receive] via NET/Socket/1 gpua068:455944:455944 [1] NCCL INFO cudaDriverVersion 12020 gpua068:455944:455944 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.68<0> gpua068:455944:455944 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua068:455944:456010 [1] NCCL INFO NET/IB : No device found. gpua068:455944:456010 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.68<0> [1]hsn0:141.142.145.68<0> gpua068:455944:456010 [1] NCCL INFO Using network Socket gpua068:455944:456010 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua068:455944:456010 [1] NCCL INFO Trees [0] 38/-1/-1->37->36 [1] 38/40/-1->37->36 gpua068:455944:456010 [1] NCCL INFO Channel 00/0 : 37[46000] -> 38[85000] via P2P/IPC/read gpua068:455944:456010 [1] NCCL INFO Channel 01/0 : 37[46000] -> 38[85000] via P2P/IPC/read gpua068:455944:456010 [1] NCCL INFO Connected all rings gpua068:455944:456010 [1] NCCL INFO Channel 01/0 : 37[46000] -> 40[7000] [send] via NET/Socket/1 gpua075:366300:366300 [0] NCCL INFO cudaDriverVersion 12020 gpua075:366300:366300 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.75<0> gpua075:366300:366300 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua075:366300:366361 [0] NCCL INFO NET/IB : No device found. 
gpua075:366300:366361 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.75<0> [1]hsn0:141.142.145.75<0> gpua075:366300:366361 [0] NCCL INFO Using network Socket gpua075:366300:366361 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua075:366300:366361 [0] NCCL INFO Trees [0] 41/44/-1->40->49 [1] 41/-1/-1->40->37 gpua075:366300:366361 [0] NCCL INFO Channel 00/0 : 39[c7000] -> 40[7000] [receive] via NET/Socket/1 gpua075:366300:366361 [0] NCCL INFO Channel 01/0 : 39[c7000] -> 40[7000] [receive] via NET/Socket/1 gpua075:366300:366361 [0] NCCL INFO Channel 00/0 : 40[7000] -> 41[46000] via P2P/IPC/read gpua075:366300:366361 [0] NCCL INFO Channel 01/0 : 40[7000] -> 41[46000] via P2P/IPC/read gpua079:3449379:3449379 [1] NCCL INFO cudaDriverVersion 12020 gpua079:3449379:3449379 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.79<0> gpua079:3449379:3449379 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua079:3449379:3449441 [1] NCCL INFO NET/IB : No device found. gpua079:3449379:3449441 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.79<0> [1]hsn0:141.142.145.79<0> gpua079:3449379:3449441 [1] NCCL INFO Using network Socket gpua079:3449379:3449441 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua079:3449379:3449441 [1] NCCL INFO Trees [0] 54/-1/-1->53->52 [1] 54/56/-1->53->52 gpua079:3449379:3449441 [1] NCCL INFO Channel 00/0 : 53[46000] -> 54[85000] via P2P/IPC/read gpua079:3449379:3449441 [1] NCCL INFO Channel 01/0 : 53[46000] -> 54[85000] via P2P/IPC/read gpua079:3449379:3449441 [1] NCCL INFO Connected all rings gpua079:3449379:3449441 [1] NCCL INFO Channel 01/0 : 53[46000] -> 56[7000] [send] via NET/Socket/1 gpua027:572260:572322 [2] NCCL INFO Channel 01/0 : 14[85000] -> 13[46000] via P2P/IPC/read gpua027:572260:572322 [2] NCCL INFO Connected all trees gpua027:572260:572322 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua027:572260:572322 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua027:572260:572322 [2] NCCL INFO comm 0x558afc512cd0 rank 14 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua048:2699224:2699286 [1] NCCL INFO Channel 00/0 : 25[46000] -> 20[7000] [send] via NET/Socket/1 gpua048:2699224:2699286 [1] NCCL INFO Channel 00/0 : 25[46000] -> 24[7000] via P2P/IPC/read gpua048:2699224:2699286 [1] NCCL INFO Channel 01/0 : 25[46000] -> 24[7000] via P2P/IPC/read gpua048:2699224:2699286 [1] NCCL INFO Connected all trees gpua048:2699224:2699286 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua048:2699224:2699286 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua048:2699224:2699286 [1] NCCL INFO comm 0x5592c018a290 rank 25 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua068:455944:456010 [1] NCCL INFO Channel 01/0 : 40[7000] -> 37[46000] [receive] via NET/Socket/1 gpua068:455944:456010 [1] NCCL INFO Channel 00/0 : 37[46000] -> 36[7000] via P2P/IPC/read gpua068:455944:456010 [1] NCCL INFO Channel 01/0 : 37[46000] -> 36[7000] via P2P/IPC/read gpua068:455944:456010 [1] NCCL INFO Connected all trees gpua068:455944:456010 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua068:455944:456010 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua068:455944:456010 [1] NCCL INFO comm 0x561cf3cfe900 rank 37 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua075:366300:366361 [0] NCCL INFO Connected all rings gpua075:366300:366361 [0] NCCL INFO Channel 01/0 : 37[46000] -> 40[7000] [receive] via NET/Socket/1 
gpua075:366300:366361 [0] NCCL INFO Channel 00/0 : 40[7000] -> 44[7000] [send] via NET/Socket/1 gpua075:366300:366361 [0] NCCL INFO Channel 00/0 : 40[7000] -> 49[46000] [send] via NET/Socket/1 gpua075:366300:366361 [0] NCCL INFO Channel 00/0 : 49[46000] -> 40[7000] [receive] via NET/Socket/1 gpua075:366300:366361 [0] NCCL INFO Channel 00/0 : 44[7000] -> 40[7000] [receive] via NET/Socket/1 gpua075:366300:366361 [0] NCCL INFO Channel 01/0 : 40[7000] -> 37[46000] [send] via NET/Socket/1 gpua075:366300:366361 [0] NCCL INFO Connected all trees gpua075:366300:366361 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua075:366300:366361 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua075:366300:366361 [0] NCCL INFO comm 0x564dd5adcb80 rank 40 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua079:3449379:3449441 [1] NCCL INFO Channel 01/0 : 56[7000] -> 53[46000] [receive] via NET/Socket/1 gpua079:3449379:3449441 [1] NCCL INFO Channel 00/0 : 53[46000] -> 52[7000] via P2P/IPC/read gpua079:3449379:3449441 [1] NCCL INFO Channel 01/0 : 53[46000] -> 52[7000] via P2P/IPC/read gpua079:3449379:3449441 [1] NCCL INFO Connected all trees gpua079:3449379:3449441 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua079:3449379:3449441 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua079:3449379:3449441 [1] NCCL INFO comm 0x5569dccafec0 rank 53 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua048:2699223:2699223 [0] NCCL INFO cudaDriverVersion 12020 gpua048:2699223:2699223 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.48<0> gpua048:2699223:2699223 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua048:2699223:2699284 [0] NCCL INFO NET/IB : No device found. gpua048:2699223:2699284 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.48<0> [1]hsn0:141.142.145.48<0> gpua048:2699223:2699284 [0] NCCL INFO Using network Socket gpua048:2699223:2699284 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua048:2699223:2699284 [0] NCCL INFO Trees [0] 25/28/-1->24->16 [1] 25/-1/-1->24->21 gpua048:2699223:2699284 [0] NCCL INFO Channel 00/0 : 23[c7000] -> 24[7000] [receive] via NET/Socket/1 gpua048:2699223:2699284 [0] NCCL INFO Channel 01/0 : 23[c7000] -> 24[7000] [receive] via NET/Socket/1 gpua048:2699223:2699284 [0] NCCL INFO Channel 00/0 : 24[7000] -> 25[46000] via P2P/IPC/read gpua068:455945:455945 [2] NCCL INFO cudaDriverVersion 12020 gpua068:455945:455945 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.68<0> gpua068:455945:455945 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua068:455945:456011 [2] NCCL INFO NET/IB : No device found. 
gpua068:455945:456011 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.68<0> [1]hsn0:141.142.145.68<0> gpua068:455945:456011 [2] NCCL INFO Using network Socket gpua068:455945:456011 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua068:455945:456011 [2] NCCL INFO Trees [0] 39/-1/-1->38->37 [1] 39/-1/-1->38->37 gpua068:455945:456011 [2] NCCL INFO Channel 00/0 : 38[85000] -> 39[c7000] via P2P/IPC/read gpua068:455945:456011 [2] NCCL INFO Channel 01/0 : 38[85000] -> 39[c7000] via P2P/IPC/read gpua068:455945:456011 [2] NCCL INFO Connected all rings gpua068:455945:456011 [2] NCCL INFO Channel 00/0 : 38[85000] -> 37[46000] via P2P/IPC/read gpua075:366303:366303 [3] NCCL INFO cudaDriverVersion 12020 gpua075:366303:366303 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.75<0> gpua075:366303:366303 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua075:366303:366362 [3] NCCL INFO NET/IB : No device found. gpua075:366303:366362 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.75<0> [1]hsn0:141.142.145.75<0> gpua075:366303:366362 [3] NCCL INFO Using network Socket gpua075:366303:366362 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua075:366303:366362 [3] NCCL INFO Trees [0] -1/-1/-1->43->42 [1] -1/-1/-1->43->42 gpua075:366303:366362 [3] NCCL INFO Channel 00/0 : 43[c7000] -> 44[7000] [send] via NET/Socket/1 gpua075:366303:366362 [3] NCCL INFO Channel 01/0 : 43[c7000] -> 44[7000] [send] via NET/Socket/1 gpua075:366303:366362 [3] NCCL INFO Connected all rings gpua075:366303:366362 [3] NCCL INFO Channel 00/0 : 43[c7000] -> 42[85000] via P2P/IPC/read gpua079:3449378:3449378 [0] NCCL INFO cudaDriverVersion 12020 gpua079:3449378:3449378 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.79<0> gpua079:3449378:3449378 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua079:3449378:3449438 [0] NCCL INFO NET/IB : No device found. gpua079:3449378:3449438 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.79<0> [1]hsn0:141.142.145.79<0> gpua079:3449378:3449438 [0] NCCL INFO Using network Socket gpua079:3449378:3449438 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua079:3449378:3449438 [0] NCCL INFO Trees [0] 53/-1/-1->52->57 [1] 53/48/-1->52->45 gpua079:3449378:3449438 [0] NCCL INFO Channel 00/0 : 51[c7000] -> 52[7000] [receive] via NET/Socket/1 gpua079:3449378:3449438 [0] NCCL INFO Channel 01/0 : 51[c7000] -> 52[7000] [receive] via NET/Socket/1 gpua079:3449378:3449438 [0] NCCL INFO Channel 00/0 : 52[7000] -> 53[46000] via P2P/IPC/read gpua027:572259:572259 [1] NCCL INFO cudaDriverVersion 12020 gpua027:572259:572259 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.27<0> gpua027:572259:572259 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua027:572259:572323 [1] NCCL INFO NET/IB : No device found. 
gpua027:572259:572323 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.27<0> [1]hsn0:141.142.145.27<0> gpua027:572259:572323 [1] NCCL INFO Using network Socket gpua027:572259:572323 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua027:572259:572323 [1] NCCL INFO Trees [0] 14/-1/-1->13->12 [1] 14/20/-1->13->12 gpua027:572259:572323 [1] NCCL INFO Channel 00/0 : 13[46000] -> 14[85000] via P2P/IPC/read gpua027:572259:572323 [1] NCCL INFO Channel 01/0 : 13[46000] -> 14[85000] via P2P/IPC/read gpua027:572259:572323 [1] NCCL INFO Connected all rings gpua027:572259:572323 [1] NCCL INFO Channel 01/0 : 13[46000] -> 20[7000] [send] via NET/Socket/1 gpua048:2699223:2699284 [0] NCCL INFO Channel 01/0 : 24[7000] -> 25[46000] via P2P/IPC/read gpua048:2699223:2699284 [0] NCCL INFO Connected all rings gpua048:2699223:2699284 [0] NCCL INFO Channel 01/0 : 21[46000] -> 24[7000] [receive] via NET/Socket/1 gpua048:2699223:2699284 [0] NCCL INFO Channel 00/0 : 24[7000] -> 28[7000] [send] via NET/Socket/1 gpua048:2699223:2699284 [0] NCCL INFO Channel 00/0 : 16[7000] -> 24[7000] [receive] via NET/Socket/1 gpua048:2699223:2699284 [0] NCCL INFO Channel 00/0 : 24[7000] -> 16[7000] [send] via NET/Socket/1 gpua048:2699223:2699284 [0] NCCL INFO Channel 00/0 : 28[7000] -> 24[7000] [receive] via NET/Socket/1 gpua048:2699223:2699284 [0] NCCL INFO Channel 01/0 : 24[7000] -> 21[46000] [send] via NET/Socket/1 gpua048:2699223:2699284 [0] NCCL INFO Connected all trees gpua048:2699223:2699284 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua048:2699223:2699284 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua068:455945:456011 [2] NCCL INFO Channel 01/0 : 38[85000] -> 37[46000] via P2P/IPC/read gpua068:455945:456011 [2] NCCL INFO Connected all trees gpua068:455945:456011 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua068:455945:456011 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua068:455945:456011 [2] NCCL INFO comm 0x55d9bc057230 rank 38 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua075:366303:366362 [3] NCCL INFO Channel 01/0 : 43[c7000] -> 42[85000] via P2P/IPC/read gpua075:366303:366362 [3] NCCL INFO Connected all trees gpua075:366303:366362 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua075:366303:366362 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua075:366303:366362 [3] NCCL INFO comm 0x55e5d7c92fb0 rank 43 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua079:3449378:3449438 [0] NCCL INFO Channel 01/0 : 52[7000] -> 53[46000] via P2P/IPC/read gpua079:3449378:3449438 [0] NCCL INFO Connected all rings gpua079:3449378:3449438 [0] NCCL INFO Channel 01/0 : 48[7000] -> 52[7000] [receive] via NET/Socket/1 gpua079:3449378:3449438 [0] NCCL INFO Channel 00/0 : 52[7000] -> 57[46000] [send] via NET/Socket/1 gpua079:3449378:3449438 [0] NCCL INFO Channel 01/0 : 45[46000] -> 52[7000] [receive] via NET/Socket/1 gpua079:3449378:3449438 [0] NCCL INFO Channel 01/0 : 52[7000] -> 45[46000] [send] via NET/Socket/1 gpua079:3449378:3449438 [0] NCCL INFO Channel 00/0 : 57[46000] -> 52[7000] [receive] via NET/Socket/1 gpua079:3449378:3449438 [0] NCCL INFO Channel 01/0 : 52[7000] -> 48[7000] [send] via NET/Socket/1 gpua079:3449378:3449438 [0] NCCL INFO Connected all trees gpua079:3449378:3449438 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua079:3449378:3449438 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua027:572259:572323 [1] NCCL INFO Channel 
01/0 : 20[7000] -> 13[46000] [receive] via NET/Socket/1 gpua027:572259:572323 [1] NCCL INFO Channel 00/0 : 13[46000] -> 12[7000] via P2P/IPC/read gpua027:572259:572323 [1] NCCL INFO Channel 01/0 : 13[46000] -> 12[7000] via P2P/IPC/read gpua027:572259:572323 [1] NCCL INFO Connected all trees gpua027:572259:572323 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua027:572259:572323 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua027:572259:572323 [1] NCCL INFO comm 0x55a18500cee0 rank 13 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua048:2699223:2699284 [0] NCCL INFO comm 0x5558cd445e90 rank 24 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua057:3639473:3639473 [3] NCCL INFO cudaDriverVersion 12020 gpua057:3639473:3639473 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.57<0> gpua057:3639473:3639473 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua057:3639473:3639535 [3] NCCL INFO NET/IB : No device found. gpua057:3639473:3639535 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.57<0> [1]hsn0:141.142.145.57<0> gpua057:3639473:3639535 [3] NCCL INFO Using network Socket gpua057:3639473:3639535 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua057:3639473:3639535 [3] NCCL INFO Trees [0] -1/-1/-1->35->34 [1] -1/-1/-1->35->34 gpua057:3639473:3639535 [3] NCCL INFO Channel 00/0 : 35[c7000] -> 36[7000] [send] via NET/Socket/1 gpua057:3639473:3639535 [3] NCCL INFO Channel 01/0 : 35[c7000] -> 36[7000] [send] via NET/Socket/1 gpua057:3639473:3639535 [3] NCCL INFO Connected all rings gpua057:3639473:3639535 [3] NCCL INFO Channel 00/0 : 35[c7000] -> 34[85000] via P2P/IPC/read gpua068:455946:455946 [3] NCCL INFO cudaDriverVersion 12020 gpua068:455946:455946 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.68<0> gpua068:455946:455946 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua068:455946:456009 [3] NCCL INFO NET/IB : No device found. gpua068:455946:456009 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.68<0> [1]hsn0:141.142.145.68<0> gpua068:455946:456009 [3] NCCL INFO Using network Socket gpua068:455946:456009 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua068:455946:456009 [3] NCCL INFO Trees [0] -1/-1/-1->39->38 [1] -1/-1/-1->39->38 gpua068:455946:456009 [3] NCCL INFO Channel 00/0 : 39[c7000] -> 40[7000] [send] via NET/Socket/1 gpua068:455946:456009 [3] NCCL INFO Channel 01/0 : 39[c7000] -> 40[7000] [send] via NET/Socket/1 gpua068:455946:456009 [3] NCCL INFO Connected all rings gpua068:455946:456009 [3] NCCL INFO Channel 00/0 : 39[c7000] -> 38[85000] via P2P/IPC/read gpua075:366302:366302 [2] NCCL INFO cudaDriverVersion 12020 gpua075:366302:366302 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.75<0> gpua075:366302:366302 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua075:366302:366364 [2] NCCL INFO NET/IB : No device found. 
gpua075:366302:366364 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.75<0> [1]hsn0:141.142.145.75<0> gpua075:366302:366364 [2] NCCL INFO Using network Socket gpua075:366302:366364 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua075:366302:366364 [2] NCCL INFO Trees [0] 43/-1/-1->42->41 [1] 43/-1/-1->42->41 gpua075:366302:366364 [2] NCCL INFO Channel 00/0 : 42[85000] -> 43[c7000] via P2P/IPC/read gpua075:366302:366364 [2] NCCL INFO Channel 01/0 : 42[85000] -> 43[c7000] via P2P/IPC/read gpua075:366302:366364 [2] NCCL INFO Connected all rings gpua075:366302:366364 [2] NCCL INFO Channel 00/0 : 42[85000] -> 41[46000] via P2P/IPC/read gpua079:3449378:3449438 [0] NCCL INFO comm 0x5566b52294e0 rank 52 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua027:572258:572258 [0] NCCL INFO cudaDriverVersion 12020 gpua027:572258:572258 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.27<0> gpua027:572258:572258 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua027:572258:572321 [0] NCCL INFO NET/IB : No device found. gpua027:572258:572321 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.27<0> [1]hsn0:141.142.145.27<0> gpua027:572258:572321 [0] NCCL INFO Using network Socket gpua027:572258:572321 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua027:572258:572321 [0] NCCL INFO Trees [0] 13/-1/-1->12->8 [1] 13/4/-1->12->28 gpua027:572258:572321 [0] NCCL INFO Channel 00/0 : 11[c7000] -> 12[7000] [receive] via NET/Socket/1 gpua027:572258:572321 [0] NCCL INFO Channel 01/0 : 11[c7000] -> 12[7000] [receive] via NET/Socket/1 gpua027:572258:572321 [0] NCCL INFO Channel 00/0 : 12[7000] -> 13[46000] via P2P/IPC/read gpua027:572258:572321 [0] NCCL INFO Channel 01/0 : 12[7000] -> 13[46000] via P2P/IPC/read gpua048:2699226:2699226 [3] NCCL INFO cudaDriverVersion 12020 gpua048:2699226:2699226 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.48<0> gpua048:2699226:2699226 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua048:2699226:2699285 [3] NCCL INFO NET/IB : No device found. 
gpua048:2699226:2699285 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.48<0> [1]hsn0:141.142.145.48<0> gpua048:2699226:2699285 [3] NCCL INFO Using network Socket gpua048:2699226:2699285 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua048:2699226:2699285 [3] NCCL INFO Trees [0] -1/-1/-1->27->26 [1] -1/-1/-1->27->26 gpua048:2699226:2699285 [3] NCCL INFO Channel 00/0 : 27[c7000] -> 28[7000] [send] via NET/Socket/1 gpua048:2699226:2699285 [3] NCCL INFO Channel 01/0 : 27[c7000] -> 28[7000] [send] via NET/Socket/1 gpua048:2699226:2699285 [3] NCCL INFO Connected all rings gpua048:2699226:2699285 [3] NCCL INFO Channel 00/0 : 27[c7000] -> 26[85000] via P2P/IPC/read gpua057:3639473:3639535 [3] NCCL INFO Channel 01/0 : 35[c7000] -> 34[85000] via P2P/IPC/read gpua057:3639473:3639535 [3] NCCL INFO Connected all trees gpua057:3639473:3639535 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua057:3639473:3639535 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua057:3639473:3639535 [3] NCCL INFO comm 0x55f2e0c3ff20 rank 35 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua068:455946:456009 [3] NCCL INFO Channel 01/0 : 39[c7000] -> 38[85000] via P2P/IPC/read gpua068:455946:456009 [3] NCCL INFO Connected all trees gpua068:455946:456009 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua068:455946:456009 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua068:455946:456009 [3] NCCL INFO comm 0x55795fdbdca0 rank 39 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua075:366302:366364 [2] NCCL INFO Channel 01/0 : 42[85000] -> 41[46000] via P2P/IPC/read gpua075:366302:366364 [2] NCCL INFO Connected all trees gpua075:366302:366364 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua075:366302:366364 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua075:366302:366364 [2] NCCL INFO comm 0x558917ed6310 rank 42 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua027:572258:572321 [0] NCCL INFO Connected all rings gpua027:572258:572321 [0] NCCL INFO Channel 00/0 : 8[7000] -> 12[7000] [receive] via NET/Socket/1 gpua027:572258:572321 [0] NCCL INFO Channel 01/0 : 4[7000] -> 12[7000] [receive] via NET/Socket/1 gpua027:572258:572321 [0] NCCL INFO Channel 01/0 : 12[7000] -> 28[7000] [send] via NET/Socket/1 gpua027:572258:572321 [0] NCCL INFO Channel 01/0 : 28[7000] -> 12[7000] [receive] via NET/Socket/1 gpua027:572258:572321 [0] NCCL INFO Channel 01/0 : 12[7000] -> 4[7000] [send] via NET/Socket/1 gpua027:572258:572321 [0] NCCL INFO Channel 00/0 : 12[7000] -> 8[7000] [send] via NET/Socket/1 gpua027:572258:572321 [0] NCCL INFO Connected all trees gpua027:572258:572321 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua027:572258:572321 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua027:572258:572321 [0] NCCL INFO comm 0x5561666df910 rank 12 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua036:3787651:3787651 [3] NCCL INFO cudaDriverVersion 12020 gpua036:3787651:3787651 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.36<0> gpua036:3787651:3787651 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua036:3787651:3787719 [3] NCCL INFO NET/IB : No device found. 
gpua036:3787651:3787719 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.36<0> [1]hsn0:141.142.145.36<0> [2]eth0:fe80::625:6d73:10ee:58bc%eth0<0> gpua036:3787651:3787719 [3] NCCL INFO Using network Socket gpua036:3787651:3787719 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua036:3787651:3787719 [3] NCCL INFO Trees [0] -1/-1/-1->23->22 [1] -1/-1/-1->23->22 gpua036:3787651:3787719 [3] NCCL INFO Channel 00/0 : 23[c7000] -> 24[7000] [send] via NET/Socket/1 gpua036:3787651:3787719 [3] NCCL INFO Channel 01/0 : 23[c7000] -> 24[7000] [send] via NET/Socket/1 gpua036:3787651:3787719 [3] NCCL INFO Connected all rings gpua048:2699226:2699285 [3] NCCL INFO Channel 01/0 : 27[c7000] -> 26[85000] via P2P/IPC/read gpua048:2699226:2699285 [3] NCCL INFO Connected all trees gpua048:2699226:2699285 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua048:2699226:2699285 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua048:2699226:2699285 [3] NCCL INFO comm 0x556ca54814f0 rank 27 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua068:455943:455943 [0] NCCL INFO cudaDriverVersion 12020 gpua068:455943:455943 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.68<0> gpua068:455943:455943 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua068:455943:456012 [0] NCCL INFO NET/IB : No device found. gpua068:455943:456012 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.68<0> [1]hsn0:141.142.145.68<0> gpua068:455943:456012 [0] NCCL INFO Using network Socket gpua068:455943:456012 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua068:455943:456012 [0] NCCL INFO Trees [0] 37/-1/-1->36->41 [1] 37/32/-1->36->44 gpua068:455943:456012 [0] NCCL INFO Channel 00/0 : 35[c7000] -> 36[7000] [receive] via NET/Socket/1 gpua068:455943:456012 [0] NCCL INFO Channel 01/0 : 35[c7000] -> 36[7000] [receive] via NET/Socket/1 gpua068:455943:456012 [0] NCCL INFO Channel 00/0 : 36[7000] -> 37[46000] via P2P/IPC/read gpua068:455943:456012 [0] NCCL INFO Channel 01/0 : 36[7000] -> 37[46000] via P2P/IPC/read gpua096:13797:13797 [3] NCCL INFO cudaDriverVersion 12020 gpua096:13797:13797 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.96<0> gpua096:13797:13797 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua096:13797:13857 [3] NCCL INFO NET/IB : No device found. 
gpua096:13797:13857 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.96<0> [1]hsn0:141.142.145.96<0> gpua096:13797:13857 [3] NCCL INFO Using network Socket gpua096:13797:13857 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua096:13797:13857 [3] NCCL INFO Trees [0] -1/-1/-1->63->62 [1] -1/-1/-1->63->62 gpua096:13797:13857 [3] NCCL INFO Channel 00/0 : 63[c7000] -> 0[7000] [send] via NET/Socket/1 gpua096:13797:13857 [3] NCCL INFO Channel 01/0 : 63[c7000] -> 0[7000] [send] via NET/Socket/1 gpua096:13797:13857 [3] NCCL INFO Connected all rings gpua096:13797:13857 [3] NCCL INFO Channel 00/0 : 63[c7000] -> 62[85000] via P2P/IPC/read gpua036:3787651:3787719 [3] NCCL INFO Channel 00/0 : 23[c7000] -> 22[85000] via P2P/IPC/read gpua036:3787651:3787719 [3] NCCL INFO Channel 01/0 : 23[c7000] -> 22[85000] via P2P/IPC/read gpua036:3787651:3787719 [3] NCCL INFO Connected all trees gpua036:3787651:3787719 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua036:3787651:3787719 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua036:3787651:3787719 [3] NCCL INFO comm 0x5566fbe59050 rank 23 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua036:3787650:3787650 [2] NCCL INFO cudaDriverVersion 12020 gpua036:3787650:3787650 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.36<0> gpua036:3787650:3787650 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua036:3787650:3787718 [2] NCCL INFO NET/IB : No device found. gpua036:3787650:3787718 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.36<0> [1]hsn0:141.142.145.36<0> [2]eth0:fe80::625:6d73:10ee:58bc%eth0<0> gpua068:455943:456012 [0] NCCL INFO Connected all rings gpua068:455943:456012 [0] NCCL INFO Channel 01/0 : 32[7000] -> 36[7000] [receive] via NET/Socket/1 gpua068:455943:456012 [0] NCCL INFO Channel 00/0 : 36[7000] -> 41[46000] [send] via NET/Socket/1 gpua068:455943:456012 [0] NCCL INFO Channel 01/0 : 36[7000] -> 44[7000] [send] via NET/Socket/1 gpua068:455943:456012 [0] NCCL INFO Channel 01/0 : 44[7000] -> 36[7000] [receive] via NET/Socket/1 gpua068:455943:456012 [0] NCCL INFO Channel 00/0 : 41[46000] -> 36[7000] [receive] via NET/Socket/1 gpua068:455943:456012 [0] NCCL INFO Channel 01/0 : 36[7000] -> 32[7000] [send] via NET/Socket/1 gpua068:455943:456012 [0] NCCL INFO Connected all trees gpua068:455943:456012 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua068:455943:456012 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua068:455943:456012 [0] NCCL INFO comm 0x564b69d1d3a0 rank 36 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua096:13797:13857 [3] NCCL INFO Channel 01/0 : 63[c7000] -> 62[85000] via P2P/IPC/read gpua096:13797:13857 [3] NCCL INFO Connected all trees gpua096:13797:13857 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua096:13797:13857 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua096:13797:13857 [3] NCCL INFO comm 0x55b6aa46dcc0 rank 63 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua036:3787650:3787718 [2] NCCL INFO Using network Socket gpua036:3787650:3787718 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua036:3787650:3787718 [2] NCCL INFO Trees [0] 23/-1/-1->22->21 [1] 23/-1/-1->22->21 gpua036:3787650:3787718 [2] NCCL INFO Channel 00/0 : 22[85000] -> 23[c7000] via P2P/IPC/read gpua036:3787650:3787718 [2] NCCL INFO Channel 01/0 : 22[85000] -> 23[c7000] via P2P/IPC/read gpua036:3787650:3787718 [2] NCCL INFO Connected all rings gpua036:3787650:3787718 [2] NCCL 
INFO Channel 00/0 : 22[85000] -> 21[46000] via P2P/IPC/read gpua036:3787650:3787718 [2] NCCL INFO Channel 01/0 : 22[85000] -> 21[46000] via P2P/IPC/read gpua036:3787650:3787718 [2] NCCL INFO Connected all trees gpua036:3787650:3787718 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua036:3787650:3787718 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua036:3787650:3787718 [2] NCCL INFO comm 0x5628ff999990 rank 22 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua079:3449381:3449381 [3] NCCL INFO cudaDriverVersion 12020 gpua079:3449381:3449381 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.79<0> gpua079:3449381:3449381 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua079:3449381:3449439 [3] NCCL INFO NET/IB : No device found. gpua079:3449381:3449439 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.79<0> [1]hsn0:141.142.145.79<0> gpua079:3449381:3449439 [3] NCCL INFO Using network Socket gpua079:3449381:3449439 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua079:3449381:3449439 [3] NCCL INFO Trees [0] -1/-1/-1->55->54 [1] -1/-1/-1->55->54 gpua079:3449381:3449439 [3] NCCL INFO Channel 00/0 : 55[c7000] -> 56[7000] [send] via NET/Socket/1 gpua079:3449381:3449439 [3] NCCL INFO Channel 01/0 : 55[c7000] -> 56[7000] [send] via NET/Socket/1 gpua079:3449381:3449439 [3] NCCL INFO Connected all rings gpua079:3449381:3449439 [3] NCCL INFO Channel 00/0 : 55[c7000] -> 54[85000] via P2P/IPC/read gpua096:13794:13794 [0] NCCL INFO cudaDriverVersion 12020 gpua096:13794:13794 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.96<0> gpua096:13794:13794 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua096:13794:13855 [0] NCCL INFO NET/IB : No device found. gpua096:13794:13855 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.96<0> [1]hsn0:141.142.145.96<0> gpua096:13794:13855 [0] NCCL INFO Using network Socket gpua096:13794:13855 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua096:13794:13855 [0] NCCL INFO Trees [0] 61/-1/-1->60->56 [1] 61/28/-1->60->-1 gpua096:13794:13855 [0] NCCL INFO Channel 00/0 : 59[c7000] -> 60[7000] [receive] via NET/Socket/1 gpua096:13794:13855 [0] NCCL INFO Channel 01/0 : 59[c7000] -> 60[7000] [receive] via NET/Socket/1 gpua096:13794:13855 [0] NCCL INFO Channel 00/0 : 60[7000] -> 61[46000] via P2P/IPC/read gpua096:13794:13855 [0] NCCL INFO Channel 01/0 : 60[7000] -> 61[46000] via P2P/IPC/read gpua036:3787649:3787649 [1] NCCL INFO cudaDriverVersion 12020 gpua036:3787649:3787649 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.36<0> gpua036:3787649:3787649 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua036:3787649:3787717 [1] NCCL INFO NET/IB : No device found. 
gpua036:3787649:3787717 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.36<0> [1]hsn0:141.142.145.36<0> [2]eth0:fe80::625:6d73:10ee:58bc%eth0<0> gpua036:3787649:3787717 [1] NCCL INFO Using network Socket gpua036:3787649:3787717 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua036:3787649:3787717 [1] NCCL INFO Trees [0] 22/-1/-1->21->20 [1] 22/24/-1->21->20 gpua036:3787649:3787717 [1] NCCL INFO Channel 00/0 : 21[46000] -> 22[85000] via P2P/IPC/read gpua036:3787649:3787717 [1] NCCL INFO Channel 01/0 : 21[46000] -> 22[85000] via P2P/IPC/read gpua036:3787649:3787717 [1] NCCL INFO Connected all rings gpua057:3639470:3639470 [0] NCCL INFO cudaDriverVersion 12020 gpua057:3639470:3639470 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.57<0> gpua057:3639470:3639470 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua057:3639470:3639536 [0] NCCL INFO NET/IB : No device found. gpua057:3639470:3639536 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.57<0> [1]hsn0:141.142.145.57<0> gpua057:3639470:3639536 [0] NCCL INFO Using network Socket gpua057:3639470:3639536 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua057:3639470:3639536 [0] NCCL INFO Trees [0] 33/48/-1->32->0 [1] 33/-1/-1->32->36 gpua057:3639470:3639536 [0] NCCL INFO Channel 00/0 : 31[c7000] -> 32[7000] [receive] via NET/Socket/1 gpua057:3639470:3639536 [0] NCCL INFO Channel 01/0 : 31[c7000] -> 32[7000] [receive] via NET/Socket/1 gpua057:3639470:3639536 [0] NCCL INFO Channel 00/0 : 32[7000] -> 33[46000] via P2P/IPC/read gpua079:3449381:3449439 [3] NCCL INFO Channel 01/0 : 55[c7000] -> 54[85000] via P2P/IPC/read gpua079:3449381:3449439 [3] NCCL INFO Connected all trees gpua079:3449381:3449439 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua079:3449381:3449439 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua079:3449381:3449439 [3] NCCL INFO comm 0x558bdf338fb0 rank 55 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua079:3449380:3449380 [2] NCCL INFO cudaDriverVersion 12020 gpua079:3449380:3449380 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.79<0> gpua079:3449380:3449380 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua079:3449380:3449440 [2] NCCL INFO NET/IB : No device found. 
gpua079:3449380:3449440 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.79<0> [1]hsn0:141.142.145.79<0> gpua079:3449380:3449440 [2] NCCL INFO Using network Socket gpua079:3449380:3449440 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua096:13794:13855 [0] NCCL INFO Connected all rings gpua096:13794:13855 [0] NCCL INFO Channel 00/0 : 56[7000] -> 60[7000] [receive] via NET/Socket/1 gpua096:13794:13855 [0] NCCL INFO Channel 01/0 : 28[7000] -> 60[7000] [receive] via NET/Socket/1 gpua096:13794:13855 [0] NCCL INFO Channel 01/0 : 60[7000] -> 28[7000] [send] via NET/Socket/1 gpua096:13794:13855 [0] NCCL INFO Channel 00/0 : 60[7000] -> 56[7000] [send] via NET/Socket/1 gpua096:13794:13855 [0] NCCL INFO Connected all trees gpua096:13794:13855 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua096:13794:13855 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua096:13794:13855 [0] NCCL INFO comm 0x5571ee9865a0 rank 60 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua036:3787649:3787717 [1] NCCL INFO Channel 01/0 : 21[46000] -> 24[7000] [send] via NET/Socket/1 gpua036:3787649:3787717 [1] NCCL INFO Channel 01/0 : 24[7000] -> 21[46000] [receive] via NET/Socket/1 gpua036:3787649:3787717 [1] NCCL INFO Channel 00/0 : 21[46000] -> 20[7000] via P2P/IPC/read gpua036:3787649:3787717 [1] NCCL INFO Channel 01/0 : 21[46000] -> 20[7000] via P2P/IPC/read gpua036:3787649:3787717 [1] NCCL INFO Connected all trees gpua036:3787649:3787717 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua036:3787649:3787717 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua036:3787649:3787717 [1] NCCL INFO comm 0x55b6f22694a0 rank 21 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua057:3639470:3639536 [0] NCCL INFO Channel 01/0 : 32[7000] -> 33[46000] via P2P/IPC/read gpua057:3639470:3639536 [0] NCCL INFO Connected all rings gpua057:3639470:3639536 [0] NCCL INFO Channel 01/0 : 32[7000] -> 36[7000] [send] via NET/Socket/1 gpua057:3639470:3639536 [0] NCCL INFO Channel 00/0 : 32[7000] -> 48[7000] [send] via NET/Socket/1 gpua057:3639470:3639536 [0] NCCL INFO Channel 00/0 : 0[7000] -> 32[7000] [receive] via NET/Socket/1 gpua057:3639470:3639536 [0] NCCL INFO Channel 00/0 : 32[7000] -> 0[7000] [send] via NET/Socket/1 gpua057:3639470:3639536 [0] NCCL INFO Channel 00/0 : 48[7000] -> 32[7000] [receive] via NET/Socket/1 gpua057:3639470:3639536 [0] NCCL INFO Channel 01/0 : 36[7000] -> 32[7000] [receive] via NET/Socket/1 gpua057:3639470:3639536 [0] NCCL INFO Connected all trees gpua057:3639470:3639536 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua057:3639470:3639536 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua075:366301:366301 [1] NCCL INFO cudaDriverVersion 12020 gpua075:366301:366301 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.75<0> gpua075:366301:366301 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua075:366301:366363 [1] NCCL INFO NET/IB : No device found. 
gpua075:366301:366363 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.75<0> [1]hsn0:141.142.145.75<0> gpua075:366301:366363 [1] NCCL INFO Using network Socket gpua075:366301:366363 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua075:366301:366363 [1] NCCL INFO Trees [0] 42/36/-1->41->40 [1] 42/-1/-1->41->40 gpua075:366301:366363 [1] NCCL INFO Channel 00/0 : 41[46000] -> 42[85000] via P2P/IPC/read gpua075:366301:366363 [1] NCCL INFO Channel 01/0 : 41[46000] -> 42[85000] via P2P/IPC/read gpua075:366301:366363 [1] NCCL INFO Connected all rings gpua075:366301:366363 [1] NCCL INFO Channel 00/0 : 36[7000] -> 41[46000] [receive] via NET/Socket/1 gpua079:3449380:3449440 [2] NCCL INFO Trees [0] 55/-1/-1->54->53 [1] 55/-1/-1->54->53 gpua079:3449380:3449440 [2] NCCL INFO Channel 00/0 : 54[85000] -> 55[c7000] via P2P/IPC/read gpua079:3449380:3449440 [2] NCCL INFO Channel 01/0 : 54[85000] -> 55[c7000] via P2P/IPC/read gpua079:3449380:3449440 [2] NCCL INFO Connected all rings gpua079:3449380:3449440 [2] NCCL INFO Channel 00/0 : 54[85000] -> 53[46000] via P2P/IPC/read gpua079:3449380:3449440 [2] NCCL INFO Channel 01/0 : 54[85000] -> 53[46000] via P2P/IPC/read gpua079:3449380:3449440 [2] NCCL INFO Connected all trees gpua079:3449380:3449440 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua079:3449380:3449440 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua079:3449380:3449440 [2] NCCL INFO comm 0x55a9659d6ca0 rank 54 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua036:3787648:3787648 [0] NCCL INFO cudaDriverVersion 12020 gpua036:3787648:3787648 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.36<0> gpua036:3787648:3787648 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua036:3787648:3787720 [0] NCCL INFO NET/IB : No device found. 
gpua036:3787648:3787720 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.36<0> [1]hsn0:141.142.145.36<0> [2]eth0:fe80::625:6d73:10ee:58bc%eth0<0> gpua036:3787648:3787720 [0] NCCL INFO Using network Socket gpua036:3787648:3787720 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua036:3787648:3787720 [0] NCCL INFO Trees [0] 21/-1/-1->20->25 [1] 21/16/-1->20->13 gpua036:3787648:3787720 [0] NCCL INFO Channel 00/0 : 19[c7000] -> 20[7000] [receive] via NET/Socket/1 gpua036:3787648:3787720 [0] NCCL INFO Channel 01/0 : 19[c7000] -> 20[7000] [receive] via NET/Socket/1 gpua036:3787648:3787720 [0] NCCL INFO Channel 00/0 : 20[7000] -> 21[46000] via P2P/IPC/read gpua057:3639470:3639536 [0] NCCL INFO comm 0x55cec14553d0 rank 32 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua075:366301:366363 [1] NCCL INFO Channel 00/0 : 41[46000] -> 36[7000] [send] via NET/Socket/1 gpua075:366301:366363 [1] NCCL INFO Channel 00/0 : 41[46000] -> 40[7000] via P2P/IPC/read gpua075:366301:366363 [1] NCCL INFO Channel 01/0 : 41[46000] -> 40[7000] via P2P/IPC/read gpua075:366301:366363 [1] NCCL INFO Connected all trees gpua075:366301:366363 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua075:366301:366363 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua075:366301:366363 [1] NCCL INFO comm 0x564fb43f94d0 rank 41 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua036:3787648:3787720 [0] NCCL INFO Channel 01/0 : 20[7000] -> 21[46000] via P2P/IPC/read gpua036:3787648:3787720 [0] NCCL INFO Connected all rings gpua036:3787648:3787720 [0] NCCL INFO Channel 01/0 : 16[7000] -> 20[7000] [receive] via NET/Socket/1 gpua036:3787648:3787720 [0] NCCL INFO Channel 00/0 : 20[7000] -> 25[46000] [send] via NET/Socket/1 gpua036:3787648:3787720 [0] NCCL INFO Channel 01/0 : 13[46000] -> 20[7000] [receive] via NET/Socket/1 gpua036:3787648:3787720 [0] NCCL INFO Channel 01/0 : 20[7000] -> 13[46000] [send] via NET/Socket/1 gpua036:3787648:3787720 [0] NCCL INFO Channel 00/0 : 25[46000] -> 20[7000] [receive] via NET/Socket/1 gpua036:3787648:3787720 [0] NCCL INFO Channel 01/0 : 20[7000] -> 16[7000] [send] via NET/Socket/1 gpua036:3787648:3787720 [0] NCCL INFO Connected all trees gpua036:3787648:3787720 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua036:3787648:3787720 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua057:3639472:3639472 [2] NCCL INFO cudaDriverVersion 12020 gpua057:3639472:3639472 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.57<0> gpua057:3639472:3639472 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua057:3639472:3639534 [2] NCCL INFO NET/IB : No device found. 
gpua057:3639472:3639534 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.57<0> [1]hsn0:141.142.145.57<0> gpua057:3639472:3639534 [2] NCCL INFO Using network Socket gpua057:3639472:3639534 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua057:3639472:3639534 [2] NCCL INFO Trees [0] 35/-1/-1->34->33 [1] 35/-1/-1->34->33 gpua057:3639472:3639534 [2] NCCL INFO Channel 00/0 : 34[85000] -> 35[c7000] via P2P/IPC/read gpua057:3639472:3639534 [2] NCCL INFO Channel 01/0 : 34[85000] -> 35[c7000] via P2P/IPC/read gpua057:3639472:3639534 [2] NCCL INFO Connected all rings gpua057:3639472:3639534 [2] NCCL INFO Channel 00/0 : 34[85000] -> 33[46000] via P2P/IPC/read gpua036:3787648:3787720 [0] NCCL INFO comm 0x55bf4a3590a0 rank 20 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua057:3639472:3639534 [2] NCCL INFO Channel 01/0 : 34[85000] -> 33[46000] via P2P/IPC/read gpua057:3639472:3639534 [2] NCCL INFO Connected all trees gpua057:3639472:3639534 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua057:3639472:3639534 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua057:3639472:3639534 [2] NCCL INFO comm 0x56078b2b2a40 rank 34 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua057:3639471:3639471 [1] NCCL INFO cudaDriverVersion 12020 gpua057:3639471:3639471 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.57<0> gpua057:3639471:3639471 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua057:3639471:3639537 [1] NCCL INFO NET/IB : No device found. gpua057:3639471:3639537 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.57<0> [1]hsn0:141.142.145.57<0> gpua057:3639471:3639537 [1] NCCL INFO Using network Socket gpua057:3639471:3639537 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua057:3639471:3639537 [1] NCCL INFO Trees [0] 34/16/-1->33->32 [1] 34/-1/-1->33->32 gpua057:3639471:3639537 [1] NCCL INFO Channel 00/0 : 33[46000] -> 34[85000] via P2P/IPC/read gpua057:3639471:3639537 [1] NCCL INFO Channel 01/0 : 33[46000] -> 34[85000] via P2P/IPC/read gpua057:3639471:3639537 [1] NCCL INFO Connected all rings gpua057:3639471:3639537 [1] NCCL INFO Channel 00/0 : 16[7000] -> 33[46000] [receive] via NET/Socket/1 gpua057:3639471:3639537 [1] NCCL INFO Channel 00/0 : 33[46000] -> 16[7000] [send] via NET/Socket/1 gpua057:3639471:3639537 [1] NCCL INFO Channel 00/0 : 33[46000] -> 32[7000] via P2P/IPC/read gpua057:3639471:3639537 [1] NCCL INFO Channel 01/0 : 33[46000] -> 32[7000] via P2P/IPC/read gpua057:3639471:3639537 [1] NCCL INFO Connected all trees gpua057:3639471:3639537 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua057:3639471:3639537 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua057:3639471:3639537 [1] NCCL INFO comm 0x56511946e0d0 rank 33 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua096:13796:13796 [2] NCCL INFO cudaDriverVersion 12020 gpua096:13796:13796 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.96<0> gpua096:13796:13796 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua096:13796:13858 [2] NCCL INFO NET/IB : No device found. 
gpua096:13796:13858 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.96<0> [1]hsn0:141.142.145.96<0> gpua096:13796:13858 [2] NCCL INFO Using network Socket gpua096:13796:13858 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua096:13796:13858 [2] NCCL INFO Trees [0] 63/-1/-1->62->61 [1] 63/-1/-1->62->61 gpua096:13796:13858 [2] NCCL INFO Channel 00/0 : 62[85000] -> 63[c7000] via P2P/IPC/read gpua096:13796:13858 [2] NCCL INFO Channel 01/0 : 62[85000] -> 63[c7000] via P2P/IPC/read gpua096:13796:13858 [2] NCCL INFO Connected all rings gpua096:13796:13858 [2] NCCL INFO Channel 00/0 : 62[85000] -> 61[46000] via P2P/IPC/read gpua096:13796:13858 [2] NCCL INFO Channel 01/0 : 62[85000] -> 61[46000] via P2P/IPC/read gpua096:13796:13858 [2] NCCL INFO Connected all trees gpua096:13796:13858 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua096:13796:13858 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua096:13796:13858 [2] NCCL INFO comm 0x555cc6a27400 rank 62 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua096:13795:13795 [1] NCCL INFO cudaDriverVersion 12020 gpua096:13795:13795 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.96<0> gpua096:13795:13795 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua096:13795:13856 [1] NCCL INFO NET/IB : No device found. gpua096:13795:13856 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.96<0> [1]hsn0:141.142.145.96<0> gpua096:13795:13856 [1] NCCL INFO Using network Socket gpua096:13795:13856 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua096:13795:13856 [1] NCCL INFO Trees [0] 62/-1/-1->61->60 [1] 62/-1/-1->61->60 gpua096:13795:13856 [1] NCCL INFO Channel 00/0 : 61[46000] -> 62[85000] via P2P/IPC/read gpua096:13795:13856 [1] NCCL INFO Channel 01/0 : 61[46000] -> 62[85000] via P2P/IPC/read gpua096:13795:13856 [1] NCCL INFO Connected all rings gpua096:13795:13856 [1] NCCL INFO Channel 00/0 : 61[46000] -> 60[7000] via P2P/IPC/read gpua096:13795:13856 [1] NCCL INFO Channel 01/0 : 61[46000] -> 60[7000] via P2P/IPC/read gpua096:13795:13856 [1] NCCL INFO Connected all trees gpua096:13795:13856 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua096:13795:13856 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua096:13795:13856 [1] NCCL INFO comm 0x55823d0de5e0 rank 61 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua078:834949:834949 [1] NCCL INFO cudaDriverVersion 12020 gpua078:834949:834949 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.78<0> gpua078:834949:834949 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua078:834949:835008 [1] NCCL INFO NET/IB : No device found. 
gpua078:834949:835008 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.78<0> [1]hsn0:141.142.145.78<0> gpua078:834949:835008 [1] NCCL INFO Using network Socket gpua078:834949:835008 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua078:834949:835008 [1] NCCL INFO Trees [0] 50/40/-1->49->48 [1] 50/-1/-1->49->48 gpua078:834949:835008 [1] NCCL INFO Channel 00/0 : 49[46000] -> 50[85000] via P2P/IPC/read gpua078:834949:835008 [1] NCCL INFO Channel 01/0 : 49[46000] -> 50[85000] via P2P/IPC/read gpua078:834949:835008 [1] NCCL INFO Connected all rings gpua078:834949:835008 [1] NCCL INFO Channel 00/0 : 40[7000] -> 49[46000] [receive] via NET/Socket/1 gpua078:834949:835008 [1] NCCL INFO Channel 00/0 : 49[46000] -> 40[7000] [send] via NET/Socket/1 gpua078:834949:835008 [1] NCCL INFO Channel 00/0 : 49[46000] -> 48[7000] via P2P/IPC/read gpua078:834949:835008 [1] NCCL INFO Channel 01/0 : 49[46000] -> 48[7000] via P2P/IPC/read gpua078:834949:835008 [1] NCCL INFO Connected all trees gpua078:834949:835008 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua078:834949:835008 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua078:834949:835008 [1] NCCL INFO comm 0x561c49421120 rank 49 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua078:834951:834951 [3] NCCL INFO cudaDriverVersion 12020 gpua078:834951:834951 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.78<0> gpua078:834951:834951 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua078:834951:835005 [3] NCCL INFO NET/IB : No device found. gpua078:834951:835005 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.78<0> [1]hsn0:141.142.145.78<0> gpua078:834951:835005 [3] NCCL INFO Using network Socket gpua078:834951:835005 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua078:834951:835005 [3] NCCL INFO Trees [0] -1/-1/-1->51->50 [1] -1/-1/-1->51->50 gpua078:834951:835005 [3] NCCL INFO Channel 00/0 : 51[c7000] -> 52[7000] [send] via NET/Socket/1 gpua078:834951:835005 [3] NCCL INFO Channel 01/0 : 51[c7000] -> 52[7000] [send] via NET/Socket/1 gpua078:834951:835005 [3] NCCL INFO Connected all rings gpua078:834951:835005 [3] NCCL INFO Channel 00/0 : 51[c7000] -> 50[85000] via P2P/IPC/read gpua078:834951:835005 [3] NCCL INFO Channel 01/0 : 51[c7000] -> 50[85000] via P2P/IPC/read gpua078:834951:835005 [3] NCCL INFO Connected all trees gpua078:834951:835005 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua078:834951:835005 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua078:834951:835005 [3] NCCL INFO comm 0x559eef5a6fe0 rank 51 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua078:834950:834950 [2] NCCL INFO cudaDriverVersion 12020 gpua078:834950:834950 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.78<0> gpua078:834950:834950 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua078:834950:835007 [2] NCCL INFO NET/IB : No device found. 
gpua078:834950:835007 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.78<0> [1]hsn0:141.142.145.78<0> gpua078:834950:835007 [2] NCCL INFO Using network Socket gpua078:834950:835007 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua078:834950:835007 [2] NCCL INFO Trees [0] 51/-1/-1->50->49 [1] 51/-1/-1->50->49 gpua078:834950:835007 [2] NCCL INFO Channel 00/0 : 50[85000] -> 51[c7000] via P2P/IPC/read gpua078:834950:835007 [2] NCCL INFO Channel 01/0 : 50[85000] -> 51[c7000] via P2P/IPC/read gpua078:834950:835007 [2] NCCL INFO Connected all rings gpua078:834950:835007 [2] NCCL INFO Channel 00/0 : 50[85000] -> 49[46000] via P2P/IPC/read gpua078:834950:835007 [2] NCCL INFO Channel 01/0 : 50[85000] -> 49[46000] via P2P/IPC/read gpua078:834950:835007 [2] NCCL INFO Connected all trees gpua078:834950:835007 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua078:834950:835007 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua078:834950:835007 [2] NCCL INFO comm 0x56361c8f2440 rank 50 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua077:3581986:3581986 [3] NCCL INFO cudaDriverVersion 12020 gpua077:3581986:3581986 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.77<0> gpua077:3581986:3581986 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua077:3581986:3582053 [3] NCCL INFO NET/IB : No device found. gpua077:3581986:3582053 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.77<0> [1]hsn0:141.142.145.77<0> gpua077:3581986:3582053 [3] NCCL INFO Using network Socket gpua077:3581986:3582053 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua077:3581986:3582053 [3] NCCL INFO Trees [0] -1/-1/-1->47->46 [1] -1/-1/-1->47->46 gpua077:3581986:3582053 [3] NCCL INFO Channel 00/0 : 47[c7000] -> 48[7000] [send] via NET/Socket/1 gpua077:3581986:3582053 [3] NCCL INFO Channel 01/0 : 47[c7000] -> 48[7000] [send] via NET/Socket/1 gpua077:3581986:3582053 [3] NCCL INFO Connected all rings gpua077:3581986:3582053 [3] NCCL INFO Channel 00/0 : 47[c7000] -> 46[85000] via P2P/IPC/read gpua077:3581986:3582053 [3] NCCL INFO Channel 01/0 : 47[c7000] -> 46[85000] via P2P/IPC/read gpua077:3581986:3582053 [3] NCCL INFO Connected all trees gpua077:3581986:3582053 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua077:3581986:3582053 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua077:3581986:3582053 [3] NCCL INFO comm 0x558806f8aa90 rank 47 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua077:3581985:3581985 [2] NCCL INFO cudaDriverVersion 12020 gpua077:3581985:3581985 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.77<0> gpua077:3581985:3581985 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua077:3581985:3582054 [2] NCCL INFO NET/IB : No device found. 
gpua077:3581985:3582054 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.77<0> [1]hsn0:141.142.145.77<0> gpua077:3581985:3582054 [2] NCCL INFO Using network Socket gpua077:3581985:3582054 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua077:3581985:3582054 [2] NCCL INFO Trees [0] 47/-1/-1->46->45 [1] 47/-1/-1->46->45 gpua077:3581985:3582054 [2] NCCL INFO Channel 00/0 : 46[85000] -> 47[c7000] via P2P/IPC/read gpua077:3581985:3582054 [2] NCCL INFO Channel 01/0 : 46[85000] -> 47[c7000] via P2P/IPC/read gpua077:3581985:3582054 [2] NCCL INFO Connected all rings gpua077:3581985:3582054 [2] NCCL INFO Channel 00/0 : 46[85000] -> 45[46000] via P2P/IPC/read gpua077:3581985:3582054 [2] NCCL INFO Channel 01/0 : 46[85000] -> 45[46000] via P2P/IPC/read gpua077:3581985:3582054 [2] NCCL INFO Connected all trees gpua077:3581985:3582054 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua077:3581985:3582054 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua077:3581985:3582054 [2] NCCL INFO comm 0x56463ba1f440 rank 46 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua029:4090292:4090292 [2] NCCL INFO cudaDriverVersion 12020 gpua029:4090292:4090292 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.29<0> gpua029:4090292:4090292 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua029:4090292:4090350 [2] NCCL INFO NET/IB : No device found. gpua029:4090292:4090350 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.29<0> [1]hsn0:141.142.145.29<0> gpua029:4090292:4090350 [2] NCCL INFO Using network Socket gpua029:4090292:4090350 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua029:4090292:4090350 [2] NCCL INFO Trees [0] 19/-1/-1->18->17 [1] 19/-1/-1->18->17 gpua029:4090292:4090350 [2] NCCL INFO Channel 00/0 : 18[85000] -> 19[c7000] via P2P/IPC/read gpua029:4090292:4090350 [2] NCCL INFO Channel 01/0 : 18[85000] -> 19[c7000] via P2P/IPC/read gpua029:4090292:4090350 [2] NCCL INFO Connected all rings gpua029:4090292:4090350 [2] NCCL INFO Channel 00/0 : 18[85000] -> 17[46000] via P2P/IPC/read gpua003:274358:274358 [2] NCCL INFO cudaDriverVersion 12020 gpua003:274358:274358 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.3<0> gpua003:274358:274358 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua003:274358:274417 [2] NCCL INFO NET/IB : No device found. gpua003:274358:274417 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.3<0> [1]hsn0:141.142.145.3<0> gpua003:274358:274417 [2] NCCL INFO Using network Socket gpua003:274358:274417 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua003:274358:274417 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 gpua003:274358:274417 [2] NCCL INFO Channel 00/0 : 2[85000] -> 3[c7000] via P2P/IPC/read gpua003:274358:274417 [2] NCCL INFO Channel 01/0 : 2[85000] -> 3[c7000] via P2P/IPC/read gpua003:274358:274417 [2] NCCL INFO Connected all rings gpua003:274358:274417 [2] NCCL INFO Channel 00/0 : 2[85000] -> 1[46000] via P2P/IPC/read gpua078:834948:834948 [0] NCCL INFO cudaDriverVersion 12020 gpua078:834948:834948 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.78<0> gpua078:834948:834948 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua078:834948:835006 [0] NCCL INFO NET/IB : No device found. 
gpua078:834948:835006 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.78<0> [1]hsn0:141.142.145.78<0> gpua078:834948:835006 [0] NCCL INFO Using network Socket gpua078:834948:835006 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua078:834948:835006 [0] NCCL INFO Trees [0] 49/56/-1->48->32 [1] 49/-1/-1->48->52 gpua078:834948:835006 [0] NCCL INFO Channel 00/0 : 47[c7000] -> 48[7000] [receive] via NET/Socket/1 gpua078:834948:835006 [0] NCCL INFO Channel 01/0 : 47[c7000] -> 48[7000] [receive] via NET/Socket/1 gpua078:834948:835006 [0] NCCL INFO Channel 00/0 : 48[7000] -> 49[46000] via P2P/IPC/read gpua078:834948:835006 [0] NCCL INFO Channel 01/0 : 48[7000] -> 49[46000] via P2P/IPC/read gpua077:3581983:3581983 [0] NCCL INFO cudaDriverVersion 12020 gpua077:3581983:3581983 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.77<0> gpua077:3581983:3581983 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua077:3581983:3582055 [0] NCCL INFO NET/IB : No device found. gpua077:3581983:3582055 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.77<0> [1]hsn0:141.142.145.77<0> gpua077:3581983:3582055 [0] NCCL INFO Using network Socket gpua077:3581983:3582055 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua077:3581983:3582055 [0] NCCL INFO Trees [0] 45/-1/-1->44->40 [1] 45/36/-1->44->29 gpua077:3581983:3582055 [0] NCCL INFO Channel 00/0 : 43[c7000] -> 44[7000] [receive] via NET/Socket/1 gpua077:3581983:3582055 [0] NCCL INFO Channel 01/0 : 43[c7000] -> 44[7000] [receive] via NET/Socket/1 gpua077:3581983:3582055 [0] NCCL INFO Channel 00/0 : 44[7000] -> 45[46000] via P2P/IPC/read gpua080:2988501:2988501 [1] NCCL INFO cudaDriverVersion 12020 gpua080:2988501:2988501 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.80<0> gpua080:2988501:2988501 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua080:2988501:2988559 [1] NCCL INFO NET/IB : No device found. 
gpua080:2988501:2988559 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.80<0> [1]hsn0:141.142.145.80<0> gpua080:2988501:2988559 [1] NCCL INFO Using network Socket gpua080:2988501:2988559 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua080:2988501:2988559 [1] NCCL INFO Trees [0] 58/52/-1->57->56 [1] 58/-1/-1->57->56 gpua080:2988501:2988559 [1] NCCL INFO Channel 00/0 : 57[46000] -> 58[85000] via P2P/IPC/read gpua080:2988501:2988559 [1] NCCL INFO Channel 01/0 : 57[46000] -> 58[85000] via P2P/IPC/read gpua080:2988501:2988559 [1] NCCL INFO Connected all rings gpua080:2988501:2988559 [1] NCCL INFO Channel 00/0 : 52[7000] -> 57[46000] [receive] via NET/Socket/1 gpua029:4090292:4090350 [2] NCCL INFO Channel 01/0 : 18[85000] -> 17[46000] via P2P/IPC/read gpua029:4090292:4090350 [2] NCCL INFO Connected all trees gpua029:4090292:4090350 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua029:4090292:4090350 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua029:4090292:4090350 [2] NCCL INFO comm 0x557aa2165da0 rank 18 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua003:274358:274417 [2] NCCL INFO Channel 01/0 : 2[85000] -> 1[46000] via P2P/IPC/read gpua003:274358:274417 [2] NCCL INFO Connected all trees gpua003:274358:274417 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua003:274358:274417 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua003:274358:274417 [2] NCCL INFO comm 0x559f4cca6f60 rank 2 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua078:834948:835006 [0] NCCL INFO Connected all rings gpua078:834948:835006 [0] NCCL INFO Channel 01/0 : 48[7000] -> 52[7000] [send] via NET/Socket/1 gpua078:834948:835006 [0] NCCL INFO Channel 00/0 : 48[7000] -> 56[7000] [send] via NET/Socket/1 gpua078:834948:835006 [0] NCCL INFO Channel 00/0 : 32[7000] -> 48[7000] [receive] via NET/Socket/1 gpua078:834948:835006 [0] NCCL INFO Channel 00/0 : 48[7000] -> 32[7000] [send] via NET/Socket/1 gpua078:834948:835006 [0] NCCL INFO Channel 00/0 : 56[7000] -> 48[7000] [receive] via NET/Socket/1 gpua078:834948:835006 [0] NCCL INFO Channel 01/0 : 52[7000] -> 48[7000] [receive] via NET/Socket/1 gpua078:834948:835006 [0] NCCL INFO Connected all trees gpua078:834948:835006 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua078:834948:835006 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua078:834948:835006 [0] NCCL INFO comm 0x56539b4bea20 rank 48 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua077:3581983:3582055 [0] NCCL INFO Channel 01/0 : 44[7000] -> 45[46000] via P2P/IPC/read gpua077:3581983:3582055 [0] NCCL INFO Connected all rings gpua077:3581983:3582055 [0] NCCL INFO Channel 00/0 : 40[7000] -> 44[7000] [receive] via NET/Socket/1 gpua077:3581983:3582055 [0] NCCL INFO Channel 01/0 : 36[7000] -> 44[7000] [receive] via NET/Socket/1 gpua077:3581983:3582055 [0] NCCL INFO Channel 01/0 : 29[46000] -> 44[7000] [receive] via NET/Socket/1 gpua077:3581983:3582055 [0] NCCL INFO Channel 01/0 : 44[7000] -> 29[46000] [send] via NET/Socket/1 gpua077:3581983:3582055 [0] NCCL INFO Channel 01/0 : 44[7000] -> 36[7000] [send] via NET/Socket/1 gpua077:3581983:3582055 [0] NCCL INFO Channel 00/0 : 44[7000] -> 40[7000] [send] via NET/Socket/1 gpua077:3581983:3582055 [0] NCCL INFO Connected all trees gpua077:3581983:3582055 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua077:3581983:3582055 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua080:2988501:2988559 
[1] NCCL INFO Channel 00/0 : 57[46000] -> 52[7000] [send] via NET/Socket/1 gpua080:2988501:2988559 [1] NCCL INFO Channel 00/0 : 57[46000] -> 56[7000] via P2P/IPC/read gpua080:2988501:2988559 [1] NCCL INFO Channel 01/0 : 57[46000] -> 56[7000] via P2P/IPC/read gpua080:2988501:2988559 [1] NCCL INFO Connected all trees gpua080:2988501:2988559 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua080:2988501:2988559 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua080:2988501:2988559 [1] NCCL INFO comm 0x5622653a6000 rank 57 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua029:4090293:4090293 [3] NCCL INFO cudaDriverVersion 12020 gpua029:4090293:4090293 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.29<0> gpua029:4090293:4090293 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua029:4090293:4090352 [3] NCCL INFO NET/IB : No device found. gpua029:4090293:4090352 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.29<0> [1]hsn0:141.142.145.29<0> gpua029:4090293:4090352 [3] NCCL INFO Using network Socket gpua029:4090293:4090352 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua029:4090293:4090352 [3] NCCL INFO Trees [0] -1/-1/-1->19->18 [1] -1/-1/-1->19->18 gpua029:4090293:4090352 [3] NCCL INFO Channel 00/0 : 19[c7000] -> 20[7000] [send] via NET/Socket/1 gpua029:4090293:4090352 [3] NCCL INFO Channel 01/0 : 19[c7000] -> 20[7000] [send] via NET/Socket/1 gpua029:4090293:4090352 [3] NCCL INFO Connected all rings gpua029:4090293:4090352 [3] NCCL INFO Channel 00/0 : 19[c7000] -> 18[85000] via P2P/IPC/read gpua003:274356:274419 [0] NCCL INFO NET/IB : No device found. gpua003:274356:274419 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.3<0> [1]hsn0:141.142.145.3<0> gpua003:274356:274419 [0] NCCL INFO Using network Socket gpua003:274356:274419 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua003:274356:274419 [0] NCCL INFO Channel 00/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 gpua003:274356:274419 [0] NCCL INFO Channel 01/02 : 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 gpua003:274356:274419 [0] NCCL INFO Trees [0] 1/32/-1->0->-1 [1] 1/-1/-1->0->4 gpua003:274356:274419 [0] NCCL INFO Channel 00/0 : 63[c7000] -> 0[7000] [receive] via NET/Socket/1 gpua003:274356:274419 [0] NCCL INFO Channel 01/0 : 63[c7000] -> 0[7000] [receive] via NET/Socket/1 gpua003:274356:274419 [0] NCCL INFO Channel 00/0 : 0[7000] -> 1[46000] via P2P/IPC/read gpua014:2825143:2825143 [2] NCCL INFO cudaDriverVersion 12020 gpua014:2825143:2825143 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.14<0> gpua014:2825143:2825143 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua014:2825143:2825202 [2] NCCL INFO NET/IB : No device found. 
gpua014:2825143:2825202 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.14<0> [1]hsn0:141.142.145.14<0> gpua014:2825143:2825202 [2] NCCL INFO Using network Socket gpua014:2825143:2825202 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua014:2825143:2825202 [2] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 gpua014:2825143:2825202 [2] NCCL INFO Channel 00/0 : 6[85000] -> 7[c7000] via P2P/IPC/read gpua014:2825143:2825202 [2] NCCL INFO Channel 01/0 : 6[85000] -> 7[c7000] via P2P/IPC/read gpua014:2825143:2825202 [2] NCCL INFO Connected all rings gpua014:2825143:2825202 [2] NCCL INFO Channel 00/0 : 6[85000] -> 5[46000] via P2P/IPC/read gpua077:3581983:3582055 [0] NCCL INFO comm 0x55fd1f9039d0 rank 44 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua080:2988500:2988500 [0] NCCL INFO cudaDriverVersion 12020 gpua080:2988500:2988500 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.80<0> gpua080:2988500:2988500 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua080:2988500:2988560 [0] NCCL INFO NET/IB : No device found. gpua080:2988500:2988560 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.80<0> [1]hsn0:141.142.145.80<0> gpua080:2988500:2988560 [0] NCCL INFO Using network Socket gpua080:2988500:2988560 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua080:2988500:2988560 [0] NCCL INFO Trees [0] 57/60/-1->56->48 [1] 57/-1/-1->56->53 gpua080:2988500:2988560 [0] NCCL INFO Channel 00/0 : 55[c7000] -> 56[7000] [receive] via NET/Socket/1 gpua080:2988500:2988560 [0] NCCL INFO Channel 01/0 : 55[c7000] -> 56[7000] [receive] via NET/Socket/1 gpua080:2988500:2988560 [0] NCCL INFO Channel 00/0 : 56[7000] -> 57[46000] via P2P/IPC/read gpua029:4090293:4090352 [3] NCCL INFO Channel 01/0 : 19[c7000] -> 18[85000] via P2P/IPC/read gpua029:4090293:4090352 [3] NCCL INFO Connected all trees gpua029:4090293:4090352 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua029:4090293:4090352 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua029:4090293:4090352 [3] NCCL INFO comm 0x563fc2b09d40 rank 19 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua003:274356:274419 [0] NCCL INFO Channel 01/0 : 0[7000] -> 1[46000] via P2P/IPC/read gpua003:274356:274419 [0] NCCL INFO Connected all rings gpua003:274356:274419 [0] NCCL INFO Channel 01/0 : 0[7000] -> 4[7000] [send] via NET/Socket/1 gpua003:274356:274419 [0] NCCL INFO Channel 00/0 : 32[7000] -> 0[7000] [receive] via NET/Socket/1 gpua003:274356:274419 [0] NCCL INFO Channel 00/0 : 0[7000] -> 32[7000] [send] via NET/Socket/1 gpua003:274356:274419 [0] NCCL INFO Channel 01/0 : 4[7000] -> 0[7000] [receive] via NET/Socket/1 gpua003:274356:274419 [0] NCCL INFO Connected all trees gpua003:274356:274419 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua003:274356:274419 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua003:274356:274419 [0] NCCL INFO comm 0x55c1a631c140 rank 0 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua014:2825143:2825202 [2] NCCL INFO Channel 01/0 : 6[85000] -> 5[46000] via P2P/IPC/read gpua014:2825143:2825202 [2] NCCL INFO Connected all trees gpua014:2825143:2825202 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua014:2825143:2825202 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua014:2825143:2825202 [2] NCCL INFO comm 0x55aef0e028d0 rank 6 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua077:3581984:3581984 [1] NCCL INFO cudaDriverVersion 12020 
gpua077:3581984:3581984 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.77<0> gpua077:3581984:3581984 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua077:3581984:3582052 [1] NCCL INFO NET/IB : No device found. gpua077:3581984:3582052 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.77<0> [1]hsn0:141.142.145.77<0> gpua077:3581984:3582052 [1] NCCL INFO Using network Socket gpua077:3581984:3582052 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua077:3581984:3582052 [1] NCCL INFO Trees [0] 46/-1/-1->45->44 [1] 46/52/-1->45->44 gpua077:3581984:3582052 [1] NCCL INFO Channel 00/0 : 45[46000] -> 46[85000] via P2P/IPC/read gpua077:3581984:3582052 [1] NCCL INFO Channel 01/0 : 45[46000] -> 46[85000] via P2P/IPC/read gpua077:3581984:3582052 [1] NCCL INFO Connected all rings gpua077:3581984:3582052 [1] NCCL INFO Channel 01/0 : 45[46000] -> 52[7000] [send] via NET/Socket/1 gpua080:2988500:2988560 [0] NCCL INFO Channel 01/0 : 56[7000] -> 57[46000] via P2P/IPC/read gpua080:2988500:2988560 [0] NCCL INFO Connected all rings gpua080:2988500:2988560 [0] NCCL INFO Channel 01/0 : 53[46000] -> 56[7000] [receive] via NET/Socket/1 gpua080:2988500:2988560 [0] NCCL INFO Channel 00/0 : 56[7000] -> 60[7000] [send] via NET/Socket/1 gpua080:2988500:2988560 [0] NCCL INFO Channel 00/0 : 48[7000] -> 56[7000] [receive] via NET/Socket/1 gpua080:2988500:2988560 [0] NCCL INFO Channel 00/0 : 56[7000] -> 48[7000] [send] via NET/Socket/1 gpua080:2988500:2988560 [0] NCCL INFO Channel 00/0 : 60[7000] -> 56[7000] [receive] via NET/Socket/1 gpua080:2988500:2988560 [0] NCCL INFO Channel 01/0 : 56[7000] -> 53[46000] [send] via NET/Socket/1 gpua080:2988500:2988560 [0] NCCL INFO Connected all trees gpua080:2988500:2988560 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua080:2988500:2988560 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua029:4090290:4090290 [0] NCCL INFO cudaDriverVersion 12020 gpua029:4090290:4090290 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.29<0> gpua029:4090290:4090290 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua029:4090290:4090351 [0] NCCL INFO NET/IB : No device found. gpua029:4090290:4090351 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.29<0> [1]hsn0:141.142.145.29<0> gpua029:4090290:4090351 [0] NCCL INFO Using network Socket gpua029:4090290:4090351 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua029:4090290:4090351 [0] NCCL INFO Trees [0] 17/24/-1->16->33 [1] 17/-1/-1->16->20 gpua029:4090290:4090351 [0] NCCL INFO Channel 00/0 : 15[c7000] -> 16[7000] [receive] via NET/Socket/1 gpua029:4090290:4090351 [0] NCCL INFO Channel 01/0 : 15[c7000] -> 16[7000] [receive] via NET/Socket/1 gpua029:4090290:4090351 [0] NCCL INFO Channel 00/0 : 16[7000] -> 17[46000] via P2P/IPC/read gpua014:2825144:2825144 [3] NCCL INFO cudaDriverVersion 12020 gpua014:2825144:2825144 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.14<0> gpua014:2825144:2825144 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua014:2825144:2825203 [3] NCCL INFO NET/IB : No device found. 
gpua014:2825144:2825203 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.14<0> [1]hsn0:141.142.145.14<0> gpua014:2825144:2825203 [3] NCCL INFO Using network Socket gpua014:2825144:2825203 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua014:2825144:2825203 [3] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 gpua014:2825144:2825203 [3] NCCL INFO Channel 00/0 : 7[c7000] -> 8[7000] [send] via NET/Socket/1 gpua014:2825144:2825203 [3] NCCL INFO Channel 01/0 : 7[c7000] -> 8[7000] [send] via NET/Socket/1 gpua014:2825144:2825203 [3] NCCL INFO Connected all rings gpua014:2825144:2825203 [3] NCCL INFO Channel 00/0 : 7[c7000] -> 6[85000] via P2P/IPC/read gpua077:3581984:3582052 [1] NCCL INFO Channel 01/0 : 52[7000] -> 45[46000] [receive] via NET/Socket/1 gpua077:3581984:3582052 [1] NCCL INFO Channel 00/0 : 45[46000] -> 44[7000] via P2P/IPC/read gpua077:3581984:3582052 [1] NCCL INFO Channel 01/0 : 45[46000] -> 44[7000] via P2P/IPC/read gpua077:3581984:3582052 [1] NCCL INFO Connected all trees gpua077:3581984:3582052 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua077:3581984:3582052 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua077:3581984:3582052 [1] NCCL INFO comm 0x55654a59fb50 rank 45 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua080:2988500:2988560 [0] NCCL INFO comm 0x55b683430250 rank 56 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua029:4090290:4090351 [0] NCCL INFO Channel 01/0 : 16[7000] -> 17[46000] via P2P/IPC/read gpua029:4090290:4090351 [0] NCCL INFO Connected all rings gpua029:4090290:4090351 [0] NCCL INFO Channel 01/0 : 16[7000] -> 20[7000] [send] via NET/Socket/1 gpua029:4090290:4090351 [0] NCCL INFO Channel 00/0 : 16[7000] -> 24[7000] [send] via NET/Socket/1 gpua029:4090290:4090351 [0] NCCL INFO Channel 00/0 : 16[7000] -> 33[46000] [send] via NET/Socket/1 gpua029:4090290:4090351 [0] NCCL INFO Channel 00/0 : 33[46000] -> 16[7000] [receive] via NET/Socket/1 gpua029:4090290:4090351 [0] NCCL INFO Channel 00/0 : 24[7000] -> 16[7000] [receive] via NET/Socket/1 gpua029:4090290:4090351 [0] NCCL INFO Channel 01/0 : 20[7000] -> 16[7000] [receive] via NET/Socket/1 gpua029:4090290:4090351 [0] NCCL INFO Connected all trees gpua029:4090290:4090351 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua029:4090290:4090351 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua003:274359:274359 [3] NCCL INFO cudaDriverVersion 12020 gpua003:274359:274359 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.3<0> gpua003:274359:274359 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua003:274359:274420 [3] NCCL INFO NET/IB : No device found. 
gpua003:274359:274420 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.3<0> [1]hsn0:141.142.145.3<0> gpua003:274359:274420 [3] NCCL INFO Using network Socket gpua003:274359:274420 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua003:274359:274420 [3] NCCL INFO Trees [0] -1/-1/-1->3->2 [1] -1/-1/-1->3->2 gpua003:274359:274420 [3] NCCL INFO Channel 00/0 : 3[c7000] -> 4[7000] [send] via NET/Socket/1 gpua003:274359:274420 [3] NCCL INFO Channel 01/0 : 3[c7000] -> 4[7000] [send] via NET/Socket/1 gpua003:274359:274420 [3] NCCL INFO Connected all rings gpua003:274359:274420 [3] NCCL INFO Channel 00/0 : 3[c7000] -> 2[85000] via P2P/IPC/read gpua014:2825144:2825203 [3] NCCL INFO Channel 01/0 : 7[c7000] -> 6[85000] via P2P/IPC/read gpua014:2825144:2825203 [3] NCCL INFO Connected all trees gpua014:2825144:2825203 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua014:2825144:2825203 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua014:2825144:2825203 [3] NCCL INFO comm 0x5651cb07a9b0 rank 7 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua080:2988503:2988503 [3] NCCL INFO cudaDriverVersion 12020 gpua080:2988503:2988503 [3] NCCL INFO Bootstrap : Using eth1:172.28.23.80<0> gpua080:2988503:2988503 [3] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua080:2988503:2988558 [3] NCCL INFO NET/IB : No device found. gpua080:2988503:2988558 [3] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.80<0> [1]hsn0:141.142.145.80<0> gpua080:2988503:2988558 [3] NCCL INFO Using network Socket gpua080:2988503:2988558 [3] NCCL INFO Setting affinity for GPU 3 to ffff gpua080:2988503:2988558 [3] NCCL INFO Trees [0] -1/-1/-1->59->58 [1] -1/-1/-1->59->58 gpua080:2988503:2988558 [3] NCCL INFO Channel 00/0 : 59[c7000] -> 60[7000] [send] via NET/Socket/1 gpua080:2988503:2988558 [3] NCCL INFO Channel 01/0 : 59[c7000] -> 60[7000] [send] via NET/Socket/1 gpua080:2988503:2988558 [3] NCCL INFO Connected all rings gpua080:2988503:2988558 [3] NCCL INFO Channel 00/0 : 59[c7000] -> 58[85000] via P2P/IPC/read gpua029:4090290:4090351 [0] NCCL INFO comm 0x55d4c88773d0 rank 16 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE gpua029:4090291:4090291 [1] NCCL INFO cudaDriverVersion 12020 gpua029:4090291:4090291 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.29<0> gpua029:4090291:4090291 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua029:4090291:4090353 [1] NCCL INFO NET/IB : No device found. 
gpua029:4090291:4090353 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.29<0> [1]hsn0:141.142.145.29<0> gpua029:4090291:4090353 [1] NCCL INFO Using network Socket gpua029:4090291:4090353 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua029:4090291:4090353 [1] NCCL INFO Trees [0] 18/8/-1->17->16 [1] 18/-1/-1->17->16 gpua029:4090291:4090353 [1] NCCL INFO Channel 00/0 : 17[46000] -> 18[85000] via P2P/IPC/read gpua029:4090291:4090353 [1] NCCL INFO Channel 01/0 : 17[46000] -> 18[85000] via P2P/IPC/read gpua029:4090291:4090353 [1] NCCL INFO Connected all rings gpua003:274359:274420 [3] NCCL INFO Channel 01/0 : 3[c7000] -> 2[85000] via P2P/IPC/read gpua003:274359:274420 [3] NCCL INFO Connected all trees gpua003:274359:274420 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua003:274359:274420 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua003:274359:274420 [3] NCCL INFO comm 0x55c43c1fbd10 rank 3 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua014:2825142:2825142 [1] NCCL INFO cudaDriverVersion 12020 gpua014:2825142:2825142 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.14<0> gpua014:2825142:2825142 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua014:2825142:2825201 [1] NCCL INFO NET/IB : No device found. gpua014:2825142:2825201 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.14<0> [1]hsn0:141.142.145.14<0> gpua014:2825142:2825201 [1] NCCL INFO Using network Socket gpua014:2825142:2825201 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua014:2825142:2825201 [1] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/8/-1->5->4 gpua014:2825142:2825201 [1] NCCL INFO Channel 00/0 : 5[46000] -> 6[85000] via P2P/IPC/read gpua014:2825142:2825201 [1] NCCL INFO Channel 01/0 : 5[46000] -> 6[85000] via P2P/IPC/read gpua014:2825142:2825201 [1] NCCL INFO Connected all rings gpua014:2825142:2825201 [1] NCCL INFO Channel 01/0 : 5[46000] -> 8[7000] [send] via NET/Socket/1 gpua080:2988503:2988558 [3] NCCL INFO Channel 01/0 : 59[c7000] -> 58[85000] via P2P/IPC/read gpua080:2988503:2988558 [3] NCCL INFO Connected all trees gpua080:2988503:2988558 [3] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua080:2988503:2988558 [3] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua080:2988503:2988558 [3] NCCL INFO comm 0x5560579663a0 rank 59 nranks 64 cudaDev 3 busId c7000 - Init COMPLETE gpua029:4090291:4090353 [1] NCCL INFO Channel 00/0 : 8[7000] -> 17[46000] [receive] via NET/Socket/1 gpua029:4090291:4090353 [1] NCCL INFO Channel 00/0 : 17[46000] -> 8[7000] [send] via NET/Socket/1 gpua029:4090291:4090353 [1] NCCL INFO Channel 00/0 : 17[46000] -> 16[7000] via P2P/IPC/read gpua029:4090291:4090353 [1] NCCL INFO Channel 01/0 : 17[46000] -> 16[7000] via P2P/IPC/read gpua029:4090291:4090353 [1] NCCL INFO Connected all trees gpua029:4090291:4090353 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua029:4090291:4090353 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua029:4090291:4090353 [1] NCCL INFO comm 0x557b1e24bfe0 rank 17 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua003:274357:274357 [1] NCCL INFO cudaDriverVersion 12020 gpua003:274357:274357 [1] NCCL INFO Bootstrap : Using eth1:172.28.23.3<0> gpua003:274357:274357 [1] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua003:274357:274418 [1] NCCL INFO NET/IB : No device found. 
gpua003:274357:274418 [1] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.3<0> [1]hsn0:141.142.145.3<0> gpua003:274357:274418 [1] NCCL INFO Using network Socket gpua003:274357:274418 [1] NCCL INFO Setting affinity for GPU 1 to ffff,00000000 gpua003:274357:274418 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 gpua003:274357:274418 [1] NCCL INFO Channel 00/0 : 1[46000] -> 2[85000] via P2P/IPC/read gpua003:274357:274418 [1] NCCL INFO Channel 01/0 : 1[46000] -> 2[85000] via P2P/IPC/read gpua003:274357:274418 [1] NCCL INFO Connected all rings gpua003:274357:274418 [1] NCCL INFO Channel 00/0 : 1[46000] -> 0[7000] via P2P/IPC/read gpua014:2825142:2825201 [1] NCCL INFO Channel 01/0 : 8[7000] -> 5[46000] [receive] via NET/Socket/1 gpua014:2825142:2825201 [1] NCCL INFO Channel 00/0 : 5[46000] -> 4[7000] via P2P/IPC/read gpua014:2825142:2825201 [1] NCCL INFO Channel 01/0 : 5[46000] -> 4[7000] via P2P/IPC/read gpua014:2825142:2825201 [1] NCCL INFO Connected all trees gpua014:2825142:2825201 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua014:2825142:2825201 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua014:2825142:2825201 [1] NCCL INFO comm 0x558c24c95220 rank 5 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua080:2988502:2988502 [2] NCCL INFO cudaDriverVersion 12020 gpua080:2988502:2988502 [2] NCCL INFO Bootstrap : Using eth1:172.28.23.80<0> gpua080:2988502:2988502 [2] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua080:2988502:2988561 [2] NCCL INFO NET/IB : No device found. gpua080:2988502:2988561 [2] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.80<0> [1]hsn0:141.142.145.80<0> gpua080:2988502:2988561 [2] NCCL INFO Using network Socket gpua080:2988502:2988561 [2] NCCL INFO Setting affinity for GPU 2 to ffff0000 gpua080:2988502:2988561 [2] NCCL INFO Trees [0] 59/-1/-1->58->57 [1] 59/-1/-1->58->57 gpua080:2988502:2988561 [2] NCCL INFO Channel 00/0 : 58[85000] -> 59[c7000] via P2P/IPC/read gpua080:2988502:2988561 [2] NCCL INFO Channel 01/0 : 58[85000] -> 59[c7000] via P2P/IPC/read gpua080:2988502:2988561 [2] NCCL INFO Connected all rings gpua080:2988502:2988561 [2] NCCL INFO Channel 00/0 : 58[85000] -> 57[46000] via P2P/IPC/read gpua003:274357:274418 [1] NCCL INFO Channel 01/0 : 1[46000] -> 0[7000] via P2P/IPC/read gpua003:274357:274418 [1] NCCL INFO Connected all trees gpua003:274357:274418 [1] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua003:274357:274418 [1] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua003:274357:274418 [1] NCCL INFO comm 0x556c3f41ddd0 rank 1 nranks 64 cudaDev 1 busId 46000 - Init COMPLETE gpua014:2825141:2825141 [0] NCCL INFO cudaDriverVersion 12020 gpua014:2825141:2825141 [0] NCCL INFO Bootstrap : Using eth1:172.28.23.14<0> gpua014:2825141:2825141 [0] NCCL INFO NET/Plugin : No plugin found (libnccl-net.so), using internal implementation gpua014:2825141:2825204 [0] NCCL INFO NET/IB : No device found. 
gpua014:2825141:2825204 [0] NCCL INFO NET/Socket : Using [0]eth1:172.28.23.14<0> [1]hsn0:141.142.145.14<0> gpua014:2825141:2825204 [0] NCCL INFO Using network Socket gpua014:2825141:2825204 [0] NCCL INFO Setting affinity for GPU 0 to ffff0000,00000000 gpua014:2825141:2825204 [0] NCCL INFO Trees [0] 5/-1/-1->4->9 [1] 5/0/-1->4->12 gpua014:2825141:2825204 [0] NCCL INFO Channel 00/0 : 3[c7000] -> 4[7000] [receive] via NET/Socket/1 gpua014:2825141:2825204 [0] NCCL INFO Channel 01/0 : 3[c7000] -> 4[7000] [receive] via NET/Socket/1 gpua014:2825141:2825204 [0] NCCL INFO Channel 00/0 : 4[7000] -> 5[46000] via P2P/IPC/read gpua080:2988502:2988561 [2] NCCL INFO Channel 01/0 : 58[85000] -> 57[46000] via P2P/IPC/read gpua080:2988502:2988561 [2] NCCL INFO Connected all trees gpua080:2988502:2988561 [2] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua080:2988502:2988561 [2] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua080:2988502:2988561 [2] NCCL INFO comm 0x564777b13610 rank 58 nranks 64 cudaDev 2 busId 85000 - Init COMPLETE gpua014:2825141:2825204 [0] NCCL INFO Channel 01/0 : 4[7000] -> 5[46000] via P2P/IPC/read gpua014:2825141:2825204 [0] NCCL INFO Connected all rings gpua014:2825141:2825204 [0] NCCL INFO Channel 01/0 : 0[7000] -> 4[7000] [receive] via NET/Socket/1 gpua014:2825141:2825204 [0] NCCL INFO Channel 00/0 : 4[7000] -> 9[46000] [send] via NET/Socket/1 gpua014:2825141:2825204 [0] NCCL INFO Channel 01/0 : 4[7000] -> 12[7000] [send] via NET/Socket/1 gpua014:2825141:2825204 [0] NCCL INFO Channel 01/0 : 12[7000] -> 4[7000] [receive] via NET/Socket/1 gpua014:2825141:2825204 [0] NCCL INFO Channel 00/0 : 9[46000] -> 4[7000] [receive] via NET/Socket/1 gpua014:2825141:2825204 [0] NCCL INFO Channel 01/0 : 4[7000] -> 0[7000] [send] via NET/Socket/1 gpua014:2825141:2825204 [0] NCCL INFO Connected all trees gpua014:2825141:2825204 [0] NCCL INFO threadThresholds 8/8/64 | 512/8/64 | 512 | 512 gpua014:2825141:2825204 [0] NCCL INFO 2 coll channels, 2 p2p channels, 2 p2p channels per peer gpua014:2825141:2825204 [0] NCCL INFO comm 0x55ecb1011eb0 rank 4 nranks 64 cudaDev 0 busId 7000 - Init COMPLETE [gpua003:0/64] 2024-02-05 00:33:58,576 (distributed:1027) INFO: Reducer buckets have been rebuilt in this iteration. 
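Editor's note: the NCCL lines above record each of the 64 ranks (four GPUs per node, bus IDs 7000/46000/85000/c7000) bootstrapping over eth1, falling back to the socket transport because no InfiniBand device is found, and wiring up its ring and tree channels before DDP reports that its reducer buckets were rebuilt. Below is a minimal sketch of how such a process group and DDP wrapper are typically brought up under SLURM; the environment-variable choices, rendezvous, and toy model are illustrative assumptions, and the actual espnet2 launcher differs in how it spawns its per-GPU workers.

    # Editor's sketch (illustrative, not the ESPnet source): NCCL process group + DDP.
    import os
    import torch
    import torch.distributed as dist
    from torch.nn.parallel import DistributedDataParallel as DDP

    def init_ddp():
        rank = int(os.environ["SLURM_PROCID"])        # global rank (assumes one task per GPU)
        world_size = int(os.environ["SLURM_NTASKS"])  # total number of ranks
        local_rank = int(os.environ["SLURM_LOCALID"]) # GPU index on this node
        torch.cuda.set_device(local_rank)
        # A rendezvous must be provided (env:// with MASTER_ADDR/MASTER_PORT, or a
        # shared file:// store); NCCL then picks its transport per pair of ranks,
        # P2P/IPC inside a node and sockets across nodes, as in the log above.
        dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)
        model = torch.nn.Linear(1024, 1024).cuda(local_rank)
        # Wrapping in DDP builds the gradient "reducer buckets" mentioned in the log.
        return DDP(model, device_ids=[local_rank])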
[gpua003:0/64] 2024-02-05 00:37:36,309 (trainer:756) INFO: 19epoch:train:1-100batch: iter_time=2.132, forward_time=0.498, loss_ctc=84.701, loss_interctc_layer6=90.329, loss_interctc_layer12=76.032, loss_interctc_layer15=71.052, loss_interctc_layer21=87.391, loss=81.901, backward_time=0.326, grad_norm=70.807, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.153, optim0_lr0=9.428e-05, train_time=7.051 [gpua003:0/64] 2024-02-05 00:42:47,982 (trainer:756) INFO: 19epoch:train:101-200batch: iter_time=2.129e-04, forward_time=0.501, loss_ctc=79.012, loss_interctc_layer6=83.585, loss_interctc_layer12=69.924, loss_interctc_layer15=64.769, loss_interctc_layer21=81.483, loss=75.754, backward_time=0.355, grad_norm=61.369, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.154, optim0_lr0=9.426e-05, train_time=3.146 [gpua003:0/64] 2024-02-05 00:48:26,801 (trainer:756) INFO: 19epoch:train:201-300batch: iter_time=9.035e-04, forward_time=0.466, loss_ctc=76.778, loss_interctc_layer6=82.913, loss_interctc_layer12=69.975, loss_interctc_layer15=64.974, loss_interctc_layer21=78.820, loss=74.692, backward_time=0.366, grad_norm=60.935, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.150, optim0_lr0=9.424e-05, train_time=3.387 [gpua003:0/64] 2024-02-05 00:54:21,988 (trainer:756) INFO: 19epoch:train:301-400batch: iter_time=6.945e-04, forward_time=0.407, loss_ctc=86.492, loss_interctc_layer6=85.848, loss_interctc_layer12=71.480, loss_interctc_layer15=65.617, loss_interctc_layer21=89.275, loss=79.742, backward_time=0.425, grad_norm=61.771, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.156, optim0_lr0=9.423e-05, train_time=3.551 [gpua003:0/64] 2024-02-05 01:00:46,126 (trainer:756) INFO: 19epoch:train:401-500batch: iter_time=7.550e-04, forward_time=0.362, loss_ctc=89.295, loss_interctc_layer6=96.274, loss_interctc_layer12=80.419, loss_interctc_layer15=73.962, loss_interctc_layer21=92.404, loss=86.471, backward_time=0.320, grad_norm=104.224, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.143, optim0_lr0=9.421e-05, train_time=3.842 [gpua003:0/64] 2024-02-05 01:05:29,829 (trainer:756) INFO: 19epoch:train:501-600batch: iter_time=9.804e-05, forward_time=0.299, loss_ctc=78.488, loss_interctc_layer6=90.527, loss_interctc_layer12=76.452, loss_interctc_layer15=70.649, loss_interctc_layer21=80.665, loss=79.356, backward_time=0.315, grad_norm=69.221, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.144, optim0_lr0=9.419e-05, train_time=2.835 [gpua003:0/64] 2024-02-05 01:10:02,535 (trainer:756) INFO: 19epoch:train:601-700batch: iter_time=3.225e-04, forward_time=0.313, loss_ctc=92.850, loss_interctc_layer6=97.405, loss_interctc_layer12=81.952, loss_interctc_layer15=75.836, loss_interctc_layer21=95.447, loss=88.698, backward_time=0.301, grad_norm=102.713, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.145, optim0_lr0=9.417e-05, train_time=2.729 [gpua003:0/64] 2024-02-05 01:15:38,504 (trainer:756) INFO: 19epoch:train:701-800batch: iter_time=6.732e-04, forward_time=0.328, loss_ctc=79.376, loss_interctc_layer6=90.170, loss_interctc_layer12=75.426, loss_interctc_layer15=69.688, loss_interctc_layer21=81.594, loss=79.251, backward_time=0.304, grad_norm=63.024, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.143, optim0_lr0=9.416e-05, train_time=3.359 [gpua003:0/64] 2024-02-05 01:20:40,116 (trainer:756) INFO: 19epoch:train:801-900batch: iter_time=0.002, forward_time=0.341, loss_ctc=88.478, loss_interctc_layer6=96.660, loss_interctc_layer12=81.003, loss_interctc_layer15=74.811, 
loss_interctc_layer21=91.034, loss=86.397, backward_time=0.350, grad_norm=64.047, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.147, optim0_lr0=9.414e-05, train_time=3.016 [gpua003:0/64] 2024-02-05 01:25:47,660 (trainer:756) INFO: 19epoch:train:901-1000batch: iter_time=5.644e-04, forward_time=0.297, loss_ctc=77.866, loss_interctc_layer6=87.273, loss_interctc_layer12=73.101, loss_interctc_layer15=67.586, loss_interctc_layer21=79.987, loss=77.163, backward_time=0.290, grad_norm=59.057, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.143, optim0_lr0=9.412e-05, train_time=3.073 [gpua003:0/64] 2024-02-05 01:30:30,005 (trainer:756) INFO: 19epoch:train:1001-1100batch: iter_time=7.279e-04, forward_time=0.293, loss_ctc=85.773, loss_interctc_layer6=87.499, loss_interctc_layer12=73.548, loss_interctc_layer15=68.023, loss_interctc_layer21=88.369, loss=80.642, backward_time=0.303, grad_norm=63.174, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.144, optim0_lr0=9.410e-05, train_time=2.825 [gpua003:0/64] 2024-02-05 01:35:17,484 (trainer:756) INFO: 19epoch:train:1101-1200batch: iter_time=3.924e-04, forward_time=0.317, loss_ctc=62.790, loss_interctc_layer6=71.702, loss_interctc_layer12=59.768, loss_interctc_layer15=55.010, loss_interctc_layer21=64.579, loss=62.770, backward_time=0.293, grad_norm=57.018, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.143, optim0_lr0=9.409e-05, train_time=2.874 [gpua003:0/64] 2024-02-05 01:37:14,679 (multiple_iter_factory:32) INFO: Building 1th iter-factory... [gpua003:0/64] 2024-02-05 01:37:33,091 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 01:37:36,593 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.5", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.5", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.5", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.5", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 01:37:36,593 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.5, [gpua003:0/64] 2024-02-05 01:37:36,823 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 01:50:27,088 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
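Editor's note: the WARNING above is the trainer discarding an update whose gradient norm came out non-finite, which happens occasionally in fp16 training at a large loss scale; the reported loss_scale drops from 2.028e+31 to about 1.014e+31 over the following records, consistent with dynamic loss scaling backing off after the overflow. A minimal sketch of the clip, check, maybe-skip pattern is shown below, assuming a standard torch.cuda.amp.GradScaler setup; it illustrates the general recipe, not the exact espnet2 trainer code.

    # Editor's sketch of the skip-on-nonfinite-gradient pattern (illustrative).
    import torch

    def step_with_skip(model, loss, optimizer, scaler, max_norm=100.0):
        scaler.scale(loss).backward()
        scaler.unscale_(optimizer)                     # gradients back in real units
        grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        if torch.isfinite(grad_norm):
            scaler.step(optimizer)                     # apply the parameter update
        # else: drop this step, as in the "grad norm is nan" warning above
        scaler.update()   # shrinks the scale if an overflow was seen, else may grow it
        optimizer.zero_grad()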
[gpua003:0/64] 2024-02-05 01:51:14,370 (trainer:756) INFO: 19epoch:train:1201-1300batch: iter_time=7.466, forward_time=0.250, loss_ctc=71.604, loss_interctc_layer6=85.022, loss_interctc_layer12=71.243, loss_interctc_layer15=65.732, loss_interctc_layer21=73.743, loss=73.469, backward_time=0.226, grad_norm=83.720, clip=100.000, loss_scale=1.762e+31, optim_step_time=0.138, optim0_lr0=9.407e-05, train_time=9.569 [gpua003:0/64] 2024-02-05 01:54:14,184 (trainer:756) INFO: 19epoch:train:1301-1400batch: iter_time=8.366e-05, forward_time=0.142, loss_ctc=85.994, loss_interctc_layer6=88.862, loss_interctc_layer12=74.247, loss_interctc_layer15=68.828, loss_interctc_layer21=88.325, loss=81.251, backward_time=0.201, grad_norm=64.529, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.135, optim0_lr0=9.405e-05, train_time=1.798 [gpua003:0/64] 2024-02-05 01:58:48,628 (trainer:756) INFO: 19epoch:train:1401-1500batch: iter_time=8.392e-05, forward_time=0.144, loss_ctc=86.855, loss_interctc_layer6=88.424, loss_interctc_layer12=74.224, loss_interctc_layer15=68.489, loss_interctc_layer21=89.558, loss=81.510, backward_time=0.200, grad_norm=69.257, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.135, optim0_lr0=9.403e-05, train_time=2.744 [gpua003:0/64] 2024-02-05 02:03:18,149 (trainer:756) INFO: 19epoch:train:1501-1600batch: iter_time=6.820e-04, forward_time=0.285, loss_ctc=66.710, loss_interctc_layer6=69.851, loss_interctc_layer12=58.015, loss_interctc_layer15=53.196, loss_interctc_layer21=68.864, loss=63.327, backward_time=0.264, grad_norm=48.768, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.191, optim0_lr0=9.402e-05, train_time=2.694 [gpua003:0/64] 2024-02-05 02:07:58,540 (trainer:756) INFO: 19epoch:train:1601-1700batch: iter_time=8.618e-05, forward_time=0.143, loss_ctc=92.449, loss_interctc_layer6=97.351, loss_interctc_layer12=81.446, loss_interctc_layer15=74.904, loss_interctc_layer21=95.478, loss=88.325, backward_time=0.201, grad_norm=73.892, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.134, optim0_lr0=9.400e-05, train_time=2.804 [gpua003:0/64] 2024-02-05 02:12:25,191 (trainer:756) INFO: 19epoch:train:1701-1800batch: iter_time=9.022e-05, forward_time=0.143, loss_ctc=85.853, loss_interctc_layer6=92.790, loss_interctc_layer12=77.817, loss_interctc_layer15=71.703, loss_interctc_layer21=88.362, loss=83.305, backward_time=0.202, grad_norm=68.044, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.134, optim0_lr0=9.398e-05, train_time=2.666 [gpua003:0/64] 2024-02-05 02:17:28,316 (trainer:756) INFO: 19epoch:train:1801-1900batch: iter_time=2.959e-04, forward_time=0.257, loss_ctc=80.222, loss_interctc_layer6=91.523, loss_interctc_layer12=76.300, loss_interctc_layer15=70.067, loss_interctc_layer21=82.655, loss=80.153, backward_time=0.277, grad_norm=60.581, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.144, optim0_lr0=9.397e-05, train_time=3.031 [gpua003:0/64] 2024-02-05 02:21:31,059 (trainer:756) INFO: 19epoch:train:1901-2000batch: iter_time=8.358e-05, forward_time=0.144, loss_ctc=77.817, loss_interctc_layer6=89.746, loss_interctc_layer12=75.246, loss_interctc_layer15=69.639, loss_interctc_layer21=80.130, loss=78.516, backward_time=0.202, grad_norm=118.629, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.134, optim0_lr0=9.395e-05, train_time=2.426 [gpua003:0/64] 2024-02-05 02:25:53,669 (trainer:756) INFO: 19epoch:train:2001-2100batch: iter_time=8.089e-05, forward_time=0.144, loss_ctc=87.004, loss_interctc_layer6=95.988, loss_interctc_layer12=80.368, 
loss_interctc_layer15=74.268, loss_interctc_layer21=89.625, loss=85.451, backward_time=0.201, grad_norm=92.489, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.134, optim0_lr0=9.393e-05, train_time=2.627 [gpua003:0/64] 2024-02-05 02:30:46,080 (trainer:756) INFO: 19epoch:train:2101-2200batch: iter_time=3.154e-04, forward_time=0.240, loss_ctc=85.216, loss_interctc_layer6=94.403, loss_interctc_layer12=79.073, loss_interctc_layer15=72.807, loss_interctc_layer21=87.930, loss=83.886, backward_time=0.262, grad_norm=80.498, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=9.391e-05, train_time=2.923 [gpua003:0/64] 2024-02-05 02:35:51,449 (trainer:756) INFO: 19epoch:train:2201-2300batch: iter_time=8.908e-05, forward_time=0.143, loss_ctc=78.925, loss_interctc_layer6=82.068, loss_interctc_layer12=68.684, loss_interctc_layer15=63.383, loss_interctc_layer21=81.132, loss=74.839, backward_time=0.201, grad_norm=75.495, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.135, optim0_lr0=9.390e-05, train_time=3.054 [gpua003:0/64] 2024-02-05 02:39:28,063 (trainer:756) INFO: 19epoch:train:2301-2400batch: iter_time=8.560e-05, forward_time=0.141, loss_ctc=70.294, loss_interctc_layer6=77.352, loss_interctc_layer12=64.644, loss_interctc_layer15=59.526, loss_interctc_layer21=72.331, loss=68.829, backward_time=0.201, grad_norm=54.576, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.135, optim0_lr0=9.388e-05, train_time=2.164 [gpua003:0/64] 2024-02-05 02:44:37,077 (trainer:756) INFO: 19epoch:train:2401-2500batch: iter_time=8.430e-05, forward_time=0.145, loss_ctc=75.250, loss_interctc_layer6=82.708, loss_interctc_layer12=69.074, loss_interctc_layer15=63.551, loss_interctc_layer21=77.425, loss=73.602, backward_time=0.201, grad_norm=74.896, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.135, optim0_lr0=9.386e-05, train_time=3.092 [gpua003:0/64] 2024-02-05 02:44:57,151 (multiple_iter_factory:32) INFO: Building 2th iter-factory... 
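Editor's note: each "Building Nth iter-factory" step above constructs a fresh training iterator over one of the splits under exp/s2t_stats_raw_bpe50000/splits12 (split.5, then split.6, and so on), pairing the kaldi_ark speech with the text, text_prev and text_ctc streams and drawing fixed-size batches of 256 keys (19027 batches per split, roughly 4.87 million utterances). The sketch below shows the fixed-size batching idea using the key file named in the log; the chunking logic is an illustration only, and the real UnsortedBatchSampler also folds the remainder so a few batches reach 257.

    # Editor's sketch of fixed-size key batching as described by the
    # UnsortedBatchSampler log entries (illustrative, not the ESPnet class).
    def fixed_size_batches(shape_file, batch_size=256):
        # shape_file lines look like "utt_id num_frames"; keys keep file order.
        with open(shape_file) as f:
            keys = [line.split(maxsplit=1)[0] for line in f]
        return [keys[i:i + batch_size] for i in range(0, len(keys), batch_size)]

    # Example with the key file reported above:
    # batches = fixed_size_batches(
    #     "exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.6")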
[gpua003:0/64] 2024-02-05 02:45:15,593 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 02:45:19,001 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.6", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.6", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.6", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.6", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 02:45:19,001 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.6, [gpua003:0/64] 2024-02-05 02:45:19,006 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 02:55:48,558 (trainer:756) INFO: 19epoch:train:2501-2600batch: iter_time=3.714, forward_time=0.246, loss_ctc=87.415, loss_interctc_layer6=88.646, loss_interctc_layer12=74.279, loss_interctc_layer15=68.166, loss_interctc_layer21=89.903, loss=81.682, backward_time=0.239, grad_norm=83.088, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.384e-05, train_time=6.712 [gpua003:0/64] 2024-02-05 02:59:09,485 (trainer:756) INFO: 19epoch:train:2601-2700batch: iter_time=8.335e-05, forward_time=0.140, loss_ctc=77.877, loss_interctc_layer6=80.350, loss_interctc_layer12=66.991, loss_interctc_layer15=61.620, loss_interctc_layer21=80.345, loss=73.437, backward_time=0.200, grad_norm=63.775, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.134, optim0_lr0=9.383e-05, train_time=2.012 [gpua003:0/64] 2024-02-05 03:03:01,464 (trainer:756) INFO: 19epoch:train:2701-2800batch: iter_time=8.251e-05, forward_time=0.142, loss_ctc=77.999, loss_interctc_layer6=81.462, loss_interctc_layer12=68.206, loss_interctc_layer15=63.054, loss_interctc_layer21=80.370, loss=74.218, backward_time=0.200, grad_norm=158.730, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.134, optim0_lr0=9.381e-05, train_time=2.320 [gpua003:0/64] 2024-02-05 03:06:00,271 (trainer:756) INFO: 19epoch:train:2801-2900batch: iter_time=8.407e-05, forward_time=0.141, loss_ctc=90.727, loss_interctc_layer6=85.545, loss_interctc_layer12=71.169, loss_interctc_layer15=65.331, loss_interctc_layer21=93.625, loss=81.280, backward_time=0.201, grad_norm=57.159, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.134, optim0_lr0=9.379e-05, train_time=1.788 [gpua003:0/64] 2024-02-05 03:09:46,708 (trainer:756) INFO: 19epoch:train:2901-3000batch: iter_time=8.699e-05, forward_time=0.142, loss_ctc=101.024, loss_interctc_layer6=94.506, loss_interctc_layer12=78.792, loss_interctc_layer15=72.503, loss_interctc_layer21=104.707, loss=90.306, backward_time=0.201, grad_norm=82.007, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.134, optim0_lr0=9.378e-05, train_time=2.264 [gpua003:0/64] 2024-02-05 03:13:28,585 (trainer:756) INFO: 19epoch:train:3001-3100batch: iter_time=3.574e-04, forward_time=0.277, loss_ctc=81.346, loss_interctc_layer6=89.638, loss_interctc_layer12=75.255, loss_interctc_layer15=69.522, loss_interctc_layer21=83.693, loss=79.891, backward_time=0.228, grad_norm=78.532, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.156, optim0_lr0=9.376e-05, train_time=2.218 [gpua003:0/64] 2024-02-05 03:17:07,498 (trainer:756) INFO: 19epoch:train:3101-3200batch: iter_time=8.559e-05, 
forward_time=0.143, loss_ctc=92.358, loss_interctc_layer6=95.870, loss_interctc_layer12=80.290, loss_interctc_layer15=74.067, loss_interctc_layer21=94.898, loss=87.497, backward_time=0.202, grad_norm=66.312, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.134, optim0_lr0=9.374e-05, train_time=2.189 [gpua003:0/64] 2024-02-05 03:21:29,411 (trainer:756) INFO: 19epoch:train:3201-3300batch: iter_time=6.688e-04, forward_time=0.227, loss_ctc=84.270, loss_interctc_layer6=89.199, loss_interctc_layer12=74.291, loss_interctc_layer15=68.484, loss_interctc_layer21=86.726, loss=80.594, backward_time=0.257, grad_norm=68.473, clip=100.000, loss_scale=1.278e+31, optim_step_time=0.140, optim0_lr0=9.372e-05, train_time=2.619 [gpua003:0/64] 2024-02-05 03:25:24,069 (trainer:756) INFO: 19epoch:train:3301-3400batch: iter_time=9.676e-05, forward_time=0.143, loss_ctc=91.240, loss_interctc_layer6=95.550, loss_interctc_layer12=79.782, loss_interctc_layer15=73.580, loss_interctc_layer21=94.062, loss=86.843, backward_time=0.201, grad_norm=61.717, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.134, optim0_lr0=9.371e-05, train_time=2.347 [gpua003:0/64] 2024-02-05 03:29:22,990 (trainer:756) INFO: 19epoch:train:3401-3500batch: iter_time=2.074e-04, forward_time=0.201, loss_ctc=78.409, loss_interctc_layer6=85.959, loss_interctc_layer12=71.989, loss_interctc_layer15=66.177, loss_interctc_layer21=80.713, loss=76.650, backward_time=0.245, grad_norm=70.288, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.369e-05, train_time=2.388 [gpua003:0/64] 2024-02-05 03:32:48,974 (trainer:756) INFO: 19epoch:train:3501-3600batch: iter_time=1.046e-04, forward_time=0.163, loss_ctc=90.375, loss_interctc_layer6=86.898, loss_interctc_layer12=72.718, loss_interctc_layer15=66.961, loss_interctc_layer21=93.007, loss=81.992, backward_time=0.251, grad_norm=67.559, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.135, optim0_lr0=9.367e-05, train_time=2.060 [gpua003:0/64] 2024-02-05 03:36:24,169 (trainer:756) INFO: 19epoch:train:3601-3700batch: iter_time=1.029e-04, forward_time=0.142, loss_ctc=63.885, loss_interctc_layer6=71.146, loss_interctc_layer12=59.065, loss_interctc_layer15=54.177, loss_interctc_layer21=65.895, loss=62.833, backward_time=0.201, grad_norm=71.739, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.134, optim0_lr0=9.366e-05, train_time=2.152 [gpua003:0/64] 2024-02-05 03:38:32,274 (multiple_iter_factory:32) INFO: Building 3th iter-factory... 
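Editor's note: train_time in these records reads as seconds per batch averaged over the 100-batch window, which lines up with the wall-clock gaps between reports. For example, the 3201-3300 block above reports train_time=2.619 and arrives about 262 seconds after the 3101-3200 report. A quick check:

    # Editor's check that train_time (s/batch) matches the report timestamps.
    from datetime import datetime

    t_prev = datetime(2024, 2, 5, 3, 17, 7)    # 3101-3200 report time
    t_this = datetime(2024, 2, 5, 3, 21, 29)   # 3201-3300 report time
    wall = (t_this - t_prev).total_seconds()   # ~262 s between reports
    print(wall, 100 * 2.619)                   # 262.0 vs 261.9 -> consistent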
[gpua003:0/64] 2024-02-05 03:38:50,904 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 03:38:54,424 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.11", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.11", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.11", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.11", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 03:38:54,425 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.11, [gpua003:0/64] 2024-02-05 03:38:54,457 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 03:45:57,977 (trainer:756) INFO: 19epoch:train:3701-3800batch: iter_time=3.520, forward_time=0.219, loss_ctc=73.511, loss_interctc_layer6=84.134, loss_interctc_layer12=70.341, loss_interctc_layer15=64.837, loss_interctc_layer21=75.800, loss=73.725, backward_time=0.219, grad_norm=188.262, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.364e-05, train_time=5.738 [gpua003:0/64] 2024-02-05 03:49:09,504 (trainer:756) INFO: 19epoch:train:3801-3900batch: iter_time=8.625e-05, forward_time=0.141, loss_ctc=85.953, loss_interctc_layer6=88.653, loss_interctc_layer12=74.333, loss_interctc_layer15=68.384, loss_interctc_layer21=87.804, loss=81.025, backward_time=0.201, grad_norm=73.226, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.134, optim0_lr0=9.362e-05, train_time=1.915 [gpua003:0/64] 2024-02-05 03:53:14,485 (trainer:756) INFO: 19epoch:train:3901-4000batch: iter_time=8.774e-05, forward_time=0.141, loss_ctc=86.979, loss_interctc_layer6=87.540, loss_interctc_layer12=73.380, loss_interctc_layer15=67.643, loss_interctc_layer21=89.667, loss=81.042, backward_time=0.200, grad_norm=78.732, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.134, optim0_lr0=9.360e-05, train_time=2.450 [gpua003:0/64] 2024-02-05 03:56:36,255 (trainer:756) INFO: 19epoch:train:4001-4100batch: iter_time=8.627e-05, forward_time=0.140, loss_ctc=66.486, loss_interctc_layer6=69.285, loss_interctc_layer12=57.370, loss_interctc_layer15=52.702, loss_interctc_layer21=68.470, loss=62.863, backward_time=0.200, grad_norm=58.341, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.134, optim0_lr0=9.359e-05, train_time=2.017 [gpua003:0/64] 2024-02-05 04:00:04,539 (trainer:756) INFO: 19epoch:train:4101-4200batch: iter_time=9.133e-05, forward_time=0.233, loss_ctc=92.286, loss_interctc_layer6=98.089, loss_interctc_layer12=81.901, loss_interctc_layer15=75.181, loss_interctc_layer21=95.254, loss=88.542, backward_time=0.268, grad_norm=72.855, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=9.357e-05, train_time=2.082 [gpua003:0/64] 2024-02-05 04:04:55,869 (trainer:756) INFO: 19epoch:train:4201-4300batch: iter_time=9.477e-05, forward_time=0.142, loss_ctc=86.294, loss_interctc_layer6=92.853, loss_interctc_layer12=77.821, loss_interctc_layer15=71.817, loss_interctc_layer21=88.862, loss=83.529, backward_time=0.201, grad_norm=73.141, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.134, optim0_lr0=9.355e-05, train_time=2.914 [gpua003:0/64] 2024-02-05 04:09:13,249 (trainer:756) INFO: 19epoch:train:4301-4400batch: 
iter_time=8.427e-05, forward_time=0.142, loss_ctc=80.336, loss_interctc_layer6=90.916, loss_interctc_layer12=75.784, loss_interctc_layer15=69.586, loss_interctc_layer21=82.769, loss=79.878, backward_time=0.201, grad_norm=66.845, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.134, optim0_lr0=9.354e-05, train_time=2.574 [gpua003:0/64] 2024-02-05 04:12:15,896 (trainer:756) INFO: 19epoch:train:4401-4500batch: iter_time=8.495e-05, forward_time=0.141, loss_ctc=77.143, loss_interctc_layer6=89.177, loss_interctc_layer12=74.644, loss_interctc_layer15=69.020, loss_interctc_layer21=79.358, loss=77.868, backward_time=0.201, grad_norm=61.618, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.134, optim0_lr0=9.352e-05, train_time=1.826 [gpua003:0/64] 2024-02-05 04:16:10,617 (trainer:756) INFO: 19epoch:train:4501-4600batch: iter_time=8.585e-05, forward_time=0.287, loss_ctc=85.711, loss_interctc_layer6=94.892, loss_interctc_layer12=79.225, loss_interctc_layer15=73.039, loss_interctc_layer21=88.262, loss=84.226, backward_time=0.270, grad_norm=71.159, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=9.350e-05, train_time=2.347 [gpua003:0/64] 2024-02-05 04:19:25,368 (trainer:756) INFO: 19epoch:train:4601-4700batch: iter_time=8.479e-05, forward_time=0.141, loss_ctc=84.038, loss_interctc_layer6=93.336, loss_interctc_layer12=78.120, loss_interctc_layer15=71.851, loss_interctc_layer21=86.325, loss=82.734, backward_time=0.201, grad_norm=88.776, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.134, optim0_lr0=9.349e-05, train_time=1.946 [gpua003:0/64] 2024-02-05 04:23:25,369 (trainer:756) INFO: 19epoch:train:4701-4800batch: iter_time=8.728e-05, forward_time=0.140, loss_ctc=77.824, loss_interctc_layer6=81.262, loss_interctc_layer12=67.989, loss_interctc_layer15=62.665, loss_interctc_layer21=80.062, loss=73.960, backward_time=0.200, grad_norm=58.708, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.134, optim0_lr0=9.347e-05, train_time=2.401 [gpua003:0/64] 2024-02-05 04:26:58,009 (trainer:756) INFO: 19epoch:train:4801-4900batch: iter_time=8.795e-05, forward_time=0.140, loss_ctc=68.318, loss_interctc_layer6=76.199, loss_interctc_layer12=63.546, loss_interctc_layer15=58.405, loss_interctc_layer21=70.402, loss=67.374, backward_time=0.201, grad_norm=61.531, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.134, optim0_lr0=9.345e-05, train_time=2.127 [gpua003:0/64] 2024-02-05 04:29:59,271 (trainer:756) INFO: 19epoch:train:4901-5000batch: iter_time=8.593e-05, forward_time=0.141, loss_ctc=73.740, loss_interctc_layer6=81.952, loss_interctc_layer12=68.343, loss_interctc_layer15=62.827, loss_interctc_layer21=76.103, loss=72.593, backward_time=0.202, grad_norm=66.402, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.134, optim0_lr0=9.343e-05, train_time=1.812 [gpua003:0/64] 2024-02-05 04:30:19,301 (multiple_iter_factory:32) INFO: Building 4th iter-factory... 
[gpua003:0/64] 2024-02-05 04:30:37,725 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 04:30:41,163 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.0", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.0", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.0", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.0", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 04:30:41,163 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.0, [gpua003:0/64] 2024-02-05 04:30:41,255 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 04:43:15,609 (trainer:756) INFO: 19epoch:train:5001-5100batch: iter_time=3.337, forward_time=0.209, loss_ctc=87.290, loss_interctc_layer6=88.668, loss_interctc_layer12=74.270, loss_interctc_layer15=69.314, loss_interctc_layer21=90.531, loss=82.015, backward_time=0.218, grad_norm=68.122, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.342e-05, train_time=7.962 [gpua003:0/64] 2024-02-05 04:46:14,918 (trainer:756) INFO: 19epoch:train:5101-5200batch: iter_time=8.407e-05, forward_time=0.140, loss_ctc=77.514, loss_interctc_layer6=79.826, loss_interctc_layer12=66.474, loss_interctc_layer15=61.176, loss_interctc_layer21=79.942, loss=72.987, backward_time=0.201, grad_norm=64.007, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.135, optim0_lr0=9.340e-05, train_time=1.794 [gpua003:0/64] 2024-02-05 04:49:51,757 (trainer:756) INFO: 19epoch:train:5201-5300batch: iter_time=8.475e-05, forward_time=0.215, loss_ctc=79.435, loss_interctc_layer6=81.788, loss_interctc_layer12=68.812, loss_interctc_layer15=63.649, loss_interctc_layer21=81.837, loss=75.104, backward_time=0.274, grad_norm=59.576, clip=100.000, loss_scale=2.556e+31, optim_step_time=0.140, optim0_lr0=9.338e-05, train_time=2.168 [gpua003:0/64] 2024-02-05 04:53:10,273 (trainer:756) INFO: 19epoch:train:5301-5400batch: iter_time=8.510e-05, forward_time=0.142, loss_ctc=89.463, loss_interctc_layer6=84.290, loss_interctc_layer12=69.952, loss_interctc_layer15=64.013, loss_interctc_layer21=92.464, loss=80.036, backward_time=0.201, grad_norm=72.392, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.337e-05, train_time=1.985 [gpua003:0/64] 2024-02-05 04:57:08,719 (trainer:756) INFO: 19epoch:train:5401-5500batch: iter_time=8.564e-05, forward_time=0.142, loss_ctc=99.859, loss_interctc_layer6=94.918, loss_interctc_layer12=79.018, loss_interctc_layer15=72.525, loss_interctc_layer21=103.294, loss=89.923, backward_time=0.201, grad_norm=75.920, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.335e-05, train_time=2.385 [gpua003:0/64] 2024-02-05 04:57:49,363 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
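Editor's note: the same overflow-and-back-off cycle recurs here. The reported loss_scale had grown from 2.028e+31 through about 2.556e+31 to 4.056e+31 over the preceding blocks, the gradient norm then goes NaN, and the next reports fall back to roughly 2.458e+31 and then 2.028e+31. That matches the usual dynamic loss-scaling rule (grow after a run of good steps, halve on overflow); below is a minimal sketch under generic AMP-style defaults, which are assumptions and not necessarily ESPnet's settings.

    # Editor's sketch of dynamic loss scaling (generic AMP-style defaults).
    class DynamicLossScale:
        def __init__(self, scale=2.0 ** 16, growth=2.0, backoff=0.5, interval=2000):
            self.scale = scale        # current multiplier applied to the loss
            self.growth = growth      # e.g. 2.028e+31 -> 4.056e+31 after enough good steps
            self.backoff = backoff    # e.g. 4.056e+31 -> ~2.028e+31 after an overflow
            self.interval = interval  # good steps required before growing
            self.good_steps = 0

        def update(self, found_nonfinite):
            if found_nonfinite:       # e.g. the "grad norm is nan" warning above
                self.scale *= self.backoff
                self.good_steps = 0
            else:
                self.good_steps += 1
                if self.good_steps % self.interval == 0:
                    self.scale *= self.growth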
[gpua003:0/64] 2024-02-05 05:00:44,894 (trainer:756) INFO: 19epoch:train:5501-5600batch: iter_time=8.491e-05, forward_time=0.175, loss_ctc=81.182, loss_interctc_layer6=89.787, loss_interctc_layer12=75.287, loss_interctc_layer15=69.524, loss_interctc_layer21=83.470, loss=79.850, backward_time=0.201, grad_norm=59.005, clip=100.000, loss_scale=2.458e+31, optim_step_time=0.134, optim0_lr0=9.333e-05, train_time=2.162 [gpua003:0/64] 2024-02-05 05:04:31,396 (trainer:756) INFO: 19epoch:train:5601-5700batch: iter_time=8.547e-05, forward_time=0.142, loss_ctc=93.155, loss_interctc_layer6=95.464, loss_interctc_layer12=79.967, loss_interctc_layer15=73.648, loss_interctc_layer21=96.113, loss=87.669, backward_time=0.202, grad_norm=75.041, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.135, optim0_lr0=9.332e-05, train_time=2.265 [gpua003:0/64] 2024-02-05 05:08:53,235 (trainer:756) INFO: 19epoch:train:5701-5800batch: iter_time=2.685e-04, forward_time=0.313, loss_ctc=83.732, loss_interctc_layer6=89.998, loss_interctc_layer12=75.016, loss_interctc_layer15=69.075, loss_interctc_layer21=86.207, loss=80.805, backward_time=0.241, grad_norm=75.929, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.143, optim0_lr0=9.330e-05, train_time=2.615 [gpua003:0/64] 2024-02-05 05:13:24,136 (trainer:756) INFO: 19epoch:train:5801-5900batch: iter_time=8.448e-05, forward_time=0.142, loss_ctc=89.919, loss_interctc_layer6=95.469, loss_interctc_layer12=79.766, loss_interctc_layer15=73.385, loss_interctc_layer21=92.668, loss=86.241, backward_time=0.201, grad_norm=65.228, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.134, optim0_lr0=9.328e-05, train_time=2.711 [gpua003:0/64] 2024-02-05 05:17:03,672 (trainer:756) INFO: 19epoch:train:5901-6000batch: iter_time=8.557e-05, forward_time=0.141, loss_ctc=77.769, loss_interctc_layer6=85.902, loss_interctc_layer12=71.694, loss_interctc_layer15=65.918, loss_interctc_layer21=80.109, loss=76.278, backward_time=0.201, grad_norm=98.513, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.134, optim0_lr0=9.326e-05, train_time=2.196 [gpua003:0/64] 2024-02-05 05:20:33,546 (trainer:756) INFO: 19epoch:train:6001-6100batch: iter_time=8.651e-05, forward_time=0.141, loss_ctc=89.772, loss_interctc_layer6=86.995, loss_interctc_layer12=72.840, loss_interctc_layer15=67.146, loss_interctc_layer21=92.446, loss=81.840, backward_time=0.201, grad_norm=68.836, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.133, optim0_lr0=9.325e-05, train_time=2.099 [gpua003:0/64] 2024-02-05 05:24:45,651 (trainer:756) INFO: 19epoch:train:6101-6200batch: iter_time=2.427e-04, forward_time=0.210, loss_ctc=61.965, loss_interctc_layer6=70.666, loss_interctc_layer12=58.536, loss_interctc_layer15=53.699, loss_interctc_layer21=63.809, loss=61.735, backward_time=0.261, grad_norm=51.342, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.142, optim0_lr0=9.323e-05, train_time=2.521 [gpua003:0/64] 2024-02-05 05:26:46,851 (multiple_iter_factory:32) INFO: Building 5th iter-factory... 
[gpua003:0/64] 2024-02-05 05:27:05,364 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 05:27:09,132 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.3", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.3", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.3", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.3", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 05:27:09,133 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.3, [gpua003:0/64] 2024-02-05 05:27:09,174 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 05:37:19,255 (trainer:756) INFO: 19epoch:train:6201-6300batch: iter_time=5.519, forward_time=0.205, loss_ctc=73.836, loss_interctc_layer6=84.340, loss_interctc_layer12=70.430, loss_interctc_layer15=64.897, loss_interctc_layer21=76.134, loss=73.927, backward_time=0.213, grad_norm=70.547, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.321e-05, train_time=7.535 [gpua003:0/64] 2024-02-05 05:41:33,442 (trainer:756) INFO: 19epoch:train:6301-6400batch: iter_time=0.001, forward_time=0.328, loss_ctc=86.090, loss_interctc_layer6=89.088, loss_interctc_layer12=74.656, loss_interctc_layer15=68.555, loss_interctc_layer21=88.308, loss=81.339, backward_time=0.417, grad_norm=110.247, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.156, optim0_lr0=9.320e-05, train_time=2.540 [gpua003:0/64] 2024-02-05 05:46:02,995 (trainer:756) INFO: 19epoch:train:6401-6500batch: iter_time=0.001, forward_time=0.605, loss_ctc=85.643, loss_interctc_layer6=86.280, loss_interctc_layer12=72.132, loss_interctc_layer15=66.500, loss_interctc_layer21=88.579, loss=79.827, backward_time=0.504, grad_norm=72.646, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.168, optim0_lr0=9.318e-05, train_time=2.695 [gpua003:0/64] 2024-02-05 05:50:40,543 (trainer:756) INFO: 19epoch:train:6501-6600batch: iter_time=8.026e-04, forward_time=0.682, loss_ctc=66.441, loss_interctc_layer6=68.924, loss_interctc_layer12=57.145, loss_interctc_layer15=52.298, loss_interctc_layer21=68.581, loss=62.678, backward_time=0.351, grad_norm=51.015, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.165, optim0_lr0=9.316e-05, train_time=2.772 [gpua003:0/64] 2024-02-05 05:55:24,267 (trainer:756) INFO: 19epoch:train:6601-6700batch: iter_time=9.528e-04, forward_time=0.642, loss_ctc=90.649, loss_interctc_layer6=96.907, loss_interctc_layer12=80.926, loss_interctc_layer15=74.211, loss_interctc_layer21=93.424, loss=87.223, backward_time=0.501, grad_norm=84.026, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.165, optim0_lr0=9.315e-05, train_time=2.840 [gpua003:0/64] 2024-02-05 06:00:58,520 (trainer:756) INFO: 19epoch:train:6701-6800batch: iter_time=7.326e-04, forward_time=0.603, loss_ctc=85.163, loss_interctc_layer6=91.892, loss_interctc_layer12=77.104, loss_interctc_layer15=70.871, loss_interctc_layer21=87.870, loss=82.580, backward_time=0.384, grad_norm=70.661, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.162, optim0_lr0=9.313e-05, train_time=3.344 [gpua003:0/64] 2024-02-05 06:05:43,826 (trainer:756) INFO: 19epoch:train:6801-6900batch: iter_time=0.001, 
forward_time=0.710, loss_ctc=80.419, loss_interctc_layer6=90.965, loss_interctc_layer12=76.062, loss_interctc_layer15=69.816, loss_interctc_layer21=82.686, loss=79.990, backward_time=0.444, grad_norm=61.309, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.163, optim0_lr0=9.311e-05, train_time=2.852 [gpua003:0/64] 2024-02-05 06:10:42,329 (trainer:756) INFO: 19epoch:train:6901-7000batch: iter_time=0.001, forward_time=0.797, loss_ctc=76.666, loss_interctc_layer6=88.154, loss_interctc_layer12=73.953, loss_interctc_layer15=68.268, loss_interctc_layer21=78.579, loss=77.124, backward_time=0.489, grad_norm=80.559, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.178, optim0_lr0=9.310e-05, train_time=2.985 [gpua003:0/64] 2024-02-05 06:15:59,159 (trainer:756) INFO: 19epoch:train:7001-7100batch: iter_time=0.002, forward_time=0.873, loss_ctc=86.168, loss_interctc_layer6=95.115, loss_interctc_layer12=79.597, loss_interctc_layer15=73.320, loss_interctc_layer21=88.776, loss=84.595, backward_time=0.536, grad_norm=78.774, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.179, optim0_lr0=9.308e-05, train_time=3.168 [gpua003:0/64] 2024-02-05 06:21:51,872 (trainer:756) INFO: 19epoch:train:7101-7200batch: iter_time=0.003, forward_time=0.986, loss_ctc=83.665, loss_interctc_layer6=93.608, loss_interctc_layer12=78.199, loss_interctc_layer15=71.862, loss_interctc_layer21=86.342, loss=82.735, backward_time=0.681, grad_norm=59.889, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.194, optim0_lr0=9.306e-05, train_time=3.526 [gpua003:0/64] 2024-02-05 06:28:00,374 (trainer:756) INFO: 19epoch:train:7201-7300batch: iter_time=0.002, forward_time=1.039, loss_ctc=77.621, loss_interctc_layer6=81.279, loss_interctc_layer12=68.043, loss_interctc_layer15=62.619, loss_interctc_layer21=79.886, loss=73.890, backward_time=0.579, grad_norm=60.624, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.182, optim0_lr0=9.305e-05, train_time=3.686 [gpua003:0/64] 2024-02-05 06:33:38,448 (trainer:756) INFO: 19epoch:train:7301-7400batch: iter_time=0.003, forward_time=0.746, loss_ctc=68.071, loss_interctc_layer6=75.432, loss_interctc_layer12=62.825, loss_interctc_layer15=57.615, loss_interctc_layer21=70.087, loss=66.806, backward_time=0.570, grad_norm=109.979, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.177, optim0_lr0=9.303e-05, train_time=3.379 [gpua003:0/64] 2024-02-05 06:39:12,760 (trainer:756) INFO: 19epoch:train:7401-7500batch: iter_time=0.002, forward_time=0.905, loss_ctc=73.386, loss_interctc_layer6=81.974, loss_interctc_layer12=68.246, loss_interctc_layer15=62.678, loss_interctc_layer21=75.599, loss=72.376, backward_time=0.527, grad_norm=77.244, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.209, optim0_lr0=9.301e-05, train_time=3.343 [gpua003:0/64] 2024-02-05 06:39:32,930 (multiple_iter_factory:32) INFO: Building 6th iter-factory... 
[gpua003:0/64] 2024-02-05 06:39:51,353 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 06:39:54,842 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.4", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.4", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.4", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.4", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 06:39:54,842 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.4, [gpua003:0/64] 2024-02-05 06:39:54,866 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 06:53:18,879 (trainer:756) INFO: 19epoch:train:7501-7600batch: iter_time=3.633, forward_time=0.185, loss_ctc=86.063, loss_interctc_layer6=87.432, loss_interctc_layer12=72.764, loss_interctc_layer15=67.361, loss_interctc_layer21=88.742, loss=80.472, backward_time=0.208, grad_norm=74.974, clip=100.000, loss_scale=3.610e+31, optim_step_time=0.137, optim0_lr0=9.300e-05, train_time=8.462 [gpua003:0/64] 2024-02-05 06:57:29,411 (trainer:756) INFO: 19epoch:train:7601-7700batch: iter_time=8.634e-05, forward_time=0.142, loss_ctc=77.545, loss_interctc_layer6=79.877, loss_interctc_layer12=66.442, loss_interctc_layer15=61.029, loss_interctc_layer21=80.178, loss=73.014, backward_time=0.202, grad_norm=59.768, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.298e-05, train_time=2.505 [gpua003:0/64] 2024-02-05 07:01:58,998 (trainer:756) INFO: 19epoch:train:7701-7800batch: iter_time=8.855e-05, forward_time=0.207, loss_ctc=78.279, loss_interctc_layer6=81.322, loss_interctc_layer12=68.223, loss_interctc_layer15=63.090, loss_interctc_layer21=80.778, loss=74.338, backward_time=0.225, grad_norm=67.945, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=9.296e-05, train_time=2.689 [gpua003:0/64] 2024-02-05 07:05:28,866 (trainer:756) INFO: 19epoch:train:7801-7900batch: iter_time=8.916e-05, forward_time=0.143, loss_ctc=89.441, loss_interctc_layer6=84.126, loss_interctc_layer12=69.630, loss_interctc_layer15=63.702, loss_interctc_layer21=92.085, loss=79.797, backward_time=0.202, grad_norm=75.336, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.295e-05, train_time=2.105 [gpua003:0/64] 2024-02-05 07:09:28,591 (trainer:756) INFO: 19epoch:train:7901-8000batch: iter_time=8.976e-05, forward_time=0.169, loss_ctc=99.306, loss_interctc_layer6=94.357, loss_interctc_layer12=78.365, loss_interctc_layer15=71.789, loss_interctc_layer21=102.357, loss=89.235, backward_time=0.224, grad_norm=83.953, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.293e-05, train_time=2.396 [gpua003:0/64] 2024-02-05 07:13:13,116 (trainer:756) INFO: 19epoch:train:8001-8100batch: iter_time=8.259e-05, forward_time=0.171, loss_ctc=79.852, loss_interctc_layer6=88.312, loss_interctc_layer12=74.053, loss_interctc_layer15=68.363, loss_interctc_layer21=82.209, loss=78.558, backward_time=0.218, grad_norm=74.069, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.141, optim0_lr0=9.291e-05, train_time=2.246 [gpua003:0/64] 2024-02-05 07:16:49,542 (trainer:756) INFO: 19epoch:train:8101-8200batch: iter_time=8.241e-05, 
forward_time=0.143, loss_ctc=92.374, loss_interctc_layer6=94.963, loss_interctc_layer12=79.395, loss_interctc_layer15=73.056, loss_interctc_layer21=95.253, loss=87.008, backward_time=0.202, grad_norm=65.619, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.290e-05, train_time=2.164 [gpua003:0/64] 2024-02-05 07:21:19,198 (trainer:756) INFO: 19epoch:train:8201-8300batch: iter_time=8.533e-05, forward_time=0.156, loss_ctc=82.598, loss_interctc_layer6=88.999, loss_interctc_layer12=74.041, loss_interctc_layer15=68.293, loss_interctc_layer21=85.052, loss=79.796, backward_time=0.211, grad_norm=95.445, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.288e-05, train_time=2.697 [gpua003:0/64] 2024-02-05 07:25:23,432 (trainer:756) INFO: 19epoch:train:8301-8400batch: iter_time=9.222e-05, forward_time=0.205, loss_ctc=89.918, loss_interctc_layer6=95.139, loss_interctc_layer12=79.608, loss_interctc_layer15=73.171, loss_interctc_layer21=92.887, loss=86.145, backward_time=0.219, grad_norm=62.106, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=9.286e-05, train_time=2.441 [gpua003:0/64] 2024-02-05 07:29:11,279 (trainer:756) INFO: 19epoch:train:8401-8500batch: iter_time=8.570e-05, forward_time=0.143, loss_ctc=77.643, loss_interctc_layer6=85.504, loss_interctc_layer12=71.252, loss_interctc_layer15=65.532, loss_interctc_layer21=79.997, loss=75.986, backward_time=0.201, grad_norm=74.235, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.285e-05, train_time=2.279 [gpua003:0/64] 2024-02-05 07:33:19,917 (trainer:756) INFO: 19epoch:train:8501-8600batch: iter_time=8.393e-05, forward_time=0.186, loss_ctc=88.639, loss_interctc_layer6=85.685, loss_interctc_layer12=71.559, loss_interctc_layer15=65.887, loss_interctc_layer21=91.250, loss=80.604, backward_time=0.210, grad_norm=64.496, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.283e-05, train_time=2.485 [gpua003:0/64] 2024-02-05 07:37:09,396 (trainer:756) INFO: 19epoch:train:8601-8700batch: iter_time=8.601e-05, forward_time=0.155, loss_ctc=61.344, loss_interctc_layer6=70.254, loss_interctc_layer12=58.107, loss_interctc_layer15=53.263, loss_interctc_layer21=63.145, loss=61.223, backward_time=0.220, grad_norm=77.044, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.281e-05, train_time=2.295 [gpua003:0/64] 2024-02-05 07:39:33,647 (multiple_iter_factory:32) INFO: Building 7th iter-factory... 
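Every (trainer:756) record is a flat list of key=value pairs following the "NNepoch:train:A-Bbatch:" prefix, so the whole log can be reduced to a table with a small parser. The sketch below assumes one record per line in the on-disk train.log (the wrapping seen here is a display artifact) and uses a placeholder file name:

```python
# Sketch of a parser for the (trainer:756) records; the file name is a placeholder,
# and one record per line in the on-disk log is assumed.
import re

KV = re.compile(r"(\w+)=([0-9.eE+-]+)")
HEAD = re.compile(r"(\d+)epoch:train:(\d+)-(\d+)batch")

def parse_trainer_lines(path="train.log"):
    rows = []
    with open(path) as f:
        for line in f:
            if "(trainer:756)" not in line:
                continue
            head = HEAD.search(line)
            row = {"epoch": int(head.group(1)), "batch_end": int(head.group(3))}
            # the key=value pairs follow the "...batch:" prefix
            row.update({k: float(v) for k, v in KV.findall(line.split("batch:")[-1])})
            rows.append(row)
    return rows

# Example use: rows = parse_trainer_lines("train.log"); rows[0]["loss_ctc"], rows[0]["train_time"]
```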
[gpua003:0/64] 2024-02-05 07:39:52,987 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 07:39:56,714 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.1", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.1", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.1", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.1", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 07:39:56,714 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.1, [gpua003:0/64] 2024-02-05 07:39:56,718 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 07:56:33,837 (trainer:756) INFO: 19epoch:train:8701-8800batch: iter_time=3.617, forward_time=0.198, loss_ctc=72.987, loss_interctc_layer6=83.681, loss_interctc_layer12=69.894, loss_interctc_layer15=64.296, loss_interctc_layer21=75.319, loss=73.236, backward_time=0.208, grad_norm=77.932, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.280e-05, train_time=11.644 [gpua003:0/64] 2024-02-05 08:00:03,547 (trainer:756) INFO: 19epoch:train:8801-8900batch: iter_time=7.844e-05, forward_time=0.141, loss_ctc=84.578, loss_interctc_layer6=87.758, loss_interctc_layer12=73.276, loss_interctc_layer15=67.319, loss_interctc_layer21=87.602, loss=80.106, backward_time=0.201, grad_norm=66.117, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.278e-05, train_time=2.097 [gpua003:0/64] 2024-02-05 08:03:48,701 (trainer:756) INFO: 19epoch:train:8901-9000batch: iter_time=8.158e-05, forward_time=0.147, loss_ctc=84.548, loss_interctc_layer6=85.870, loss_interctc_layer12=71.860, loss_interctc_layer15=66.097, loss_interctc_layer21=87.192, loss=79.113, backward_time=0.203, grad_norm=60.954, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.276e-05, train_time=2.249 [gpua003:0/64] 2024-02-05 08:06:49,410 (trainer:756) INFO: 19epoch:train:9001-9100batch: iter_time=8.388e-05, forward_time=0.142, loss_ctc=65.125, loss_interctc_layer6=68.989, loss_interctc_layer12=57.076, loss_interctc_layer15=52.279, loss_interctc_layer21=67.322, loss=62.158, backward_time=0.202, grad_norm=60.903, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.275e-05, train_time=1.809 [gpua003:0/64] 2024-02-05 08:10:06,523 (trainer:756) INFO: 19epoch:train:9101-9200batch: iter_time=8.605e-05, forward_time=0.142, loss_ctc=92.285, loss_interctc_layer6=97.273, loss_interctc_layer12=81.064, loss_interctc_layer15=74.347, loss_interctc_layer21=95.194, loss=88.033, backward_time=0.203, grad_norm=76.590, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.273e-05, train_time=1.971 [gpua003:0/64] 2024-02-05 08:13:53,829 (trainer:756) INFO: 19epoch:train:9201-9300batch: iter_time=8.423e-05, forward_time=0.153, loss_ctc=85.326, loss_interctc_layer6=92.604, loss_interctc_layer12=77.409, loss_interctc_layer15=71.320, loss_interctc_layer21=87.998, loss=82.931, backward_time=0.206, grad_norm=86.015, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.271e-05, train_time=2.273 [gpua003:0/64] 2024-02-05 08:18:00,953 (trainer:756) INFO: 19epoch:train:9301-9400batch: iter_time=8.357e-05, 
forward_time=0.171, loss_ctc=79.197, loss_interctc_layer6=90.487, loss_interctc_layer12=75.163, loss_interctc_layer15=68.939, loss_interctc_layer21=81.706, loss=79.098, backward_time=0.209, grad_norm=71.656, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.270e-05, train_time=2.470 [gpua003:0/64] 2024-02-05 08:21:52,700 (trainer:756) INFO: 19epoch:train:9401-9500batch: iter_time=8.623e-05, forward_time=0.173, loss_ctc=76.185, loss_interctc_layer6=88.055, loss_interctc_layer12=73.536, loss_interctc_layer15=67.796, loss_interctc_layer21=78.334, loss=76.781, backward_time=0.209, grad_norm=69.194, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.268e-05, train_time=2.318 [gpua003:0/64] 2024-02-05 08:26:14,724 (trainer:756) INFO: 19epoch:train:9501-9600batch: iter_time=8.970e-05, forward_time=0.143, loss_ctc=86.163, loss_interctc_layer6=95.065, loss_interctc_layer12=79.397, loss_interctc_layer15=72.950, loss_interctc_layer21=88.885, loss=84.492, backward_time=0.201, grad_norm=62.147, clip=100.000, loss_scale=7.221e+31, optim_step_time=0.134, optim0_lr0=9.266e-05, train_time=2.620 [gpua003:0/64] 2024-02-05 08:30:15,127 (trainer:756) INFO: 19epoch:train:9601-9700batch: iter_time=8.488e-05, forward_time=0.189, loss_ctc=84.358, loss_interctc_layer6=93.281, loss_interctc_layer12=77.758, loss_interctc_layer15=71.602, loss_interctc_layer21=87.067, loss=82.813, backward_time=0.209, grad_norm=77.951, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.134, optim0_lr0=9.265e-05, train_time=2.404 [gpua003:0/64] 2024-02-05 08:34:53,415 (trainer:756) INFO: 19epoch:train:9701-9800batch: iter_time=8.665e-05, forward_time=0.244, loss_ctc=78.283, loss_interctc_layer6=81.142, loss_interctc_layer12=67.665, loss_interctc_layer15=62.391, loss_interctc_layer21=80.601, loss=74.016, backward_time=0.238, grad_norm=96.756, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.143, optim0_lr0=9.263e-05, train_time=2.783 [gpua003:0/64] 2024-02-05 08:40:48,758 (trainer:756) INFO: 19epoch:train:9801-9900batch: iter_time=8.189e-05, forward_time=0.140, loss_ctc=68.789, loss_interctc_layer6=75.657, loss_interctc_layer12=62.801, loss_interctc_layer15=57.716, loss_interctc_layer21=70.927, loss=67.178, backward_time=0.201, grad_norm=60.125, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.134, optim0_lr0=9.261e-05, train_time=3.553 [gpua003:0/64] 2024-02-05 08:43:35,992 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. [gpua003:0/64] 2024-02-05 08:44:05,240 (trainer:756) INFO: 19epoch:train:9901-10000batch: iter_time=8.138e-05, forward_time=0.144, loss_ctc=74.481, loss_interctc_layer6=81.480, loss_interctc_layer12=67.732, loss_interctc_layer15=62.150, loss_interctc_layer21=76.806, loss=72.530, backward_time=0.203, grad_norm=63.705, clip=100.000, loss_scale=7.498e+31, optim_step_time=0.134, optim0_lr0=9.260e-05, train_time=1.965 [gpua003:0/64] 2024-02-05 08:44:25,270 (multiple_iter_factory:32) INFO: Building 8th iter-factory... 
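The loss_scale column reflects dynamic loss scaling: it climbs while gradients stay finite (4.056e+31 doubling to 8.113e+31 across these windows) and is cut back whenever an overflow triggers the "grad norm is nan" warning; in-between values such as 7.221e+31 or 7.498e+31 are consistent with the column being an average over the 100-batch reporting window while the scale changes inside it. If the run uses torch.cuda.amp.GradScaler (an assumption, not something stated in this log), the growth/backoff behaviour is controlled by a few constructor arguments; the values below are the library defaults, not numbers taken from this run.

```python
import torch

# Library defaults shown explicitly (not values from this run): the scale is multiplied
# by growth_factor after growth_interval consecutive finite steps, and by backoff_factor
# whenever an inf/nan gradient is found.
scaler = torch.cuda.amp.GradScaler(
    init_scale=2.0**16,
    growth_factor=2.0,
    backoff_factor=0.5,
    growth_interval=2000,
)
```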
[gpua003:0/64] 2024-02-05 08:44:43,777 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 08:44:47,298 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.10", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.10", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.10", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.10", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 08:44:47,299 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.10, [gpua003:0/64] 2024-02-05 08:44:47,358 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 08:59:23,578 (trainer:756) INFO: 19epoch:train:10001-10100batch: iter_time=4.569, forward_time=0.210, loss_ctc=86.397, loss_interctc_layer6=88.147, loss_interctc_layer12=73.507, loss_interctc_layer15=67.448, loss_interctc_layer21=89.128, loss=80.925, backward_time=0.212, grad_norm=103.895, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.258e-05, train_time=9.183 [gpua003:0/64] 2024-02-05 09:02:39,000 (trainer:756) INFO: 19epoch:train:10101-10200batch: iter_time=8.466e-05, forward_time=0.141, loss_ctc=77.759, loss_interctc_layer6=79.707, loss_interctc_layer12=66.131, loss_interctc_layer15=60.778, loss_interctc_layer21=80.254, loss=72.926, backward_time=0.201, grad_norm=66.631, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.256e-05, train_time=1.954 [gpua003:0/64] 2024-02-05 09:06:20,808 (trainer:756) INFO: 19epoch:train:10201-10300batch: iter_time=8.486e-05, forward_time=0.147, loss_ctc=77.513, loss_interctc_layer6=81.116, loss_interctc_layer12=68.001, loss_interctc_layer15=62.936, loss_interctc_layer21=80.196, loss=73.952, backward_time=0.201, grad_norm=85.780, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.255e-05, train_time=2.218 [gpua003:0/64] 2024-02-05 09:10:55,017 (trainer:756) INFO: 19epoch:train:10301-10400batch: iter_time=8.641e-05, forward_time=0.153, loss_ctc=89.227, loss_interctc_layer6=83.916, loss_interctc_layer12=69.527, loss_interctc_layer15=63.639, loss_interctc_layer21=92.184, loss=79.699, backward_time=0.201, grad_norm=66.092, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.253e-05, train_time=2.742 [gpua003:0/64] 2024-02-05 09:14:44,474 (trainer:756) INFO: 19epoch:train:10401-10500batch: iter_time=8.665e-05, forward_time=0.169, loss_ctc=100.232, loss_interctc_layer6=94.828, loss_interctc_layer12=78.762, loss_interctc_layer15=72.260, loss_interctc_layer21=103.730, loss=89.962, backward_time=0.248, grad_norm=70.720, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.251e-05, train_time=2.294 [gpua003:0/64] 2024-02-05 09:18:25,126 (trainer:756) INFO: 19epoch:train:10501-10600batch: iter_time=8.846e-05, forward_time=0.160, loss_ctc=79.226, loss_interctc_layer6=87.774, loss_interctc_layer12=73.531, loss_interctc_layer15=67.741, loss_interctc_layer21=81.572, loss=77.969, backward_time=0.213, grad_norm=91.496, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=9.250e-05, train_time=2.206 [gpua003:0/64] 2024-02-05 09:21:41,906 (trainer:756) INFO: 19epoch:train:10601-10700batch: 
iter_time=9.471e-05, forward_time=0.142, loss_ctc=91.309, loss_interctc_layer6=94.885, loss_interctc_layer12=79.227, loss_interctc_layer15=72.848, loss_interctc_layer21=94.027, loss=86.459, backward_time=0.202, grad_norm=70.149, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.134, optim0_lr0=9.248e-05, train_time=1.967 [gpua003:0/64] 2024-02-05 09:26:16,947 (trainer:756) INFO: 19epoch:train:10701-10800batch: iter_time=9.563e-05, forward_time=0.143, loss_ctc=83.409, loss_interctc_layer6=88.665, loss_interctc_layer12=73.859, loss_interctc_layer15=67.998, loss_interctc_layer21=86.142, loss=80.015, backward_time=0.201, grad_norm=64.509, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.134, optim0_lr0=9.246e-05, train_time=2.751 [gpua003:0/64] 2024-02-05 09:30:06,934 (trainer:756) INFO: 19epoch:train:10801-10900batch: iter_time=9.321e-05, forward_time=0.147, loss_ctc=89.132, loss_interctc_layer6=93.659, loss_interctc_layer12=78.179, loss_interctc_layer15=71.810, loss_interctc_layer21=92.141, loss=84.984, backward_time=0.202, grad_norm=73.707, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.134, optim0_lr0=9.245e-05, train_time=2.300 [gpua003:0/64] 2024-02-05 09:33:49,181 (trainer:756) INFO: 19epoch:train:10901-11000batch: iter_time=8.699e-05, forward_time=0.141, loss_ctc=76.922, loss_interctc_layer6=84.997, loss_interctc_layer12=70.889, loss_interctc_layer15=65.114, loss_interctc_layer21=79.228, loss=75.430, backward_time=0.201, grad_norm=58.315, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.134, optim0_lr0=9.243e-05, train_time=2.222 [gpua003:0/64] 2024-02-05 09:37:38,839 (trainer:756) INFO: 19epoch:train:11001-11100batch: iter_time=9.677e-05, forward_time=0.164, loss_ctc=89.843, loss_interctc_layer6=86.377, loss_interctc_layer12=72.011, loss_interctc_layer15=66.269, loss_interctc_layer21=92.743, loss=81.448, backward_time=0.258, grad_norm=76.647, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.241e-05, train_time=2.295 [gpua003:0/64] 2024-02-05 09:41:36,747 (trainer:756) INFO: 19epoch:train:11101-11200batch: iter_time=9.317e-05, forward_time=0.141, loss_ctc=62.700, loss_interctc_layer6=69.696, loss_interctc_layer12=57.757, loss_interctc_layer15=52.777, loss_interctc_layer21=64.685, loss=61.523, backward_time=0.202, grad_norm=51.459, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.134, optim0_lr0=9.240e-05, train_time=2.380 [gpua003:0/64] 2024-02-05 09:43:31,287 (multiple_iter_factory:32) INFO: Building 9th iter-factory... 
[gpua003:0/64] 2024-02-05 09:43:50,120 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 09:43:53,561 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.9", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.9", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.9", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.9", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 09:43:53,561 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.9, [gpua003:0/64] 2024-02-05 09:43:53,577 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 09:51:25,850 (trainer:756) INFO: 19epoch:train:11201-11300batch: iter_time=3.906, forward_time=0.231, loss_ctc=72.182, loss_interctc_layer6=82.913, loss_interctc_layer12=69.318, loss_interctc_layer15=63.778, loss_interctc_layer21=74.418, loss=72.522, backward_time=0.217, grad_norm=83.135, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.238e-05, train_time=5.891 [gpua003:0/64] 2024-02-05 09:54:25,560 (trainer:756) INFO: 19epoch:train:11301-11400batch: iter_time=8.556e-05, forward_time=0.148, loss_ctc=84.250, loss_interctc_layer6=87.505, loss_interctc_layer12=73.246, loss_interctc_layer15=68.173, loss_interctc_layer21=86.852, loss=80.005, backward_time=0.205, grad_norm=67.862, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.237e-05, train_time=1.797 [gpua003:0/64] 2024-02-05 09:57:34,636 (trainer:756) INFO: 19epoch:train:11401-11500batch: iter_time=8.386e-04, forward_time=0.194, loss_ctc=84.280, loss_interctc_layer6=85.768, loss_interctc_layer12=71.616, loss_interctc_layer15=65.877, loss_interctc_layer21=87.079, loss=78.924, backward_time=0.216, grad_norm=56.676, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.147, optim0_lr0=9.235e-05, train_time=1.889 [gpua003:0/64] 2024-02-05 10:01:33,393 (trainer:756) INFO: 19epoch:train:11501-11600batch: iter_time=8.924e-05, forward_time=0.212, loss_ctc=64.960, loss_interctc_layer6=69.002, loss_interctc_layer12=56.953, loss_interctc_layer15=52.030, loss_interctc_layer21=67.379, loss=62.065, backward_time=0.236, grad_norm=59.792, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.143, optim0_lr0=9.233e-05, train_time=2.389 [gpua003:0/64] 2024-02-05 10:05:48,484 (trainer:756) INFO: 19epoch:train:11601-11700batch: iter_time=9.226e-05, forward_time=0.160, loss_ctc=90.836, loss_interctc_layer6=96.153, loss_interctc_layer12=80.049, loss_interctc_layer15=73.356, loss_interctc_layer21=93.986, loss=86.876, backward_time=0.215, grad_norm=76.420, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.232e-05, train_time=2.551 [gpua003:0/64] 2024-02-05 10:09:01,158 (trainer:756) INFO: 19epoch:train:11701-11800batch: iter_time=9.628e-05, forward_time=0.164, loss_ctc=84.261, loss_interctc_layer6=91.441, loss_interctc_layer12=76.408, loss_interctc_layer15=70.368, loss_interctc_layer21=86.859, loss=81.867, backward_time=0.216, grad_norm=69.536, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.230e-05, train_time=1.926 [gpua003:0/64] 2024-02-05 10:12:48,128 (trainer:756) INFO: 19epoch:train:11801-11900batch: 
iter_time=9.724e-05, forward_time=0.156, loss_ctc=78.949, loss_interctc_layer6=90.345, loss_interctc_layer12=75.197, loss_interctc_layer15=68.826, loss_interctc_layer21=81.309, loss=78.925, backward_time=0.212, grad_norm=64.488, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.228e-05, train_time=2.270 [gpua003:0/64] 2024-02-05 10:16:53,881 (trainer:756) INFO: 19epoch:train:11901-12000batch: iter_time=9.948e-05, forward_time=0.142, loss_ctc=75.815, loss_interctc_layer6=87.547, loss_interctc_layer12=73.202, loss_interctc_layer15=67.590, loss_interctc_layer21=78.083, loss=76.447, backward_time=0.203, grad_norm=78.457, clip=100.000, loss_scale=4.665e+31, optim_step_time=0.135, optim0_lr0=9.227e-05, train_time=2.457 [gpua003:0/64] 2024-02-05 10:20:59,856 (trainer:756) INFO: 19epoch:train:12001-12100batch: iter_time=9.731e-05, forward_time=0.142, loss_ctc=85.044, loss_interctc_layer6=94.135, loss_interctc_layer12=78.638, loss_interctc_layer15=72.413, loss_interctc_layer21=87.644, loss=83.575, backward_time=0.201, grad_norm=70.550, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.225e-05, train_time=2.460 [gpua003:0/64] 2024-02-05 10:25:02,335 (trainer:756) INFO: 19epoch:train:12101-12200batch: iter_time=9.469e-05, forward_time=0.144, loss_ctc=83.471, loss_interctc_layer6=92.487, loss_interctc_layer12=77.288, loss_interctc_layer15=71.064, loss_interctc_layer21=86.372, loss=82.136, backward_time=0.208, grad_norm=65.789, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.223e-05, train_time=2.425 [gpua003:0/64] 2024-02-05 10:28:52,075 (trainer:756) INFO: 19epoch:train:12201-12300batch: iter_time=1.005e-04, forward_time=0.160, loss_ctc=77.730, loss_interctc_layer6=80.923, loss_interctc_layer12=67.445, loss_interctc_layer15=62.074, loss_interctc_layer21=80.118, loss=73.658, backward_time=0.224, grad_norm=60.318, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.222e-05, train_time=2.297 [gpua003:0/64] 2024-02-05 10:33:20,426 (trainer:756) INFO: 19epoch:train:12301-12400batch: iter_time=1.019e-04, forward_time=0.200, loss_ctc=68.838, loss_interctc_layer6=75.521, loss_interctc_layer12=62.836, loss_interctc_layer15=57.705, loss_interctc_layer21=70.924, loss=67.165, backward_time=0.257, grad_norm=52.798, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.139, optim0_lr0=9.220e-05, train_time=2.683 [gpua003:0/64] 2024-02-05 10:37:16,258 (trainer:756) INFO: 19epoch:train:12401-12500batch: iter_time=9.108e-05, forward_time=0.150, loss_ctc=74.068, loss_interctc_layer6=81.371, loss_interctc_layer12=67.650, loss_interctc_layer15=62.032, loss_interctc_layer21=76.394, loss=72.303, backward_time=0.220, grad_norm=70.478, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.140, optim0_lr0=9.219e-05, train_time=2.359 [gpua003:0/64] 2024-02-05 10:37:36,315 (multiple_iter_factory:32) INFO: Building 10th iter-factory... 
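The first window after every "Building Nth iter-factory" message carries the cost of rebuilding the data pipeline: iter_time jumps from ~1e-4 s to several seconds (3.906 and 4.569 in the nearby windows) and train_time is inflated accordingly, while in steady state train_time tracks per-batch wall clock in seconds (the 12001-12100 window spans about 246 s for 100 batches, matching train_time=2.460). When estimating throughput from the log it is worth dropping those warm-up windows; a small sketch using the parse_trainer_lines helper from the earlier sketch in this document (a hypothetical helper, not part of ESPnet):

```python
# Drop the warm-up window that follows each iter-factory rebuild (recognisable by its
# multi-second iter_time) before averaging per-batch train_time.
rows = parse_trainer_lines("train.log")        # placeholder path; helper from the sketch above
steady = [r for r in rows if r.get("iter_time", 0.0) < 1.0]
avg = sum(r["train_time"] for r in steady) / len(steady)
print(f"steady-state seconds per batch: {avg:.3f}")
```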
[gpua003:0/64] 2024-02-05 10:37:55,070 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 10:37:58,504 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.8", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.8", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.8", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.8", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 10:37:58,504 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.8, [gpua003:0/64] 2024-02-05 10:37:58,521 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 10:55:06,138 (trainer:756) INFO: 19epoch:train:12501-12600batch: iter_time=3.942, forward_time=0.177, loss_ctc=85.419, loss_interctc_layer6=86.976, loss_interctc_layer12=72.381, loss_interctc_layer15=66.946, loss_interctc_layer21=88.142, loss=79.973, backward_time=0.206, grad_norm=88.094, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.139, optim0_lr0=9.217e-05, train_time=10.698 [gpua003:0/64] 2024-02-05 10:58:33,422 (trainer:756) INFO: 19epoch:train:12601-12700batch: iter_time=8.244e-05, forward_time=0.141, loss_ctc=76.420, loss_interctc_layer6=79.035, loss_interctc_layer12=65.779, loss_interctc_layer15=60.348, loss_interctc_layer21=79.039, loss=72.124, backward_time=0.201, grad_norm=61.308, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.136, optim0_lr0=9.215e-05, train_time=2.073 [gpua003:0/64] 2024-02-05 11:02:37,318 (trainer:756) INFO: 19epoch:train:12701-12800batch: iter_time=9.064e-05, forward_time=0.142, loss_ctc=77.750, loss_interctc_layer6=80.608, loss_interctc_layer12=67.523, loss_interctc_layer15=62.302, loss_interctc_layer21=80.154, loss=73.667, backward_time=0.201, grad_norm=56.484, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.214e-05, train_time=2.439 [gpua003:0/64] 2024-02-05 11:07:13,702 (trainer:756) INFO: 19epoch:train:12801-12900batch: iter_time=1.041e-04, forward_time=0.142, loss_ctc=88.897, loss_interctc_layer6=83.846, loss_interctc_layer12=69.326, loss_interctc_layer15=63.530, loss_interctc_layer21=92.011, loss=79.522, backward_time=0.200, grad_norm=66.149, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.212e-05, train_time=2.764 [gpua003:0/64] 2024-02-05 11:11:34,697 (trainer:756) INFO: 19epoch:train:12901-13000batch: iter_time=1.001e-04, forward_time=0.143, loss_ctc=97.739, loss_interctc_layer6=93.711, loss_interctc_layer12=77.873, loss_interctc_layer15=71.373, loss_interctc_layer21=101.356, loss=88.411, backward_time=0.202, grad_norm=83.441, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.135, optim0_lr0=9.210e-05, train_time=2.610 [gpua003:0/64] 2024-02-05 11:15:22,085 (trainer:756) INFO: 19epoch:train:13001-13100batch: iter_time=8.947e-05, forward_time=0.149, loss_ctc=79.414, loss_interctc_layer6=88.188, loss_interctc_layer12=74.016, loss_interctc_layer15=68.189, loss_interctc_layer21=81.672, loss=78.296, backward_time=0.202, grad_norm=82.105, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.136, optim0_lr0=9.209e-05, train_time=2.274 [gpua003:0/64] 2024-02-05 11:19:57,415 (trainer:756) INFO: 19epoch:train:13101-13200batch: 
iter_time=9.697e-05, forward_time=0.173, loss_ctc=92.934, loss_interctc_layer6=95.088, loss_interctc_layer12=79.319, loss_interctc_layer15=72.890, loss_interctc_layer21=95.754, loss=87.197, backward_time=0.234, grad_norm=86.956, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.207e-05, train_time=2.753 [gpua003:0/64] 2024-02-05 11:22:25,627 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. [gpua003:0/64] 2024-02-05 11:24:15,415 (trainer:756) INFO: 19epoch:train:13201-13300batch: iter_time=1.066e-04, forward_time=0.169, loss_ctc=82.020, loss_interctc_layer6=88.376, loss_interctc_layer12=73.506, loss_interctc_layer15=67.530, loss_interctc_layer21=84.600, loss=79.206, backward_time=0.225, grad_norm=60.851, clip=100.000, loss_scale=6.187e+31, optim_step_time=0.135, optim0_lr0=9.206e-05, train_time=2.580 [gpua003:0/64] 2024-02-05 11:28:09,636 (trainer:756) INFO: 19epoch:train:13301-13400batch: iter_time=9.874e-05, forward_time=0.170, loss_ctc=88.569, loss_interctc_layer6=93.673, loss_interctc_layer12=78.053, loss_interctc_layer15=71.718, loss_interctc_layer21=91.437, loss=84.690, backward_time=0.214, grad_norm=71.327, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.204e-05, train_time=2.342 [gpua003:0/64] 2024-02-05 11:32:21,706 (trainer:756) INFO: 19epoch:train:13401-13500batch: iter_time=9.803e-05, forward_time=0.156, loss_ctc=76.906, loss_interctc_layer6=84.863, loss_interctc_layer12=70.757, loss_interctc_layer15=64.945, loss_interctc_layer21=79.032, loss=75.301, backward_time=0.218, grad_norm=61.562, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.202e-05, train_time=2.520 [gpua003:0/64] 2024-02-05 11:36:26,818 (trainer:756) INFO: 19epoch:train:13501-13600batch: iter_time=1.018e-04, forward_time=0.180, loss_ctc=88.483, loss_interctc_layer6=85.498, loss_interctc_layer12=71.213, loss_interctc_layer15=65.417, loss_interctc_layer21=91.055, loss=80.333, backward_time=0.208, grad_norm=64.059, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=9.201e-05, train_time=2.452 [gpua003:0/64] 2024-02-05 11:40:15,125 (trainer:756) INFO: 19epoch:train:13601-13700batch: iter_time=8.443e-05, forward_time=0.156, loss_ctc=60.632, loss_interctc_layer6=69.611, loss_interctc_layer12=57.492, loss_interctc_layer15=52.596, loss_interctc_layer21=62.366, loss=60.539, backward_time=0.205, grad_norm=58.171, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.199e-05, train_time=2.283 [gpua003:0/64] 2024-02-05 11:42:11,732 (multiple_iter_factory:32) INFO: Building 11th iter-factory... 
[gpua003:0/64] 2024-02-05 11:42:30,099 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 11:42:33,671 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.2", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.2", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.2", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.2", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 11:42:33,671 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.2, [gpua003:0/64] 2024-02-05 11:42:33,756 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 11:53:41,504 (trainer:756) INFO: 19epoch:train:13701-13800batch: iter_time=3.701, forward_time=0.184, loss_ctc=74.314, loss_interctc_layer6=83.567, loss_interctc_layer12=69.854, loss_interctc_layer15=64.241, loss_interctc_layer21=76.735, loss=73.742, backward_time=0.210, grad_norm=106.798, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.197e-05, train_time=8.064 [gpua003:0/64] 2024-02-05 11:57:21,834 (trainer:756) INFO: 19epoch:train:13801-13900batch: iter_time=7.951e-05, forward_time=0.144, loss_ctc=87.500, loss_interctc_layer6=87.777, loss_interctc_layer12=73.236, loss_interctc_layer15=67.282, loss_interctc_layer21=90.778, loss=81.314, backward_time=0.201, grad_norm=80.241, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.196e-05, train_time=2.203 [gpua003:0/64] 2024-02-05 12:00:32,001 (trainer:756) INFO: 19epoch:train:13901-14000batch: iter_time=8.172e-05, forward_time=0.151, loss_ctc=89.060, loss_interctc_layer6=85.386, loss_interctc_layer12=71.252, loss_interctc_layer15=65.615, loss_interctc_layer21=91.955, loss=80.654, backward_time=0.206, grad_norm=60.750, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.194e-05, train_time=1.901 [gpua003:0/64] 2024-02-05 12:04:23,671 (trainer:756) INFO: 19epoch:train:14001-14100batch: iter_time=8.945e-05, forward_time=0.167, loss_ctc=69.419, loss_interctc_layer6=68.323, loss_interctc_layer12=56.523, loss_interctc_layer15=51.679, loss_interctc_layer21=71.954, loss=63.580, backward_time=0.213, grad_norm=48.523, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.193e-05, train_time=2.317 [gpua003:0/64] 2024-02-05 12:08:39,054 (trainer:756) INFO: 19epoch:train:14101-14200batch: iter_time=8.899e-05, forward_time=0.156, loss_ctc=98.297, loss_interctc_layer6=96.663, loss_interctc_layer12=80.467, loss_interctc_layer15=73.830, loss_interctc_layer21=102.020, loss=90.255, backward_time=0.232, grad_norm=83.203, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.191e-05, train_time=2.553 [gpua003:0/64] 2024-02-05 12:12:19,455 (trainer:756) INFO: 19epoch:train:14201-14300batch: iter_time=9.185e-05, forward_time=0.169, loss_ctc=94.250, loss_interctc_layer6=91.207, loss_interctc_layer12=76.011, loss_interctc_layer15=69.938, loss_interctc_layer21=97.046, loss=85.691, backward_time=0.208, grad_norm=68.493, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.189e-05, train_time=2.205 [gpua003:0/64] 2024-02-05 12:16:53,394 (trainer:756) INFO: 19epoch:train:14301-14400batch: 
iter_time=9.590e-05, forward_time=0.144, loss_ctc=82.161, loss_interctc_layer6=90.638, loss_interctc_layer12=75.303, loss_interctc_layer15=68.960, loss_interctc_layer21=84.624, loss=80.337, backward_time=0.202, grad_norm=65.117, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.188e-05, train_time=2.739 [gpua003:0/64] 2024-02-05 12:20:29,845 (trainer:756) INFO: 19epoch:train:14401-14500batch: iter_time=9.288e-05, forward_time=0.142, loss_ctc=77.410, loss_interctc_layer6=87.248, loss_interctc_layer12=72.844, loss_interctc_layer15=67.155, loss_interctc_layer21=79.690, loss=76.869, backward_time=0.202, grad_norm=67.943, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.186e-05, train_time=2.164 [gpua003:0/64] 2024-02-05 12:25:00,205 (trainer:756) INFO: 19epoch:train:14501-14600batch: iter_time=9.092e-05, forward_time=0.191, loss_ctc=91.031, loss_interctc_layer6=94.487, loss_interctc_layer12=78.744, loss_interctc_layer15=72.475, loss_interctc_layer21=93.977, loss=86.143, backward_time=0.211, grad_norm=64.918, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.184e-05, train_time=2.703 [gpua003:0/64] 2024-02-05 12:29:40,298 (trainer:756) INFO: 19epoch:train:14601-14700batch: iter_time=4.798e-04, forward_time=0.166, loss_ctc=85.736, loss_interctc_layer6=92.796, loss_interctc_layer12=77.398, loss_interctc_layer15=71.145, loss_interctc_layer21=88.542, loss=83.123, backward_time=0.227, grad_norm=67.595, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=9.183e-05, train_time=2.800 [gpua003:0/64] 2024-02-05 12:33:33,985 (trainer:756) INFO: 19epoch:train:14701-14800batch: iter_time=9.214e-05, forward_time=0.142, loss_ctc=78.679, loss_interctc_layer6=80.954, loss_interctc_layer12=67.451, loss_interctc_layer15=61.957, loss_interctc_layer21=81.002, loss=74.009, backward_time=0.202, grad_norm=60.568, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.181e-05, train_time=2.337 [gpua003:0/64] 2024-02-05 12:37:06,824 (trainer:756) INFO: 19epoch:train:14801-14900batch: iter_time=9.093e-05, forward_time=0.161, loss_ctc=72.166, loss_interctc_layer6=75.553, loss_interctc_layer12=62.734, loss_interctc_layer15=57.561, loss_interctc_layer21=74.459, loss=68.495, backward_time=0.214, grad_norm=55.806, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.180e-05, train_time=2.128 [gpua003:0/64] 2024-02-05 12:41:06,191 (trainer:756) INFO: 19epoch:train:14901-15000batch: iter_time=9.617e-05, forward_time=0.142, loss_ctc=76.854, loss_interctc_layer6=81.880, loss_interctc_layer12=67.998, loss_interctc_layer15=62.266, loss_interctc_layer21=79.462, loss=73.692, backward_time=0.202, grad_norm=61.073, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.135, optim0_lr0=9.178e-05, train_time=2.393 [gpua003:0/64] 2024-02-05 13:12:55,345 (trainer:355) INFO: 19epoch results: [train] iter_time=0.327, forward_time=0.229, loss_ctc=81.748, loss_interctc_layer6=86.595, loss_interctc_layer12=72.283, loss_interctc_layer15=66.525, loss_interctc_layer21=84.284, loss=78.287, backward_time=0.246, grad_norm=72.438, clip=100.000, loss_scale=3.405e+31, optim_step_time=0.141, optim0_lr0=9.301e-05, train_time=2.941, time=12 hours, 15 minutes and 42.1 seconds, total_count=285000, gpu_max_cached_mem_GB=33.436, [valid] loss_ctc=47.225, cer_ctc=0.215, loss_interctc_layer6=52.024, cer_interctc_layer6=0.232, loss_interctc_layer12=39.253, cer_interctc_layer12=0.166, loss_interctc_layer15=34.845, cer_interctc_layer15=0.140, 
loss_interctc_layer21=49.726, cer_interctc_layer21=0.228, loss=44.615, time=31 minutes and 25.03 seconds, total_count=88749, gpu_max_cached_mem_GB=33.436 [gpua003:0/64] 2024-02-05 13:13:21,820 (trainer:410) INFO: The best model has been updated: valid.cer_ctc, valid.loss_ctc, valid.total_count [gpua003:0/64] 2024-02-05 13:13:21,826 (trainer:289) INFO: 20/45epoch started. Estimated time to finish: 1 week, 6 days and 20 hours [gpua003:0/64] 2024-02-05 13:13:21,982 (multiple_iter_factory:32) INFO: Building 0th iter-factory... [gpua003:0/64] 2024-02-05 13:13:40,543 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 13:13:44,512 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.3", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.3", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.3", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.3", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 13:13:44,512 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.3, [gpua003:0/64] 2024-02-05 13:13:45,028 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 13:22:49,910 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. [gpua003:0/64] 2024-02-05 13:24:29,935 (trainer:756) INFO: 20epoch:train:1-100batch: iter_time=3.117, forward_time=0.183, loss_ctc=80.395, loss_interctc_layer6=86.943, loss_interctc_layer12=73.877, loss_interctc_layer15=68.590, loss_interctc_layer21=82.944, loss=78.550, backward_time=0.209, grad_norm=81.817, clip=100.000, loss_scale=2.930e+31, optim_step_time=0.139, optim0_lr0=9.176e-05, train_time=6.679 [gpua003:0/64] 2024-02-05 13:27:29,738 (trainer:756) INFO: 20epoch:train:101-200batch: iter_time=8.240e-05, forward_time=0.142, loss_ctc=86.624, loss_interctc_layer6=88.775, loss_interctc_layer12=74.260, loss_interctc_layer15=68.446, loss_interctc_layer21=89.395, loss=81.500, backward_time=0.203, grad_norm=65.104, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.175e-05, train_time=1.798 [gpua003:0/64] 2024-02-05 13:30:32,320 (trainer:756) INFO: 20epoch:train:201-300batch: iter_time=8.390e-05, forward_time=0.143, loss_ctc=87.135, loss_interctc_layer6=92.332, loss_interctc_layer12=78.526, loss_interctc_layer15=73.125, loss_interctc_layer21=89.729, loss=84.169, backward_time=0.203, grad_norm=67.011, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.173e-05, train_time=1.826 [gpua003:0/64] 2024-02-05 13:34:42,108 (trainer:756) INFO: 20epoch:train:301-400batch: iter_time=9.446e-05, forward_time=0.143, loss_ctc=86.751, loss_interctc_layer6=80.994, loss_interctc_layer12=67.034, loss_interctc_layer15=61.397, loss_interctc_layer21=89.516, loss=77.138, backward_time=0.203, grad_norm=56.008, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.172e-05, train_time=2.498 [gpua003:0/64] 2024-02-05 13:38:12,078 (trainer:756) INFO: 20epoch:train:401-500batch: iter_time=9.507e-05, forward_time=0.158, loss_ctc=77.996, loss_interctc_layer6=86.685, loss_interctc_layer12=72.578, loss_interctc_layer15=66.987, loss_interctc_layer21=80.378, loss=76.925, backward_time=0.204, 
grad_norm=64.157, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.170e-05, train_time=2.099 [gpua003:0/64] 2024-02-05 13:41:59,146 (trainer:756) INFO: 20epoch:train:501-600batch: iter_time=9.830e-05, forward_time=0.152, loss_ctc=87.268, loss_interctc_layer6=95.154, loss_interctc_layer12=79.340, loss_interctc_layer15=73.072, loss_interctc_layer21=90.181, loss=85.003, backward_time=0.210, grad_norm=72.546, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.168e-05, train_time=2.269 [gpua003:0/64] 2024-02-05 13:45:42,204 (trainer:756) INFO: 20epoch:train:601-700batch: iter_time=9.130e-05, forward_time=0.145, loss_ctc=83.343, loss_interctc_layer6=96.279, loss_interctc_layer12=80.782, loss_interctc_layer15=74.767, loss_interctc_layer21=85.606, loss=84.156, backward_time=0.203, grad_norm=101.585, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.167e-05, train_time=2.231 [gpua003:0/64] 2024-02-05 13:49:52,123 (trainer:756) INFO: 20epoch:train:701-800batch: iter_time=8.899e-05, forward_time=0.142, loss_ctc=66.142, loss_interctc_layer6=78.807, loss_interctc_layer12=66.018, loss_interctc_layer15=60.857, loss_interctc_layer21=68.065, loss=67.978, backward_time=0.201, grad_norm=55.646, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.165e-05, train_time=2.499 [gpua003:0/64] 2024-02-05 13:54:26,215 (trainer:756) INFO: 20epoch:train:801-900batch: iter_time=8.745e-05, forward_time=0.184, loss_ctc=81.584, loss_interctc_layer6=91.021, loss_interctc_layer12=75.761, loss_interctc_layer15=69.542, loss_interctc_layer21=84.164, loss=80.414, backward_time=0.209, grad_norm=65.429, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.164e-05, train_time=2.741 [gpua003:0/64] 2024-02-05 13:58:01,138 (trainer:756) INFO: 20epoch:train:901-1000batch: iter_time=8.969e-05, forward_time=0.187, loss_ctc=84.588, loss_interctc_layer6=89.961, loss_interctc_layer12=74.912, loss_interctc_layer15=68.900, loss_interctc_layer21=87.395, loss=81.151, backward_time=0.214, grad_norm=70.469, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.162e-05, train_time=2.149 [gpua003:0/64] 2024-02-05 14:01:41,612 (trainer:756) INFO: 20epoch:train:1001-1100batch: iter_time=8.598e-05, forward_time=0.191, loss_ctc=85.409, loss_interctc_layer6=95.841, loss_interctc_layer12=81.212, loss_interctc_layer15=75.265, loss_interctc_layer21=87.640, loss=85.073, backward_time=0.232, grad_norm=84.002, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=9.160e-05, train_time=2.205 [gpua003:0/64] 2024-02-05 14:05:34,013 (trainer:756) INFO: 20epoch:train:1101-1200batch: iter_time=8.485e-05, forward_time=0.141, loss_ctc=73.077, loss_interctc_layer6=80.605, loss_interctc_layer12=67.221, loss_interctc_layer15=62.001, loss_interctc_layer21=75.273, loss=71.636, backward_time=0.200, grad_norm=178.432, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.159e-05, train_time=2.324 [gpua003:0/64] 2024-02-05 14:07:36,138 (multiple_iter_factory:32) INFO: Building 1th iter-factory... 
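The "Estimated time to finish: 1 week, 6 days and 20 hours" printed when epoch 20 starts is roughly what a single-epoch extrapolation gives: the epoch-19 summary above reports 12 hours 15 minutes of training plus 31 minutes of validation, and 26 of the 45 epochs remain. The trainer may average over more history than one epoch, but the simple extrapolation already lands on the same figure. A quick check:

```python
# Back-of-the-envelope check of the printed ETA, using the epoch-19 timings above.
train_h = 12 + 15 / 60 + 42.1 / 3600      # "12 hours, 15 minutes and 42.1 seconds"
valid_h = 31 / 60 + 25.03 / 3600          # "31 minutes and 25.03 seconds"
remaining_days = (45 - 19) * (train_h + valid_h) / 24
print(remaining_days)                     # ~13.85 days, i.e. about 1 week, 6 days and 20 hours
```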
[gpua003:0/64] 2024-02-05 14:07:54,784 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 14:07:58,204 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.6", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.6", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.6", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.6", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 14:07:58,204 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.6, [gpua003:0/64] 2024-02-05 14:07:58,279 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 14:17:17,774 (trainer:756) INFO: 20epoch:train:1201-1300batch: iter_time=4.093, forward_time=0.165, loss_ctc=84.951, loss_interctc_layer6=84.798, loss_interctc_layer12=70.917, loss_interctc_layer15=65.594, loss_interctc_layer21=87.265, loss=78.705, backward_time=0.207, grad_norm=72.317, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.157e-05, train_time=7.037 [gpua003:0/64] 2024-02-05 14:21:14,623 (trainer:756) INFO: 20epoch:train:1301-1400batch: iter_time=7.925e-05, forward_time=0.142, loss_ctc=82.399, loss_interctc_layer6=82.427, loss_interctc_layer12=68.955, loss_interctc_layer15=63.598, loss_interctc_layer21=85.062, loss=76.488, backward_time=0.202, grad_norm=79.110, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.156e-05, train_time=2.368 [gpua003:0/64] 2024-02-05 14:25:57,822 (trainer:756) INFO: 20epoch:train:1401-1500batch: iter_time=7.914e-05, forward_time=0.144, loss_ctc=102.345, loss_interctc_layer6=100.428, loss_interctc_layer12=84.409, loss_interctc_layer15=77.838, loss_interctc_layer21=105.367, loss=94.077, backward_time=0.202, grad_norm=77.459, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.154e-05, train_time=2.832 [gpua003:0/64] 2024-02-05 14:30:38,172 (trainer:756) INFO: 20epoch:train:1501-1600batch: iter_time=8.107e-05, forward_time=0.142, loss_ctc=75.511, loss_interctc_layer6=81.825, loss_interctc_layer12=68.772, loss_interctc_layer15=63.679, loss_interctc_layer21=77.575, loss=73.473, backward_time=0.201, grad_norm=96.623, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.152e-05, train_time=2.803 [gpua003:0/64] 2024-02-05 14:35:01,603 (trainer:756) INFO: 20epoch:train:1601-1700batch: iter_time=8.321e-05, forward_time=0.141, loss_ctc=83.881, loss_interctc_layer6=78.377, loss_interctc_layer12=64.948, loss_interctc_layer15=59.648, loss_interctc_layer21=86.660, loss=74.703, backward_time=0.200, grad_norm=65.354, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.151e-05, train_time=2.634 [gpua003:0/64] 2024-02-05 14:39:20,084 (trainer:756) INFO: 20epoch:train:1701-1800batch: iter_time=7.827e-05, forward_time=0.151, loss_ctc=98.812, loss_interctc_layer6=94.722, loss_interctc_layer12=79.175, loss_interctc_layer15=73.024, loss_interctc_layer21=102.266, loss=89.600, backward_time=0.209, grad_norm=68.624, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.149e-05, train_time=2.584 [gpua003:0/64] 2024-02-05 14:44:22,632 (trainer:756) INFO: 20epoch:train:1801-1900batch: 
iter_time=8.552e-05, forward_time=0.200, loss_ctc=92.948, loss_interctc_layer6=99.466, loss_interctc_layer12=83.281, loss_interctc_layer15=77.015, loss_interctc_layer21=95.867, loss=89.716, backward_time=0.268, grad_norm=87.718, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=9.148e-05, train_time=3.025 [gpua003:0/64] 2024-02-05 14:48:29,607 (trainer:756) INFO: 20epoch:train:1901-2000batch: iter_time=8.336e-05, forward_time=0.142, loss_ctc=72.647, loss_interctc_layer6=80.126, loss_interctc_layer12=66.798, loss_interctc_layer15=61.517, loss_interctc_layer21=74.598, loss=71.137, backward_time=0.201, grad_norm=71.539, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.146e-05, train_time=2.470 [gpua003:0/64] 2024-02-05 14:52:29,099 (trainer:756) INFO: 20epoch:train:2001-2100batch: iter_time=8.480e-05, forward_time=0.143, loss_ctc=70.685, loss_interctc_layer6=84.160, loss_interctc_layer12=70.052, loss_interctc_layer15=64.425, loss_interctc_layer21=72.636, loss=72.392, backward_time=0.201, grad_norm=62.837, clip=100.000, loss_scale=3.144e+31, optim_step_time=0.136, optim0_lr0=9.144e-05, train_time=2.395 [gpua003:0/64] 2024-02-05 14:56:46,387 (trainer:756) INFO: 20epoch:train:2101-2200batch: iter_time=8.758e-05, forward_time=0.143, loss_ctc=81.701, loss_interctc_layer6=84.784, loss_interctc_layer12=69.924, loss_interctc_layer15=63.697, loss_interctc_layer21=84.455, loss=76.912, backward_time=0.203, grad_norm=73.240, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.143e-05, train_time=2.573 [gpua003:0/64] 2024-02-05 15:01:25,319 (trainer:756) INFO: 20epoch:train:2201-2300batch: iter_time=8.541e-05, forward_time=0.147, loss_ctc=99.636, loss_interctc_layer6=96.282, loss_interctc_layer12=80.712, loss_interctc_layer15=74.409, loss_interctc_layer21=102.952, loss=90.798, backward_time=0.206, grad_norm=72.283, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.141e-05, train_time=2.788 [gpua003:0/64] 2024-02-05 15:05:22,818 (trainer:756) INFO: 20epoch:train:2301-2400batch: iter_time=8.766e-05, forward_time=0.154, loss_ctc=73.893, loss_interctc_layer6=90.304, loss_interctc_layer12=76.053, loss_interctc_layer15=70.344, loss_interctc_layer21=75.873, loss=77.293, backward_time=0.227, grad_norm=90.145, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=9.140e-05, train_time=2.376 [gpua003:0/64] 2024-02-05 15:09:18,168 (trainer:756) INFO: 20epoch:train:2401-2500batch: iter_time=8.347e-05, forward_time=0.142, loss_ctc=85.956, loss_interctc_layer6=83.231, loss_interctc_layer12=69.063, loss_interctc_layer15=63.474, loss_interctc_layer21=88.507, loss=78.046, backward_time=0.201, grad_norm=68.446, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.138e-05, train_time=2.353 [gpua003:0/64] 2024-02-05 15:09:38,197 (multiple_iter_factory:32) INFO: Building 2th iter-factory... 
[gpua003:0/64] 2024-02-05 15:09:56,963 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 15:10:00,382 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.5", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.5", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.5", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.5", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 15:10:00,382 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.5, [gpua003:0/64] 2024-02-05 15:10:00,389 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 15:18:12,389 (trainer:756) INFO: 20epoch:train:2501-2600batch: iter_time=3.372, forward_time=0.203, loss_ctc=76.408, loss_interctc_layer6=84.512, loss_interctc_layer12=71.191, loss_interctc_layer15=65.977, loss_interctc_layer21=79.092, loss=75.436, backward_time=0.213, grad_norm=73.658, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.142, optim0_lr0=9.136e-05, train_time=5.342 [gpua003:0/64] 2024-02-05 15:21:49,972 (trainer:756) INFO: 20epoch:train:2601-2700batch: iter_time=8.274e-05, forward_time=0.145, loss_ctc=85.198, loss_interctc_layer6=87.068, loss_interctc_layer12=72.496, loss_interctc_layer15=66.506, loss_interctc_layer21=88.024, loss=79.858, backward_time=0.202, grad_norm=76.604, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.135e-05, train_time=2.176 [gpua003:0/64] 2024-02-05 15:25:22,956 (trainer:756) INFO: 20epoch:train:2701-2800batch: iter_time=8.364e-05, forward_time=0.144, loss_ctc=85.704, loss_interctc_layer6=90.588, loss_interctc_layer12=76.999, loss_interctc_layer15=71.172, loss_interctc_layer21=88.481, loss=82.589, backward_time=0.202, grad_norm=75.266, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.133e-05, train_time=2.130 [gpua003:0/64] 2024-02-05 15:29:04,705 (trainer:756) INFO: 20epoch:train:2801-2900batch: iter_time=8.492e-05, forward_time=0.143, loss_ctc=85.784, loss_interctc_layer6=79.668, loss_interctc_layer12=65.616, loss_interctc_layer15=59.938, loss_interctc_layer21=88.700, loss=75.941, backward_time=0.206, grad_norm=59.022, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.132e-05, train_time=2.217 [gpua003:0/64] 2024-02-05 15:33:01,763 (trainer:756) INFO: 20epoch:train:2901-3000batch: iter_time=8.633e-05, forward_time=0.143, loss_ctc=76.952, loss_interctc_layer6=84.958, loss_interctc_layer12=70.736, loss_interctc_layer15=65.443, loss_interctc_layer21=79.544, loss=75.527, backward_time=0.201, grad_norm=62.903, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.130e-05, train_time=2.368 [gpua003:0/64] 2024-02-05 15:36:57,777 (trainer:756) INFO: 20epoch:train:3001-3100batch: iter_time=8.470e-05, forward_time=0.152, loss_ctc=86.583, loss_interctc_layer6=94.244, loss_interctc_layer12=78.789, loss_interctc_layer15=72.356, loss_interctc_layer21=89.336, loss=84.261, backward_time=0.228, grad_norm=74.494, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.129e-05, train_time=2.362 [gpua003:0/64] 2024-02-05 15:40:27,856 (trainer:756) INFO: 20epoch:train:3101-3200batch: iter_time=9.005e-05, 
forward_time=0.142, loss_ctc=81.611, loss_interctc_layer6=93.596, loss_interctc_layer12=78.184, loss_interctc_layer15=71.801, loss_interctc_layer21=83.988, loss=81.836, backward_time=0.201, grad_norm=67.514, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.127e-05, train_time=2.101 [gpua003:0/64] 2024-02-05 15:44:03,910 (trainer:756) INFO: 20epoch:train:3201-3300batch: iter_time=8.390e-05, forward_time=0.161, loss_ctc=65.746, loss_interctc_layer6=78.456, loss_interctc_layer12=65.503, loss_interctc_layer15=60.288, loss_interctc_layer21=67.785, loss=67.556, backward_time=0.222, grad_norm=59.024, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.125e-05, train_time=2.160 [gpua003:0/64] 2024-02-05 15:47:18,645 (trainer:756) INFO: 20epoch:train:3301-3400batch: iter_time=8.889e-05, forward_time=0.199, loss_ctc=80.560, loss_interctc_layer6=90.038, loss_interctc_layer12=74.640, loss_interctc_layer15=68.444, loss_interctc_layer21=82.922, loss=79.321, backward_time=0.264, grad_norm=72.689, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.143, optim0_lr0=9.124e-05, train_time=1.947 [gpua003:0/64] 2024-02-05 15:51:49,102 (trainer:756) INFO: 20epoch:train:3401-3500batch: iter_time=8.355e-05, forward_time=0.144, loss_ctc=82.377, loss_interctc_layer6=88.337, loss_interctc_layer12=73.214, loss_interctc_layer15=67.012, loss_interctc_layer21=85.180, loss=79.224, backward_time=0.202, grad_norm=62.581, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.122e-05, train_time=2.705 [gpua003:0/64] 2024-02-05 15:55:57,168 (trainer:756) INFO: 20epoch:train:3501-3600batch: iter_time=8.529e-05, forward_time=0.143, loss_ctc=82.638, loss_interctc_layer6=94.629, loss_interctc_layer12=79.591, loss_interctc_layer15=73.466, loss_interctc_layer21=85.108, loss=83.086, backward_time=0.202, grad_norm=68.434, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.121e-05, train_time=2.480 [gpua003:0/64] 2024-02-05 15:59:21,670 (trainer:756) INFO: 20epoch:train:3601-3700batch: iter_time=8.641e-05, forward_time=0.143, loss_ctc=72.514, loss_interctc_layer6=79.396, loss_interctc_layer12=66.231, loss_interctc_layer15=61.031, loss_interctc_layer21=74.630, loss=70.760, backward_time=0.205, grad_norm=76.995, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.119e-05, train_time=2.045 [gpua003:0/64] 2024-02-05 16:01:16,846 (multiple_iter_factory:32) INFO: Building 3th iter-factory... 
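In every one of these entries the reported loss equals, to the displayed precision, the unweighted mean of loss_ctc and the four intermediate-CTC terms (layers 6, 12, 15, 21). For the 20epoch:train:3101-3200batch window above: (81.611 + 93.596 + 78.184 + 71.801 + 83.988) / 5 = 81.836, exactly the logged loss. A two-line check with those numbers; the equal weighting is an observation from this log, and the actual combination weights are set in the training config, which is not shown here:

```python
# Values copied from the 20epoch:train:3101-3200batch entry above.
terms = [81.611, 93.596, 78.184, 71.801, 83.988]    # loss_ctc, interctc layers 6/12/15/21
print(round(sum(terms) / len(terms), 3))            # 81.836, matching the logged loss=81.836
```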
[gpua003:0/64] 2024-02-05 16:01:35,037 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 16:01:38,416 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.0", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.0", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.0", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.0", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 16:01:38,416 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.0, [gpua003:0/64] 2024-02-05 16:01:38,460 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 16:07:45,393 (trainer:756) INFO: 20epoch:train:3701-3800batch: iter_time=3.037, forward_time=0.193, loss_ctc=85.789, loss_interctc_layer6=83.788, loss_interctc_layer12=69.986, loss_interctc_layer15=64.330, loss_interctc_layer21=88.481, loss=78.475, backward_time=0.208, grad_norm=72.428, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=9.117e-05, train_time=5.037 [gpua003:0/64] 2024-02-05 16:11:21,179 (trainer:756) INFO: 20epoch:train:3801-3900batch: iter_time=8.089e-05, forward_time=0.142, loss_ctc=82.459, loss_interctc_layer6=82.514, loss_interctc_layer12=69.056, loss_interctc_layer15=63.855, loss_interctc_layer21=85.022, loss=76.581, backward_time=0.202, grad_norm=69.007, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.116e-05, train_time=2.158 [gpua003:0/64] 2024-02-05 16:15:29,092 (trainer:756) INFO: 20epoch:train:3901-4000batch: iter_time=8.214e-05, forward_time=0.147, loss_ctc=100.584, loss_interctc_layer6=100.286, loss_interctc_layer12=84.094, loss_interctc_layer15=77.596, loss_interctc_layer21=103.728, loss=93.257, backward_time=0.218, grad_norm=71.987, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.114e-05, train_time=2.479 [gpua003:0/64] 2024-02-05 16:19:06,291 (trainer:756) INFO: 20epoch:train:4001-4100batch: iter_time=8.450e-05, forward_time=0.143, loss_ctc=74.438, loss_interctc_layer6=81.063, loss_interctc_layer12=67.899, loss_interctc_layer15=62.757, loss_interctc_layer21=76.556, loss=72.543, backward_time=0.202, grad_norm=82.537, clip=100.000, loss_scale=6.288e+31, optim_step_time=0.137, optim0_lr0=9.113e-05, train_time=2.172 [gpua003:0/64] 2024-02-05 16:23:06,784 (trainer:756) INFO: 20epoch:train:4101-4200batch: iter_time=8.411e-05, forward_time=0.142, loss_ctc=83.145, loss_interctc_layer6=77.824, loss_interctc_layer12=64.511, loss_interctc_layer15=59.142, loss_interctc_layer21=85.947, loss=74.114, backward_time=0.201, grad_norm=56.325, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.111e-05, train_time=2.405 [gpua003:0/64] 2024-02-05 16:27:20,278 (trainer:756) INFO: 20epoch:train:4201-4300batch: iter_time=8.316e-05, forward_time=0.145, loss_ctc=98.993, loss_interctc_layer6=93.680, loss_interctc_layer12=78.343, loss_interctc_layer15=72.136, loss_interctc_layer21=102.319, loss=89.094, backward_time=0.203, grad_norm=71.223, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.110e-05, train_time=2.535 [gpua003:0/64] 2024-02-05 16:30:45,863 (trainer:756) INFO: 20epoch:train:4301-4400batch: 
iter_time=8.297e-05, forward_time=0.143, loss_ctc=92.198, loss_interctc_layer6=98.301, loss_interctc_layer12=82.473, loss_interctc_layer15=75.915, loss_interctc_layer21=94.989, loss=88.775, backward_time=0.202, grad_norm=78.570, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.108e-05, train_time=2.056 [gpua003:0/64] 2024-02-05 16:34:18,984 (trainer:756) INFO: 20epoch:train:4401-4500batch: iter_time=8.441e-05, forward_time=0.142, loss_ctc=71.881, loss_interctc_layer6=79.740, loss_interctc_layer12=66.474, loss_interctc_layer15=61.067, loss_interctc_layer21=73.888, loss=70.610, backward_time=0.201, grad_norm=65.143, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.137, optim0_lr0=9.106e-05, train_time=2.131 [gpua003:0/64] 2024-02-05 16:38:58,563 (trainer:756) INFO: 20epoch:train:4501-4600batch: iter_time=8.584e-05, forward_time=0.158, loss_ctc=70.531, loss_interctc_layer6=83.468, loss_interctc_layer12=69.458, loss_interctc_layer15=63.802, loss_interctc_layer21=72.512, loss=71.954, backward_time=0.216, grad_norm=75.644, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.138, optim0_lr0=9.105e-05, train_time=2.796 [gpua003:0/64] 2024-02-05 16:42:45,204 (trainer:756) INFO: 20epoch:train:4601-4700batch: iter_time=8.357e-05, forward_time=0.202, loss_ctc=81.054, loss_interctc_layer6=84.015, loss_interctc_layer12=69.216, loss_interctc_layer15=63.283, loss_interctc_layer21=83.734, loss=76.260, backward_time=0.248, grad_norm=66.866, clip=100.000, loss_scale=8.113e+31, optim_step_time=0.140, optim0_lr0=9.103e-05, train_time=2.264 [gpua003:0/64] 2024-02-05 16:44:19,833 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. [gpua003:0/64] 2024-02-05 16:46:48,211 (trainer:756) INFO: 20epoch:train:4701-4800batch: iter_time=9.049e-05, forward_time=0.232, loss_ctc=99.571, loss_interctc_layer6=97.511, loss_interctc_layer12=81.514, loss_interctc_layer15=75.098, loss_interctc_layer21=102.720, loss=91.283, backward_time=0.229, grad_norm=79.515, clip=100.000, loss_scale=5.409e+31, optim_step_time=0.142, optim0_lr0=9.102e-05, train_time=2.431 [gpua003:0/64] 2024-02-05 16:51:22,924 (trainer:756) INFO: 20epoch:train:4801-4900batch: iter_time=8.426e-05, forward_time=0.142, loss_ctc=72.829, loss_interctc_layer6=90.177, loss_interctc_layer12=75.690, loss_interctc_layer15=69.930, loss_interctc_layer21=74.775, loss=76.680, backward_time=0.201, grad_norm=80.636, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.100e-05, train_time=2.748 [gpua003:0/64] 2024-02-05 16:55:05,315 (trainer:756) INFO: 20epoch:train:4901-5000batch: iter_time=9.026e-05, forward_time=0.143, loss_ctc=87.080, loss_interctc_layer6=83.990, loss_interctc_layer12=69.624, loss_interctc_layer15=64.032, loss_interctc_layer21=89.769, loss=78.899, backward_time=0.201, grad_norm=65.979, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=9.099e-05, train_time=2.224 [gpua003:0/64] 2024-02-05 16:55:25,368 (multiple_iter_factory:32) INFO: Building 4th iter-factory... 
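The loss_interctc_layer6/12/15/21 columns are intermediate CTC: the CTC objective is applied not only to the final encoder output but also to hidden states tapped at several encoder depths. A generic PyTorch sketch of that computation; the tap points, the shared projection and all tensor shapes here are toy assumptions, not ESPnet's implementation:

```python
import torch
import torch.nn as nn

ctc = nn.CTCLoss(blank=0, zero_infinity=True)

def interctc_losses(tapped, proj, input_lens, targets, target_lens):
    """tapped: {layer_index: (T, N, D) encoder states}; proj: Linear(D, vocab) to CTC logits.
    Returns per-layer CTC losses and their unweighted mean (mirroring the columns in this log)."""
    losses = {}
    for layer, h in tapped.items():
        log_probs = proj(h).log_softmax(dim=-1)                # (T, N, vocab)
        losses[layer] = ctc(log_probs, targets, input_lens, target_lens)
    return losses, torch.stack(list(losses.values())).mean()

# Toy shapes, just to show the call signature:
T, N, D, V, S = 50, 2, 8, 20, 10
proj = nn.Linear(D, V)
tapped = {l: torch.randn(T, N, D) for l in (6, 12, 15, 21)}
targets = torch.randint(1, V, (N, S))                          # labels avoid the blank id 0
losses, mean_loss = interctc_losses(
    tapped, proj, torch.full((N,), T), targets, torch.full((N,), S))
```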
[gpua003:0/64] 2024-02-05 16:55:43,949 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 16:55:47,374 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.8", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.8", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.8", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.8", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 16:55:47,374 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.8, [gpua003:0/64] 2024-02-05 16:55:47,378 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 17:05:08,390 (trainer:756) INFO: 20epoch:train:5001-5100batch: iter_time=3.828, forward_time=0.235, loss_ctc=82.528, loss_interctc_layer6=84.678, loss_interctc_layer12=71.229, loss_interctc_layer15=65.846, loss_interctc_layer21=85.304, loss=77.917, backward_time=0.220, grad_norm=74.712, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=9.097e-05, train_time=6.030 [gpua003:0/64] 2024-02-05 17:08:28,846 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. [gpua003:0/64] 2024-02-05 17:08:30,617 (trainer:756) INFO: 20epoch:train:5101-5200batch: iter_time=9.054e-05, forward_time=0.143, loss_ctc=86.030, loss_interctc_layer6=86.387, loss_interctc_layer12=71.673, loss_interctc_layer15=65.634, loss_interctc_layer21=88.850, loss=79.715, backward_time=0.202, grad_norm=63.197, clip=100.000, loss_scale=4.036e+31, optim_step_time=0.137, optim0_lr0=9.095e-05, train_time=2.022 [gpua003:0/64] 2024-02-05 17:11:55,716 (trainer:756) INFO: 20epoch:train:5201-5300batch: iter_time=8.204e-05, forward_time=0.146, loss_ctc=87.870, loss_interctc_layer6=90.293, loss_interctc_layer12=76.258, loss_interctc_layer15=70.486, loss_interctc_layer21=90.444, loss=83.070, backward_time=0.203, grad_norm=75.598, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.094e-05, train_time=2.051 [gpua003:0/64] 2024-02-05 17:13:43,329 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
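The loss_scale and grad_norm columns, together with the "The grad norm is nan. Skipping updating the model." warnings above, follow the standard fp16 mixed-precision pattern: backpropagate a scaled loss, unscale the gradients, clip their norm, and skip the optimizer step whenever the norm is not finite while the scaler backs off. A generic torch.amp sketch of one such step; model, batch, loss_fn and the clipping threshold are placeholders, and this is not the trainer's actual code:

```python
import logging
import torch

scaler = torch.cuda.amp.GradScaler()          # dynamic loss scaling: backs off on overflow, regrows later

def train_step(model, batch, loss_fn, optimizer, max_norm):
    optimizer.zero_grad()
    with torch.autocast(device_type="cuda", dtype=torch.float16):
        loss = loss_fn(model, batch)
    scaler.scale(loss).backward()              # gradients of the scaled loss
    scaler.unscale_(optimizer)                 # back to true gradient units before clipping
    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
    if torch.isfinite(grad_norm):
        scaler.step(optimizer)                 # applies the update (also skips internally on inf/nan)
    else:
        logging.warning("The grad norm is nan. Skipping updating the model.")
    scaler.update()                            # halves the scale after an overflow, grows it periodically otherwise
    return loss.detach(), grad_norm
```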
[gpua003:0/64] 2024-02-05 17:15:54,750 (trainer:756) INFO: 20epoch:train:5301-5400batch: iter_time=8.704e-05, forward_time=0.142, loss_ctc=87.531, loss_interctc_layer6=79.848, loss_interctc_layer12=65.711, loss_interctc_layer15=59.960, loss_interctc_layer21=90.399, loss=76.690, backward_time=0.201, grad_norm=65.209, clip=100.000, loss_scale=1.506e+31, optim_step_time=0.137, optim0_lr0=9.092e-05, train_time=2.390 [gpua003:0/64] 2024-02-05 17:20:21,264 (trainer:756) INFO: 20epoch:train:5401-5500batch: iter_time=8.994e-05, forward_time=0.143, loss_ctc=78.925, loss_interctc_layer6=84.523, loss_interctc_layer12=70.412, loss_interctc_layer15=64.902, loss_interctc_layer21=81.471, loss=76.047, backward_time=0.201, grad_norm=69.322, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.091e-05, train_time=2.665 [gpua003:0/64] 2024-02-05 17:23:40,051 (trainer:756) INFO: 20epoch:train:5501-5600batch: iter_time=8.760e-05, forward_time=0.144, loss_ctc=92.553, loss_interctc_layer6=93.689, loss_interctc_layer12=77.932, loss_interctc_layer15=71.812, loss_interctc_layer21=95.507, loss=86.299, backward_time=0.202, grad_norm=86.505, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.089e-05, train_time=1.988 [gpua003:0/64] 2024-02-05 17:27:45,186 (trainer:756) INFO: 20epoch:train:5601-5700batch: iter_time=9.030e-05, forward_time=0.263, loss_ctc=88.351, loss_interctc_layer6=94.620, loss_interctc_layer12=78.833, loss_interctc_layer15=72.670, loss_interctc_layer21=90.824, loss=85.059, backward_time=0.299, grad_norm=139.252, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.156, optim0_lr0=9.088e-05, train_time=2.450 [gpua003:0/64] 2024-02-05 17:32:19,586 (trainer:756) INFO: 20epoch:train:5701-5800batch: iter_time=9.034e-05, forward_time=0.146, loss_ctc=68.825, loss_interctc_layer6=77.937, loss_interctc_layer12=65.013, loss_interctc_layer15=59.927, loss_interctc_layer21=70.845, loss=68.509, backward_time=0.201, grad_norm=75.017, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.086e-05, train_time=2.745 [gpua003:0/64] 2024-02-05 17:36:39,851 (trainer:756) INFO: 20epoch:train:5801-5900batch: iter_time=9.095e-05, forward_time=0.143, loss_ctc=83.276, loss_interctc_layer6=89.490, loss_interctc_layer12=74.029, loss_interctc_layer15=67.744, loss_interctc_layer21=85.755, loss=80.059, backward_time=0.201, grad_norm=78.904, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=9.085e-05, train_time=2.603 [gpua003:0/64] 2024-02-05 17:41:36,907 (trainer:756) INFO: 20epoch:train:5901-6000batch: iter_time=9.425e-05, forward_time=0.143, loss_ctc=84.976, loss_interctc_layer6=88.357, loss_interctc_layer12=73.111, loss_interctc_layer15=66.976, loss_interctc_layer21=87.962, loss=80.276, backward_time=0.201, grad_norm=76.479, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=9.083e-05, train_time=2.970 [gpua003:0/64] 2024-02-05 17:45:10,831 (trainer:756) INFO: 20epoch:train:6001-6100batch: iter_time=8.822e-05, forward_time=0.143, loss_ctc=85.472, loss_interctc_layer6=93.716, loss_interctc_layer12=78.809, loss_interctc_layer15=72.826, loss_interctc_layer21=87.982, loss=83.761, backward_time=0.201, grad_norm=71.042, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.081e-05, train_time=2.139 [gpua003:0/64] 2024-02-05 17:49:25,014 (trainer:756) INFO: 20epoch:train:6101-6200batch: iter_time=9.112e-05, forward_time=0.142, loss_ctc=73.394, loss_interctc_layer6=79.553, loss_interctc_layer12=66.399, 
loss_interctc_layer15=60.790, loss_interctc_layer21=75.568, loss=71.141, backward_time=0.201, grad_norm=90.356, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=9.080e-05, train_time=2.542 [gpua003:0/64] 2024-02-05 17:51:16,474 (multiple_iter_factory:32) INFO: Building 5th iter-factory... [gpua003:0/64] 2024-02-05 17:51:35,348 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 17:51:38,700 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.7", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.7", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.7", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.7", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 17:51:38,700 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.7, [gpua003:0/64] 2024-02-05 17:51:38,707 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 17:58:15,984 (trainer:756) INFO: 20epoch:train:6201-6300batch: iter_time=3.339, forward_time=0.143, loss_ctc=84.575, loss_interctc_layer6=83.520, loss_interctc_layer12=69.681, loss_interctc_layer15=64.130, loss_interctc_layer21=87.348, loss=77.851, backward_time=0.202, grad_norm=61.701, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=9.078e-05, train_time=5.309 [gpua003:0/64] 2024-02-05 18:01:51,571 (trainer:756) INFO: 20epoch:train:6301-6400batch: iter_time=8.832e-05, forward_time=0.145, loss_ctc=78.138, loss_interctc_layer6=81.901, loss_interctc_layer12=68.476, loss_interctc_layer15=63.053, loss_interctc_layer21=80.886, loss=74.491, backward_time=0.202, grad_norm=67.576, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=9.077e-05, train_time=2.156 [gpua003:0/64] 2024-02-05 18:05:46,113 (trainer:756) INFO: 20epoch:train:6401-6500batch: iter_time=8.791e-05, forward_time=0.320, loss_ctc=96.242, loss_interctc_layer6=98.867, loss_interctc_layer12=82.524, loss_interctc_layer15=76.073, loss_interctc_layer21=99.236, loss=90.588, backward_time=0.241, grad_norm=65.668, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.165, optim0_lr0=9.075e-05, train_time=2.344 [gpua003:0/64] 2024-02-05 18:09:38,637 (trainer:756) INFO: 20epoch:train:6501-6600batch: iter_time=8.527e-05, forward_time=0.143, loss_ctc=71.610, loss_interctc_layer6=80.694, loss_interctc_layer12=67.464, loss_interctc_layer15=62.143, loss_interctc_layer21=73.788, loss=71.140, backward_time=0.201, grad_norm=70.696, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.074e-05, train_time=2.326 [gpua003:0/64] 2024-02-05 18:13:29,443 (trainer:756) INFO: 20epoch:train:6601-6700batch: iter_time=8.285e-05, forward_time=0.142, loss_ctc=80.117, loss_interctc_layer6=76.833, loss_interctc_layer12=63.368, loss_interctc_layer15=57.961, loss_interctc_layer21=82.703, loss=72.196, backward_time=0.201, grad_norm=87.739, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.072e-05, train_time=2.308 [gpua003:0/64] 2024-02-05 18:17:13,257 (trainer:756) INFO: 20epoch:train:6701-6800batch: iter_time=8.631e-05, forward_time=0.143, loss_ctc=90.544, loss_interctc_layer6=94.148, loss_interctc_layer12=78.667, 
loss_interctc_layer15=72.610, loss_interctc_layer21=93.789, loss=85.952, backward_time=0.201, grad_norm=116.134, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.070e-05, train_time=2.238 [gpua003:0/64] 2024-02-05 18:20:46,300 (trainer:756) INFO: 20epoch:train:6801-6900batch: iter_time=8.343e-05, forward_time=0.143, loss_ctc=86.339, loss_interctc_layer6=97.296, loss_interctc_layer12=81.109, loss_interctc_layer15=74.718, loss_interctc_layer21=88.971, loss=85.687, backward_time=0.202, grad_norm=69.385, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.069e-05, train_time=2.130 [gpua003:0/64] 2024-02-05 18:25:16,213 (trainer:756) INFO: 20epoch:train:6901-7000batch: iter_time=8.178e-05, forward_time=0.142, loss_ctc=64.869, loss_interctc_layer6=79.017, loss_interctc_layer12=65.732, loss_interctc_layer15=60.478, loss_interctc_layer21=66.682, loss=67.356, backward_time=0.202, grad_norm=58.865, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.067e-05, train_time=2.699 [gpua003:0/64] 2024-02-05 18:29:04,721 (trainer:756) INFO: 20epoch:train:7001-7100batch: iter_time=8.147e-05, forward_time=0.143, loss_ctc=68.150, loss_interctc_layer6=83.146, loss_interctc_layer12=69.119, loss_interctc_layer15=63.397, loss_interctc_layer21=70.058, loss=70.774, backward_time=0.202, grad_norm=58.420, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.066e-05, train_time=2.285 [gpua003:0/64] 2024-02-05 18:32:24,869 (trainer:756) INFO: 20epoch:train:7101-7200batch: iter_time=8.240e-05, forward_time=0.142, loss_ctc=78.072, loss_interctc_layer6=83.654, loss_interctc_layer12=68.965, loss_interctc_layer15=63.011, loss_interctc_layer21=80.582, loss=74.857, backward_time=0.201, grad_norm=70.050, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.064e-05, train_time=2.001 [gpua003:0/64] 2024-02-05 18:35:56,184 (trainer:756) INFO: 20epoch:train:7201-7300batch: iter_time=8.191e-05, forward_time=0.143, loss_ctc=92.408, loss_interctc_layer6=95.489, loss_interctc_layer12=79.781, loss_interctc_layer15=73.441, loss_interctc_layer21=95.648, loss=87.354, backward_time=0.202, grad_norm=66.069, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.063e-05, train_time=2.113 [gpua003:0/64] 2024-02-05 18:40:00,473 (trainer:756) INFO: 20epoch:train:7301-7400batch: iter_time=8.148e-05, forward_time=0.142, loss_ctc=71.326, loss_interctc_layer6=89.475, loss_interctc_layer12=74.957, loss_interctc_layer15=69.200, loss_interctc_layer21=73.299, loss=75.651, backward_time=0.201, grad_norm=62.899, clip=100.000, loss_scale=1.531e+31, optim_step_time=0.137, optim0_lr0=9.061e-05, train_time=2.443 [gpua003:0/64] 2024-02-05 18:43:58,963 (trainer:756) INFO: 20epoch:train:7401-7500batch: iter_time=7.789e-05, forward_time=0.296, loss_ctc=81.974, loss_interctc_layer6=82.485, loss_interctc_layer12=68.194, loss_interctc_layer15=62.562, loss_interctc_layer21=84.609, loss=75.965, backward_time=0.228, grad_norm=60.026, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=9.060e-05, train_time=2.384 [gpua003:0/64] 2024-02-05 18:44:18,994 (multiple_iter_factory:32) INFO: Building 6th iter-factory... 
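Every iter-factory above reports the same batching summary: UnsortedBatchSampler with N-batch=19027, batch_size=256, and mini-batch sizes mean=256.0, min=256, max=257. In other words, keys from the split's speech_shape file are grouped, without length-sorting, into fixed-size batches, with a small remainder spread one extra key per batch. A hypothetical sketch that reproduces that min/max/mean pattern (this is not ESPnet's sampler, and the exact utterance count per split is not in the log):

```python
def fixed_size_batches(keys, batch_size=256):
    """Split keys into len(keys)//batch_size batches, giving the first `remainder`
    batches one extra key each, so every batch holds batch_size or batch_size+1 keys."""
    n_batches = len(keys) // batch_size
    remainder = len(keys) - n_batches * batch_size
    batches, start = [], 0
    for i in range(n_batches):
        size = batch_size + (1 if i < remainder else 0)
        batches.append(keys[start:start + size])
        start += size
    return batches

keys = [f"utt{i}" for i in range(19027 * 256 + 3)]   # hypothetical count, ~4.87M utterances per split
sizes = [len(b) for b in fixed_size_batches(keys)]
print(len(sizes), min(sizes), max(sizes), sum(sizes) / len(sizes))   # 19027 256 257 256.00015...
```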
[gpua003:0/64] 2024-02-05 18:44:37,313 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 18:44:41,037 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.10", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.10", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.10", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.10", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 18:44:41,037 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.10, [gpua003:0/64] 2024-02-05 18:44:41,040 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 18:52:51,546 (trainer:756) INFO: 20epoch:train:7501-7600batch: iter_time=3.342, forward_time=0.142, loss_ctc=80.749, loss_interctc_layer6=83.056, loss_interctc_layer12=69.728, loss_interctc_layer15=64.519, loss_interctc_layer21=83.319, loss=76.274, backward_time=0.202, grad_norm=77.468, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.058e-05, train_time=5.326 [gpua003:0/64] 2024-02-05 18:56:25,301 (trainer:756) INFO: 20epoch:train:7601-7700batch: iter_time=8.308e-05, forward_time=0.142, loss_ctc=84.634, loss_interctc_layer6=85.527, loss_interctc_layer12=70.830, loss_interctc_layer15=64.920, loss_interctc_layer21=87.361, loss=78.654, backward_time=0.202, grad_norm=60.123, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.057e-05, train_time=2.138 [gpua003:0/64] 2024-02-05 19:00:47,284 (trainer:756) INFO: 20epoch:train:7701-7800batch: iter_time=8.603e-05, forward_time=0.143, loss_ctc=88.099, loss_interctc_layer6=89.844, loss_interctc_layer12=75.694, loss_interctc_layer15=70.159, loss_interctc_layer21=90.518, loss=82.863, backward_time=0.202, grad_norm=87.700, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.055e-05, train_time=2.620 [gpua003:0/64] 2024-02-05 19:04:48,971 (trainer:756) INFO: 20epoch:train:7801-7900batch: iter_time=8.480e-05, forward_time=0.142, loss_ctc=87.114, loss_interctc_layer6=79.294, loss_interctc_layer12=65.236, loss_interctc_layer15=59.554, loss_interctc_layer21=89.989, loss=76.238, backward_time=0.201, grad_norm=54.975, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.053e-05, train_time=2.417 [gpua003:0/64] 2024-02-05 19:08:33,200 (trainer:756) INFO: 20epoch:train:7901-8000batch: iter_time=8.670e-05, forward_time=0.142, loss_ctc=78.623, loss_interctc_layer6=83.866, loss_interctc_layer12=69.836, loss_interctc_layer15=64.396, loss_interctc_layer21=81.069, loss=75.558, backward_time=0.201, grad_norm=82.326, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.052e-05, train_time=2.242 [gpua003:0/64] 2024-02-05 19:12:20,206 (trainer:756) INFO: 20epoch:train:8001-8100batch: iter_time=8.165e-05, forward_time=0.143, loss_ctc=91.762, loss_interctc_layer6=93.321, loss_interctc_layer12=77.581, loss_interctc_layer15=71.179, loss_interctc_layer21=95.039, loss=85.777, backward_time=0.201, grad_norm=126.017, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.050e-05, train_time=2.270 [gpua003:0/64] 2024-02-05 19:15:59,655 (trainer:687) WARNING: The grad norm is nan. 
Skipping updating the model. [gpua003:0/64] 2024-02-05 19:16:06,135 (trainer:756) INFO: 20epoch:train:8101-8200batch: iter_time=8.595e-05, forward_time=0.154, loss_ctc=88.148, loss_interctc_layer6=94.691, loss_interctc_layer12=78.906, loss_interctc_layer15=72.701, loss_interctc_layer21=90.721, loss=85.034, backward_time=0.203, grad_norm=84.843, clip=100.000, loss_scale=1.998e+31, optim_step_time=0.137, optim0_lr0=9.049e-05, train_time=2.259 [gpua003:0/64] 2024-02-05 19:20:25,678 (trainer:756) INFO: 20epoch:train:8201-8300batch: iter_time=8.653e-05, forward_time=0.318, loss_ctc=69.086, loss_interctc_layer6=78.158, loss_interctc_layer12=65.155, loss_interctc_layer15=59.971, loss_interctc_layer21=71.141, loss=68.702, backward_time=0.232, grad_norm=68.368, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.145, optim0_lr0=9.047e-05, train_time=2.595 [gpua003:0/64] 2024-02-05 19:24:48,641 (trainer:756) INFO: 20epoch:train:8301-8400batch: iter_time=8.678e-05, forward_time=0.144, loss_ctc=83.794, loss_interctc_layer6=89.774, loss_interctc_layer12=74.275, loss_interctc_layer15=68.024, loss_interctc_layer21=86.431, loss=80.460, backward_time=0.202, grad_norm=74.155, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.046e-05, train_time=2.629 [gpua003:0/64] 2024-02-05 19:28:56,656 (trainer:756) INFO: 20epoch:train:8401-8500batch: iter_time=8.787e-05, forward_time=0.145, loss_ctc=85.115, loss_interctc_layer6=88.143, loss_interctc_layer12=73.036, loss_interctc_layer15=66.920, loss_interctc_layer21=87.749, loss=80.192, backward_time=0.203, grad_norm=81.099, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.044e-05, train_time=2.479 [gpua003:0/64] 2024-02-05 19:33:16,313 (trainer:756) INFO: 20epoch:train:8501-8600batch: iter_time=9.903e-05, forward_time=0.143, loss_ctc=84.565, loss_interctc_layer6=93.061, loss_interctc_layer12=77.885, loss_interctc_layer15=71.767, loss_interctc_layer21=87.310, loss=82.918, backward_time=0.202, grad_norm=85.804, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.043e-05, train_time=2.598 [gpua003:0/64] 2024-02-05 19:36:14,790 (trainer:756) INFO: 20epoch:train:8601-8700batch: iter_time=8.973e-05, forward_time=0.144, loss_ctc=72.883, loss_interctc_layer6=79.048, loss_interctc_layer12=65.596, loss_interctc_layer15=60.266, loss_interctc_layer21=75.263, loss=70.611, backward_time=0.203, grad_norm=74.416, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.041e-05, train_time=1.785 [gpua003:0/64] 2024-02-05 19:38:30,102 (multiple_iter_factory:32) INFO: Building 7th iter-factory... 
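The loss_scale column is averaged over each 100-batch window, so a halving of the scale first shows up as a value just under the old scale and only then as the new one. Around the warning above: 2.028e+31 for 8001-8100, 1.998e+31 for 8101-8200 (the window in which the update was skipped, near its end), then 1.014e+31 from 8201-8300 on. If the scale halves from 2.028e+31 to 1.014e+31 with about 97 of the 100 steps already done, the window average comes out at the logged 1.998e+31; the 97/3 split is inferred from the timestamps, not logged:

```python
old_scale, new_scale = 2.028e31, 1.014e31   # loss_scale before and after the overflow, from the log
steps_at_old = 97                           # inferred: the nan warning came near the end of the window
avg = (steps_at_old * old_scale + (100 - steps_at_old) * new_scale) / 100
print(f"{avg:.3e}")                         # 1.998e+31, as reported for 8101-8200batch
```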
[gpua003:0/64] 2024-02-05 19:38:48,895 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 19:38:52,264 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.9", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.9", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.9", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.9", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 19:38:52,264 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.9, [gpua003:0/64] 2024-02-05 19:38:52,323 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 19:46:29,771 (trainer:756) INFO: 20epoch:train:8701-8800batch: iter_time=3.785, forward_time=0.181, loss_ctc=83.793, loss_interctc_layer6=82.856, loss_interctc_layer12=69.037, loss_interctc_layer15=63.462, loss_interctc_layer21=86.452, loss=77.120, backward_time=0.211, grad_norm=63.697, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.141, optim0_lr0=9.040e-05, train_time=6.149 [gpua003:0/64] 2024-02-05 19:50:22,211 (trainer:756) INFO: 20epoch:train:8801-8900batch: iter_time=8.273e-05, forward_time=0.141, loss_ctc=77.297, loss_interctc_layer6=81.009, loss_interctc_layer12=67.284, loss_interctc_layer15=61.915, loss_interctc_layer21=79.751, loss=73.451, backward_time=0.201, grad_norm=63.049, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.038e-05, train_time=2.325 [gpua003:0/64] 2024-02-05 19:54:15,620 (trainer:756) INFO: 20epoch:train:8901-9000batch: iter_time=8.324e-05, forward_time=0.144, loss_ctc=97.382, loss_interctc_layer6=99.179, loss_interctc_layer12=82.540, loss_interctc_layer15=75.968, loss_interctc_layer21=100.658, loss=91.146, backward_time=0.202, grad_norm=67.201, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.036e-05, train_time=2.334 [gpua003:0/64] 2024-02-05 19:58:21,999 (trainer:756) INFO: 20epoch:train:9001-9100batch: iter_time=8.492e-05, forward_time=0.142, loss_ctc=70.559, loss_interctc_layer6=80.613, loss_interctc_layer12=67.436, loss_interctc_layer15=62.071, loss_interctc_layer21=72.766, loss=70.689, backward_time=0.201, grad_norm=59.747, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.035e-05, train_time=2.463 [gpua003:0/64] 2024-02-05 20:02:28,756 (trainer:756) INFO: 20epoch:train:9101-9200batch: iter_time=8.512e-05, forward_time=0.175, loss_ctc=81.196, loss_interctc_layer6=76.810, loss_interctc_layer12=63.260, loss_interctc_layer15=57.860, loss_interctc_layer21=84.081, loss=72.641, backward_time=0.201, grad_norm=75.008, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.033e-05, train_time=2.467 [gpua003:0/64] 2024-02-05 20:06:06,212 (trainer:756) INFO: 20epoch:train:9201-9300batch: iter_time=8.264e-05, forward_time=0.143, loss_ctc=89.950, loss_interctc_layer6=92.940, loss_interctc_layer12=77.558, loss_interctc_layer15=71.381, loss_interctc_layer21=93.004, loss=84.967, backward_time=0.201, grad_norm=89.976, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.032e-05, train_time=2.175 [gpua003:0/64] 2024-02-05 20:10:15,043 (trainer:756) INFO: 20epoch:train:9301-9400batch: iter_time=8.522e-05, 
forward_time=0.143, loss_ctc=87.533, loss_interctc_layer6=97.484, loss_interctc_layer12=81.156, loss_interctc_layer15=74.670, loss_interctc_layer21=90.338, loss=86.236, backward_time=0.202, grad_norm=79.931, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.030e-05, train_time=2.488 [gpua003:0/64] 2024-02-05 20:14:22,977 (trainer:756) INFO: 20epoch:train:9401-9500batch: iter_time=9.927e-05, forward_time=0.243, loss_ctc=65.595, loss_interctc_layer6=79.098, loss_interctc_layer12=65.809, loss_interctc_layer15=60.586, loss_interctc_layer21=67.684, loss=67.754, backward_time=0.225, grad_norm=58.113, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.150, optim0_lr0=9.029e-05, train_time=2.479 [gpua003:0/64] 2024-02-05 20:18:41,767 (trainer:756) INFO: 20epoch:train:9501-9600batch: iter_time=9.077e-05, forward_time=0.207, loss_ctc=67.773, loss_interctc_layer6=82.600, loss_interctc_layer12=68.607, loss_interctc_layer15=62.877, loss_interctc_layer21=69.684, loss=70.308, backward_time=0.220, grad_norm=64.512, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.144, optim0_lr0=9.027e-05, train_time=2.587 [gpua003:0/64] 2024-02-05 20:23:06,147 (trainer:756) INFO: 20epoch:train:9601-9700batch: iter_time=8.551e-05, forward_time=0.142, loss_ctc=76.833, loss_interctc_layer6=83.429, loss_interctc_layer12=68.856, loss_interctc_layer15=62.905, loss_interctc_layer21=79.240, loss=74.252, backward_time=0.200, grad_norm=68.621, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.026e-05, train_time=2.644 [gpua003:0/64] 2024-02-05 20:26:53,230 (trainer:756) INFO: 20epoch:train:9701-9800batch: iter_time=8.613e-05, forward_time=0.143, loss_ctc=92.875, loss_interctc_layer6=95.666, loss_interctc_layer12=79.917, loss_interctc_layer15=73.518, loss_interctc_layer21=96.079, loss=87.611, backward_time=0.201, grad_norm=72.759, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.024e-05, train_time=2.271 [gpua003:0/64] 2024-02-05 20:30:10,121 (trainer:756) INFO: 20epoch:train:9801-9900batch: iter_time=8.468e-05, forward_time=0.141, loss_ctc=71.735, loss_interctc_layer6=89.266, loss_interctc_layer12=74.801, loss_interctc_layer15=68.981, loss_interctc_layer21=73.756, loss=75.708, backward_time=0.201, grad_norm=75.897, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.023e-05, train_time=1.969 [gpua003:0/64] 2024-02-05 20:34:36,046 (trainer:756) INFO: 20epoch:train:9901-10000batch: iter_time=8.508e-05, forward_time=0.142, loss_ctc=82.787, loss_interctc_layer6=82.859, loss_interctc_layer12=68.459, loss_interctc_layer15=62.692, loss_interctc_layer21=85.354, loss=76.430, backward_time=0.201, grad_norm=58.237, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=9.021e-05, train_time=2.659 [gpua003:0/64] 2024-02-05 20:34:56,165 (multiple_iter_factory:32) INFO: Building 8th iter-factory... 
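train_time is consistent with seconds per batch averaged over the 100-batch window: consecutive log timestamps differ by roughly 100 x train_time. For instance, the 8801-8900batch entry above (train_time=2.325) was logged at 19:50:22,211 and the previous entry at 19:46:29,771, about 232 s apart. The much larger iter_time values (3-4 s instead of ~8e-05) appear only in the first window after each "Building Nth iter-factory..." message, i.e. while the data pipeline for the next split is being set up. A quick check with the two timestamps copied from above:

```python
from datetime import datetime

fmt = "%Y-%m-%d %H:%M:%S,%f"                                  # the log's timestamp format
t_prev = datetime.strptime("2024-02-05 19:46:29,771", fmt)    # 8701-8800batch entry
t_curr = datetime.strptime("2024-02-05 19:50:22,211", fmt)    # 8801-8900batch entry
print((t_curr - t_prev).total_seconds(), 100 * 2.325)         # ~232.4 vs 232.5
```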
[gpua003:0/64] 2024-02-05 20:35:15,071 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 20:35:18,578 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.1", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.1", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.1", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.1", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 20:35:18,578 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.1, [gpua003:0/64] 2024-02-05 20:35:18,675 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 20:44:56,113 (trainer:756) INFO: 20epoch:train:10001-10100batch: iter_time=4.118, forward_time=0.189, loss_ctc=76.065, loss_interctc_layer6=83.942, loss_interctc_layer12=70.337, loss_interctc_layer15=65.049, loss_interctc_layer21=78.330, loss=74.745, backward_time=0.213, grad_norm=107.474, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.139, optim0_lr0=9.020e-05, train_time=6.201 [gpua003:0/64] 2024-02-05 20:48:34,117 (trainer:756) INFO: 20epoch:train:10101-10200batch: iter_time=8.514e-05, forward_time=0.171, loss_ctc=83.295, loss_interctc_layer6=85.817, loss_interctc_layer12=70.991, loss_interctc_layer15=65.035, loss_interctc_layer21=86.017, loss=78.231, backward_time=0.263, grad_norm=61.865, clip=100.000, loss_scale=1.045e+31, optim_step_time=0.139, optim0_lr0=9.018e-05, train_time=2.179 [gpua003:0/64] 2024-02-05 20:52:18,714 (trainer:756) INFO: 20epoch:train:10201-10300batch: iter_time=8.608e-05, forward_time=0.143, loss_ctc=84.518, loss_interctc_layer6=89.499, loss_interctc_layer12=75.223, loss_interctc_layer15=69.826, loss_interctc_layer21=86.779, loss=81.169, backward_time=0.203, grad_norm=100.578, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.017e-05, train_time=2.247 [gpua003:0/64] 2024-02-05 20:55:41,847 (trainer:756) INFO: 20epoch:train:10301-10400batch: iter_time=8.959e-05, forward_time=0.142, loss_ctc=85.468, loss_interctc_layer6=79.163, loss_interctc_layer12=65.036, loss_interctc_layer15=59.300, loss_interctc_layer21=88.200, loss=75.433, backward_time=0.202, grad_norm=54.292, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.015e-05, train_time=2.031 [gpua003:0/64] 2024-02-05 21:00:21,966 (trainer:756) INFO: 20epoch:train:10401-10500batch: iter_time=8.859e-05, forward_time=0.143, loss_ctc=75.764, loss_interctc_layer6=83.361, loss_interctc_layer12=69.425, loss_interctc_layer15=63.700, loss_interctc_layer21=78.462, loss=74.142, backward_time=0.201, grad_norm=60.808, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.014e-05, train_time=2.801 [gpua003:0/64] 2024-02-05 21:04:22,513 (trainer:756) INFO: 20epoch:train:10501-10600batch: iter_time=8.710e-05, forward_time=0.143, loss_ctc=84.448, loss_interctc_layer6=92.976, loss_interctc_layer12=77.278, loss_interctc_layer15=70.884, loss_interctc_layer21=87.109, loss=82.539, backward_time=0.202, grad_norm=76.846, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=9.012e-05, train_time=2.405 [gpua003:0/64] 2024-02-05 21:08:27,164 (trainer:756) INFO: 20epoch:train:10601-10700batch: 
iter_time=9.263e-05, forward_time=0.234, loss_ctc=80.690, loss_interctc_layer6=93.117, loss_interctc_layer12=77.604, loss_interctc_layer15=71.576, loss_interctc_layer21=83.055, loss=81.208, backward_time=0.226, grad_norm=117.143, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.149, optim0_lr0=9.010e-05, train_time=2.445 [gpua003:0/64] 2024-02-05 21:13:15,671 (trainer:756) INFO: 20epoch:train:10701-10800batch: iter_time=9.124e-05, forward_time=0.147, loss_ctc=64.954, loss_interctc_layer6=77.446, loss_interctc_layer12=64.611, loss_interctc_layer15=59.347, loss_interctc_layer21=67.331, loss=66.738, backward_time=0.202, grad_norm=94.093, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.009e-05, train_time=2.886 [gpua003:0/64] 2024-02-05 21:16:44,363 (trainer:756) INFO: 20epoch:train:10801-10900batch: iter_time=9.192e-05, forward_time=0.144, loss_ctc=79.164, loss_interctc_layer6=88.777, loss_interctc_layer12=73.422, loss_interctc_layer15=67.190, loss_interctc_layer21=81.685, loss=78.047, backward_time=0.202, grad_norm=68.073, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=9.007e-05, train_time=2.087 [gpua003:0/64] 2024-02-05 21:20:36,876 (trainer:756) INFO: 20epoch:train:10901-11000batch: iter_time=8.723e-05, forward_time=0.180, loss_ctc=81.355, loss_interctc_layer6=87.631, loss_interctc_layer12=72.448, loss_interctc_layer15=66.337, loss_interctc_layer21=84.031, loss=78.361, backward_time=0.211, grad_norm=95.390, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.146, optim0_lr0=9.006e-05, train_time=2.325 [gpua003:0/64] 2024-02-05 21:25:05,410 (trainer:756) INFO: 20epoch:train:11001-11100batch: iter_time=9.223e-05, forward_time=0.143, loss_ctc=80.507, loss_interctc_layer6=92.307, loss_interctc_layer12=77.209, loss_interctc_layer15=71.328, loss_interctc_layer21=83.026, loss=80.875, backward_time=0.202, grad_norm=60.753, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=9.004e-05, train_time=2.685 [gpua003:0/64] 2024-02-05 21:28:02,324 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. [gpua003:0/64] 2024-02-05 21:28:38,213 (trainer:756) INFO: 20epoch:train:11101-11200batch: iter_time=9.257e-05, forward_time=0.170, loss_ctc=71.540, loss_interctc_layer6=78.505, loss_interctc_layer12=65.119, loss_interctc_layer15=59.796, loss_interctc_layer21=73.663, loss=69.725, backward_time=0.202, grad_norm=57.485, clip=100.000, loss_scale=1.834e+31, optim_step_time=0.137, optim0_lr0=9.003e-05, train_time=2.128 [gpua003:0/64] 2024-02-05 21:31:04,072 (multiple_iter_factory:32) INFO: Building 9th iter-factory... 
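The training data for this run is pre-split into 12 shards (splits12/.../split.N), and the epoch walks them one at a time: each "Building Nth iter-factory..." message above starts a fresh dataset/sampler/loader for the next shard, in a non-sequential order (split.6, 5, 0, 8, 7, 10, 9, 1 so far in this epoch). A minimal sketch of that outer loop; the function name and the per-epoch reshuffling are assumptions, not ESPnet internals:

```python
import random
from pathlib import Path

def iterate_shards(split_root, n_splits=12, epoch=20, seed=0):
    """Yield (iter_factory_index, shard_path) in a per-epoch shuffled order."""
    order = list(range(n_splits))
    random.Random(seed + epoch).shuffle(order)      # assumption: order is re-drawn each epoch
    for i, split_idx in enumerate(order):
        yield i, Path(split_root) / f"split.{split_idx}"

for i, shard in iterate_shards("exp/s2t_stats_raw_bpe50000/splits12/wav.scp"):
    print(f"Building {i}th iter-factory... -> {shard}")
    # build the dataset, batch sampler and loader for this shard, then train on it
```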
[gpua003:0/64] 2024-02-05 21:31:22,624 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 21:31:25,974 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.11", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.11", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.11", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.11", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 21:31:25,975 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.11, [gpua003:0/64] 2024-02-05 21:31:25,989 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 21:41:51,517 (trainer:756) INFO: 20epoch:train:11201-11300batch: iter_time=3.248, forward_time=0.190, loss_ctc=82.014, loss_interctc_layer6=83.694, loss_interctc_layer12=69.725, loss_interctc_layer15=64.177, loss_interctc_layer21=84.637, loss=76.849, backward_time=0.216, grad_norm=61.645, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=9.001e-05, train_time=7.932 [gpua003:0/64] 2024-02-05 21:45:13,750 (trainer:756) INFO: 20epoch:train:11301-11400batch: iter_time=8.426e-05, forward_time=0.142, loss_ctc=76.679, loss_interctc_layer6=80.850, loss_interctc_layer12=67.381, loss_interctc_layer15=62.027, loss_interctc_layer21=79.151, loss=73.218, backward_time=0.201, grad_norm=78.874, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=9.000e-05, train_time=2.023 [gpua003:0/64] 2024-02-05 21:49:02,453 (trainer:756) INFO: 20epoch:train:11401-11500batch: iter_time=2.096e-04, forward_time=0.184, loss_ctc=95.435, loss_interctc_layer6=98.833, loss_interctc_layer12=82.663, loss_interctc_layer15=76.031, loss_interctc_layer21=98.476, loss=90.287, backward_time=0.224, grad_norm=70.034, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.142, optim0_lr0=8.998e-05, train_time=2.287 [gpua003:0/64] 2024-02-05 21:53:33,419 (trainer:756) INFO: 20epoch:train:11501-11600batch: iter_time=8.586e-05, forward_time=0.142, loss_ctc=71.646, loss_interctc_layer6=80.481, loss_interctc_layer12=67.411, loss_interctc_layer15=62.065, loss_interctc_layer21=73.880, loss=71.097, backward_time=0.201, grad_norm=58.996, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.997e-05, train_time=2.710 [gpua003:0/64] 2024-02-05 21:57:37,116 (trainer:756) INFO: 20epoch:train:11601-11700batch: iter_time=8.410e-05, forward_time=0.144, loss_ctc=80.240, loss_interctc_layer6=76.552, loss_interctc_layer12=62.982, loss_interctc_layer15=57.561, loss_interctc_layer21=83.063, loss=72.080, backward_time=0.201, grad_norm=61.035, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.995e-05, train_time=2.436 [gpua003:0/64] 2024-02-05 22:00:44,444 (trainer:756) INFO: 20epoch:train:11701-11800batch: iter_time=8.802e-05, forward_time=0.143, loss_ctc=89.293, loss_interctc_layer6=92.728, loss_interctc_layer12=77.273, loss_interctc_layer15=71.020, loss_interctc_layer21=92.439, loss=84.550, backward_time=0.202, grad_norm=75.693, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.994e-05, train_time=1.873 [gpua003:0/64] 2024-02-05 22:04:00,618 (trainer:756) INFO: 20epoch:train:11801-11900batch: 
iter_time=9.163e-05, forward_time=0.143, loss_ctc=86.101, loss_interctc_layer6=97.123, loss_interctc_layer12=80.892, loss_interctc_layer15=74.439, loss_interctc_layer21=88.898, loss=85.491, backward_time=0.202, grad_norm=68.290, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.992e-05, train_time=1.962 [gpua003:0/64] 2024-02-05 22:08:49,629 (trainer:756) INFO: 20epoch:train:11901-12000batch: iter_time=9.205e-05, forward_time=0.267, loss_ctc=64.329, loss_interctc_layer6=78.602, loss_interctc_layer12=65.253, loss_interctc_layer15=59.971, loss_interctc_layer21=66.301, loss=66.891, backward_time=0.276, grad_norm=73.732, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.153, optim0_lr0=8.991e-05, train_time=2.889 [gpua003:0/64] 2024-02-05 22:13:35,802 (trainer:756) INFO: 20epoch:train:12001-12100batch: iter_time=9.277e-05, forward_time=0.142, loss_ctc=67.658, loss_interctc_layer6=82.582, loss_interctc_layer12=68.565, loss_interctc_layer15=62.943, loss_interctc_layer21=69.424, loss=70.235, backward_time=0.201, grad_norm=66.228, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.989e-05, train_time=2.862 [gpua003:0/64] 2024-02-05 22:17:53,726 (trainer:756) INFO: 20epoch:train:12101-12200batch: iter_time=8.994e-05, forward_time=0.142, loss_ctc=77.050, loss_interctc_layer6=82.788, loss_interctc_layer12=68.336, loss_interctc_layer15=62.414, loss_interctc_layer21=79.750, loss=74.068, backward_time=0.201, grad_norm=61.739, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.988e-05, train_time=2.579 [gpua003:0/64] 2024-02-05 22:21:40,863 (trainer:756) INFO: 20epoch:train:12201-12300batch: iter_time=8.830e-05, forward_time=0.143, loss_ctc=92.119, loss_interctc_layer6=95.441, loss_interctc_layer12=79.526, loss_interctc_layer15=73.072, loss_interctc_layer21=95.376, loss=87.107, backward_time=0.202, grad_norm=72.075, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.986e-05, train_time=2.271 [gpua003:0/64] 2024-02-05 22:25:40,776 (trainer:756) INFO: 20epoch:train:12301-12400batch: iter_time=9.008e-05, forward_time=0.169, loss_ctc=70.447, loss_interctc_layer6=88.720, loss_interctc_layer12=74.259, loss_interctc_layer15=68.420, loss_interctc_layer21=72.435, loss=74.856, backward_time=0.201, grad_norm=75.395, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.985e-05, train_time=2.399 [gpua003:0/64] 2024-02-05 22:29:16,845 (trainer:756) INFO: 20epoch:train:12401-12500batch: iter_time=8.603e-05, forward_time=0.142, loss_ctc=81.421, loss_interctc_layer6=82.206, loss_interctc_layer12=68.003, loss_interctc_layer15=62.214, loss_interctc_layer21=84.095, loss=75.588, backward_time=0.201, grad_norm=57.002, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.983e-05, train_time=2.160 [gpua003:0/64] 2024-02-05 22:29:36,875 (multiple_iter_factory:32) INFO: Building 10th iter-factory... 
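The optim0_lr0 column decays slowly across the epoch (9.157e-05 near batch 1300, 8.983e-05 near batch 12500), and the decay is consistent with an inverse-square-root schedule, lr proportional to 1/sqrt(step). Solving (lr2/lr1)^2 = S/(S+delta) for the implied global step S, using the windows 9901-10000 (9.021e-05) and 12401-12500 (8.983e-05) with delta = 2500 steps, gives S around 2.95e5: about 19 earlier epochs of 15000 steps plus the roughly 10000 batches of epoch 20 seen so far, which also matches the total_count=300000 reported in the 20epoch results further below. The scheduler itself is set in the training config, which is not shown here:

```python
lr1, lr2 = 9.021e-05, 8.983e-05   # optim0_lr0 at windows 9901-10000 and 12401-12500 (from the log)
delta = 12450 - 9950              # steps between the two window midpoints

r2 = (lr2 / lr1) ** 2             # inverse-sqrt decay: lr ~ 1/sqrt(step)  =>  (lr2/lr1)^2 = S / (S + delta)
S = delta * r2 / (1.0 - r2)       # implied global step at the first window
print(round(S))                   # ~295000, within rounding of the 4-digit lr values
```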
[gpua003:0/64] 2024-02-05 22:29:55,527 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 22:29:58,953 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.2", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.2", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.2", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.2", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 22:29:58,954 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.2, [gpua003:0/64] 2024-02-05 22:29:58,980 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 22:44:45,758 (trainer:756) INFO: 20epoch:train:12501-12600batch: iter_time=3.971, forward_time=0.209, loss_ctc=81.575, loss_interctc_layer6=83.231, loss_interctc_layer12=69.923, loss_interctc_layer15=64.726, loss_interctc_layer21=84.309, loss=76.753, backward_time=0.217, grad_norm=233.063, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=8.982e-05, train_time=9.289 [gpua003:0/64] 2024-02-05 22:48:32,468 (trainer:756) INFO: 20epoch:train:12601-12700batch: iter_time=8.491e-05, forward_time=0.143, loss_ctc=84.879, loss_interctc_layer6=85.606, loss_interctc_layer12=70.891, loss_interctc_layer15=64.879, loss_interctc_layer21=87.740, loss=78.799, backward_time=0.203, grad_norm=120.545, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.980e-05, train_time=2.267 [gpua003:0/64] 2024-02-05 22:52:13,278 (trainer:756) INFO: 20epoch:train:12701-12800batch: iter_time=9.457e-05, forward_time=0.144, loss_ctc=87.648, loss_interctc_layer6=89.324, loss_interctc_layer12=75.126, loss_interctc_layer15=69.704, loss_interctc_layer21=90.264, loss=82.413, backward_time=0.203, grad_norm=80.908, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.979e-05, train_time=2.208 [gpua003:0/64] 2024-02-05 22:56:03,985 (trainer:756) INFO: 20epoch:train:12801-12900batch: iter_time=9.548e-05, forward_time=0.142, loss_ctc=87.330, loss_interctc_layer6=78.751, loss_interctc_layer12=64.677, loss_interctc_layer15=59.038, loss_interctc_layer21=90.298, loss=76.019, backward_time=0.202, grad_norm=96.431, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.977e-05, train_time=2.307 [gpua003:0/64] 2024-02-05 23:00:30,657 (trainer:756) INFO: 20epoch:train:12901-13000batch: iter_time=3.100e-04, forward_time=0.260, loss_ctc=77.871, loss_interctc_layer6=83.555, loss_interctc_layer12=69.257, loss_interctc_layer15=63.602, loss_interctc_layer21=80.408, loss=74.939, backward_time=0.233, grad_norm=67.926, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.158, optim0_lr0=8.976e-05, train_time=2.666 [gpua003:0/64] 2024-02-05 23:04:42,014 (trainer:756) INFO: 20epoch:train:13001-13100batch: iter_time=9.436e-05, forward_time=0.147, loss_ctc=91.801, loss_interctc_layer6=92.531, loss_interctc_layer12=76.921, loss_interctc_layer15=70.783, loss_interctc_layer21=94.808, loss=85.369, backward_time=0.202, grad_norm=78.302, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.974e-05, train_time=2.514 [gpua003:0/64] 2024-02-05 23:08:00,222 (trainer:756) INFO: 20epoch:train:13101-13200batch: 
iter_time=9.645e-05, forward_time=0.143, loss_ctc=87.329, loss_interctc_layer6=93.268, loss_interctc_layer12=77.611, loss_interctc_layer15=71.464, loss_interctc_layer21=90.008, loss=83.936, backward_time=0.202, grad_norm=68.088, clip=100.000, loss_scale=1.207e+31, optim_step_time=0.137, optim0_lr0=8.973e-05, train_time=1.982 [gpua003:0/64] 2024-02-05 23:12:37,151 (trainer:756) INFO: 20epoch:train:13201-13300batch: iter_time=9.646e-05, forward_time=0.142, loss_ctc=68.982, loss_interctc_layer6=77.657, loss_interctc_layer12=64.630, loss_interctc_layer15=59.398, loss_interctc_layer21=71.129, loss=68.359, backward_time=0.201, grad_norm=70.994, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.971e-05, train_time=2.769 [gpua003:0/64] 2024-02-05 23:16:21,364 (trainer:756) INFO: 20epoch:train:13301-13400batch: iter_time=8.762e-05, forward_time=0.143, loss_ctc=82.311, loss_interctc_layer6=88.968, loss_interctc_layer12=73.407, loss_interctc_layer15=67.212, loss_interctc_layer21=84.953, loss=79.370, backward_time=0.203, grad_norm=59.923, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.970e-05, train_time=2.242 [gpua003:0/64] 2024-02-05 23:21:06,759 (trainer:756) INFO: 20epoch:train:13401-13500batch: iter_time=9.307e-05, forward_time=0.338, loss_ctc=85.135, loss_interctc_layer6=87.896, loss_interctc_layer12=72.463, loss_interctc_layer15=66.397, loss_interctc_layer21=88.054, loss=79.989, backward_time=0.249, grad_norm=66.083, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.154, optim0_lr0=8.968e-05, train_time=2.853 [gpua003:0/64] 2024-02-05 23:25:26,840 (trainer:756) INFO: 20epoch:train:13501-13600batch: iter_time=9.193e-05, forward_time=0.144, loss_ctc=84.336, loss_interctc_layer6=92.091, loss_interctc_layer12=77.125, loss_interctc_layer15=71.208, loss_interctc_layer21=86.901, loss=82.332, backward_time=0.202, grad_norm=119.270, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.967e-05, train_time=2.602 [gpua003:0/64] 2024-02-05 23:29:27,360 (trainer:756) INFO: 20epoch:train:13601-13700batch: iter_time=9.678e-05, forward_time=0.142, loss_ctc=73.101, loss_interctc_layer6=78.445, loss_interctc_layer12=65.142, loss_interctc_layer15=59.710, loss_interctc_layer21=75.261, loss=70.332, backward_time=0.201, grad_norm=70.055, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.965e-05, train_time=2.405 [gpua003:0/64] 2024-02-05 23:31:37,387 (multiple_iter_factory:32) INFO: Building 11th iter-factory... 
[gpua003:0/64] 2024-02-05 23:31:55,978 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-05 23:31:59,433 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.4", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.4", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.4", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.4", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-05 23:31:59,434 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.4, [gpua003:0/64] 2024-02-05 23:31:59,458 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-05 23:41:44,013 (trainer:756) INFO: 20epoch:train:13701-13800batch: iter_time=4.117, forward_time=0.145, loss_ctc=86.449, loss_interctc_layer6=83.156, loss_interctc_layer12=69.241, loss_interctc_layer15=63.642, loss_interctc_layer21=89.204, loss=78.339, backward_time=0.201, grad_norm=65.688, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.964e-05, train_time=7.366 [gpua003:0/64] 2024-02-05 23:49:00,707 (trainer:756) INFO: 20epoch:train:13801-13900batch: iter_time=8.801e-05, forward_time=0.142, loss_ctc=80.330, loss_interctc_layer6=81.265, loss_interctc_layer12=67.461, loss_interctc_layer15=61.896, loss_interctc_layer21=82.918, loss=74.774, backward_time=0.202, grad_norm=67.895, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.962e-05, train_time=4.367 [gpua003:0/64] 2024-02-05 23:58:46,196 (trainer:756) INFO: 20epoch:train:13901-14000batch: iter_time=9.316e-05, forward_time=0.144, loss_ctc=98.902, loss_interctc_layer6=98.098, loss_interctc_layer12=81.715, loss_interctc_layer15=75.207, loss_interctc_layer21=101.996, loss=91.183, backward_time=0.202, grad_norm=113.239, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.961e-05, train_time=5.855 [gpua003:0/64] 2024-02-06 00:13:31,413 (trainer:756) INFO: 20epoch:train:14001-14100batch: iter_time=1.045e-04, forward_time=0.218, loss_ctc=75.012, loss_interctc_layer6=80.880, loss_interctc_layer12=67.798, loss_interctc_layer15=62.506, loss_interctc_layer21=77.267, loss=72.693, backward_time=0.292, grad_norm=77.240, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.154, optim0_lr0=8.959e-05, train_time=8.851 [gpua003:0/64] 2024-02-06 00:23:31,035 (trainer:756) INFO: 20epoch:train:14101-14200batch: iter_time=9.865e-05, forward_time=0.144, loss_ctc=81.051, loss_interctc_layer6=76.332, loss_interctc_layer12=62.740, loss_interctc_layer15=57.407, loss_interctc_layer21=83.790, loss=72.264, backward_time=0.201, grad_norm=58.619, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.958e-05, train_time=5.997 [gpua003:0/64] 2024-02-06 00:37:04,883 (trainer:756) INFO: 20epoch:train:14201-14300batch: iter_time=9.754e-05, forward_time=0.146, loss_ctc=97.521, loss_interctc_layer6=92.201, loss_interctc_layer12=76.981, loss_interctc_layer15=70.955, loss_interctc_layer21=101.021, loss=87.736, backward_time=0.200, grad_norm=66.034, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.956e-05, train_time=8.138 [gpua003:0/64] 2024-02-06 00:49:41,216 (trainer:756) INFO: 20epoch:train:14301-14400batch: 
iter_time=9.892e-05, forward_time=0.144, loss_ctc=89.973, loss_interctc_layer6=96.271, loss_interctc_layer12=80.099, loss_interctc_layer15=73.833, loss_interctc_layer21=92.760, loss=86.587, backward_time=0.201, grad_norm=87.962, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.955e-05, train_time=7.563 [gpua003:0/64] 2024-02-06 00:58:20,989 (trainer:756) INFO: 20epoch:train:14401-14500batch: iter_time=9.948e-05, forward_time=0.143, loss_ctc=70.083, loss_interctc_layer6=78.583, loss_interctc_layer12=65.226, loss_interctc_layer15=59.896, loss_interctc_layer21=72.153, loss=69.188, backward_time=0.200, grad_norm=70.908, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.953e-05, train_time=5.198 [gpua003:0/64] 2024-02-06 01:09:22,165 (trainer:756) INFO: 20epoch:train:14501-14600batch: iter_time=0.002, forward_time=0.246, loss_ctc=69.918, loss_interctc_layer6=82.699, loss_interctc_layer12=68.707, loss_interctc_layer15=63.026, loss_interctc_layer21=71.832, loss=71.236, backward_time=0.223, grad_norm=67.039, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=8.952e-05, train_time=6.612 [gpua003:0/64] 2024-02-06 01:21:25,436 (trainer:756) INFO: 20epoch:train:14601-14700batch: iter_time=1.032e-04, forward_time=0.149, loss_ctc=80.245, loss_interctc_layer6=82.941, loss_interctc_layer12=68.505, loss_interctc_layer15=62.413, loss_interctc_layer21=83.166, loss=75.454, backward_time=0.202, grad_norm=66.544, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=8.950e-05, train_time=7.232 [gpua003:0/64] 2024-02-06 01:32:47,644 (trainer:756) INFO: 20epoch:train:14701-14800batch: iter_time=9.608e-05, forward_time=0.148, loss_ctc=97.344, loss_interctc_layer6=95.586, loss_interctc_layer12=79.730, loss_interctc_layer15=73.317, loss_interctc_layer21=100.660, loss=89.327, backward_time=0.202, grad_norm=70.770, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=8.949e-05, train_time=6.821 [gpua003:0/64] 2024-02-06 01:47:07,274 (trainer:756) INFO: 20epoch:train:14801-14900batch: iter_time=1.021e-04, forward_time=0.146, loss_ctc=71.126, loss_interctc_layer6=88.790, loss_interctc_layer12=74.293, loss_interctc_layer15=68.473, loss_interctc_layer21=73.103, loss=75.157, backward_time=0.216, grad_norm=128.306, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.947e-05, train_time=8.597 [gpua003:0/64] 2024-02-06 02:06:28,525 (trainer:756) INFO: 20epoch:train:14901-15000batch: iter_time=1.035e-04, forward_time=0.143, loss_ctc=84.991, loss_interctc_layer6=82.436, loss_interctc_layer12=68.244, loss_interctc_layer15=62.398, loss_interctc_layer21=87.843, loss=77.183, backward_time=0.199, grad_norm=61.786, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.946e-05, train_time=11.612 [gpua003:0/64] 2024-02-06 02:39:11,433 (trainer:355) INFO: 20epoch results: [train] iter_time=0.289, forward_time=0.161, loss_ctc=81.932, loss_interctc_layer6=86.807, loss_interctc_layer12=72.340, loss_interctc_layer15=66.547, loss_interctc_layer21=84.516, loss=78.428, backward_time=0.209, grad_norm=76.115, clip=100.000, loss_scale=2.241e+31, optim_step_time=0.138, optim0_lr0=9.060e-05, train_time=3.092, time=12 hours, 53 minutes and 30.9 seconds, total_count=300000, gpu_max_cached_mem_GB=33.436, [valid] loss_ctc=48.274, cer_ctc=0.223, loss_interctc_layer6=53.929, cer_interctc_layer6=0.237, loss_interctc_layer12=40.858, cer_interctc_layer12=0.170, loss_interctc_layer15=36.244, cer_interctc_layer15=0.143, 
loss_interctc_layer21=50.897, cer_interctc_layer21=0.234, loss=46.040, time=32 minutes and 18.61 seconds, total_count=93420, gpu_max_cached_mem_GB=33.436 [gpua003:0/64] 2024-02-06 02:39:33,616 (trainer:410) INFO: The best model has been updated: valid.total_count [gpua003:0/64] 2024-02-06 02:39:33,741 (trainer:464) INFO: The model files were removed: exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/14epoch.pth, exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/15epoch.pth [gpua003:0/64] 2024-02-06 02:39:33,741 (trainer:289) INFO: 21/45epoch started. Estimated time to finish: 1 week, 6 days and 15 hours [gpua003:0/64] 2024-02-06 02:39:33,884 (multiple_iter_factory:32) INFO: Building 0th iter-factory... [gpua003:0/64] 2024-02-06 02:39:52,078 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-06 02:39:55,502 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.1", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.1", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.1", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.1", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-06 02:39:55,503 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.1, [gpua003:0/64] 2024-02-06 02:39:55,506 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-06 02:48:33,011 (trainer:756) INFO: 21epoch:train:1-100batch: iter_time=2.907, forward_time=0.168, loss_ctc=71.505, loss_interctc_layer6=79.294, loss_interctc_layer12=66.609, loss_interctc_layer15=61.401, loss_interctc_layer21=73.827, loss=70.527, backward_time=0.212, grad_norm=64.266, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=8.944e-05, train_time=5.391 [gpua003:0/64] 2024-02-06 02:51:32,976 (trainer:756) INFO: 21epoch:train:101-200batch: iter_time=8.326e-05, forward_time=0.142, loss_ctc=77.459, loss_interctc_layer6=85.428, loss_interctc_layer12=72.892, loss_interctc_layer15=67.676, loss_interctc_layer21=80.090, loss=76.709, backward_time=0.202, grad_norm=63.938, clip=100.000, loss_scale=2.414e+31, optim_step_time=0.136, optim0_lr0=8.943e-05, train_time=1.799 [gpua003:0/64] 2024-02-06 02:55:15,269 (trainer:756) INFO: 21epoch:train:201-300batch: iter_time=8.692e-05, forward_time=0.143, loss_ctc=86.505, loss_interctc_layer6=89.506, loss_interctc_layer12=76.431, loss_interctc_layer15=70.336, loss_interctc_layer21=88.776, loss=82.311, backward_time=0.202, grad_norm=72.336, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=8.941e-05, train_time=2.223 [gpua003:0/64] 2024-02-06 02:58:34,160 (trainer:756) INFO: 21epoch:train:301-400batch: iter_time=9.051e-05, forward_time=0.142, loss_ctc=81.667, loss_interctc_layer6=86.280, loss_interctc_layer12=73.990, loss_interctc_layer15=66.711, loss_interctc_layer21=81.166, loss=77.963, backward_time=0.201, grad_norm=68.397, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.940e-05, train_time=1.989 [gpua003:0/64] 2024-02-06 03:01:59,042 (trainer:756) INFO: 21epoch:train:401-500batch: iter_time=8.494e-05, forward_time=0.143, loss_ctc=80.909, loss_interctc_layer6=86.728, loss_interctc_layer12=73.516, 
loss_interctc_layer15=67.474, loss_interctc_layer21=82.024, loss=78.130, backward_time=0.205, grad_norm=67.297, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.938e-05, train_time=2.049 [gpua003:0/64] 2024-02-06 03:05:52,127 (trainer:756) INFO: 21epoch:train:501-600batch: iter_time=9.747e-05, forward_time=0.144, loss_ctc=70.936, loss_interctc_layer6=90.188, loss_interctc_layer12=75.176, loss_interctc_layer15=69.082, loss_interctc_layer21=72.622, loss=75.601, backward_time=0.202, grad_norm=71.312, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.937e-05, train_time=2.330 [gpua003:0/64] 2024-02-06 03:09:39,130 (trainer:756) INFO: 21epoch:train:601-700batch: iter_time=9.608e-05, forward_time=0.143, loss_ctc=69.227, loss_interctc_layer6=80.216, loss_interctc_layer12=66.841, loss_interctc_layer15=61.385, loss_interctc_layer21=71.701, loss=69.874, backward_time=0.202, grad_norm=65.893, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.935e-05, train_time=2.271 [gpua003:0/64] 2024-02-06 03:13:11,608 (trainer:756) INFO: 21epoch:train:701-800batch: iter_time=9.149e-05, forward_time=0.143, loss_ctc=70.109, loss_interctc_layer6=78.237, loss_interctc_layer12=65.184, loss_interctc_layer15=60.007, loss_interctc_layer21=72.367, loss=69.181, backward_time=0.203, grad_norm=61.576, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.934e-05, train_time=2.125 [gpua003:0/64] 2024-02-06 03:16:55,386 (trainer:756) INFO: 21epoch:train:801-900batch: iter_time=1.024e-04, forward_time=0.143, loss_ctc=102.998, loss_interctc_layer6=99.132, loss_interctc_layer12=83.489, loss_interctc_layer15=77.207, loss_interctc_layer21=106.196, loss=93.804, backward_time=0.202, grad_norm=96.569, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.932e-05, train_time=2.238 [gpua003:0/64] 2024-02-06 03:20:50,608 (trainer:756) INFO: 21epoch:train:901-1000batch: iter_time=3.829e-04, forward_time=0.162, loss_ctc=69.706, loss_interctc_layer6=85.973, loss_interctc_layer12=73.001, loss_interctc_layer15=67.897, loss_interctc_layer21=71.940, loss=73.703, backward_time=0.206, grad_norm=65.448, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=8.931e-05, train_time=2.352 [gpua003:0/64] 2024-02-06 03:24:52,161 (trainer:756) INFO: 21epoch:train:1001-1100batch: iter_time=9.468e-05, forward_time=0.181, loss_ctc=75.657, loss_interctc_layer6=83.951, loss_interctc_layer12=69.857, loss_interctc_layer15=64.000, loss_interctc_layer21=77.976, loss=74.288, backward_time=0.206, grad_norm=66.776, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=8.929e-05, train_time=2.415 [gpua003:0/64] 2024-02-06 03:28:39,530 (trainer:756) INFO: 21epoch:train:1101-1200batch: iter_time=9.442e-05, forward_time=0.200, loss_ctc=72.603, loss_interctc_layer6=82.978, loss_interctc_layer12=69.107, loss_interctc_layer15=63.580, loss_interctc_layer21=74.955, loss=72.645, backward_time=0.216, grad_norm=59.109, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.143, optim0_lr0=8.928e-05, train_time=2.273 [gpua003:0/64] 2024-02-06 03:30:43,051 (multiple_iter_factory:32) INFO: Building 1th iter-factory... 
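On the epoch boundary a few entries above: once the 20epoch results are written, the trainer keeps only the checkpoints it still needs (the latest plus the best ones per validation criterion, here valid.total_count) and deletes the rest, which is why 14epoch.pth and 15epoch.pth were removed. The "Estimated time to finish" is also roughly consistent with simple arithmetic: about 13.4 hours per epoch (12 h 53 m train plus 32 m valid) times the 25 epochs remaining after 20/45 is close to two weeks, broadly in line with the logged estimate. The snippet below is only an illustrative sketch of such a retention policy, not ESPnet's implementation, and keep_nbest is an assumed parameter name.

import os

# Illustrative checkpoint-retention sketch (not ESPnet's code): keep the most
# recent epoch plus the keep_nbest epochs ranked by a validation criterion, and
# delete everything else, which is the behaviour visible in the log above.
def prune_checkpoints(ckpt_paths, valid_scores, keep_nbest=3):
    """ckpt_paths: {epoch: path}; valid_scores: {epoch: score, higher is better}."""
    ranked = sorted(valid_scores, key=valid_scores.get, reverse=True)
    keep = set(ranked[:keep_nbest]) | {max(ckpt_paths)}   # n-best plus the latest
    removed = [path for epoch, path in ckpt_paths.items() if epoch not in keep]
    for path in removed:
        os.remove(path)
    return removed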
[gpua003:0/64] 2024-02-06 03:31:01,554 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-06 03:31:04,976 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.6", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.6", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.6", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.6", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-06 03:31:04,976 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.6, [gpua003:0/64] 2024-02-06 03:31:05,000 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-06 03:42:16,008 (trainer:756) INFO: 21epoch:train:1201-1300batch: iter_time=3.320, forward_time=0.141, loss_ctc=69.938, loss_interctc_layer6=76.133, loss_interctc_layer12=63.573, loss_interctc_layer15=58.516, loss_interctc_layer21=72.093, loss=68.051, backward_time=0.204, grad_norm=57.161, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.926e-05, train_time=8.164 [gpua003:0/64] 2024-02-06 03:45:29,672 (trainer:756) INFO: 21epoch:train:1301-1400batch: iter_time=7.984e-05, forward_time=0.142, loss_ctc=80.701, loss_interctc_layer6=80.038, loss_interctc_layer12=67.066, loss_interctc_layer15=61.755, loss_interctc_layer21=83.469, loss=74.606, backward_time=0.201, grad_norm=60.862, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.925e-05, train_time=1.937 [gpua003:0/64] 2024-02-06 03:45:48,568 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. [gpua003:0/64] 2024-02-06 03:48:39,237 (trainer:756) INFO: 21epoch:train:1401-1500batch: iter_time=8.106e-05, forward_time=0.142, loss_ctc=78.686, loss_interctc_layer6=84.338, loss_interctc_layer12=71.027, loss_interctc_layer15=65.821, loss_interctc_layer21=81.171, loss=76.209, backward_time=0.201, grad_norm=90.195, clip=100.000, loss_scale=2.213e+31, optim_step_time=0.137, optim0_lr0=8.923e-05, train_time=1.895 [gpua003:0/64] 2024-02-06 03:49:49,188 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
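The WARNING lines above ("The grad norm is nan. Skipping updating the model.") are the trainer's response to a non-finite gradient under mixed-precision training: the optimizer step for that batch is dropped and the dynamic loss scale is lowered, which is why the loss_scale column falls from about 4.06e+31 toward 1.01e+31 over the following 100-batch windows (the logged value is a window average, so intermediate figures appear). The sketch below shows the standard torch.cuda.amp pattern this behaviour corresponds to; it is a minimal illustration rather than the trainer's actual code, and max_norm=100.0 is simply taken from the clip=100.000 column.

import torch

# Minimal mixed-precision step in the standard torch.cuda.amp style, assumed to
# mirror what the warnings above report: a non-finite grad norm means the update
# is skipped, and GradScaler.update() lowers the loss scale afterwards.
scaler = torch.cuda.amp.GradScaler()

def train_step(model, batch, optimizer, max_norm=100.0):
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = model(**batch)                      # assumes the model returns a scalar loss
    scaler.scale(loss).backward()
    scaler.unscale_(optimizer)                     # bring grads back to real scale
    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
    if not torch.isfinite(grad_norm):
        print("The grad norm is nan. Skipping updating the model.")
    else:
        scaler.step(optimizer)                     # also skips internally on inf/nan grads
    scaler.update()                                # reduces the scale after a skipped step
    return loss.detach(), grad_norm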
[gpua003:0/64] 2024-02-06 03:51:53,376 (trainer:756) INFO: 21epoch:train:1501-1600batch: iter_time=8.288e-05, forward_time=0.142, loss_ctc=79.187, loss_interctc_layer6=84.756, loss_interctc_layer12=71.998, loss_interctc_layer15=66.156, loss_interctc_layer21=81.621, loss=76.744, backward_time=0.201, grad_norm=82.849, clip=100.000, loss_scale=1.352e+31, optim_step_time=0.137, optim0_lr0=8.922e-05, train_time=1.941 [gpua003:0/64] 2024-02-06 03:55:38,503 (trainer:756) INFO: 21epoch:train:1601-1700batch: iter_time=8.358e-05, forward_time=0.143, loss_ctc=85.299, loss_interctc_layer6=87.241, loss_interctc_layer12=73.274, loss_interctc_layer15=68.206, loss_interctc_layer21=88.160, loss=80.436, backward_time=0.202, grad_norm=108.024, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.920e-05, train_time=2.251 [gpua003:0/64] 2024-02-06 03:59:31,433 (trainer:756) INFO: 21epoch:train:1701-1800batch: iter_time=8.564e-05, forward_time=0.142, loss_ctc=83.728, loss_interctc_layer6=89.777, loss_interctc_layer12=75.005, loss_interctc_layer15=69.255, loss_interctc_layer21=86.425, loss=80.838, backward_time=0.202, grad_norm=71.946, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.919e-05, train_time=2.329 [gpua003:0/64] 2024-02-06 04:03:19,675 (trainer:756) INFO: 21epoch:train:1801-1900batch: iter_time=8.226e-05, forward_time=0.145, loss_ctc=79.708, loss_interctc_layer6=87.017, loss_interctc_layer12=72.616, loss_interctc_layer15=66.734, loss_interctc_layer21=81.947, loss=77.604, backward_time=0.202, grad_norm=77.123, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.917e-05, train_time=2.282 [gpua003:0/64] 2024-02-06 04:06:46,917 (trainer:756) INFO: 21epoch:train:1901-2000batch: iter_time=8.576e-05, forward_time=0.159, loss_ctc=73.065, loss_interctc_layer6=77.358, loss_interctc_layer12=64.250, loss_interctc_layer15=58.832, loss_interctc_layer21=75.387, loss=69.778, backward_time=0.212, grad_norm=60.145, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=8.916e-05, train_time=2.072 [gpua003:0/64] 2024-02-06 04:10:38,747 (trainer:756) INFO: 21epoch:train:2001-2100batch: iter_time=8.469e-05, forward_time=0.143, loss_ctc=86.209, loss_interctc_layer6=90.316, loss_interctc_layer12=76.085, loss_interctc_layer15=70.261, loss_interctc_layer21=88.780, loss=82.330, backward_time=0.201, grad_norm=78.690, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.915e-05, train_time=2.318 [gpua003:0/64] 2024-02-06 04:14:31,482 (trainer:756) INFO: 21epoch:train:2101-2200batch: iter_time=8.556e-05, forward_time=0.171, loss_ctc=94.545, loss_interctc_layer6=87.706, loss_interctc_layer12=73.380, loss_interctc_layer15=67.299, loss_interctc_layer21=97.760, loss=84.138, backward_time=0.208, grad_norm=69.546, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.913e-05, train_time=2.326 [gpua003:0/64] 2024-02-06 04:18:48,439 (trainer:756) INFO: 21epoch:train:2201-2300batch: iter_time=8.671e-05, forward_time=0.195, loss_ctc=69.126, loss_interctc_layer6=87.113, loss_interctc_layer12=73.401, loss_interctc_layer15=67.985, loss_interctc_layer21=71.171, loss=73.759, backward_time=0.227, grad_norm=68.009, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.144, optim0_lr0=8.912e-05, train_time=2.570 [gpua003:0/64] 2024-02-06 04:22:03,183 (trainer:756) INFO: 21epoch:train:2301-2400batch: iter_time=8.155e-05, forward_time=0.155, loss_ctc=75.191, loss_interctc_layer6=81.179, loss_interctc_layer12=67.122, 
loss_interctc_layer15=61.465, loss_interctc_layer21=77.577, loss=72.507, backward_time=0.202, grad_norm=98.649, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.910e-05, train_time=1.947 [gpua003:0/64] 2024-02-06 04:25:35,870 (trainer:756) INFO: 21epoch:train:2401-2500batch: iter_time=8.176e-05, forward_time=0.144, loss_ctc=80.951, loss_interctc_layer6=81.074, loss_interctc_layer12=67.679, loss_interctc_layer15=62.303, loss_interctc_layer21=83.239, loss=75.049, backward_time=0.210, grad_norm=61.388, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.909e-05, train_time=2.126 [gpua003:0/64] 2024-02-06 04:25:55,916 (multiple_iter_factory:32) INFO: Building 2th iter-factory... [gpua003:0/64] 2024-02-06 04:26:14,703 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-06 04:26:18,470 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.5", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.5", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.5", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.5", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-06 04:26:18,470 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.5, [gpua003:0/64] 2024-02-06 04:26:18,545 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-06 04:34:29,012 (trainer:756) INFO: 21epoch:train:2501-2600batch: iter_time=3.221, forward_time=0.142, loss_ctc=69.520, loss_interctc_layer6=77.782, loss_interctc_layer12=64.885, loss_interctc_layer15=59.815, loss_interctc_layer21=71.654, loss=68.731, backward_time=0.203, grad_norm=56.868, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.907e-05, train_time=5.332 [gpua003:0/64] 2024-02-06 04:37:40,590 (trainer:756) INFO: 21epoch:train:2601-2700batch: iter_time=8.477e-05, forward_time=0.144, loss_ctc=76.169, loss_interctc_layer6=83.914, loss_interctc_layer12=70.716, loss_interctc_layer15=65.703, loss_interctc_layer21=78.736, loss=75.048, backward_time=0.202, grad_norm=78.912, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.906e-05, train_time=1.916 [gpua003:0/64] 2024-02-06 04:40:49,833 (trainer:756) INFO: 21epoch:train:2701-2800batch: iter_time=8.316e-05, forward_time=0.143, loss_ctc=84.976, loss_interctc_layer6=88.093, loss_interctc_layer12=73.552, loss_interctc_layer15=68.520, loss_interctc_layer21=87.797, loss=80.588, backward_time=0.203, grad_norm=81.192, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.904e-05, train_time=1.892 [gpua003:0/64] 2024-02-06 04:44:21,327 (trainer:756) INFO: 21epoch:train:2801-2900batch: iter_time=8.808e-05, forward_time=0.166, loss_ctc=77.680, loss_interctc_layer6=85.506, loss_interctc_layer12=71.332, loss_interctc_layer15=66.058, loss_interctc_layer21=80.619, loss=76.239, backward_time=0.209, grad_norm=67.349, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.903e-05, train_time=2.115 [gpua003:0/64] 2024-02-06 04:47:44,008 (trainer:756) INFO: 21epoch:train:2901-3000batch: iter_time=8.784e-05, forward_time=0.162, loss_ctc=76.941, loss_interctc_layer6=83.946, loss_interctc_layer12=70.087, 
loss_interctc_layer15=65.066, loss_interctc_layer21=80.362, loss=75.280, backward_time=0.209, grad_norm=74.805, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.138, optim0_lr0=8.901e-05, train_time=2.027 [gpua003:0/64] 2024-02-06 04:51:47,310 (trainer:756) INFO: 21epoch:train:3001-3100batch: iter_time=9.030e-05, forward_time=0.201, loss_ctc=69.255, loss_interctc_layer6=88.821, loss_interctc_layer12=73.838, loss_interctc_layer15=67.646, loss_interctc_layer21=71.179, loss=74.148, backward_time=0.221, grad_norm=65.350, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.148, optim0_lr0=8.900e-05, train_time=2.432 [gpua003:0/64] 2024-02-06 04:55:22,947 (trainer:756) INFO: 21epoch:train:3101-3200batch: iter_time=8.756e-05, forward_time=0.142, loss_ctc=68.307, loss_interctc_layer6=79.395, loss_interctc_layer12=65.886, loss_interctc_layer15=60.450, loss_interctc_layer21=70.705, loss=68.949, backward_time=0.201, grad_norm=90.884, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.898e-05, train_time=2.157 [gpua003:0/64] 2024-02-06 04:59:25,138 (trainer:756) INFO: 21epoch:train:3201-3300batch: iter_time=8.876e-05, forward_time=0.148, loss_ctc=68.749, loss_interctc_layer6=77.464, loss_interctc_layer12=64.173, loss_interctc_layer15=58.776, loss_interctc_layer21=70.906, loss=68.014, backward_time=0.203, grad_norm=65.361, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.897e-05, train_time=2.422 [gpua003:0/64] 2024-02-06 05:02:25,445 (trainer:756) INFO: 21epoch:train:3301-3400batch: iter_time=8.777e-05, forward_time=0.143, loss_ctc=101.862, loss_interctc_layer6=97.648, loss_interctc_layer12=81.989, loss_interctc_layer15=75.541, loss_interctc_layer21=105.138, loss=92.436, backward_time=0.203, grad_norm=86.358, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.895e-05, train_time=1.802 [gpua003:0/64] 2024-02-06 05:05:56,562 (trainer:756) INFO: 21epoch:train:3401-3500batch: iter_time=9.504e-05, forward_time=0.144, loss_ctc=68.920, loss_interctc_layer6=84.908, loss_interctc_layer12=72.103, loss_interctc_layer15=67.016, loss_interctc_layer21=71.065, loss=72.802, backward_time=0.204, grad_norm=103.937, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.136, optim0_lr0=8.894e-05, train_time=2.112 [gpua003:0/64] 2024-02-06 05:09:34,337 (trainer:756) INFO: 21epoch:train:3501-3600batch: iter_time=8.867e-05, forward_time=0.142, loss_ctc=74.607, loss_interctc_layer6=83.214, loss_interctc_layer12=69.053, loss_interctc_layer15=63.107, loss_interctc_layer21=76.743, loss=73.345, backward_time=0.201, grad_norm=70.607, clip=100.000, loss_scale=1.683e+31, optim_step_time=0.136, optim0_lr0=8.892e-05, train_time=2.178 [gpua003:0/64] 2024-02-06 05:12:57,645 (trainer:756) INFO: 21epoch:train:3601-3700batch: iter_time=9.301e-05, forward_time=0.143, loss_ctc=71.753, loss_interctc_layer6=82.064, loss_interctc_layer12=68.183, loss_interctc_layer15=62.559, loss_interctc_layer21=73.934, loss=71.699, backward_time=0.202, grad_norm=66.771, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.891e-05, train_time=2.033 [gpua003:0/64] 2024-02-06 05:14:58,733 (multiple_iter_factory:32) INFO: Building 3th iter-factory... 
[gpua003:0/64] 2024-02-06 05:15:17,269 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-06 05:15:20,671 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.3", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.3", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.3", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.3", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-06 05:15:20,671 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.3, [gpua003:0/64] 2024-02-06 05:15:20,732 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-06 05:24:51,269 (trainer:756) INFO: 21epoch:train:3701-3800batch: iter_time=2.827, forward_time=0.157, loss_ctc=68.924, loss_interctc_layer6=75.753, loss_interctc_layer12=63.106, loss_interctc_layer15=58.139, loss_interctc_layer21=71.217, loss=67.428, backward_time=0.206, grad_norm=67.367, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.890e-05, train_time=7.136 [gpua003:0/64] 2024-02-06 05:28:00,021 (trainer:756) INFO: 21epoch:train:3801-3900batch: iter_time=7.889e-05, forward_time=0.142, loss_ctc=76.336, loss_interctc_layer6=80.263, loss_interctc_layer12=67.133, loss_interctc_layer15=61.694, loss_interctc_layer21=78.906, loss=72.866, backward_time=0.202, grad_norm=56.962, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=8.888e-05, train_time=1.887 [gpua003:0/64] 2024-02-06 05:31:33,535 (trainer:756) INFO: 21epoch:train:3901-4000batch: iter_time=2.168e-04, forward_time=0.160, loss_ctc=71.886, loss_interctc_layer6=82.978, loss_interctc_layer12=70.074, loss_interctc_layer15=64.480, loss_interctc_layer21=74.439, loss=72.771, backward_time=0.213, grad_norm=78.733, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.887e-05, train_time=2.135 [gpua003:0/64] 2024-02-06 05:34:53,193 (trainer:756) INFO: 21epoch:train:4001-4100batch: iter_time=8.347e-05, forward_time=0.189, loss_ctc=76.499, loss_interctc_layer6=83.668, loss_interctc_layer12=70.102, loss_interctc_layer15=65.570, loss_interctc_layer21=80.052, loss=75.178, backward_time=0.213, grad_norm=82.700, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.146, optim0_lr0=8.885e-05, train_time=1.996 [gpua003:0/64] 2024-02-06 05:38:24,547 (trainer:756) INFO: 21epoch:train:4101-4200batch: iter_time=8.388e-05, forward_time=0.142, loss_ctc=81.245, loss_interctc_layer6=85.870, loss_interctc_layer12=71.669, loss_interctc_layer15=66.934, loss_interctc_layer21=84.603, loss=78.064, backward_time=0.202, grad_norm=107.448, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.884e-05, train_time=2.114 [gpua003:0/64] 2024-02-06 05:41:53,235 (trainer:756) INFO: 21epoch:train:4201-4300batch: iter_time=8.608e-05, forward_time=0.168, loss_ctc=77.814, loss_interctc_layer6=88.870, loss_interctc_layer12=74.163, loss_interctc_layer15=68.256, loss_interctc_layer21=80.070, loss=77.835, backward_time=0.205, grad_norm=74.252, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.882e-05, train_time=2.087 [gpua003:0/64] 2024-02-06 05:45:28,198 (trainer:756) INFO: 21epoch:train:4301-4400batch: iter_time=8.536e-05, 
forward_time=0.143, loss_ctc=72.173, loss_interctc_layer6=86.200, loss_interctc_layer12=71.772, loss_interctc_layer15=65.846, loss_interctc_layer21=74.376, loss=74.073, backward_time=0.202, grad_norm=65.189, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.881e-05, train_time=2.148 [gpua003:0/64] 2024-02-06 05:48:58,332 (trainer:756) INFO: 21epoch:train:4401-4500batch: iter_time=8.941e-05, forward_time=0.143, loss_ctc=66.928, loss_interctc_layer6=77.267, loss_interctc_layer12=63.890, loss_interctc_layer15=58.457, loss_interctc_layer21=69.079, loss=67.124, backward_time=0.204, grad_norm=79.341, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.879e-05, train_time=2.102 [gpua003:0/64] 2024-02-06 05:52:18,611 (trainer:756) INFO: 21epoch:train:4501-4600batch: iter_time=8.533e-05, forward_time=0.143, loss_ctc=81.788, loss_interctc_layer6=90.036, loss_interctc_layer12=75.491, loss_interctc_layer15=69.652, loss_interctc_layer21=84.290, loss=80.251, backward_time=0.202, grad_norm=64.131, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.878e-05, train_time=2.003 [gpua003:0/64] 2024-02-06 05:55:59,991 (trainer:756) INFO: 21epoch:train:4601-4700batch: iter_time=8.612e-05, forward_time=0.143, loss_ctc=90.311, loss_interctc_layer6=86.062, loss_interctc_layer12=71.596, loss_interctc_layer15=65.614, loss_interctc_layer21=93.351, loss=81.387, backward_time=0.202, grad_norm=66.809, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.876e-05, train_time=2.214 [gpua003:0/64] 2024-02-06 05:59:25,737 (trainer:756) INFO: 21epoch:train:4701-4800batch: iter_time=8.702e-05, forward_time=0.143, loss_ctc=66.733, loss_interctc_layer6=86.867, loss_interctc_layer12=73.545, loss_interctc_layer15=68.086, loss_interctc_layer21=68.763, loss=72.799, backward_time=0.203, grad_norm=73.324, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.875e-05, train_time=2.057 [gpua003:0/64] 2024-02-06 06:03:03,582 (trainer:756) INFO: 21epoch:train:4801-4900batch: iter_time=8.607e-05, forward_time=0.166, loss_ctc=73.139, loss_interctc_layer6=81.217, loss_interctc_layer12=67.049, loss_interctc_layer15=61.174, loss_interctc_layer21=75.497, loss=71.615, backward_time=0.209, grad_norm=96.718, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=8.873e-05, train_time=2.178 [gpua003:0/64] 2024-02-06 06:06:27,881 (trainer:756) INFO: 21epoch:train:4901-5000batch: iter_time=8.070e-05, forward_time=0.164, loss_ctc=73.779, loss_interctc_layer6=80.569, loss_interctc_layer12=67.256, loss_interctc_layer15=61.635, loss_interctc_layer21=76.286, loss=71.905, backward_time=0.207, grad_norm=69.807, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.140, optim0_lr0=8.872e-05, train_time=2.043 [gpua003:0/64] 2024-02-06 06:06:47,923 (multiple_iter_factory:32) INFO: Building 4th iter-factory... 
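For scale, the iter-factory summaries above say that each of the twelve splits12 shards yields N-batch=19027 mini-batches of 256 to 257 utterances, i.e. roughly 19027 * 256, about 4.87 million utterances per shard. The UnsortedBatchSampler groups keys from the speech_shape key file in file order rather than sorting by length; the function below is an illustrative stand-in for that idea only (it chunks plainly, whereas ESPnet spreads the remainder so no batch is short, hence max=257), not the ESPnet class itself.

# Illustrative unsorted batching over a "speech_shape"-style key file
# (one "utt-id num-frames" line per utterance); not ESPnet's UnsortedBatchSampler.
def unsorted_batches(key_file, batch_size=256):
    with open(key_file) as f:
        keys = [line.split(maxsplit=1)[0] for line in f if line.strip()]
    for start in range(0, len(keys), batch_size):
        yield keys[start:start + batch_size]

# e.g. sum(1 for _ in unsorted_batches(
#     "exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.3"))
# would come out near the N-batch=19027 reported above for these splits.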
[gpua003:0/64] 2024-02-06 06:07:06,450 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-06 06:07:09,881 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.0", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.0", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.0", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.0", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-06 06:07:09,881 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.0, [gpua003:0/64] 2024-02-06 06:07:09,890 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-06 06:18:44,411 (trainer:756) INFO: 21epoch:train:5001-5100batch: iter_time=3.042, forward_time=0.166, loss_ctc=71.611, loss_interctc_layer6=77.240, loss_interctc_layer12=64.414, loss_interctc_layer15=59.218, loss_interctc_layer21=74.047, loss=69.306, backward_time=0.207, grad_norm=71.146, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=8.871e-05, train_time=7.365 [gpua003:0/64] 2024-02-06 06:22:34,746 (trainer:756) INFO: 21epoch:train:5101-5200batch: iter_time=8.432e-05, forward_time=0.142, loss_ctc=79.332, loss_interctc_layer6=82.573, loss_interctc_layer12=69.690, loss_interctc_layer15=64.456, loss_interctc_layer21=82.133, loss=75.637, backward_time=0.201, grad_norm=133.918, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.869e-05, train_time=2.303 [gpua003:0/64] 2024-02-06 06:25:38,261 (trainer:756) INFO: 21epoch:train:5201-5300batch: iter_time=8.486e-05, forward_time=0.143, loss_ctc=91.316, loss_interctc_layer6=88.196, loss_interctc_layer12=73.578, loss_interctc_layer15=68.424, loss_interctc_layer21=94.036, loss=83.110, backward_time=0.203, grad_norm=77.706, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.868e-05, train_time=1.835 [gpua003:0/64] 2024-02-06 06:29:27,949 (trainer:756) INFO: 21epoch:train:5301-5400batch: iter_time=8.880e-05, forward_time=0.142, loss_ctc=80.847, loss_interctc_layer6=84.712, loss_interctc_layer12=70.961, loss_interctc_layer15=65.026, loss_interctc_layer21=83.419, loss=76.993, backward_time=0.201, grad_norm=64.770, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.866e-05, train_time=2.297 [gpua003:0/64] 2024-02-06 06:32:43,942 (trainer:756) INFO: 21epoch:train:5401-5500batch: iter_time=8.518e-05, forward_time=0.143, loss_ctc=82.054, loss_interctc_layer6=82.943, loss_interctc_layer12=69.583, loss_interctc_layer15=64.400, loss_interctc_layer21=84.217, loss=76.639, backward_time=0.203, grad_norm=73.888, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.865e-05, train_time=1.960 [gpua003:0/64] 2024-02-06 06:36:07,285 (trainer:756) INFO: 21epoch:train:5501-5600batch: iter_time=8.321e-05, forward_time=0.160, loss_ctc=75.645, loss_interctc_layer6=89.153, loss_interctc_layer12=74.073, loss_interctc_layer15=67.906, loss_interctc_layer21=77.787, loss=76.913, backward_time=0.208, grad_norm=84.076, clip=100.000, loss_scale=3.367e+31, optim_step_time=0.138, optim0_lr0=8.863e-05, train_time=2.033 [gpua003:0/64] 2024-02-06 06:39:48,728 (trainer:756) INFO: 21epoch:train:5601-5700batch: iter_time=8.485e-05, 
forward_time=0.160, loss_ctc=73.960, loss_interctc_layer6=78.942, loss_interctc_layer12=65.560, loss_interctc_layer15=60.034, loss_interctc_layer21=76.626, loss=71.024, backward_time=0.204, grad_norm=78.613, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=8.862e-05, train_time=2.214 [gpua003:0/64] 2024-02-06 06:43:10,830 (trainer:756) INFO: 21epoch:train:5701-5800batch: iter_time=8.664e-05, forward_time=0.143, loss_ctc=72.412, loss_interctc_layer6=76.911, loss_interctc_layer12=63.767, loss_interctc_layer15=58.345, loss_interctc_layer21=74.637, loss=69.214, backward_time=0.202, grad_norm=55.171, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.860e-05, train_time=2.021 [gpua003:0/64] 2024-02-06 06:47:26,070 (trainer:756) INFO: 21epoch:train:5801-5900batch: iter_time=8.475e-05, forward_time=0.184, loss_ctc=103.772, loss_interctc_layer6=96.677, loss_interctc_layer12=81.065, loss_interctc_layer15=74.613, loss_interctc_layer21=107.125, loss=92.650, backward_time=0.219, grad_norm=73.680, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.145, optim0_lr0=8.859e-05, train_time=2.551 [gpua003:0/64] 2024-02-06 06:51:40,930 (trainer:756) INFO: 21epoch:train:5901-6000batch: iter_time=8.780e-05, forward_time=0.151, loss_ctc=72.571, loss_interctc_layer6=84.391, loss_interctc_layer12=71.599, loss_interctc_layer15=66.443, loss_interctc_layer21=74.932, loss=73.987, backward_time=0.202, grad_norm=66.742, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.858e-05, train_time=2.549 [gpua003:0/64] 2024-02-06 06:55:17,842 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. [gpua003:0/64] 2024-02-06 06:55:28,230 (trainer:756) INFO: 21epoch:train:6001-6100batch: iter_time=8.511e-05, forward_time=0.143, loss_ctc=76.570, loss_interctc_layer6=82.928, loss_interctc_layer12=68.873, loss_interctc_layer15=62.986, loss_interctc_layer21=78.927, loss=74.057, backward_time=0.202, grad_norm=95.937, clip=100.000, loss_scale=3.954e+31, optim_step_time=0.137, optim0_lr0=8.856e-05, train_time=2.273 [gpua003:0/64] 2024-02-06 06:58:50,465 (trainer:756) INFO: 21epoch:train:6101-6200batch: iter_time=8.386e-05, forward_time=0.144, loss_ctc=76.511, loss_interctc_layer6=81.901, loss_interctc_layer12=67.873, loss_interctc_layer15=62.239, loss_interctc_layer21=78.838, loss=73.472, backward_time=0.203, grad_norm=62.910, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.855e-05, train_time=2.022 [gpua003:0/64] 2024-02-06 07:00:51,321 (multiple_iter_factory:32) INFO: Building 5th iter-factory... 
[gpua003:0/64] 2024-02-06 07:01:09,836 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-06 07:01:13,221 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.8", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.8", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.8", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.8", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-06 07:01:13,221 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.8, [gpua003:0/64] 2024-02-06 07:01:13,251 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-06 07:11:54,232 (trainer:756) INFO: 21epoch:train:6201-6300batch: iter_time=3.352, forward_time=0.179, loss_ctc=71.482, loss_interctc_layer6=75.445, loss_interctc_layer12=62.793, loss_interctc_layer15=57.761, loss_interctc_layer21=73.858, loss=68.268, backward_time=0.212, grad_norm=68.618, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=8.853e-05, train_time=7.837 [gpua003:0/64] 2024-02-06 07:15:23,514 (trainer:756) INFO: 21epoch:train:6301-6400batch: iter_time=7.630e-05, forward_time=0.143, loss_ctc=80.502, loss_interctc_layer6=79.409, loss_interctc_layer12=66.380, loss_interctc_layer15=61.108, loss_interctc_layer21=83.264, loss=74.133, backward_time=0.202, grad_norm=89.958, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.852e-05, train_time=2.093 [gpua003:0/64] 2024-02-06 07:18:57,383 (trainer:756) INFO: 21epoch:train:6401-6500batch: iter_time=7.745e-05, forward_time=0.143, loss_ctc=78.838, loss_interctc_layer6=82.902, loss_interctc_layer12=69.676, loss_interctc_layer15=64.386, loss_interctc_layer21=81.570, loss=75.474, backward_time=0.202, grad_norm=61.944, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.850e-05, train_time=2.138 [gpua003:0/64] 2024-02-06 07:23:06,714 (trainer:756) INFO: 21epoch:train:6501-6600batch: iter_time=8.337e-05, forward_time=0.165, loss_ctc=78.743, loss_interctc_layer6=84.078, loss_interctc_layer12=71.006, loss_interctc_layer15=65.844, loss_interctc_layer21=80.985, loss=76.131, backward_time=0.209, grad_norm=84.918, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=8.849e-05, train_time=2.492 [gpua003:0/64] 2024-02-06 07:26:44,655 (trainer:756) INFO: 21epoch:train:6601-6700batch: iter_time=8.231e-05, forward_time=0.144, loss_ctc=83.712, loss_interctc_layer6=84.637, loss_interctc_layer12=71.141, loss_interctc_layer15=65.286, loss_interctc_layer21=86.229, loss=78.201, backward_time=0.202, grad_norm=76.243, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.847e-05, train_time=2.180 [gpua003:0/64] 2024-02-06 07:30:27,555 (trainer:756) INFO: 21epoch:train:6701-6800batch: iter_time=7.887e-05, forward_time=0.143, loss_ctc=81.590, loss_interctc_layer6=88.608, loss_interctc_layer12=73.932, loss_interctc_layer15=67.960, loss_interctc_layer21=84.013, loss=79.220, backward_time=0.203, grad_norm=65.846, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.846e-05, train_time=2.229 [gpua003:0/64] 2024-02-06 07:34:50,197 (trainer:756) INFO: 21epoch:train:6801-6900batch: iter_time=8.223e-05, 
forward_time=0.144, loss_ctc=79.047, loss_interctc_layer6=85.585, loss_interctc_layer12=71.174, loss_interctc_layer15=65.192, loss_interctc_layer21=81.452, loss=76.490, backward_time=0.203, grad_norm=77.966, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.845e-05, train_time=2.626 [gpua003:0/64] 2024-02-06 07:38:52,447 (trainer:756) INFO: 21epoch:train:6901-7000batch: iter_time=8.620e-05, forward_time=0.145, loss_ctc=71.972, loss_interctc_layer6=77.019, loss_interctc_layer12=63.786, loss_interctc_layer15=58.349, loss_interctc_layer21=74.183, loss=69.062, backward_time=0.204, grad_norm=74.093, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.843e-05, train_time=2.422 [gpua003:0/64] 2024-02-06 07:42:47,382 (trainer:756) INFO: 21epoch:train:7001-7100batch: iter_time=8.509e-05, forward_time=0.219, loss_ctc=84.575, loss_interctc_layer6=89.571, loss_interctc_layer12=75.238, loss_interctc_layer15=69.461, loss_interctc_layer21=87.223, loss=81.214, backward_time=0.228, grad_norm=71.068, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.146, optim0_lr0=8.842e-05, train_time=2.349 [gpua003:0/64] 2024-02-06 07:46:40,649 (trainer:756) INFO: 21epoch:train:7101-7200batch: iter_time=8.337e-05, forward_time=0.143, loss_ctc=95.132, loss_interctc_layer6=87.038, loss_interctc_layer12=72.541, loss_interctc_layer15=66.349, loss_interctc_layer21=98.428, loss=83.898, backward_time=0.202, grad_norm=96.117, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.840e-05, train_time=2.332 [gpua003:0/64] 2024-02-06 07:50:25,307 (trainer:756) INFO: 21epoch:train:7201-7300batch: iter_time=8.206e-05, forward_time=0.143, loss_ctc=69.670, loss_interctc_layer6=87.048, loss_interctc_layer12=73.731, loss_interctc_layer15=68.168, loss_interctc_layer21=71.995, loss=74.122, backward_time=0.203, grad_norm=79.237, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.839e-05, train_time=2.246 [gpua003:0/64] 2024-02-06 07:53:48,681 (trainer:756) INFO: 21epoch:train:7301-7400batch: iter_time=8.103e-05, forward_time=0.143, loss_ctc=73.776, loss_interctc_layer6=80.416, loss_interctc_layer12=66.354, loss_interctc_layer15=60.422, loss_interctc_layer21=76.104, loss=71.414, backward_time=0.204, grad_norm=57.035, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.837e-05, train_time=2.034 [gpua003:0/64] 2024-02-06 07:57:30,712 (trainer:756) INFO: 21epoch:train:7401-7500batch: iter_time=8.140e-05, forward_time=0.166, loss_ctc=79.478, loss_interctc_layer6=79.721, loss_interctc_layer12=66.208, loss_interctc_layer15=60.715, loss_interctc_layer21=82.123, loss=73.649, backward_time=0.210, grad_norm=57.290, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=8.836e-05, train_time=2.219 [gpua003:0/64] 2024-02-06 07:57:50,743 (multiple_iter_factory:32) INFO: Building 6th iter-factory... 
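Every trainer:756 entry in this log has the same machine-friendly shape: an "<epoch>epoch:train:<first>-<last>batch:" header followed by comma-separated key=value pairs averaged over that 100-batch window, so loss and timing curves can be recovered from the log text directly. The parser below is a small stand-alone utility written against the format visible here; it is not part of ESPnet, and the field names are exactly those appearing in the entries above.

import re

# Parse the "<N>epoch:train:<A>-<B>batch: key=value, ..." progress entries.
ENTRY = re.compile(
    r"(?P<epoch>\d+)epoch:train:(?P<first>\d+)-(?P<last>\d+)batch:\s*"
    r"(?P<kv>(?:\w+=[\d.e+-]+,?\s*)+)"
)

def parse_progress(text):
    rows = []
    for m in ENTRY.finditer(text):
        row = {"epoch": int(m["epoch"]),
               "first_batch": int(m["first"]),
               "last_batch": int(m["last"])}
        for pair in m["kv"].rstrip(", ").split(","):
            key, value = pair.strip().split("=")
            row[key] = float(value)               # all fields here are numeric
        rows.append(row)
    return rows

# e.g. rows = parse_progress(open(path_to_this_log).read())
# [r["loss_ctc"] for r in rows] then gives the per-window CTC loss curve.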
[gpua003:0/64] 2024-02-06 07:58:09,588 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-06 07:58:12,988 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.4", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.4", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.4", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.4", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-06 07:58:12,988 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.4, [gpua003:0/64] 2024-02-06 07:58:12,995 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-06 08:06:50,329 (trainer:756) INFO: 21epoch:train:7501-7600batch: iter_time=2.999, forward_time=0.142, loss_ctc=71.501, loss_interctc_layer6=76.874, loss_interctc_layer12=64.067, loss_interctc_layer15=58.842, loss_interctc_layer21=73.886, loss=69.034, backward_time=0.202, grad_norm=53.904, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.834e-05, train_time=5.597 [gpua003:0/64] 2024-02-06 08:10:05,397 (trainer:756) INFO: 21epoch:train:7601-7700batch: iter_time=8.278e-05, forward_time=0.142, loss_ctc=78.662, loss_interctc_layer6=82.481, loss_interctc_layer12=69.335, loss_interctc_layer15=64.088, loss_interctc_layer21=81.419, loss=75.197, backward_time=0.202, grad_norm=60.476, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.833e-05, train_time=1.951 [gpua003:0/64] 2024-02-06 08:13:25,049 (trainer:756) INFO: 21epoch:train:7701-7800batch: iter_time=8.451e-05, forward_time=0.143, loss_ctc=90.451, loss_interctc_layer6=86.576, loss_interctc_layer12=72.197, loss_interctc_layer15=66.285, loss_interctc_layer21=92.552, loss=81.612, backward_time=0.202, grad_norm=68.308, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.832e-05, train_time=1.996 [gpua003:0/64] 2024-02-06 08:17:41,740 (trainer:756) INFO: 21epoch:train:7801-7900batch: iter_time=8.431e-05, forward_time=0.223, loss_ctc=79.817, loss_interctc_layer6=84.315, loss_interctc_layer12=70.257, loss_interctc_layer15=64.509, loss_interctc_layer21=82.337, loss=76.247, backward_time=0.218, grad_norm=75.794, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.145, optim0_lr0=8.830e-05, train_time=2.567 [gpua003:0/64] 2024-02-06 08:21:13,581 (trainer:756) INFO: 21epoch:train:7901-8000batch: iter_time=8.287e-05, forward_time=0.149, loss_ctc=80.785, loss_interctc_layer6=83.516, loss_interctc_layer12=69.773, loss_interctc_layer15=65.247, loss_interctc_layer21=83.526, loss=76.570, backward_time=0.207, grad_norm=107.922, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=8.829e-05, train_time=2.118 [gpua003:0/64] 2024-02-06 08:25:21,510 (trainer:756) INFO: 21epoch:train:8001-8100batch: iter_time=8.499e-05, forward_time=0.144, loss_ctc=74.759, loss_interctc_layer6=88.377, loss_interctc_layer12=73.429, loss_interctc_layer15=67.230, loss_interctc_layer21=76.954, loss=76.150, backward_time=0.202, grad_norm=121.088, clip=100.000, loss_scale=2.130e+31, optim_step_time=0.136, optim0_lr0=8.827e-05, train_time=2.479 [gpua003:0/64] 2024-02-06 08:27:11,493 (trainer:687) WARNING: The grad norm is nan. 
Skipping updating the model. [gpua003:0/64] 2024-02-06 08:29:29,515 (trainer:756) INFO: 21epoch:train:8101-8200batch: iter_time=8.400e-05, forward_time=0.146, loss_ctc=73.202, loss_interctc_layer6=79.010, loss_interctc_layer12=65.538, loss_interctc_layer15=59.963, loss_interctc_layer21=75.752, loss=70.693, backward_time=0.201, grad_norm=103.237, clip=100.000, loss_scale=2.786e+31, optim_step_time=0.136, optim0_lr0=8.826e-05, train_time=2.480 [gpua003:0/64] 2024-02-06 08:33:27,946 (trainer:756) INFO: 21epoch:train:8201-8300batch: iter_time=8.400e-05, forward_time=0.169, loss_ctc=71.833, loss_interctc_layer6=76.578, loss_interctc_layer12=63.449, loss_interctc_layer15=57.987, loss_interctc_layer21=74.088, loss=68.787, backward_time=0.211, grad_norm=48.322, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=8.824e-05, train_time=2.382 [gpua003:0/64] 2024-02-06 08:36:51,691 (trainer:756) INFO: 21epoch:train:8301-8400batch: iter_time=8.828e-05, forward_time=0.144, loss_ctc=103.101, loss_interctc_layer6=96.093, loss_interctc_layer12=80.364, loss_interctc_layer15=73.864, loss_interctc_layer21=106.468, loss=91.978, backward_time=0.203, grad_norm=85.752, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.823e-05, train_time=2.039 [gpua003:0/64] 2024-02-06 08:40:38,816 (trainer:756) INFO: 21epoch:train:8401-8500batch: iter_time=8.413e-05, forward_time=0.143, loss_ctc=71.735, loss_interctc_layer6=84.124, loss_interctc_layer12=71.307, loss_interctc_layer15=66.019, loss_interctc_layer21=74.178, loss=73.472, backward_time=0.203, grad_norm=70.757, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.822e-05, train_time=2.271 [gpua003:0/64] 2024-02-06 08:44:18,196 (trainer:756) INFO: 21epoch:train:8501-8600batch: iter_time=9.322e-05, forward_time=0.144, loss_ctc=76.531, loss_interctc_layer6=82.473, loss_interctc_layer12=68.282, loss_interctc_layer15=62.358, loss_interctc_layer21=78.845, loss=73.698, backward_time=0.203, grad_norm=68.065, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.820e-05, train_time=2.193 [gpua003:0/64] 2024-02-06 08:48:02,845 (trainer:756) INFO: 21epoch:train:8601-8700batch: iter_time=8.611e-05, forward_time=0.143, loss_ctc=76.656, loss_interctc_layer6=81.784, loss_interctc_layer12=67.802, loss_interctc_layer15=62.137, loss_interctc_layer21=79.013, loss=73.478, backward_time=0.202, grad_norm=66.353, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.819e-05, train_time=2.247 [gpua003:0/64] 2024-02-06 08:50:20,219 (multiple_iter_factory:32) INFO: Building 7th iter-factory... 
[gpua003:0/64] 2024-02-06 08:50:38,789 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-06 08:50:42,559 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.11", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.11", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.11", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.11", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-06 08:50:42,559 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.11, [gpua003:0/64] 2024-02-06 08:50:42,562 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-06 08:57:40,097 (trainer:756) INFO: 21epoch:train:8701-8800batch: iter_time=3.517, forward_time=0.188, loss_ctc=70.787, loss_interctc_layer6=75.006, loss_interctc_layer12=62.222, loss_interctc_layer15=57.093, loss_interctc_layer21=73.348, loss=67.691, backward_time=0.212, grad_norm=66.671, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=8.817e-05, train_time=5.772 [gpua003:0/64] 2024-02-06 09:00:49,974 (trainer:756) INFO: 21epoch:train:8801-8900batch: iter_time=8.277e-05, forward_time=0.143, loss_ctc=75.937, loss_interctc_layer6=78.960, loss_interctc_layer12=65.786, loss_interctc_layer15=60.398, loss_interctc_layer21=78.709, loss=71.958, backward_time=0.204, grad_norm=92.817, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.816e-05, train_time=1.899 [gpua003:0/64] 2024-02-06 09:04:10,798 (trainer:756) INFO: 21epoch:train:8901-9000batch: iter_time=8.199e-05, forward_time=0.150, loss_ctc=71.436, loss_interctc_layer6=82.186, loss_interctc_layer12=68.986, loss_interctc_layer15=63.735, loss_interctc_layer21=74.009, loss=72.070, backward_time=0.203, grad_norm=62.803, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.139, optim0_lr0=8.814e-05, train_time=2.008 [gpua003:0/64] 2024-02-06 09:07:57,199 (trainer:756) INFO: 21epoch:train:9001-9100batch: iter_time=8.360e-05, forward_time=0.142, loss_ctc=75.513, loss_interctc_layer6=82.694, loss_interctc_layer12=69.405, loss_interctc_layer15=63.859, loss_interctc_layer21=77.731, loss=73.840, backward_time=0.202, grad_norm=70.019, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.813e-05, train_time=2.264 [gpua003:0/64] 2024-02-06 09:11:27,751 (trainer:756) INFO: 21epoch:train:9101-9200batch: iter_time=8.330e-05, forward_time=0.142, loss_ctc=80.182, loss_interctc_layer6=84.375, loss_interctc_layer12=70.931, loss_interctc_layer15=65.448, loss_interctc_layer21=82.343, loss=76.656, backward_time=0.202, grad_norm=85.510, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.812e-05, train_time=2.105 [gpua003:0/64] 2024-02-06 09:15:06,267 (trainer:756) INFO: 21epoch:train:9201-9300batch: iter_time=8.117e-05, forward_time=0.142, loss_ctc=76.965, loss_interctc_layer6=87.816, loss_interctc_layer12=73.190, loss_interctc_layer15=67.298, loss_interctc_layer21=79.514, loss=76.957, backward_time=0.203, grad_norm=63.618, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.810e-05, train_time=2.185 [gpua003:0/64] 2024-02-06 09:18:38,925 (trainer:756) INFO: 21epoch:train:9301-9400batch: 
iter_time=8.314e-05, forward_time=0.143, loss_ctc=71.369, loss_interctc_layer6=85.128, loss_interctc_layer12=70.473, loss_interctc_layer15=64.870, loss_interctc_layer21=73.628, loss=73.094, backward_time=0.202, grad_norm=76.427, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.809e-05, train_time=2.126 [gpua003:0/64] 2024-02-06 09:22:20,881 (trainer:756) INFO: 21epoch:train:9401-9500batch: iter_time=8.614e-05, forward_time=0.143, loss_ctc=66.483, loss_interctc_layer6=76.854, loss_interctc_layer12=63.530, loss_interctc_layer15=58.058, loss_interctc_layer21=68.643, loss=66.714, backward_time=0.203, grad_norm=53.613, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.137, optim0_lr0=8.807e-05, train_time=2.220 [gpua003:0/64] 2024-02-06 09:27:10,852 (trainer:756) INFO: 21epoch:train:9501-9600batch: iter_time=8.375e-05, forward_time=0.246, loss_ctc=81.599, loss_interctc_layer6=89.579, loss_interctc_layer12=75.296, loss_interctc_layer15=69.627, loss_interctc_layer21=84.178, loss=80.056, backward_time=0.228, grad_norm=78.030, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.152, optim0_lr0=8.806e-05, train_time=2.899 [gpua003:0/64] 2024-02-06 09:30:50,666 (trainer:756) INFO: 21epoch:train:9601-9700batch: iter_time=8.320e-05, forward_time=0.143, loss_ctc=90.442, loss_interctc_layer6=86.030, loss_interctc_layer12=71.420, loss_interctc_layer15=65.362, loss_interctc_layer21=93.606, loss=81.372, backward_time=0.202, grad_norm=70.479, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.804e-05, train_time=2.198 [gpua003:0/64] 2024-02-06 09:34:19,403 (trainer:756) INFO: 21epoch:train:9701-9800batch: iter_time=8.300e-05, forward_time=0.143, loss_ctc=66.691, loss_interctc_layer6=86.109, loss_interctc_layer12=72.881, loss_interctc_layer15=67.532, loss_interctc_layer21=68.706, loss=72.384, backward_time=0.203, grad_norm=72.482, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.803e-05, train_time=2.087 [gpua003:0/64] 2024-02-06 09:37:33,737 (trainer:756) INFO: 21epoch:train:9801-9900batch: iter_time=8.448e-05, forward_time=0.150, loss_ctc=72.028, loss_interctc_layer6=80.134, loss_interctc_layer12=66.034, loss_interctc_layer15=60.305, loss_interctc_layer21=74.415, loss=70.583, backward_time=0.206, grad_norm=75.158, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.802e-05, train_time=1.942 [gpua003:0/64] 2024-02-06 09:41:03,769 (trainer:756) INFO: 21epoch:train:9901-10000batch: iter_time=8.070e-05, forward_time=0.142, loss_ctc=72.756, loss_interctc_layer6=79.252, loss_interctc_layer12=65.844, loss_interctc_layer15=60.360, loss_interctc_layer21=75.191, loss=70.681, backward_time=0.202, grad_norm=61.260, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.800e-05, train_time=2.101 [gpua003:0/64] 2024-02-06 09:41:23,800 (multiple_iter_factory:32) INFO: Building 8th iter-factory... 
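The "[gpua003:0/64]" prefix on every entry is host:rank/world-size; all entries in this section carry rank 0 of the 64-process job, which suggests only rank 0 emits these INFO-level records, and the "(trainer:756)"-style tag is the emitting module and line number. Below is a minimal sketch of that logging convention using plain logging plus torch.distributed; it is illustrative only and not ESPnet's logger setup.

import logging
import socket

import torch.distributed as dist

# Illustrative rank-aware logger: prefix records with host:rank/world_size as in
# this log, and keep non-zero ranks quieter so one process produces the log.
def build_logger(name="train"):
    rank = dist.get_rank() if dist.is_initialized() else 0
    world = dist.get_world_size() if dist.is_initialized() else 1
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(
        f"[{socket.gethostname()}:{rank}/{world}] "
        "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"))
    logger = logging.getLogger(name)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO if rank == 0 else logging.WARNING)
    return logger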
[gpua003:0/64] 2024-02-06 09:41:42,332 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-06 09:41:46,010 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.2", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.2", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.2", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.2", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-06 09:41:46,010 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.2, [gpua003:0/64] 2024-02-06 09:41:46,013 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-06 09:49:57,054 (trainer:756) INFO: 21epoch:train:10001-10100batch: iter_time=3.452, forward_time=0.187, loss_ctc=69.663, loss_interctc_layer6=76.533, loss_interctc_layer12=63.657, loss_interctc_layer15=58.445, loss_interctc_layer21=72.002, loss=68.060, backward_time=0.212, grad_norm=69.383, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.138, optim0_lr0=8.799e-05, train_time=5.333 [gpua003:0/64] 2024-02-06 09:53:16,980 (trainer:756) INFO: 21epoch:train:10101-10200batch: iter_time=8.546e-05, forward_time=0.142, loss_ctc=78.600, loss_interctc_layer6=81.663, loss_interctc_layer12=68.822, loss_interctc_layer15=63.871, loss_interctc_layer21=81.410, loss=74.873, backward_time=0.202, grad_norm=77.646, clip=100.000, loss_scale=3.286e+31, optim_step_time=0.136, optim0_lr0=8.797e-05, train_time=1.999 [gpua003:0/64] 2024-02-06 09:57:05,122 (trainer:756) INFO: 21epoch:train:10201-10300batch: iter_time=9.417e-05, forward_time=0.275, loss_ctc=89.117, loss_interctc_layer6=86.396, loss_interctc_layer12=72.498, loss_interctc_layer15=66.485, loss_interctc_layer21=92.394, loss=81.378, backward_time=0.240, grad_norm=79.913, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.149, optim0_lr0=8.796e-05, train_time=2.280 [gpua003:0/64] 2024-02-06 10:01:46,518 (trainer:756) INFO: 21epoch:train:10301-10400batch: iter_time=9.022e-05, forward_time=0.143, loss_ctc=80.379, loss_interctc_layer6=84.051, loss_interctc_layer12=70.298, loss_interctc_layer15=64.326, loss_interctc_layer21=83.028, loss=76.416, backward_time=0.201, grad_norm=65.042, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.795e-05, train_time=2.815 [gpua003:0/64] 2024-02-06 10:05:22,849 (trainer:756) INFO: 21epoch:train:10401-10500batch: iter_time=8.788e-05, forward_time=0.143, loss_ctc=80.663, loss_interctc_layer6=82.488, loss_interctc_layer12=68.997, loss_interctc_layer15=63.582, loss_interctc_layer21=82.983, loss=75.743, backward_time=0.203, grad_norm=91.344, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.793e-05, train_time=2.163 [gpua003:0/64] 2024-02-06 10:09:31,009 (trainer:756) INFO: 21epoch:train:10501-10600batch: iter_time=8.752e-05, forward_time=0.144, loss_ctc=74.458, loss_interctc_layer6=88.230, loss_interctc_layer12=73.054, loss_interctc_layer15=66.986, loss_interctc_layer21=76.596, loss=75.865, backward_time=0.202, grad_norm=68.001, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.792e-05, train_time=2.481 [gpua003:0/64] 2024-02-06 10:13:23,788 (trainer:756) INFO: 21epoch:train:10601-10700batch: 
iter_time=8.544e-05, forward_time=0.143, loss_ctc=73.974, loss_interctc_layer6=79.621, loss_interctc_layer12=66.181, loss_interctc_layer15=60.541, loss_interctc_layer21=76.500, loss=71.363, backward_time=0.201, grad_norm=60.677, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.790e-05, train_time=2.328 [gpua003:0/64] 2024-02-06 10:17:28,280 (trainer:756) INFO: 21epoch:train:10701-10800batch: iter_time=8.539e-05, forward_time=0.293, loss_ctc=71.956, loss_interctc_layer6=76.951, loss_interctc_layer12=63.694, loss_interctc_layer15=58.260, loss_interctc_layer21=74.155, loss=69.003, backward_time=0.235, grad_norm=56.219, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.142, optim0_lr0=8.789e-05, train_time=2.445 [gpua003:0/64] 2024-02-06 10:21:05,270 (trainer:756) INFO: 21epoch:train:10801-10900batch: iter_time=8.265e-05, forward_time=0.153, loss_ctc=102.452, loss_interctc_layer6=95.610, loss_interctc_layer12=79.995, loss_interctc_layer15=73.467, loss_interctc_layer21=105.677, loss=91.440, backward_time=0.205, grad_norm=92.384, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=8.787e-05, train_time=2.169 [gpua003:0/64] 2024-02-06 10:24:44,808 (trainer:756) INFO: 21epoch:train:10901-11000batch: iter_time=8.149e-05, forward_time=0.142, loss_ctc=70.734, loss_interctc_layer6=83.453, loss_interctc_layer12=70.553, loss_interctc_layer15=65.220, loss_interctc_layer21=72.949, loss=72.582, backward_time=0.201, grad_norm=74.975, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=8.786e-05, train_time=2.196 [gpua003:0/64] 2024-02-06 10:29:05,293 (trainer:756) INFO: 21epoch:train:11001-11100batch: iter_time=8.882e-05, forward_time=0.142, loss_ctc=76.260, loss_interctc_layer6=82.704, loss_interctc_layer12=68.367, loss_interctc_layer15=62.483, loss_interctc_layer21=78.622, loss=73.687, backward_time=0.201, grad_norm=79.365, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=8.785e-05, train_time=2.605 [gpua003:0/64] 2024-02-06 10:33:13,698 (trainer:756) INFO: 21epoch:train:11101-11200batch: iter_time=9.748e-05, forward_time=0.143, loss_ctc=74.844, loss_interctc_layer6=81.152, loss_interctc_layer12=67.169, loss_interctc_layer15=61.489, loss_interctc_layer21=77.351, loss=72.401, backward_time=0.202, grad_norm=95.717, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=8.783e-05, train_time=2.484 [gpua003:0/64] 2024-02-06 10:35:30,146 (multiple_iter_factory:32) INFO: Building 9th iter-factory... 
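Each "Building Nth iter-factory..." message above is followed by an ESPnetDataset summary and an UnsortedBatchSampler line reporting N-batch=19027, batch_size=256 and per-batch sizes between 256 and 257. As a rough, simplified sketch of what unsorted, fixed-size batching over a key file can look like (this is not espnet2's UnsortedBatchSampler implementation; the function name and the remainder-spreading rule are assumptions):

```python
# Minimal sketch only: batch utterance keys in the order they appear in a
# Kaldi-style key/shape file, without any length sorting.
def unsorted_batches(key_file, batch_size=256):
    with open(key_file, encoding="utf-8") as f:
        keys = [line.split(maxsplit=1)[0] for line in f if line.strip()]
    n_batch = max(1, len(keys) // batch_size)   # e.g. 19027 batches
    remainder = len(keys) % batch_size
    batches, start = [], 0
    for i in range(n_batch):
        # Spread the remainder one extra key per batch, so sizes are 256 or 257.
        size = batch_size + (1 if i < remainder else 0)
        batches.append(keys[start:start + size])
        start += size
    return batches

# Hypothetical usage against one of the split key files seen in the log:
# batches = unsorted_batches("exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.2")
# print(len(batches), min(map(len, batches)), max(map(len, batches)))  # e.g. 19027 256 257
```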
[gpua003:0/64] 2024-02-06 10:35:48,966 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-06 10:35:52,656 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.9", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.9", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.9", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.9", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-06 10:35:52,656 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.9, [gpua003:0/64] 2024-02-06 10:35:52,660 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-06 10:42:42,813 (trainer:756) INFO: 21epoch:train:11201-11300batch: iter_time=3.506, forward_time=0.200, loss_ctc=70.304, loss_interctc_layer6=74.819, loss_interctc_layer12=62.078, loss_interctc_layer15=56.936, loss_interctc_layer21=72.837, loss=67.395, backward_time=0.213, grad_norm=60.150, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=8.782e-05, train_time=5.691 [gpua003:0/64] 2024-02-06 10:45:48,092 (trainer:756) INFO: 21epoch:train:11301-11400batch: iter_time=8.431e-05, forward_time=0.144, loss_ctc=76.368, loss_interctc_layer6=79.219, loss_interctc_layer12=65.896, loss_interctc_layer15=60.558, loss_interctc_layer21=79.155, loss=72.239, backward_time=0.203, grad_norm=58.296, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=8.780e-05, train_time=1.853 [gpua003:0/64] 2024-02-06 10:49:04,650 (trainer:756) INFO: 21epoch:train:11401-11500batch: iter_time=8.410e-05, forward_time=0.142, loss_ctc=70.252, loss_interctc_layer6=82.519, loss_interctc_layer12=69.028, loss_interctc_layer15=63.708, loss_interctc_layer21=72.636, loss=71.628, backward_time=0.202, grad_norm=63.526, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.779e-05, train_time=1.965 [gpua003:0/64] 2024-02-06 10:52:15,964 (trainer:756) INFO: 21epoch:train:11501-11600batch: iter_time=8.873e-05, forward_time=0.142, loss_ctc=73.476, loss_interctc_layer6=82.402, loss_interctc_layer12=69.053, loss_interctc_layer15=63.097, loss_interctc_layer21=76.581, loss=72.922, backward_time=0.202, grad_norm=75.127, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.778e-05, train_time=1.913 [gpua003:0/64] 2024-02-06 10:55:54,312 (trainer:756) INFO: 21epoch:train:11601-11700batch: iter_time=8.858e-05, forward_time=0.142, loss_ctc=80.455, loss_interctc_layer6=83.934, loss_interctc_layer12=70.402, loss_interctc_layer15=64.758, loss_interctc_layer21=82.628, loss=76.435, backward_time=0.202, grad_norm=73.205, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.776e-05, train_time=2.183 [gpua003:0/64] 2024-02-06 10:59:50,931 (trainer:756) INFO: 21epoch:train:11701-11800batch: iter_time=9.114e-05, forward_time=0.164, loss_ctc=76.134, loss_interctc_layer6=87.669, loss_interctc_layer12=72.935, loss_interctc_layer15=67.111, loss_interctc_layer21=78.583, loss=76.486, backward_time=0.208, grad_norm=59.451, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.141, optim0_lr0=8.775e-05, train_time=2.366 [gpua003:0/64] 2024-02-06 11:03:46,820 (trainer:756) INFO: 21epoch:train:11801-11900batch: 
iter_time=4.730e-04, forward_time=0.192, loss_ctc=70.933, loss_interctc_layer6=85.664, loss_interctc_layer12=70.955, loss_interctc_layer15=65.039, loss_interctc_layer21=73.245, loss=73.167, backward_time=0.212, grad_norm=72.504, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.142, optim0_lr0=8.773e-05, train_time=2.358 [gpua003:0/64] 2024-02-06 11:07:45,594 (trainer:756) INFO: 21epoch:train:11901-12000batch: iter_time=0.003, forward_time=0.179, loss_ctc=66.140, loss_interctc_layer6=75.889, loss_interctc_layer12=62.707, loss_interctc_layer15=57.369, loss_interctc_layer21=68.212, loss=66.063, backward_time=0.216, grad_norm=95.707, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.139, optim0_lr0=8.772e-05, train_time=2.388 [gpua003:0/64] 2024-02-06 11:11:20,629 (trainer:756) INFO: 21epoch:train:12001-12100batch: iter_time=8.688e-05, forward_time=0.143, loss_ctc=80.773, loss_interctc_layer6=88.759, loss_interctc_layer12=74.188, loss_interctc_layer15=68.382, loss_interctc_layer21=83.288, loss=79.078, backward_time=0.202, grad_norm=71.330, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.771e-05, train_time=2.150 [gpua003:0/64] 2024-02-06 11:14:35,715 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. [gpua003:0/64] 2024-02-06 11:15:35,354 (trainer:756) INFO: 21epoch:train:12101-12200batch: iter_time=8.610e-05, forward_time=0.143, loss_ctc=89.359, loss_interctc_layer6=86.281, loss_interctc_layer12=71.586, loss_interctc_layer15=65.473, loss_interctc_layer21=92.649, loss=81.070, backward_time=0.201, grad_norm=57.154, clip=100.000, loss_scale=6.105e+31, optim_step_time=0.137, optim0_lr0=8.769e-05, train_time=2.547 [gpua003:0/64] 2024-02-06 11:21:46,324 (trainer:756) INFO: 21epoch:train:12201-12300batch: iter_time=8.732e-05, forward_time=0.143, loss_ctc=65.139, loss_interctc_layer6=85.521, loss_interctc_layer12=72.095, loss_interctc_layer15=66.852, loss_interctc_layer21=67.209, loss=71.364, backward_time=0.202, grad_norm=77.329, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.768e-05, train_time=3.710 [gpua003:0/64] 2024-02-06 11:26:28,857 (trainer:756) INFO: 21epoch:train:12301-12400batch: iter_time=8.774e-05, forward_time=0.143, loss_ctc=71.769, loss_interctc_layer6=79.433, loss_interctc_layer12=65.266, loss_interctc_layer15=59.485, loss_interctc_layer21=74.134, loss=70.017, backward_time=0.202, grad_norm=73.051, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.766e-05, train_time=2.825 [gpua003:0/64] 2024-02-06 11:31:40,144 (trainer:756) INFO: 21epoch:train:12401-12500batch: iter_time=8.894e-05, forward_time=0.184, loss_ctc=72.356, loss_interctc_layer6=79.160, loss_interctc_layer12=65.602, loss_interctc_layer15=60.087, loss_interctc_layer21=74.729, loss=70.387, backward_time=0.241, grad_norm=65.789, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.141, optim0_lr0=8.765e-05, train_time=3.113 [gpua003:0/64] 2024-02-06 11:32:00,174 (multiple_iter_factory:32) INFO: Building 10th iter-factory... 
[gpua003:0/64] 2024-02-06 11:32:18,672 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-06 11:32:22,374 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.10", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.10", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.10", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.10", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-06 11:32:22,374 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.10, [gpua003:0/64] 2024-02-06 11:32:22,377 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-06 11:41:01,550 (trainer:756) INFO: 21epoch:train:12501-12600batch: iter_time=3.713, forward_time=0.178, loss_ctc=69.074, loss_interctc_layer6=75.808, loss_interctc_layer12=63.094, loss_interctc_layer15=57.891, loss_interctc_layer21=71.352, loss=67.444, backward_time=0.212, grad_norm=52.072, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=8.764e-05, train_time=5.613 [gpua003:0/64] 2024-02-06 11:44:19,323 (trainer:756) INFO: 21epoch:train:12601-12700batch: iter_time=8.698e-05, forward_time=0.142, loss_ctc=78.671, loss_interctc_layer6=81.642, loss_interctc_layer12=68.781, loss_interctc_layer15=63.566, loss_interctc_layer21=81.480, loss=74.828, backward_time=0.202, grad_norm=68.137, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.762e-05, train_time=1.978 [gpua003:0/64] 2024-02-06 11:47:28,472 (trainer:756) INFO: 21epoch:train:12701-12800batch: iter_time=8.908e-05, forward_time=0.143, loss_ctc=88.654, loss_interctc_layer6=86.151, loss_interctc_layer12=71.692, loss_interctc_layer15=65.857, loss_interctc_layer21=91.818, loss=80.834, backward_time=0.201, grad_norm=77.704, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.761e-05, train_time=1.891 [gpua003:0/64] 2024-02-06 11:53:28,717 (trainer:756) INFO: 21epoch:train:12801-12900batch: iter_time=8.557e-05, forward_time=0.143, loss_ctc=79.077, loss_interctc_layer6=83.361, loss_interctc_layer12=69.361, loss_interctc_layer15=63.631, loss_interctc_layer21=81.735, loss=75.433, backward_time=0.202, grad_norm=70.716, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.759e-05, train_time=3.602 [gpua003:0/64] 2024-02-06 11:57:41,432 (trainer:756) INFO: 21epoch:train:12901-13000batch: iter_time=8.705e-05, forward_time=0.175, loss_ctc=81.065, loss_interctc_layer6=82.280, loss_interctc_layer12=68.825, loss_interctc_layer15=63.687, loss_interctc_layer21=83.760, loss=75.923, backward_time=0.207, grad_norm=76.258, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.140, optim0_lr0=8.758e-05, train_time=2.527 [gpua003:0/64] 2024-02-06 12:01:18,261 (trainer:756) INFO: 21epoch:train:13001-13100batch: iter_time=9.172e-05, forward_time=0.143, loss_ctc=74.361, loss_interctc_layer6=88.037, loss_interctc_layer12=72.815, loss_interctc_layer15=66.890, loss_interctc_layer21=76.528, loss=75.726, backward_time=0.203, grad_norm=64.890, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=8.757e-05, train_time=2.168 [gpua003:0/64] 2024-02-06 12:05:38,208 (trainer:756) INFO: 21epoch:train:13101-13200batch: 
iter_time=8.932e-05, forward_time=0.249, loss_ctc=72.925, loss_interctc_layer6=78.340, loss_interctc_layer12=64.888, loss_interctc_layer15=59.505, loss_interctc_layer21=75.362, loss=70.204, backward_time=0.227, grad_norm=71.735, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.152, optim0_lr0=8.755e-05, train_time=2.599 [gpua003:0/64] 2024-02-06 12:08:53,218 (trainer:756) INFO: 21epoch:train:13201-13300batch: iter_time=8.866e-05, forward_time=0.143, loss_ctc=71.712, loss_interctc_layer6=76.825, loss_interctc_layer12=63.454, loss_interctc_layer15=58.063, loss_interctc_layer21=73.926, loss=68.796, backward_time=0.202, grad_norm=65.322, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=8.754e-05, train_time=1.950 [gpua003:0/64] 2024-02-06 12:13:24,209 (trainer:756) INFO: 21epoch:train:13301-13400batch: iter_time=9.001e-05, forward_time=0.143, loss_ctc=102.977, loss_interctc_layer6=96.379, loss_interctc_layer12=80.540, loss_interctc_layer15=73.990, loss_interctc_layer21=106.168, loss=92.011, backward_time=0.201, grad_norm=78.499, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=8.752e-05, train_time=2.710 [gpua003:0/64] 2024-02-06 12:16:56,579 (trainer:756) INFO: 21epoch:train:13401-13500batch: iter_time=8.600e-05, forward_time=0.142, loss_ctc=70.982, loss_interctc_layer6=83.585, loss_interctc_layer12=70.579, loss_interctc_layer15=65.380, loss_interctc_layer21=73.314, loss=72.768, backward_time=0.201, grad_norm=70.679, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=8.751e-05, train_time=2.123 [gpua003:0/64] 2024-02-06 12:21:04,804 (trainer:756) INFO: 21epoch:train:13501-13600batch: iter_time=9.666e-05, forward_time=0.145, loss_ctc=76.335, loss_interctc_layer6=82.585, loss_interctc_layer12=68.349, loss_interctc_layer15=62.593, loss_interctc_layer21=78.658, loss=73.704, backward_time=0.205, grad_norm=65.213, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.750e-05, train_time=2.482 [gpua003:0/64] 2024-02-06 12:24:36,185 (trainer:756) INFO: 21epoch:train:13601-13700batch: iter_time=9.031e-05, forward_time=0.163, loss_ctc=74.515, loss_interctc_layer6=80.935, loss_interctc_layer12=66.796, loss_interctc_layer15=61.064, loss_interctc_layer21=76.885, loss=72.039, backward_time=0.218, grad_norm=63.829, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.137, optim0_lr0=8.748e-05, train_time=2.114 [gpua003:0/64] 2024-02-06 12:26:49,784 (multiple_iter_factory:32) INFO: Building 11th iter-factory... 
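The dataset summaries above show the epoch walking through the 12 training shards one iter-factory at a time (split.2, split.9, split.10, split.7, ...), in an order that does not follow the shard numbering. A minimal sketch of that rotation, assuming per-epoch shuffling of the shard order (an assumption; the seed handling and paths printed here are illustrative only, not espnet2's MultipleIterFactory code):

```python
# Sketch of the shard-by-shard pattern suggested by the
# "Building Nth iter-factory..." messages.
import random

def split_order(num_splits=12, epoch=21, base_seed=0):
    order = list(range(1, num_splits + 1))
    random.Random(base_seed + epoch).shuffle(order)   # assumed per-epoch shuffling
    return order

for nth, split_id in enumerate(split_order()):
    print(f"Building {nth}th iter-factory for "
          f"exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.{split_id} ...")
    # ... construct the dataset + batch sampler for this split and train on it ...
```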
[gpua003:0/64] 2024-02-06 12:27:08,631 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-06 12:27:12,092 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.7", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.7", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.7", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.7", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-06 12:27:12,092 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.7, [gpua003:0/64] 2024-02-06 12:27:12,100 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-06 12:38:44,629 (trainer:756) INFO: 21epoch:train:13701-13800batch: iter_time=3.663, forward_time=0.184, loss_ctc=69.942, loss_interctc_layer6=74.672, loss_interctc_layer12=61.960, loss_interctc_layer15=56.844, loss_interctc_layer21=72.319, loss=67.148, backward_time=0.216, grad_norm=54.470, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.138, optim0_lr0=8.747e-05, train_time=8.484 [gpua003:0/64] 2024-02-06 12:42:07,448 (trainer:756) INFO: 21epoch:train:13801-13900batch: iter_time=8.151e-05, forward_time=0.142, loss_ctc=75.041, loss_interctc_layer6=78.320, loss_interctc_layer12=65.033, loss_interctc_layer15=59.638, loss_interctc_layer21=77.793, loss=71.165, backward_time=0.202, grad_norm=55.595, clip=100.000, loss_scale=4.056e+31, optim_step_time=0.136, optim0_lr0=8.745e-05, train_time=2.028 [gpua003:0/64] 2024-02-06 12:45:07,221 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. [gpua003:0/64] 2024-02-06 12:45:35,295 (trainer:756) INFO: 21epoch:train:13901-14000batch: iter_time=8.135e-05, forward_time=0.141, loss_ctc=70.972, loss_interctc_layer6=82.294, loss_interctc_layer12=69.012, loss_interctc_layer15=63.584, loss_interctc_layer21=73.386, loss=71.850, backward_time=0.201, grad_norm=63.345, clip=100.000, loss_scale=3.729e+31, optim_step_time=0.136, optim0_lr0=8.744e-05, train_time=2.078 [gpua003:0/64] 2024-02-06 12:49:38,516 (trainer:756) INFO: 21epoch:train:14001-14100batch: iter_time=8.354e-05, forward_time=0.142, loss_ctc=75.084, loss_interctc_layer6=81.705, loss_interctc_layer12=69.471, loss_interctc_layer15=63.713, loss_interctc_layer21=77.633, loss=73.521, backward_time=0.202, grad_norm=196.192, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.136, optim0_lr0=8.743e-05, train_time=2.432 [gpua003:0/64] 2024-02-06 12:53:55,121 (trainer:756) INFO: 21epoch:train:14101-14200batch: iter_time=4.731e-04, forward_time=0.257, loss_ctc=79.000, loss_interctc_layer6=83.540, loss_interctc_layer12=69.989, loss_interctc_layer15=64.401, loss_interctc_layer21=81.567, loss=75.700, backward_time=0.236, grad_norm=87.580, clip=100.000, loss_scale=2.028e+31, optim_step_time=0.151, optim0_lr0=8.741e-05, train_time=2.566 [gpua003:0/64] 2024-02-06 12:53:56,815 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
[gpua003:0/64] 2024-02-06 12:57:44,652 (trainer:756) INFO: 21epoch:train:14201-14300batch: iter_time=8.574e-05, forward_time=0.143, loss_ctc=75.892, loss_interctc_layer6=87.202, loss_interctc_layer12=72.586, loss_interctc_layer15=66.669, loss_interctc_layer21=78.599, loss=76.190, backward_time=0.202, grad_norm=61.102, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.740e-05, train_time=2.295 [gpua003:0/64] 2024-02-06 13:01:23,208 (trainer:756) INFO: 21epoch:train:14301-14400batch: iter_time=8.534e-05, forward_time=0.144, loss_ctc=70.697, loss_interctc_layer6=84.765, loss_interctc_layer12=70.250, loss_interctc_layer15=64.315, loss_interctc_layer21=72.958, loss=72.597, backward_time=0.202, grad_norm=66.752, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.738e-05, train_time=2.186 [gpua003:0/64] 2024-02-06 13:06:13,807 (trainer:756) INFO: 21epoch:train:14401-14500batch: iter_time=8.455e-05, forward_time=0.143, loss_ctc=66.060, loss_interctc_layer6=76.615, loss_interctc_layer12=63.352, loss_interctc_layer15=57.913, loss_interctc_layer21=68.169, loss=66.422, backward_time=0.202, grad_norm=65.304, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.737e-05, train_time=2.906 [gpua003:0/64] 2024-02-06 13:10:26,033 (trainer:756) INFO: 21epoch:train:14501-14600batch: iter_time=8.070e-05, forward_time=0.236, loss_ctc=80.628, loss_interctc_layer6=88.790, loss_interctc_layer12=74.270, loss_interctc_layer15=68.451, loss_interctc_layer21=83.185, loss=79.065, backward_time=0.226, grad_norm=81.195, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.151, optim0_lr0=8.736e-05, train_time=2.521 [gpua003:0/64] 2024-02-06 13:14:15,493 (trainer:756) INFO: 21epoch:train:14601-14700batch: iter_time=8.619e-05, forward_time=0.173, loss_ctc=89.857, loss_interctc_layer6=85.650, loss_interctc_layer12=70.866, loss_interctc_layer15=64.734, loss_interctc_layer21=92.894, loss=80.800, backward_time=0.210, grad_norm=67.051, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.140, optim0_lr0=8.734e-05, train_time=2.295 [gpua003:0/64] 2024-02-06 13:17:52,017 (trainer:756) INFO: 21epoch:train:14701-14800batch: iter_time=8.987e-05, forward_time=0.143, loss_ctc=66.794, loss_interctc_layer6=85.039, loss_interctc_layer12=71.947, loss_interctc_layer15=66.533, loss_interctc_layer21=68.915, loss=71.845, backward_time=0.203, grad_norm=77.949, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.733e-05, train_time=2.165 [gpua003:0/64] 2024-02-06 13:21:41,049 (trainer:756) INFO: 21epoch:train:14801-14900batch: iter_time=9.162e-05, forward_time=0.143, loss_ctc=71.675, loss_interctc_layer6=80.098, loss_interctc_layer12=65.879, loss_interctc_layer15=60.041, loss_interctc_layer21=74.077, loss=70.354, backward_time=0.202, grad_norm=66.351, clip=100.000, loss_scale=1.014e+31, optim_step_time=0.137, optim0_lr0=8.731e-05, train_time=2.290 [gpua003:0/64] 2024-02-06 13:22:35,106 (trainer:687) WARNING: The grad norm is nan. Skipping updating the model. 
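The "grad norm is nan. Skipping updating the model." warnings, together with the drop in the reported loss_scale in the windows that follow them (from the 4.056e+31 range down to 1.014e+31 and then 6.249e+30), are the usual signature of dynamic loss scaling under mixed precision: the overflowed step is skipped and the scale is backed off. A minimal PyTorch sketch of that pattern, not the ESPnet trainer's actual code:

```python
# Generic mixed-precision step with skip-on-non-finite gradients
# (a standard PyTorch pattern, not espnet2.train.trainer itself).
import torch

model = torch.nn.Linear(80, 50000)
optimizer = torch.optim.Adam(model.parameters(), lr=8.8e-5)
scaler = torch.cuda.amp.GradScaler()          # dynamic loss scaling

def train_step(batch, target):
    optimizer.zero_grad(set_to_none=True)
    with torch.autocast("cuda", dtype=torch.float16, enabled=torch.cuda.is_available()):
        loss = torch.nn.functional.mse_loss(model(batch), target)
    scaler.scale(loss).backward()
    scaler.unscale_(optimizer)                # so clipping sees the real gradients
    grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
    if not torch.isfinite(grad_norm):
        print("WARNING: The grad norm is nan. Skipping updating the model.")
        # scaler.step() also skips internally when inf/nan grads were found,
        # and scaler.update() then backs off the loss scale (roughly halving it).
    scaler.step(optimizer)
    scaler.update()
    return float(grad_norm)
```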
[gpua003:0/64] 2024-02-06 13:25:21,835 (trainer:756) INFO: 21epoch:train:14901-15000batch: iter_time=8.559e-05, forward_time=0.253, loss_ctc=72.338, loss_interctc_layer6=78.987, loss_interctc_layer12=65.591, loss_interctc_layer15=60.140, loss_interctc_layer21=74.592, loss=70.330, backward_time=0.230, grad_norm=55.965, clip=100.000, loss_scale=6.249e+30, optim_step_time=0.151, optim0_lr0=8.730e-05, train_time=2.207
[gpua003:0/64] 2024-02-06 13:58:02,333 (trainer:355) INFO: 21epoch results: [train] iter_time=0.264, forward_time=0.157, loss_ctc=77.128, loss_interctc_layer6=83.456, loss_interctc_layer12=69.710, loss_interctc_layer15=64.119, loss_interctc_layer21=79.600, loss=74.802, backward_time=0.206, grad_norm=73.956, clip=100.000, loss_scale=2.618e+31, optim_step_time=0.138, optim0_lr0=8.836e-05, train_time=2.583, time=10 hours, 46 minutes and 12.21 seconds, total_count=315000, gpu_max_cached_mem_GB=33.436, [valid] loss_ctc=48.475, cer_ctc=0.222, loss_interctc_layer6=54.201, cer_interctc_layer6=0.234, loss_interctc_layer12=41.198, cer_interctc_layer12=0.169, loss_interctc_layer15=36.517, cer_interctc_layer15=0.143, loss_interctc_layer21=51.110, cer_interctc_layer21=0.233, loss=46.300, time=32 minutes and 16.29 seconds, total_count=98091, gpu_max_cached_mem_GB=33.436
[gpua003:0/64] 2024-02-06 13:58:22,415 (trainer:410) INFO: The best model has been updated: valid.total_count
[gpua003:0/64] 2024-02-06 13:58:22,445 (trainer:464) INFO: The model files were removed: exp/s2t_train_s2t_multitask-ctc_ebf27_conv2d8_size1024_raw_bpe50000/16epoch.pth
[gpua003:0/64] 2024-02-06 13:58:22,445 (trainer:289) INFO: 22/45epoch started. Estimated time to finish: 1 week, 5 days and 12 hours
[gpua003:0/64] 2024-02-06 13:58:22,489 (multiple_iter_factory:32) INFO: Building 0th iter-factory... 
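The "Estimated time to finish" figure above is consistent with simple extrapolation: epoch 21 took about 10 h 46 min of training plus roughly 32 min of validation, and 24 of the 45 epochs remain. A back-of-the-envelope version of that estimate (the trainer's own formula may differ, for instance by averaging over all completed epochs):

```python
# Back-of-the-envelope ETA from the epoch-21 timings reported above; treat
# this as an approximation, not the ESPnet trainer's actual computation.
from datetime import timedelta

epoch_time = timedelta(hours=10, minutes=46) + timedelta(minutes=32)  # train + valid
remaining_epochs = 45 - 21
print(remaining_epochs * epoch_time)   # ~11 days, 7 h: same order as "1 week, 5 days and 12 hours"
```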
[gpua003:0/64] 2024-02-06 13:58:40,502 (s2t:401) INFO: Optional Data Names: ('text_prev', 'text_ctc', 'text_spk2', 'text_spk3', 'text_spk4') [gpua003:0/64] 2024-02-06 13:58:43,846 (abs_task:1660) INFO: [train] dataset: ESPnetDataset( speech: {"path": "exp/s2t_stats_raw_bpe50000/splits12/wav.scp/split.1", "type": "kaldi_ark"} text_prev: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.prev/split.1", "type": "text"} text_ctc: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text.ctc/split.1", "type": "text"} text: {"path": "exp/s2t_stats_raw_bpe50000/splits12/text/split.1", "type": "text"} preprocess: ) [gpua003:0/64] 2024-02-06 13:58:43,846 (abs_task:1661) INFO: [train] Batch sampler: UnsortedBatchSampler(N-batch=19027, batch_size=256, key_file=exp/s2t_stats_raw_bpe50000/splits12/speech_shape/split.1, [gpua003:0/64] 2024-02-06 13:58:43,849 (abs_task:1662) INFO: [train] mini-batch sizes summary: N-batch=19027, mean=256.0, min=256, max=257 [gpua003:0/64] 2024-02-06 14:10:38,121 (trainer:756) INFO: 22epoch:train:1-100batch: iter_time=3.185, forward_time=0.188, loss_ctc=75.188, loss_interctc_layer6=83.751, loss_interctc_layer12=69.864, loss_interctc_layer15=64.223, loss_interctc_layer21=77.320, loss=74.069, backward_time=0.224, grad_norm=72.597, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.139, optim0_lr0=8.729e-05, train_time=7.356 [gpua003:0/64] 2024-02-06 14:13:33,290 (trainer:756) INFO: 22epoch:train:101-200batch: iter_time=8.508e-05, forward_time=0.143, loss_ctc=70.778, loss_interctc_layer6=77.524, loss_interctc_layer12=64.768, loss_interctc_layer15=59.754, loss_interctc_layer21=72.620, loss=69.089, backward_time=0.204, grad_norm=75.481, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=8.727e-05, train_time=1.751 [gpua003:0/64] 2024-02-06 14:16:40,351 (trainer:756) INFO: 22epoch:train:201-300batch: iter_time=9.086e-05, forward_time=0.143, loss_ctc=81.607, loss_interctc_layer6=90.628, loss_interctc_layer12=76.710, loss_interctc_layer15=70.965, loss_interctc_layer21=84.095, loss=80.801, backward_time=0.204, grad_norm=66.851, clip=100.000, loss_scale=5.071e+30, optim_step_time=0.138, optim0_lr0=8.726e-05, train_time=1.870 srun: Job step aborted: Waiting up to 32 seconds for job step to finish.