diff --git "a/exp/asr_train_asr_raw_en_word/train.1.log" "b/exp/asr_train_asr_raw_en_word/train.1.log" deleted file mode 100644--- "a/exp/asr_train_asr_raw_en_word/train.1.log" +++ /dev/null @@ -1,945 +0,0 @@ -# Running on v019.ib.bridges2.psc.edu -# Started at Mon Jan 31 02:51:03 EST 2022 -# SLURMD_NODENAME=v019 -# SLURM_CLUSTER_NAME=bridges2 -# SLURM_CONF=/var/spool/slurm/d/conf-cache/slurm.conf -# SLURM_CPUS_ON_NODE=5 -# SLURM_CPUS_PER_TASK=1 -# SLURM_EXPORT_ENV=PATH -# SLURM_GET_USER_ENV=1 -# SLURM_GTIDS=0 -# SLURM_JOBID=6473506 -# SLURM_JOB_ACCOUNT=cis210027p -# SLURM_JOB_CPUS_PER_NODE=5 -# SLURM_JOB_GID=24886 -# SLURM_JOB_GPUS=1 -# SLURM_JOB_ID=6473506 -# SLURM_JOB_NAME=exp/asr_train_asr_raw_en_word/train.log -# SLURM_JOB_NODELIST=v019 -# SLURM_JOB_NUM_NODES=1 -# SLURM_JOB_PARTITION=GPU-shared -# SLURM_JOB_QOS=gpu -# SLURM_JOB_UID=82326 -# SLURM_JOB_USER=ganesank -# SLURM_LOCALID=0 -# SLURM_NNODES=1 -# SLURM_NODEID=0 -# SLURM_NODELIST=v019 -# SLURM_NODE_ALIASES='(null)' -# SLURM_OPEN_MODE=a -# SLURM_PRIO_PROCESS=0 -# SLURM_PROCID=0 -# SLURM_SUBMIT_DIR=/ocean/projects/cis210027p/ganesank/karthik_new/espnet/egs2/dstc2/asr2 -# SLURM_SUBMIT_HOST=br012.ib.bridges2.psc.edu -# SLURM_TASKS_PER_NODE=5 -# SLURM_TASK_PID=72878 -# SLURM_TOPOLOGY_ADDR=v019 -# SLURM_TOPOLOGY_ADDR_PATTERN=node -# SLURM_WORKING_CLUSTER=bridges2:br003:6814:9216:109 -# python3 -m espnet2.bin.asr_train --use_preprocessor true --bpemodel none --token_type word --token_list data/en_token_list/word/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/valid/wav.scp,speech,sound --valid_data_path_and_name_and_type dump/raw/valid/text,text,text --valid_shape_file exp/asr_stats_raw_en_word/valid/speech_shape --valid_shape_file exp/asr_stats_raw_en_word/valid/text_shape.word --resume true --fold_length 80000 --fold_length 150 --output_dir exp/asr_train_asr_raw_en_word --config conf/train_asr.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/asr_stats_raw_en_word/train/feats_stats.npz --train_data_path_and_name_and_type dump/raw/train/wav.scp,speech,sound --train_data_path_and_name_and_type dump/raw/train/text,text,text --train_shape_file exp/asr_stats_raw_en_word/train/speech_shape --train_shape_file exp/asr_stats_raw_en_word/train/text_shape.word --ngpu 1 --multiprocessing_distributed True -/ocean/projects/cis210027p/ganesank/karthik_new/espnet/tools/venv/bin/python3 /ocean/projects/cis210027p/ganesank/karthik_new/espnet/espnet2/bin/asr_train.py --use_preprocessor true --bpemodel none --token_type word --token_list data/en_token_list/word/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --valid_data_path_and_name_and_type dump/raw/valid/wav.scp,speech,sound --valid_data_path_and_name_and_type dump/raw/valid/text,text,text --valid_shape_file exp/asr_stats_raw_en_word/valid/speech_shape --valid_shape_file exp/asr_stats_raw_en_word/valid/text_shape.word --resume true --fold_length 80000 --fold_length 150 --output_dir exp/asr_train_asr_raw_en_word --config conf/train_asr.yaml --frontend_conf fs=16k --normalize=global_mvn --normalize_conf stats_file=exp/asr_stats_raw_en_word/train/feats_stats.npz --train_data_path_and_name_and_type dump/raw/train/wav.scp,speech,sound --train_data_path_and_name_and_type dump/raw/train/text,text,text --train_shape_file exp/asr_stats_raw_en_word/train/speech_shape --train_shape_file exp/asr_stats_raw_en_word/train/text_shape.word --ngpu 1 --multiprocessing_distributed True -[v019] 2022-01-31 
02:51:53,537 (asr:382) INFO: Vocabulary size: 613 -[v019] 2022-01-31 02:52:03,371 (abs_task:1132) INFO: pytorch.version=1.8.1+cu102, cuda.available=True, cudnn.version=7605, cudnn.benchmark=False, cudnn.deterministic=True -[v019] 2022-01-31 02:52:03,376 (abs_task:1133) INFO: Model structure: -ESPnetASRModel( - (frontend): DefaultFrontend( - (stft): Stft(n_fft=512, win_length=512, hop_length=128, center=True, normalized=False, onesided=True) - (frontend): Frontend() - (logmel): LogMel(sr=16000, n_fft=512, n_mels=80, fmin=0, fmax=8000.0, htk=False) - ) - (specaug): SpecAug( - (time_warp): TimeWarp(window=5, mode=bicubic) - (freq_mask): MaskAlongAxis(mask_width_range=[0, 30], num_mask=2, axis=freq) - (time_mask): MaskAlongAxis(mask_width_range=[0, 40], num_mask=2, axis=time) - ) - (normalize): GlobalMVN(stats_file=exp/asr_stats_raw_en_word/train/feats_stats.npz, norm_means=True, norm_vars=True) - (encoder): TransformerEncoder( - (embed): Conv2dSubsampling( - (conv): Sequential( - (0): Conv2d(1, 256, kernel_size=(3, 3), stride=(2, 2)) - (1): ReLU() - (2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2)) - (3): ReLU() - ) - (out): Sequential( - (0): Linear(in_features=4864, out_features=256, bias=True) - (1): PositionalEncoding( - (dropout): Dropout(p=0.1, inplace=False) - ) - ) - ) - (encoders): MultiSequential( - (0): EncoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (1): EncoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (2): EncoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): 
LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (3): EncoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (4): EncoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (5): EncoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (6): EncoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (7): EncoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): 
Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (8): EncoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (9): EncoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (10): EncoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (11): EncoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - 
(activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - ) - (after_norm): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - ) - (decoder): TransformerDecoder( - (embed): Sequential( - (0): Embedding(613, 256) - (1): PositionalEncoding( - (dropout): Dropout(p=0.1, inplace=False) - ) - ) - (after_norm): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (output_layer): Linear(in_features=256, out_features=613, bias=True) - (decoders): MultiSequential( - (0): DecoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (src_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm3): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (1): DecoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (src_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm3): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (2): DecoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (src_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, 
out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm3): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (3): DecoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (src_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm3): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (4): DecoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (src_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm3): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - (5): DecoderLayer( - (self_attn): MultiHeadedAttention( - (linear_q): Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (src_attn): MultiHeadedAttention( - (linear_q): 
Linear(in_features=256, out_features=256, bias=True) - (linear_k): Linear(in_features=256, out_features=256, bias=True) - (linear_v): Linear(in_features=256, out_features=256, bias=True) - (linear_out): Linear(in_features=256, out_features=256, bias=True) - (dropout): Dropout(p=0.0, inplace=False) - ) - (feed_forward): PositionwiseFeedForward( - (w_1): Linear(in_features=256, out_features=2048, bias=True) - (w_2): Linear(in_features=2048, out_features=256, bias=True) - (dropout): Dropout(p=0.1, inplace=False) - (activation): ReLU() - ) - (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (norm3): LayerNorm((256,), eps=1e-12, elementwise_affine=True) - (dropout): Dropout(p=0.1, inplace=False) - ) - ) - ) - (ctc): CTC( - (ctc_lo): Linear(in_features=256, out_features=613, bias=True) - (ctc_loss): CTCLoss() - ) - (criterion_att): LabelSmoothingLoss( - (criterion): KLDivLoss() - ) -) - -Model summary: - Class Name: ESPnetASRModel - Total Number of model parameters: 27.56 M - Number of trainable parameters: 27.56 M (100.0%) - Size: 110.26 MB - Type: torch.float32 -[v019] 2022-01-31 02:52:03,376 (abs_task:1136) INFO: Optimizer: -Adam ( -Parameter Group 0 - amsgrad: False - betas: (0.9, 0.999) - eps: 1e-08 - initial_lr: 0.0002 - lr: 8e-09 - weight_decay: 0 -) -[v019] 2022-01-31 02:52:03,376 (abs_task:1137) INFO: Scheduler: WarmupLR(warmup_steps=25000) -[v019] 2022-01-31 02:52:03,379 (abs_task:1146) INFO: Saving the configuration in exp/asr_train_asr_raw_en_word/config.yaml -[v019] 2022-01-31 02:52:03,964 (abs_task:1493) INFO: [train] dataset: -ESPnetDataset( - speech: {"path": "dump/raw/train/wav.scp", "type": "sound"} - text: {"path": "dump/raw/train/text", "type": "text"} - preprocess: ) -[v019] 2022-01-31 02:52:03,964 (abs_task:1494) INFO: [train] Batch sampler: FoldedBatchSampler(N-batch=547, batch_size=20, shape_files=['exp/asr_stats_raw_en_word/train/speech_shape', 'exp/asr_stats_raw_en_word/train/text_shape.word'], sort_in_batch=descending, sort_batch=descending) -[v019] 2022-01-31 02:52:03,964 (abs_task:1495) INFO: [train] mini-batch sizes summary: N-batch=547, mean=19.9, min=6, max=20 -[v019] 2022-01-31 02:52:04,281 (abs_task:1493) INFO: [valid] dataset: -ESPnetDataset( - speech: {"path": "dump/raw/valid/wav.scp", "type": "sound"} - text: {"path": "dump/raw/valid/text", "type": "text"} - preprocess: ) -[v019] 2022-01-31 02:52:04,282 (abs_task:1494) INFO: [valid] Batch sampler: FoldedBatchSampler(N-batch=178, batch_size=20, shape_files=['exp/asr_stats_raw_en_word/valid/speech_shape', 'exp/asr_stats_raw_en_word/valid/text_shape.word'], sort_in_batch=descending, sort_batch=descending) -[v019] 2022-01-31 02:52:04,282 (abs_task:1495) INFO: [valid] mini-batch sizes summary: N-batch=178, mean=20.0, min=19, max=20 -[v019] 2022-01-31 02:52:04,313 (abs_task:1493) INFO: [plot_att] dataset: -ESPnetDataset( - speech: {"path": "dump/raw/valid/wav.scp", "type": "sound"} - text: {"path": "dump/raw/valid/text", "type": "text"} - preprocess: ) -[v019] 2022-01-31 02:52:04,313 (abs_task:1494) INFO: [plot_att] Batch sampler: UnsortedBatchSampler(N-batch=3559, batch_size=1, key_file=exp/asr_stats_raw_en_word/valid/speech_shape, -[v019] 2022-01-31 02:52:04,313 (abs_task:1495) INFO: [plot_att] mini-batch sizes summary: N-batch=3, mean=1.0, min=1, max=1 -[v019] 2022-01-31 02:52:07,791 (trainer:155) INFO: The training was resumed using exp/asr_train_asr_raw_en_word/checkpoint.pth -[v019] 2022-01-31 02:52:07,796 (trainer:273) INFO: 
3/20epoch started -[v019] 2022-01-31 02:52:17,483 (trainer:653) INFO: 3epoch:train:1-27batch: iter_time=0.223, forward_time=0.048, loss=33.373, loss_att=32.646, loss_ctc=34.100, acc=0.290, backward_time=0.026, optim_step_time=0.023, optim0_lr0=8.872e-06, train_time=0.355 -[v019] 2022-01-31 02:52:25,430 (trainer:653) INFO: 3epoch:train:28-54batch: iter_time=0.189, forward_time=0.037, loss=33.612, loss_att=32.644, loss_ctc=34.581, acc=0.303, backward_time=0.025, optim_step_time=0.023, optim0_lr0=9.088e-06, train_time=0.295 -[v019] 2022-01-31 02:52:33,251 (trainer:653) INFO: 3epoch:train:55-81batch: iter_time=0.189, forward_time=0.035, loss=30.735, loss_att=29.766, loss_ctc=31.703, acc=0.318, backward_time=0.024, optim_step_time=0.022, optim0_lr0=9.304e-06, train_time=0.289 -[v019] 2022-01-31 02:52:38,825 (trainer:653) INFO: 3epoch:train:82-108batch: iter_time=0.107, forward_time=0.034, loss=25.240, loss_att=24.106, loss_ctc=26.374, acc=0.360, backward_time=0.025, optim_step_time=0.023, optim0_lr0=9.520e-06, train_time=0.206 -[v019] 2022-01-31 02:52:44,185 (trainer:653) INFO: 3epoch:train:109-135batch: iter_time=0.097, forward_time=0.035, loss=28.627, loss_att=27.360, loss_ctc=29.895, acc=0.362, backward_time=0.025, optim_step_time=0.023, optim0_lr0=9.736e-06, train_time=0.198 -[v019] 2022-01-31 02:52:49,300 (trainer:653) INFO: 3epoch:train:136-162batch: iter_time=0.085, forward_time=0.036, loss=29.800, loss_att=28.423, loss_ctc=31.178, acc=0.365, backward_time=0.025, optim_step_time=0.023, optim0_lr0=9.952e-06, train_time=0.189 -[v019] 2022-01-31 02:52:55,366 (trainer:653) INFO: 3epoch:train:163-189batch: iter_time=0.120, forward_time=0.036, loss=32.691, loss_att=31.049, loss_ctc=34.333, acc=0.346, backward_time=0.025, optim_step_time=0.023, optim0_lr0=1.017e-05, train_time=0.224 -[v019] 2022-01-31 02:53:00,848 (trainer:653) INFO: 3epoch:train:190-216batch: iter_time=0.098, forward_time=0.036, loss=31.983, loss_att=30.548, loss_ctc=33.417, acc=0.360, backward_time=0.025, optim_step_time=0.023, optim0_lr0=1.038e-05, train_time=0.203 -[v019] 2022-01-31 02:53:05,078 (trainer:653) INFO: 3epoch:train:217-243batch: iter_time=0.055, forward_time=0.034, loss=25.546, loss_att=23.417, loss_ctc=27.676, acc=0.430, backward_time=0.025, optim_step_time=0.023, optim0_lr0=1.060e-05, train_time=0.156 -[v019] 2022-01-31 02:53:10,622 (trainer:653) INFO: 3epoch:train:244-270batch: iter_time=0.095, forward_time=0.038, loss=37.794, loss_att=35.860, loss_ctc=39.728, acc=0.325, backward_time=0.025, optim_step_time=0.023, optim0_lr0=1.082e-05, train_time=0.205 -[v019] 2022-01-31 02:53:16,015 (trainer:653) INFO: 3epoch:train:271-297batch: iter_time=0.094, forward_time=0.036, loss=34.395, loss_att=32.350, loss_ctc=36.439, acc=0.364, backward_time=0.025, optim_step_time=0.023, optim0_lr0=1.103e-05, train_time=0.199 -[v019] 2022-01-31 02:53:22,327 (trainer:653) INFO: 3epoch:train:298-324batch: iter_time=0.132, forward_time=0.035, loss=28.348, loss_att=25.971, loss_ctc=30.725, acc=0.442, backward_time=0.025, optim_step_time=0.022, optim0_lr0=1.125e-05, train_time=0.234 -[v019] 2022-01-31 02:53:27,215 (trainer:653) INFO: 3epoch:train:325-351batch: iter_time=0.080, forward_time=0.034, loss=24.324, loss_att=21.903, loss_ctc=26.746, acc=0.492, backward_time=0.025, optim_step_time=0.023, optim0_lr0=1.146e-05, train_time=0.181 -[v019] 2022-01-31 02:53:33,238 (trainer:653) INFO: 3epoch:train:352-378batch: iter_time=0.121, forward_time=0.035, loss=29.075, loss_att=26.547, loss_ctc=31.603, acc=0.445, backward_time=0.024, 
optim_step_time=0.023, optim0_lr0=1.168e-05, train_time=0.223 -[v019] 2022-01-31 02:53:38,006 (trainer:653) INFO: 3epoch:train:379-405batch: iter_time=0.072, forward_time=0.036, loss=29.781, loss_att=27.286, loss_ctc=32.277, acc=0.443, backward_time=0.025, optim_step_time=0.023, optim0_lr0=1.190e-05, train_time=0.176 -[v019] 2022-01-31 02:53:42,169 (trainer:653) INFO: 3epoch:train:406-432batch: iter_time=0.050, forward_time=0.035, loss=25.708, loss_att=23.393, loss_ctc=28.023, acc=0.478, backward_time=0.025, optim_step_time=0.023, optim0_lr0=1.211e-05, train_time=0.154 -[v019] 2022-01-31 02:53:47,561 (trainer:653) INFO: 3epoch:train:433-459batch: iter_time=0.095, forward_time=0.036, loss=29.741, loss_att=27.185, loss_ctc=32.297, acc=0.432, backward_time=0.025, optim_step_time=0.023, optim0_lr0=1.233e-05, train_time=0.199 -[v019] 2022-01-31 02:53:52,759 (trainer:653) INFO: 3epoch:train:460-486batch: iter_time=0.089, forward_time=0.035, loss=27.532, loss_att=24.471, loss_ctc=30.594, acc=0.479, backward_time=0.025, optim_step_time=0.023, optim0_lr0=1.254e-05, train_time=0.192 -[v019] 2022-01-31 02:53:57,126 (trainer:653) INFO: 3epoch:train:487-513batch: iter_time=0.060, forward_time=0.034, loss=24.400, loss_att=21.634, loss_ctc=27.165, acc=0.483, backward_time=0.025, optim_step_time=0.023, optim0_lr0=1.276e-05, train_time=0.161 -[v019] 2022-01-31 02:54:01,253 (trainer:653) INFO: 3epoch:train:514-540batch: iter_time=0.048, forward_time=0.035, loss=23.681, loss_att=20.971, loss_ctc=26.390, acc=0.486, backward_time=0.025, optim_step_time=0.023, optim0_lr0=1.298e-05, train_time=0.153 -[v019] 2022-01-31 02:55:12,363 (trainer:328) INFO: 3epoch results: [train] iter_time=0.104, forward_time=0.036, loss=29.227, loss_att=27.270, loss_ctc=31.183, acc=0.402, backward_time=0.025, optim_step_time=0.023, optim0_lr0=1.095e-05, train_time=0.209, time=1 minute and 54.56 seconds, total_count=1641, gpu_max_cached_mem_GB=4.389, [valid] loss=29.027, loss_att=24.563, loss_ctc=33.491, acc=0.459, cer=0.738, wer=0.884, cer_ctc=1.000, time=35.01 seconds, total_count=534, gpu_max_cached_mem_GB=5.014, [att_plot] time=34.97 seconds, total_count=0, gpu_max_cached_mem_GB=5.014 -[v019] 2022-01-31 02:55:14,718 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc -[v019] 2022-01-31 02:55:14,719 (trainer:261) INFO: 4/20epoch started. 
Estimated time to finish: 52 minutes and 57.68 seconds -[v019] 2022-01-31 02:55:17,616 (trainer:653) INFO: 4epoch:train:1-27batch: iter_time=0.003, forward_time=0.037, loss=22.417, loss_att=19.369, loss_ctc=25.464, acc=0.510, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.325e-05, train_time=0.107 -[v019] 2022-01-31 02:55:20,415 (trainer:653) INFO: 4epoch:train:28-54batch: iter_time=1.145e-04, forward_time=0.037, loss=30.858, loss_att=27.817, loss_ctc=33.899, acc=0.425, backward_time=0.023, optim_step_time=0.022, optim0_lr0=1.346e-05, train_time=0.103 -[v019] 2022-01-31 02:55:23,128 (trainer:653) INFO: 4epoch:train:55-81batch: iter_time=1.100e-04, forward_time=0.036, loss=21.424, loss_att=18.426, loss_ctc=24.421, acc=0.528, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.368e-05, train_time=0.100 -[v019] 2022-01-31 02:55:25,935 (trainer:653) INFO: 4epoch:train:82-108batch: iter_time=1.125e-04, forward_time=0.037, loss=27.050, loss_att=23.860, loss_ctc=30.239, acc=0.479, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.390e-05, train_time=0.104 -[v019] 2022-01-31 02:55:28,743 (trainer:653) INFO: 4epoch:train:109-135batch: iter_time=1.185e-04, forward_time=0.037, loss=27.442, loss_att=24.364, loss_ctc=30.519, acc=0.455, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.411e-05, train_time=0.104 -[v019] 2022-01-31 02:55:31,515 (trainer:653) INFO: 4epoch:train:136-162batch: iter_time=1.179e-04, forward_time=0.036, loss=25.746, loss_att=22.636, loss_ctc=28.857, acc=0.475, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.433e-05, train_time=0.102 -[v019] 2022-01-31 02:55:34,266 (trainer:653) INFO: 4epoch:train:163-189batch: iter_time=1.141e-04, forward_time=0.036, loss=26.430, loss_att=23.161, loss_ctc=29.700, acc=0.492, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.454e-05, train_time=0.102 -[v019] 2022-01-31 02:55:36,984 (trainer:653) INFO: 4epoch:train:190-216batch: iter_time=1.116e-04, forward_time=0.035, loss=22.575, loss_att=19.584, loss_ctc=25.565, acc=0.508, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.476e-05, train_time=0.100 -[v019] 2022-01-31 02:55:39,722 (trainer:653) INFO: 4epoch:train:217-243batch: iter_time=1.114e-04, forward_time=0.036, loss=24.217, loss_att=21.232, loss_ctc=27.202, acc=0.473, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.498e-05, train_time=0.101 -[v019] 2022-01-31 02:55:42,528 (trainer:653) INFO: 4epoch:train:244-270batch: iter_time=1.060e-04, forward_time=0.036, loss=24.982, loss_att=21.607, loss_ctc=28.356, acc=0.510, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.519e-05, train_time=0.104 -[v019] 2022-01-31 02:55:45,313 (trainer:653) INFO: 4epoch:train:271-297batch: iter_time=1.047e-04, forward_time=0.037, loss=28.354, loss_att=25.459, loss_ctc=31.248, acc=0.463, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.541e-05, train_time=0.103 -[v019] 2022-01-31 02:55:48,035 (trainer:653) INFO: 4epoch:train:298-324batch: iter_time=1.072e-04, forward_time=0.035, loss=21.577, loss_att=18.355, loss_ctc=24.799, acc=0.544, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.562e-05, train_time=0.101 -[v019] 2022-01-31 02:55:50,858 (trainer:653) INFO: 4epoch:train:325-351batch: iter_time=1.307e-04, forward_time=0.037, loss=25.593, loss_att=22.237, loss_ctc=28.949, acc=0.502, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.584e-05, train_time=0.104 -[v019] 2022-01-31 02:55:53,622 (trainer:653) INFO: 4epoch:train:352-378batch: iter_time=1.113e-04, 
forward_time=0.036, loss=23.015, loss_att=19.807, loss_ctc=26.222, acc=0.520, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.606e-05, train_time=0.102 -[v019] 2022-01-31 02:55:56,558 (trainer:653) INFO: 4epoch:train:379-405batch: iter_time=1.115e-04, forward_time=0.038, loss=31.523, loss_att=28.024, loss_ctc=35.022, acc=0.472, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.627e-05, train_time=0.108 -[v019] 2022-01-31 02:55:59,327 (trainer:653) INFO: 4epoch:train:406-432batch: iter_time=1.081e-04, forward_time=0.036, loss=23.282, loss_att=20.331, loss_ctc=26.234, acc=0.504, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.649e-05, train_time=0.102 -[v019] 2022-01-31 02:56:02,115 (trainer:653) INFO: 4epoch:train:433-459batch: iter_time=1.087e-04, forward_time=0.037, loss=26.168, loss_att=22.835, loss_ctc=29.501, acc=0.500, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.670e-05, train_time=0.103 -[v019] 2022-01-31 02:56:04,858 (trainer:653) INFO: 4epoch:train:460-486batch: iter_time=1.041e-04, forward_time=0.036, loss=23.192, loss_att=20.192, loss_ctc=26.191, acc=0.511, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.692e-05, train_time=0.101 -[v019] 2022-01-31 02:56:07,774 (trainer:653) INFO: 4epoch:train:487-513batch: iter_time=0.008, forward_time=0.035, loss=23.281, loss_att=20.100, loss_ctc=26.461, acc=0.499, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.714e-05, train_time=0.108 -[v019] 2022-01-31 02:56:10,524 (trainer:653) INFO: 4epoch:train:514-540batch: iter_time=1.032e-04, forward_time=0.036, loss=21.068, loss_att=17.864, loss_ctc=24.273, acc=0.542, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.735e-05, train_time=0.102 -[v019] 2022-01-31 02:56:49,170 (trainer:328) INFO: 4epoch results: [train] iter_time=6.660e-04, forward_time=0.036, loss=24.967, loss_att=21.815, loss_ctc=28.119, acc=0.496, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.533e-05, train_time=0.103, time=56.57 seconds, total_count=2188, gpu_max_cached_mem_GB=5.014, [valid] loss=24.271, loss_att=20.204, loss_ctc=28.338, acc=0.514, cer=0.661, wer=0.884, cer_ctc=0.872, time=6.09 seconds, total_count=712, gpu_max_cached_mem_GB=5.014, [att_plot] time=31.8 seconds, total_count=0, gpu_max_cached_mem_GB=5.014 -[v019] 2022-01-31 02:56:51,328 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc -[v019] 2022-01-31 02:56:51,328 (trainer:261) INFO: 5/20epoch started. 
Estimated time to finish: 37 minutes and 48.25 seconds -[v019] 2022-01-31 02:56:54,226 (trainer:653) INFO: 5epoch:train:1-27batch: iter_time=0.004, forward_time=0.037, loss=20.119, loss_att=17.035, loss_ctc=23.203, acc=0.547, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.762e-05, train_time=0.107 -[v019] 2022-01-31 02:56:57,028 (trainer:653) INFO: 5epoch:train:28-54batch: iter_time=1.243e-04, forward_time=0.037, loss=22.363, loss_att=19.143, loss_ctc=25.582, acc=0.530, backward_time=0.025, optim_step_time=0.022, optim0_lr0=1.784e-05, train_time=0.104 -[v019] 2022-01-31 02:56:59,747 (trainer:653) INFO: 5epoch:train:55-81batch: iter_time=1.167e-04, forward_time=0.036, loss=18.747, loss_att=15.425, loss_ctc=22.070, acc=0.573, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.806e-05, train_time=0.100 -[v019] 2022-01-31 02:57:02,454 (trainer:653) INFO: 5epoch:train:82-108batch: iter_time=1.116e-04, forward_time=0.036, loss=18.287, loss_att=15.197, loss_ctc=21.378, acc=0.574, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.827e-05, train_time=0.100 -[v019] 2022-01-31 02:57:05,228 (trainer:653) INFO: 5epoch:train:109-135batch: iter_time=1.224e-04, forward_time=0.036, loss=23.482, loss_att=20.233, loss_ctc=26.731, acc=0.529, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.849e-05, train_time=0.102 -[v019] 2022-01-31 02:57:07,951 (trainer:653) INFO: 5epoch:train:136-162batch: iter_time=1.063e-04, forward_time=0.036, loss=21.375, loss_att=18.246, loss_ctc=24.505, acc=0.545, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.870e-05, train_time=0.101 -[v019] 2022-01-31 02:57:10,724 (trainer:653) INFO: 5epoch:train:163-189batch: iter_time=0.002, forward_time=0.035, loss=19.269, loss_att=16.400, loss_ctc=22.138, acc=0.538, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.892e-05, train_time=0.102 -[v019] 2022-01-31 02:57:13,532 (trainer:653) INFO: 5epoch:train:190-216batch: iter_time=1.118e-04, forward_time=0.037, loss=22.359, loss_att=19.209, loss_ctc=25.508, acc=0.529, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.914e-05, train_time=0.104 -[v019] 2022-01-31 02:57:16,397 (trainer:653) INFO: 5epoch:train:217-243batch: iter_time=1.244e-04, forward_time=0.038, loss=26.520, loss_att=23.227, loss_ctc=29.814, acc=0.513, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.935e-05, train_time=0.106 -[v019] 2022-01-31 02:57:19,142 (trainer:653) INFO: 5epoch:train:244-270batch: iter_time=1.165e-04, forward_time=0.036, loss=21.090, loss_att=17.627, loss_ctc=24.553, acc=0.564, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.957e-05, train_time=0.101 -[v019] 2022-01-31 02:57:22,044 (trainer:653) INFO: 5epoch:train:271-297batch: iter_time=1.161e-04, forward_time=0.038, loss=24.874, loss_att=21.676, loss_ctc=28.072, acc=0.529, backward_time=0.025, optim_step_time=0.022, optim0_lr0=1.978e-05, train_time=0.107 -[v019] 2022-01-31 02:57:24,877 (trainer:653) INFO: 5epoch:train:298-324batch: iter_time=1.138e-04, forward_time=0.037, loss=22.035, loss_att=18.329, loss_ctc=25.741, acc=0.565, backward_time=0.025, optim_step_time=0.022, optim0_lr0=2.000e-05, train_time=0.105 -[v019] 2022-01-31 02:57:27,612 (trainer:653) INFO: 5epoch:train:325-351batch: iter_time=1.224e-04, forward_time=0.036, loss=18.775, loss_att=15.525, loss_ctc=22.026, acc=0.587, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.022e-05, train_time=0.101 -[v019] 2022-01-31 02:57:30,428 (trainer:653) INFO: 5epoch:train:352-378batch: iter_time=1.137e-04, forward_time=0.037, 
loss=22.525, loss_att=19.323, loss_ctc=25.727, acc=0.537, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.043e-05, train_time=0.104 -[v019] 2022-01-31 02:57:33,124 (trainer:653) INFO: 5epoch:train:379-405batch: iter_time=1.097e-04, forward_time=0.035, loss=18.208, loss_att=15.021, loss_ctc=21.395, acc=0.572, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.065e-05, train_time=0.100 -[v019] 2022-01-31 02:57:35,857 (trainer:653) INFO: 5epoch:train:406-432batch: iter_time=1.171e-04, forward_time=0.036, loss=18.457, loss_att=15.063, loss_ctc=21.851, acc=0.616, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.086e-05, train_time=0.101 -[v019] 2022-01-31 02:57:38,588 (trainer:653) INFO: 5epoch:train:433-459batch: iter_time=1.075e-04, forward_time=0.036, loss=20.295, loss_att=16.997, loss_ctc=23.592, acc=0.575, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.108e-05, train_time=0.101 -[v019] 2022-01-31 02:57:41,272 (trainer:653) INFO: 5epoch:train:460-486batch: iter_time=1.084e-04, forward_time=0.034, loss=18.208, loss_att=15.028, loss_ctc=21.387, acc=0.594, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.130e-05, train_time=0.099 -[v019] 2022-01-31 02:57:43,983 (trainer:653) INFO: 5epoch:train:487-513batch: iter_time=1.103e-04, forward_time=0.036, loss=19.759, loss_att=16.291, loss_ctc=23.228, acc=0.591, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.151e-05, train_time=0.100 -[v019] 2022-01-31 02:57:46,987 (trainer:653) INFO: 5epoch:train:514-540batch: iter_time=1.090e-04, forward_time=0.037, loss=23.827, loss_att=19.824, loss_ctc=27.829, acc=0.563, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.173e-05, train_time=0.111 -[v019] 2022-01-31 02:58:25,274 (trainer:328) INFO: 5epoch results: [train] iter_time=4.109e-04, forward_time=0.036, loss=21.034, loss_att=17.744, loss_ctc=24.325, acc=0.558, backward_time=0.024, optim_step_time=0.022, optim0_lr0=1.970e-05, train_time=0.103, time=56.47 seconds, total_count=2735, gpu_max_cached_mem_GB=5.014, [valid] loss=19.406, loss_att=16.048, loss_ctc=22.764, acc=0.595, cer=0.555, wer=0.915, cer_ctc=0.703, time=6.13 seconds, total_count=890, gpu_max_cached_mem_GB=5.014, [att_plot] time=31.34 seconds, total_count=0, gpu_max_cached_mem_GB=5.014 -[v019] 2022-01-31 02:58:27,470 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc -[v019] 2022-01-31 02:58:27,471 (trainer:261) INFO: 6/20epoch started. 
Estimated time to finish: 31 minutes and 38.37 seconds -[v019] 2022-01-31 02:58:30,409 (trainer:653) INFO: 6epoch:train:1-27batch: iter_time=0.004, forward_time=0.037, loss=19.551, loss_att=16.160, loss_ctc=22.942, acc=0.591, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.200e-05, train_time=0.108 -[v019] 2022-01-31 02:58:33,160 (trainer:653) INFO: 6epoch:train:28-54batch: iter_time=1.109e-04, forward_time=0.036, loss=16.361, loss_att=13.293, loss_ctc=19.428, acc=0.633, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.222e-05, train_time=0.102 -[v019] 2022-01-31 02:58:35,987 (trainer:653) INFO: 6epoch:train:55-81batch: iter_time=1.155e-04, forward_time=0.037, loss=19.057, loss_att=15.811, loss_ctc=22.304, acc=0.586, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.243e-05, train_time=0.104 -[v019] 2022-01-31 02:58:38,745 (trainer:653) INFO: 6epoch:train:82-108batch: iter_time=1.208e-04, forward_time=0.036, loss=18.027, loss_att=14.897, loss_ctc=21.158, acc=0.592, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.265e-05, train_time=0.102 -[v019] 2022-01-31 02:58:41,561 (trainer:653) INFO: 6epoch:train:109-135batch: iter_time=1.149e-04, forward_time=0.037, loss=18.606, loss_att=15.012, loss_ctc=22.199, acc=0.626, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.286e-05, train_time=0.104 -[v019] 2022-01-31 02:58:44,388 (trainer:653) INFO: 6epoch:train:136-162batch: iter_time=1.134e-04, forward_time=0.037, loss=19.984, loss_att=16.338, loss_ctc=23.631, acc=0.592, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.308e-05, train_time=0.104 -[v019] 2022-01-31 02:58:47,105 (trainer:653) INFO: 6epoch:train:163-189batch: iter_time=1.101e-04, forward_time=0.036, loss=16.294, loss_att=13.137, loss_ctc=19.450, acc=0.617, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.330e-05, train_time=0.100 -[v019] 2022-01-31 02:58:49,857 (trainer:653) INFO: 6epoch:train:190-216batch: iter_time=1.072e-04, forward_time=0.037, loss=17.924, loss_att=14.642, loss_ctc=21.206, acc=0.609, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.351e-05, train_time=0.102 -[v019] 2022-01-31 02:58:52,656 (trainer:653) INFO: 6epoch:train:217-243batch: iter_time=1.087e-04, forward_time=0.037, loss=19.226, loss_att=15.744, loss_ctc=22.708, acc=0.606, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.373e-05, train_time=0.103 -[v019] 2022-01-31 02:58:55,530 (trainer:653) INFO: 6epoch:train:244-270batch: iter_time=1.121e-04, forward_time=0.038, loss=20.074, loss_att=16.255, loss_ctc=23.894, acc=0.613, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.394e-05, train_time=0.106 -[v019] 2022-01-31 02:58:58,314 (trainer:653) INFO: 6epoch:train:271-297batch: iter_time=1.103e-04, forward_time=0.037, loss=18.348, loss_att=14.694, loss_ctc=22.002, acc=0.629, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.416e-05, train_time=0.103 -[v019] 2022-01-31 02:59:01,037 (trainer:653) INFO: 6epoch:train:298-324batch: iter_time=1.208e-04, forward_time=0.036, loss=14.394, loss_att=11.745, loss_ctc=17.042, acc=0.637, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.438e-05, train_time=0.101 -[v019] 2022-01-31 02:59:03,813 (trainer:653) INFO: 6epoch:train:325-351batch: iter_time=1.092e-04, forward_time=0.036, loss=17.257, loss_att=13.840, loss_ctc=20.674, acc=0.641, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.459e-05, train_time=0.103 -[v019] 2022-01-31 02:59:06,630 (trainer:653) INFO: 6epoch:train:352-378batch: iter_time=1.147e-04, 
forward_time=0.038, loss=20.152, loss_att=16.204, loss_ctc=24.100, acc=0.620, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.481e-05, train_time=0.104 -[v019] 2022-01-31 02:59:09,433 (trainer:653) INFO: 6epoch:train:379-405batch: iter_time=1.095e-04, forward_time=0.037, loss=17.114, loss_att=13.500, loss_ctc=20.728, acc=0.653, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.502e-05, train_time=0.104 -[v019] 2022-01-31 02:59:12,232 (trainer:653) INFO: 6epoch:train:406-432batch: iter_time=1.129e-04, forward_time=0.037, loss=17.299, loss_att=13.836, loss_ctc=20.762, acc=0.631, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.524e-05, train_time=0.103 -[v019] 2022-01-31 02:59:15,088 (trainer:653) INFO: 6epoch:train:433-459batch: iter_time=0.005, forward_time=0.036, loss=15.493, loss_att=12.221, loss_ctc=18.764, acc=0.653, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.546e-05, train_time=0.106 -[v019] 2022-01-31 02:59:17,871 (trainer:653) INFO: 6epoch:train:460-486batch: iter_time=1.175e-04, forward_time=0.037, loss=18.571, loss_att=14.977, loss_ctc=22.166, acc=0.600, backward_time=0.023, optim_step_time=0.022, optim0_lr0=2.567e-05, train_time=0.103 -[v019] 2022-01-31 02:59:20,624 (trainer:653) INFO: 6epoch:train:487-513batch: iter_time=1.098e-04, forward_time=0.036, loss=17.760, loss_att=14.060, loss_ctc=21.461, acc=0.656, backward_time=0.023, optim_step_time=0.022, optim0_lr0=2.589e-05, train_time=0.102 -[v019] 2022-01-31 02:59:23,374 (trainer:653) INFO: 6epoch:train:514-540batch: iter_time=1.196e-04, forward_time=0.036, loss=16.850, loss_att=13.333, loss_ctc=20.368, acc=0.652, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.610e-05, train_time=0.102 -[v019] 2022-01-31 03:00:02,056 (trainer:328) INFO: 6epoch results: [train] iter_time=5.417e-04, forward_time=0.037, loss=17.826, loss_att=14.403, loss_ctc=21.249, acc=0.623, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.408e-05, train_time=0.103, time=56.65 seconds, total_count=3282, gpu_max_cached_mem_GB=5.014, [valid] loss=17.253, loss_att=13.790, loss_ctc=20.716, acc=0.639, cer=0.540, wer=0.884, cer_ctc=0.628, time=6.13 seconds, total_count=1068, gpu_max_cached_mem_GB=5.014, [att_plot] time=31.8 seconds, total_count=0, gpu_max_cached_mem_GB=5.014 -[v019] 2022-01-31 03:00:04,305 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc -[v019] 2022-01-31 03:00:04,306 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/1epoch.pth -[v019] 2022-01-31 03:00:04,306 (trainer:261) INFO: 7/20epoch started. 
Estimated time to finish: 27 minutes and 47.78 seconds -[v019] 2022-01-31 03:00:07,241 (trainer:653) INFO: 7epoch:train:1-27batch: iter_time=0.004, forward_time=0.037, loss=16.821, loss_att=13.437, loss_ctc=20.205, acc=0.638, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.638e-05, train_time=0.108 -[v019] 2022-01-31 03:00:10,051 (trainer:653) INFO: 7epoch:train:28-54batch: iter_time=1.279e-04, forward_time=0.037, loss=17.036, loss_att=13.491, loss_ctc=20.581, acc=0.633, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.659e-05, train_time=0.104 -[v019] 2022-01-31 03:00:12,844 (trainer:653) INFO: 7epoch:train:55-81batch: iter_time=1.225e-04, forward_time=0.037, loss=16.813, loss_att=13.242, loss_ctc=20.383, acc=0.655, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.681e-05, train_time=0.103 -[v019] 2022-01-31 03:00:15,593 (trainer:653) INFO: 7epoch:train:82-108batch: iter_time=1.132e-04, forward_time=0.036, loss=13.965, loss_att=11.041, loss_ctc=16.888, acc=0.670, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.702e-05, train_time=0.102 -[v019] 2022-01-31 03:00:18,447 (trainer:653) INFO: 7epoch:train:109-135batch: iter_time=0.001, forward_time=0.037, loss=15.838, loss_att=12.657, loss_ctc=19.019, acc=0.657, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.724e-05, train_time=0.105 -[v019] 2022-01-31 03:00:21,292 (trainer:653) INFO: 7epoch:train:136-162batch: iter_time=1.111e-04, forward_time=0.038, loss=18.423, loss_att=14.556, loss_ctc=22.291, acc=0.627, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.746e-05, train_time=0.105 -[v019] 2022-01-31 03:00:24,055 (trainer:653) INFO: 7epoch:train:163-189batch: iter_time=1.093e-04, forward_time=0.036, loss=15.736, loss_att=12.287, loss_ctc=19.186, acc=0.660, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.767e-05, train_time=0.102 -[v019] 2022-01-31 03:00:26,684 (trainer:653) INFO: 7epoch:train:190-216batch: iter_time=1.296e-04, forward_time=0.034, loss=14.637, loss_att=11.439, loss_ctc=17.835, acc=0.651, backward_time=0.023, optim_step_time=0.022, optim0_lr0=2.789e-05, train_time=0.097 -[v019] 2022-01-31 03:00:29,392 (trainer:653) INFO: 7epoch:train:217-243batch: iter_time=1.074e-04, forward_time=0.036, loss=15.705, loss_att=12.052, loss_ctc=19.359, acc=0.678, backward_time=0.023, optim_step_time=0.022, optim0_lr0=2.810e-05, train_time=0.100 -[v019] 2022-01-31 03:00:32,158 (trainer:653) INFO: 7epoch:train:244-270batch: iter_time=1.158e-04, forward_time=0.036, loss=18.656, loss_att=14.480, loss_ctc=22.832, acc=0.646, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.832e-05, train_time=0.102 -[v019] 2022-01-31 03:00:34,937 (trainer:653) INFO: 7epoch:train:271-297batch: iter_time=1.078e-04, forward_time=0.036, loss=16.488, loss_att=12.877, loss_ctc=20.099, acc=0.659, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.854e-05, train_time=0.103 -[v019] 2022-01-31 03:00:37,723 (trainer:653) INFO: 7epoch:train:298-324batch: iter_time=1.076e-04, forward_time=0.036, loss=16.099, loss_att=12.610, loss_ctc=19.588, acc=0.653, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.875e-05, train_time=0.103 -[v019] 2022-01-31 03:00:40,624 (trainer:653) INFO: 7epoch:train:325-351batch: iter_time=1.101e-04, forward_time=0.038, loss=18.090, loss_att=14.101, loss_ctc=22.080, acc=0.665, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.897e-05, train_time=0.107 -[v019] 2022-01-31 03:00:43,377 (trainer:653) INFO: 7epoch:train:352-378batch: iter_time=1.119e-04, forward_time=0.036, 
loss=15.858, loss_att=12.214, loss_ctc=19.502, acc=0.664, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.918e-05, train_time=0.102 -[v019] 2022-01-31 03:00:46,195 (trainer:653) INFO: 7epoch:train:379-405batch: iter_time=1.188e-04, forward_time=0.037, loss=16.121, loss_att=12.584, loss_ctc=19.658, acc=0.658, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.940e-05, train_time=0.104 -[v019] 2022-01-31 03:00:48,888 (trainer:653) INFO: 7epoch:train:406-432batch: iter_time=1.064e-04, forward_time=0.035, loss=15.347, loss_att=11.680, loss_ctc=19.015, acc=0.673, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.962e-05, train_time=0.099 -[v019] 2022-01-31 03:00:51,654 (trainer:653) INFO: 7epoch:train:433-459batch: iter_time=1.069e-04, forward_time=0.036, loss=17.121, loss_att=12.928, loss_ctc=21.314, acc=0.683, backward_time=0.023, optim_step_time=0.022, optim0_lr0=2.983e-05, train_time=0.102 -[v019] 2022-01-31 03:00:54,416 (trainer:653) INFO: 7epoch:train:460-486batch: iter_time=1.069e-04, forward_time=0.036, loss=16.174, loss_att=12.525, loss_ctc=19.824, acc=0.669, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.005e-05, train_time=0.102 -[v019] 2022-01-31 03:00:57,145 (trainer:653) INFO: 7epoch:train:487-513batch: iter_time=1.086e-04, forward_time=0.036, loss=13.886, loss_att=10.438, loss_ctc=17.335, acc=0.696, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.026e-05, train_time=0.101 -[v019] 2022-01-31 03:00:59,911 (trainer:653) INFO: 7epoch:train:514-540batch: iter_time=1.103e-04, forward_time=0.037, loss=15.667, loss_att=11.929, loss_ctc=19.405, acc=0.677, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.048e-05, train_time=0.102 -[v019] 2022-01-31 03:01:38,264 (trainer:328) INFO: 7epoch results: [train] iter_time=3.416e-04, forward_time=0.036, loss=16.174, loss_att=12.588, loss_ctc=19.760, acc=0.661, backward_time=0.024, optim_step_time=0.022, optim0_lr0=2.846e-05, train_time=0.103, time=56.36 seconds, total_count=3829, gpu_max_cached_mem_GB=5.014, [valid] loss=15.838, loss_att=12.381, loss_ctc=19.296, acc=0.675, cer=0.492, wer=0.875, cer_ctc=0.598, time=6.08 seconds, total_count=1246, gpu_max_cached_mem_GB=5.014, [att_plot] time=31.51 seconds, total_count=0, gpu_max_cached_mem_GB=5.014 -[v019] 2022-01-31 03:01:40,793 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc -[v019] 2022-01-31 03:01:40,794 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/2epoch.pth -[v019] 2022-01-31 03:01:40,794 (trainer:261) INFO: 8/20epoch started. 
Estimated time to finish: 24 minutes and 49.79 seconds -[v019] 2022-01-31 03:01:43,740 (trainer:653) INFO: 8epoch:train:1-27batch: iter_time=0.003, forward_time=0.038, loss=15.266, loss_att=11.699, loss_ctc=18.834, acc=0.682, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.075e-05, train_time=0.109 -[v019] 2022-01-31 03:01:46,647 (trainer:653) INFO: 8epoch:train:28-54batch: iter_time=1.162e-04, forward_time=0.038, loss=18.730, loss_att=14.599, loss_ctc=22.861, acc=0.671, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.097e-05, train_time=0.107 -[v019] 2022-01-31 03:01:49,442 (trainer:653) INFO: 8epoch:train:55-81batch: iter_time=1.119e-04, forward_time=0.037, loss=14.615, loss_att=11.063, loss_ctc=18.167, acc=0.684, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.118e-05, train_time=0.103 -[v019] 2022-01-31 03:01:52,152 (trainer:653) INFO: 8epoch:train:82-108batch: iter_time=1.053e-04, forward_time=0.035, loss=13.810, loss_att=10.289, loss_ctc=17.331, acc=0.693, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.140e-05, train_time=0.100 -[v019] 2022-01-31 03:01:54,900 (trainer:653) INFO: 8epoch:train:109-135batch: iter_time=1.090e-04, forward_time=0.036, loss=16.126, loss_att=12.185, loss_ctc=20.066, acc=0.672, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.162e-05, train_time=0.102 -[v019] 2022-01-31 03:01:57,680 (trainer:653) INFO: 8epoch:train:136-162batch: iter_time=1.074e-04, forward_time=0.037, loss=16.517, loss_att=12.352, loss_ctc=20.683, acc=0.675, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.183e-05, train_time=0.103 -[v019] 2022-01-31 03:02:00,461 (trainer:653) INFO: 8epoch:train:163-189batch: iter_time=1.108e-04, forward_time=0.037, loss=14.258, loss_att=10.532, loss_ctc=17.984, acc=0.697, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.205e-05, train_time=0.103 -[v019] 2022-01-31 03:02:03,240 (trainer:653) INFO: 8epoch:train:190-216batch: iter_time=1.082e-04, forward_time=0.037, loss=16.480, loss_att=12.640, loss_ctc=20.320, acc=0.669, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.226e-05, train_time=0.103 -[v019] 2022-01-31 03:02:05,984 (trainer:653) INFO: 8epoch:train:217-243batch: iter_time=1.136e-04, forward_time=0.036, loss=12.380, loss_att=9.335, loss_ctc=15.425, acc=0.696, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.248e-05, train_time=0.101 -[v019] 2022-01-31 03:02:08,735 (trainer:653) INFO: 8epoch:train:244-270batch: iter_time=1.126e-04, forward_time=0.036, loss=14.372, loss_att=10.881, loss_ctc=17.863, acc=0.694, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.270e-05, train_time=0.102 -[v019] 2022-01-31 03:02:11,552 (trainer:653) INFO: 8epoch:train:271-297batch: iter_time=1.115e-04, forward_time=0.037, loss=16.741, loss_att=12.643, loss_ctc=20.839, acc=0.678, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.291e-05, train_time=0.104 -[v019] 2022-01-31 03:02:14,363 (trainer:653) INFO: 8epoch:train:298-324batch: iter_time=1.096e-04, forward_time=0.037, loss=16.535, loss_att=12.536, loss_ctc=20.535, acc=0.693, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.313e-05, train_time=0.104 -[v019] 2022-01-31 03:02:17,272 (trainer:653) INFO: 8epoch:train:325-351batch: iter_time=0.007, forward_time=0.036, loss=14.586, loss_att=10.685, loss_ctc=18.487, acc=0.704, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.334e-05, train_time=0.107 -[v019] 2022-01-31 03:02:20,008 (trainer:653) INFO: 8epoch:train:352-378batch: iter_time=1.108e-04, forward_time=0.036, 
loss=13.662, loss_att=10.015, loss_ctc=17.310, acc=0.710, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.356e-05, train_time=0.101 -[v019] 2022-01-31 03:02:22,745 (trainer:653) INFO: 8epoch:train:379-405batch: iter_time=1.114e-04, forward_time=0.036, loss=13.762, loss_att=10.312, loss_ctc=17.212, acc=0.726, backward_time=0.023, optim_step_time=0.022, optim0_lr0=3.378e-05, train_time=0.101 -[v019] 2022-01-31 03:02:25,465 (trainer:653) INFO: 8epoch:train:406-432batch: iter_time=1.067e-04, forward_time=0.036, loss=14.327, loss_att=10.761, loss_ctc=17.894, acc=0.704, backward_time=0.023, optim_step_time=0.022, optim0_lr0=3.399e-05, train_time=0.100 -[v019] 2022-01-31 03:02:28,262 (trainer:653) INFO: 8epoch:train:433-459batch: iter_time=1.089e-04, forward_time=0.037, loss=14.641, loss_att=11.059, loss_ctc=18.223, acc=0.712, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.421e-05, train_time=0.103 -[v019] 2022-01-31 03:02:31,082 (trainer:653) INFO: 8epoch:train:460-486batch: iter_time=1.172e-04, forward_time=0.037, loss=15.619, loss_att=11.689, loss_ctc=19.550, acc=0.706, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.442e-05, train_time=0.104 -[v019] 2022-01-31 03:02:33,856 (trainer:653) INFO: 8epoch:train:487-513batch: iter_time=1.174e-04, forward_time=0.036, loss=13.635, loss_att=10.202, loss_ctc=17.068, acc=0.714, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.464e-05, train_time=0.102 -[v019] 2022-01-31 03:02:36,672 (trainer:653) INFO: 8epoch:train:514-540batch: iter_time=1.140e-04, forward_time=0.037, loss=15.712, loss_att=11.985, loss_ctc=19.439, acc=0.695, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.486e-05, train_time=0.104 -[v019] 2022-01-31 03:03:15,039 (trainer:328) INFO: 8epoch results: [train] iter_time=6.182e-04, forward_time=0.037, loss=15.094, loss_att=11.379, loss_ctc=18.809, acc=0.694, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.283e-05, train_time=0.103, time=56.65 seconds, total_count=4376, gpu_max_cached_mem_GB=5.016, [valid] loss=14.833, loss_att=11.333, loss_ctc=18.333, acc=0.712, cer=0.402, wer=0.827, cer_ctc=0.566, time=6.17 seconds, total_count=1424, gpu_max_cached_mem_GB=5.016, [att_plot] time=31.42 seconds, total_count=0, gpu_max_cached_mem_GB=5.016 -[v019] 2022-01-31 03:03:17,470 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc -[v019] 2022-01-31 03:03:17,488 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/3epoch.pth -[v019] 2022-01-31 03:03:17,489 (trainer:261) INFO: 9/20epoch started. 
Estimated time to finish: 22 minutes and 19.38 seconds -[v019] 2022-01-31 03:03:20,875 (trainer:653) INFO: 9epoch:train:1-27batch: iter_time=0.020, forward_time=0.037, loss=15.581, loss_att=11.700, loss_ctc=19.462, acc=0.683, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.513e-05, train_time=0.125 -[v019] 2022-01-31 03:03:23,628 (trainer:653) INFO: 9epoch:train:28-54batch: iter_time=1.182e-04, forward_time=0.036, loss=16.579, loss_att=12.491, loss_ctc=20.668, acc=0.679, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.534e-05, train_time=0.102 -[v019] 2022-01-31 03:03:26,427 (trainer:653) INFO: 9epoch:train:55-81batch: iter_time=1.127e-04, forward_time=0.037, loss=14.841, loss_att=10.892, loss_ctc=18.791, acc=0.711, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.556e-05, train_time=0.103 -[v019] 2022-01-31 03:03:29,192 (trainer:653) INFO: 9epoch:train:82-108batch: iter_time=1.177e-04, forward_time=0.036, loss=14.331, loss_att=10.583, loss_ctc=18.078, acc=0.717, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.578e-05, train_time=0.102 -[v019] 2022-01-31 03:03:31,977 (trainer:653) INFO: 9epoch:train:109-135batch: iter_time=1.097e-04, forward_time=0.036, loss=16.164, loss_att=11.944, loss_ctc=20.384, acc=0.703, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.599e-05, train_time=0.103 -[v019] 2022-01-31 03:03:34,715 (trainer:653) INFO: 9epoch:train:136-162batch: iter_time=1.052e-04, forward_time=0.036, loss=12.512, loss_att=9.347, loss_ctc=15.677, acc=0.713, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.621e-05, train_time=0.101 -[v019] 2022-01-31 03:03:37,476 (trainer:653) INFO: 9epoch:train:163-189batch: iter_time=1.030e-04, forward_time=0.036, loss=15.103, loss_att=11.232, loss_ctc=18.973, acc=0.702, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.642e-05, train_time=0.102 -[v019] 2022-01-31 03:03:40,331 (trainer:653) INFO: 9epoch:train:190-216batch: iter_time=1.164e-04, forward_time=0.038, loss=17.408, loss_att=12.757, loss_ctc=22.059, acc=0.700, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.664e-05, train_time=0.105 -[v019] 2022-01-31 03:03:43,193 (trainer:653) INFO: 9epoch:train:217-243batch: iter_time=1.213e-04, forward_time=0.038, loss=15.523, loss_att=11.587, loss_ctc=19.459, acc=0.709, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.686e-05, train_time=0.106 -[v019] 2022-01-31 03:03:45,930 (trainer:653) INFO: 9epoch:train:244-270batch: iter_time=1.101e-04, forward_time=0.036, loss=11.225, loss_att=7.972, loss_ctc=14.479, acc=0.743, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.707e-05, train_time=0.101 -[v019] 2022-01-31 03:03:48,650 (trainer:653) INFO: 9epoch:train:271-297batch: iter_time=1.099e-04, forward_time=0.036, loss=13.200, loss_att=9.704, loss_ctc=16.696, acc=0.714, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.729e-05, train_time=0.100 -[v019] 2022-01-31 03:03:52,045 (trainer:653) INFO: 9epoch:train:298-324batch: iter_time=0.019, forward_time=0.038, loss=16.769, loss_att=12.272, loss_ctc=21.265, acc=0.707, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.750e-05, train_time=0.125 -[v019] 2022-01-31 03:03:54,841 (trainer:653) INFO: 9epoch:train:325-351batch: iter_time=1.164e-04, forward_time=0.037, loss=15.180, loss_att=11.123, loss_ctc=19.236, acc=0.699, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.772e-05, train_time=0.103 -[v019] 2022-01-31 03:03:57,703 (trainer:653) INFO: 9epoch:train:352-378batch: iter_time=1.223e-04, forward_time=0.038, 
loss=15.172, loss_att=10.974, loss_ctc=19.370, acc=0.735, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.794e-05, train_time=0.106 -[v019] 2022-01-31 03:04:00,446 (trainer:653) INFO: 9epoch:train:379-405batch: iter_time=1.215e-04, forward_time=0.036, loss=12.619, loss_att=9.062, loss_ctc=16.175, acc=0.743, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.815e-05, train_time=0.101 -[v019] 2022-01-31 03:04:03,220 (trainer:653) INFO: 9epoch:train:406-432batch: iter_time=1.092e-04, forward_time=0.037, loss=12.377, loss_att=9.272, loss_ctc=15.483, acc=0.727, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.837e-05, train_time=0.103 -[v019] 2022-01-31 03:04:05,930 (trainer:653) INFO: 9epoch:train:433-459batch: iter_time=1.073e-04, forward_time=0.036, loss=12.299, loss_att=8.783, loss_ctc=15.815, acc=0.736, backward_time=0.023, optim_step_time=0.022, optim0_lr0=3.858e-05, train_time=0.100 -[v019] 2022-01-31 03:04:08,643 (trainer:653) INFO: 9epoch:train:460-486batch: iter_time=1.094e-04, forward_time=0.036, loss=13.368, loss_att=9.820, loss_ctc=16.915, acc=0.721, backward_time=0.023, optim_step_time=0.021, optim0_lr0=3.880e-05, train_time=0.100 -[v019] 2022-01-31 03:04:11,395 (trainer:653) INFO: 9epoch:train:487-513batch: iter_time=1.066e-04, forward_time=0.037, loss=13.095, loss_att=9.260, loss_ctc=16.930, acc=0.734, backward_time=0.023, optim_step_time=0.021, optim0_lr0=3.902e-05, train_time=0.102 -[v019] 2022-01-31 03:04:14,083 (trainer:653) INFO: 9epoch:train:514-540batch: iter_time=1.069e-04, forward_time=0.036, loss=12.416, loss_att=8.986, loss_ctc=15.847, acc=0.733, backward_time=0.023, optim_step_time=0.021, optim0_lr0=3.923e-05, train_time=0.099 -[v019] 2022-01-31 03:04:50,952 (trainer:328) INFO: 9epoch results: [train] iter_time=0.002, forward_time=0.037, loss=14.268, loss_att=10.469, loss_ctc=18.067, acc=0.716, backward_time=0.024, optim_step_time=0.022, optim0_lr0=3.721e-05, train_time=0.104, time=57.34 seconds, total_count=4923, gpu_max_cached_mem_GB=5.016, [valid] loss=14.323, loss_att=10.615, loss_ctc=18.030, acc=0.725, cer=0.404, wer=0.821, cer_ctc=0.557, time=6.28 seconds, total_count=1602, gpu_max_cached_mem_GB=5.016, [att_plot] time=29.84 seconds, total_count=0, gpu_max_cached_mem_GB=5.016 -[v019] 2022-01-31 03:04:53,749 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc -[v019] 2022-01-31 03:04:53,767 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/4epoch.pth -[v019] 2022-01-31 03:04:53,768 (trainer:261) INFO: 10/20epoch started. 
Estimated time to finish: 20 minutes and 3.67 seconds -[v019] 2022-01-31 03:04:56,630 (trainer:653) INFO: 10epoch:train:1-27batch: iter_time=0.004, forward_time=0.037, loss=13.112, loss_att=9.452, loss_ctc=16.772, acc=0.732, backward_time=0.023, optim_step_time=0.022, optim0_lr0=3.950e-05, train_time=0.106 -[v019] 2022-01-31 03:04:59,292 (trainer:653) INFO: 10epoch:train:28-54batch: iter_time=1.089e-04, forward_time=0.035, loss=11.972, loss_att=8.467, loss_ctc=15.477, acc=0.745, backward_time=0.023, optim_step_time=0.022, optim0_lr0=3.972e-05, train_time=0.098 -[v019] 2022-01-31 03:05:01,984 (trainer:653) INFO: 10epoch:train:55-81batch: iter_time=1.074e-04, forward_time=0.036, loss=12.790, loss_att=9.238, loss_ctc=16.342, acc=0.732, backward_time=0.023, optim_step_time=0.021, optim0_lr0=3.994e-05, train_time=0.099 -[v019] 2022-01-31 03:05:04,755 (trainer:653) INFO: 10epoch:train:82-108batch: iter_time=1.077e-04, forward_time=0.037, loss=13.095, loss_att=9.340, loss_ctc=16.849, acc=0.740, backward_time=0.024, optim_step_time=0.022, optim0_lr0=4.015e-05, train_time=0.102 -[v019] 2022-01-31 03:05:07,483 (trainer:653) INFO: 10epoch:train:109-135batch: iter_time=1.086e-04, forward_time=0.036, loss=13.502, loss_att=9.836, loss_ctc=17.167, acc=0.735, backward_time=0.024, optim_step_time=0.022, optim0_lr0=4.037e-05, train_time=0.101 -[v019] 2022-01-31 03:05:10,250 (trainer:653) INFO: 10epoch:train:136-162batch: iter_time=1.080e-04, forward_time=0.037, loss=14.922, loss_att=10.419, loss_ctc=19.425, acc=0.727, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.058e-05, train_time=0.102 -[v019] 2022-01-31 03:05:12,941 (trainer:653) INFO: 10epoch:train:163-189batch: iter_time=1.046e-04, forward_time=0.036, loss=12.770, loss_att=9.165, loss_ctc=16.376, acc=0.730, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.080e-05, train_time=0.099 -[v019] 2022-01-31 03:05:15,701 (trainer:653) INFO: 10epoch:train:190-216batch: iter_time=1.042e-04, forward_time=0.037, loss=16.828, loss_att=12.138, loss_ctc=21.519, acc=0.696, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.102e-05, train_time=0.102 -[v019] 2022-01-31 03:05:18,423 (trainer:653) INFO: 10epoch:train:217-243batch: iter_time=1.067e-04, forward_time=0.036, loss=15.791, loss_att=11.237, loss_ctc=20.345, acc=0.718, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.123e-05, train_time=0.101 -[v019] 2022-01-31 03:05:21,177 (trainer:653) INFO: 10epoch:train:244-270batch: iter_time=1.036e-04, forward_time=0.037, loss=14.894, loss_att=10.746, loss_ctc=19.043, acc=0.717, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.145e-05, train_time=0.102 -[v019] 2022-01-31 03:05:23,871 (trainer:653) INFO: 10epoch:train:271-297batch: iter_time=1.031e-04, forward_time=0.036, loss=12.932, loss_att=9.167, loss_ctc=16.697, acc=0.760, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.166e-05, train_time=0.100 -[v019] 2022-01-31 03:05:26,928 (trainer:653) INFO: 10epoch:train:298-324batch: iter_time=0.011, forward_time=0.036, loss=12.545, loss_att=9.116, loss_ctc=15.975, acc=0.743, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.188e-05, train_time=0.113 -[v019] 2022-01-31 03:05:29,613 (trainer:653) INFO: 10epoch:train:325-351batch: iter_time=1.064e-04, forward_time=0.036, loss=12.457, loss_att=8.847, loss_ctc=16.067, acc=0.733, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.210e-05, train_time=0.099 -[v019] 2022-01-31 03:05:32,385 (trainer:653) INFO: 10epoch:train:352-378batch: iter_time=1.077e-04, 
forward_time=0.037, loss=14.574, loss_att=10.562, loss_ctc=18.585, acc=0.726, backward_time=0.023, optim_step_time=0.022, optim0_lr0=4.231e-05, train_time=0.102 -[v019] 2022-01-31 03:05:35,047 (trainer:653) INFO: 10epoch:train:379-405batch: iter_time=1.075e-04, forward_time=0.035, loss=11.907, loss_att=8.505, loss_ctc=15.309, acc=0.747, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.253e-05, train_time=0.098 -[v019] 2022-01-31 03:05:37,740 (trainer:653) INFO: 10epoch:train:406-432batch: iter_time=1.155e-04, forward_time=0.036, loss=12.555, loss_att=8.794, loss_ctc=16.317, acc=0.742, backward_time=0.023, optim_step_time=0.022, optim0_lr0=4.274e-05, train_time=0.099 -[v019] 2022-01-31 03:05:40,530 (trainer:653) INFO: 10epoch:train:433-459batch: iter_time=1.101e-04, forward_time=0.037, loss=15.129, loss_att=10.958, loss_ctc=19.300, acc=0.730, backward_time=0.024, optim_step_time=0.022, optim0_lr0=4.296e-05, train_time=0.103 -[v019] 2022-01-31 03:05:43,270 (trainer:653) INFO: 10epoch:train:460-486batch: iter_time=1.103e-04, forward_time=0.036, loss=13.228, loss_att=9.156, loss_ctc=17.300, acc=0.733, backward_time=0.024, optim_step_time=0.022, optim0_lr0=4.318e-05, train_time=0.101 -[v019] 2022-01-31 03:05:46,004 (trainer:653) INFO: 10epoch:train:487-513batch: iter_time=1.056e-04, forward_time=0.036, loss=13.572, loss_att=9.698, loss_ctc=17.445, acc=0.730, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.339e-05, train_time=0.101 -[v019] 2022-01-31 03:05:48,766 (trainer:653) INFO: 10epoch:train:514-540batch: iter_time=1.046e-04, forward_time=0.037, loss=13.146, loss_att=9.361, loss_ctc=16.931, acc=0.745, backward_time=0.023, optim_step_time=0.022, optim0_lr0=4.361e-05, train_time=0.102 -[v019] 2022-01-31 03:06:25,963 (trainer:328) INFO: 10epoch results: [train] iter_time=8.333e-04, forward_time=0.036, loss=13.581, loss_att=9.708, loss_ctc=17.454, acc=0.733, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.158e-05, train_time=0.102, time=55.74 seconds, total_count=5470, gpu_max_cached_mem_GB=5.016, [valid] loss=13.831, loss_att=9.956, loss_ctc=17.707, acc=0.744, cer=0.352, wer=0.787, cer_ctc=0.502, time=6.27 seconds, total_count=1780, gpu_max_cached_mem_GB=5.016, [att_plot] time=30.18 seconds, total_count=0, gpu_max_cached_mem_GB=5.016 -[v019] 2022-01-31 03:06:29,142 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc -[v019] 2022-01-31 03:06:29,160 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/5epoch.pth -[v019] 2022-01-31 03:06:29,160 (trainer:261) INFO: 11/20epoch started. 
Estimated time to finish: 17 minutes and 56.7 seconds -[v019] 2022-01-31 03:06:31,964 (trainer:653) INFO: 11epoch:train:1-27batch: iter_time=0.003, forward_time=0.036, loss=11.544, loss_att=8.266, loss_ctc=14.822, acc=0.748, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.388e-05, train_time=0.103 -[v019] 2022-01-31 03:06:34,707 (trainer:653) INFO: 11epoch:train:28-54batch: iter_time=1.158e-04, forward_time=0.036, loss=14.875, loss_att=10.603, loss_ctc=19.148, acc=0.717, backward_time=0.024, optim_step_time=0.022, optim0_lr0=4.410e-05, train_time=0.101 -[v019] 2022-01-31 03:06:37,359 (trainer:653) INFO: 11epoch:train:55-81batch: iter_time=1.070e-04, forward_time=0.035, loss=9.739, loss_att=6.769, loss_ctc=12.709, acc=0.787, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.431e-05, train_time=0.098 -[v019] 2022-01-31 03:06:40,124 (trainer:653) INFO: 11epoch:train:82-108batch: iter_time=1.308e-04, forward_time=0.037, loss=13.175, loss_att=9.429, loss_ctc=16.921, acc=0.746, backward_time=0.023, optim_step_time=0.022, optim0_lr0=4.453e-05, train_time=0.102 -[v019] 2022-01-31 03:06:42,852 (trainer:653) INFO: 11epoch:train:109-135batch: iter_time=1.181e-04, forward_time=0.036, loss=12.828, loss_att=8.981, loss_ctc=16.675, acc=0.751, backward_time=0.023, optim_step_time=0.022, optim0_lr0=4.474e-05, train_time=0.101 -[v019] 2022-01-31 03:06:45,605 (trainer:653) INFO: 11epoch:train:136-162batch: iter_time=1.025e-04, forward_time=0.036, loss=15.013, loss_att=10.627, loss_ctc=19.398, acc=0.726, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.496e-05, train_time=0.102 -[v019] 2022-01-31 03:06:48,323 (trainer:653) INFO: 11epoch:train:163-189batch: iter_time=1.054e-04, forward_time=0.036, loss=14.136, loss_att=9.718, loss_ctc=18.554, acc=0.729, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.518e-05, train_time=0.100 -[v019] 2022-01-31 03:06:51,071 (trainer:653) INFO: 11epoch:train:190-216batch: iter_time=1.047e-04, forward_time=0.037, loss=12.897, loss_att=9.290, loss_ctc=16.503, acc=0.741, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.539e-05, train_time=0.102 -[v019] 2022-01-31 03:06:53,834 (trainer:653) INFO: 11epoch:train:217-243batch: iter_time=1.138e-04, forward_time=0.037, loss=14.051, loss_att=9.822, loss_ctc=18.280, acc=0.729, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.561e-05, train_time=0.102 -[v019] 2022-01-31 03:06:56,558 (trainer:653) INFO: 11epoch:train:244-270batch: iter_time=1.078e-04, forward_time=0.036, loss=12.279, loss_att=8.755, loss_ctc=15.804, acc=0.740, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.582e-05, train_time=0.101 -[v019] 2022-01-31 03:06:59,497 (trainer:653) INFO: 11epoch:train:271-297batch: iter_time=0.009, forward_time=0.035, loss=12.778, loss_att=8.827, loss_ctc=16.728, acc=0.753, backward_time=0.024, optim_step_time=0.021, optim0_lr0=4.604e-05, train_time=0.109 -[v019] 2022-01-31 03:07:02,333 (trainer:653) INFO: 11epoch:train:298-324batch: iter_time=1.039e-04, forward_time=0.037, loss=16.781, loss_att=11.989, loss_ctc=21.573, acc=0.718, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.626e-05, train_time=0.105 -[v019] 2022-01-31 03:07:05,040 (trainer:653) INFO: 11epoch:train:325-351batch: iter_time=1.106e-04, forward_time=0.036, loss=11.104, loss_att=7.668, loss_ctc=14.541, acc=0.743, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.647e-05, train_time=0.100 -[v019] 2022-01-31 03:07:07,775 (trainer:653) INFO: 11epoch:train:352-378batch: iter_time=1.087e-04, forward_time=0.036, 
loss=11.210, loss_att=7.753, loss_ctc=14.668, acc=0.768, backward_time=0.024, optim_step_time=0.021, optim0_lr0=4.669e-05, train_time=0.101 -[v019] 2022-01-31 03:07:10,511 (trainer:653) INFO: 11epoch:train:379-405batch: iter_time=1.003e-04, forward_time=0.036, loss=14.789, loss_att=10.488, loss_ctc=19.090, acc=0.732, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.690e-05, train_time=0.101 -[v019] 2022-01-31 03:07:13,185 (trainer:653) INFO: 11epoch:train:406-432batch: iter_time=1.027e-04, forward_time=0.036, loss=11.031, loss_att=7.757, loss_ctc=14.305, acc=0.753, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.712e-05, train_time=0.099 -[v019] 2022-01-31 03:07:15,826 (trainer:653) INFO: 11epoch:train:433-459batch: iter_time=1.037e-04, forward_time=0.035, loss=11.168, loss_att=7.594, loss_ctc=14.742, acc=0.771, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.734e-05, train_time=0.098 -[v019] 2022-01-31 03:07:18,617 (trainer:653) INFO: 11epoch:train:460-486batch: iter_time=1.060e-04, forward_time=0.037, loss=14.667, loss_att=10.216, loss_ctc=19.119, acc=0.754, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.755e-05, train_time=0.103 -[v019] 2022-01-31 03:07:21,363 (trainer:653) INFO: 11epoch:train:487-513batch: iter_time=1.207e-04, forward_time=0.037, loss=12.512, loss_att=8.528, loss_ctc=16.495, acc=0.755, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.777e-05, train_time=0.101 -[v019] 2022-01-31 03:07:24,108 (trainer:653) INFO: 11epoch:train:514-540batch: iter_time=1.231e-04, forward_time=0.036, loss=13.345, loss_att=9.364, loss_ctc=17.325, acc=0.754, backward_time=0.023, optim_step_time=0.022, optim0_lr0=4.798e-05, train_time=0.101 -[v019] 2022-01-31 03:08:02,074 (trainer:328) INFO: 11epoch results: [train] iter_time=7.234e-04, forward_time=0.036, loss=12.996, loss_att=9.118, loss_ctc=16.873, acc=0.746, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.596e-05, train_time=0.101, time=55.72 seconds, total_count=6017, gpu_max_cached_mem_GB=5.016, [valid] loss=13.098, loss_att=9.453, loss_ctc=16.742, acc=0.746, cer=0.354, wer=0.798, cer_ctc=0.501, time=6.78 seconds, total_count=1958, gpu_max_cached_mem_GB=5.016, [att_plot] time=30.41 seconds, total_count=0, gpu_max_cached_mem_GB=5.016 -[v019] 2022-01-31 03:08:04,323 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc -[v019] 2022-01-31 03:08:04,342 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/6epoch.pth -[v019] 2022-01-31 03:08:04,342 (trainer:261) INFO: 12/20epoch started. 
Estimated time to finish: 15 minutes and 56.55 seconds -[v019] 2022-01-31 03:08:07,299 (trainer:653) INFO: 12epoch:train:1-27batch: iter_time=0.003, forward_time=0.038, loss=12.198, loss_att=8.530, loss_ctc=15.865, acc=0.763, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.826e-05, train_time=0.109 -[v019] 2022-01-31 03:08:09,991 (trainer:653) INFO: 12epoch:train:28-54batch: iter_time=1.157e-04, forward_time=0.035, loss=12.678, loss_att=8.830, loss_ctc=16.526, acc=0.750, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.847e-05, train_time=0.099 -[v019] 2022-01-31 03:08:12,663 (trainer:653) INFO: 12epoch:train:55-81batch: iter_time=1.185e-04, forward_time=0.035, loss=10.214, loss_att=6.914, loss_ctc=13.513, acc=0.766, backward_time=0.024, optim_step_time=0.021, optim0_lr0=4.869e-05, train_time=0.099 -[v019] 2022-01-31 03:08:15,581 (trainer:653) INFO: 12epoch:train:82-108batch: iter_time=1.207e-04, forward_time=0.039, loss=14.369, loss_att=10.000, loss_ctc=18.739, acc=0.755, backward_time=0.024, optim_step_time=0.021, optim0_lr0=4.890e-05, train_time=0.108 -[v019] 2022-01-31 03:08:18,358 (trainer:653) INFO: 12epoch:train:109-135batch: iter_time=1.178e-04, forward_time=0.037, loss=14.118, loss_att=9.696, loss_ctc=18.540, acc=0.743, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.912e-05, train_time=0.103 -[v019] 2022-01-31 03:08:21,034 (trainer:653) INFO: 12epoch:train:136-162batch: iter_time=1.004e-04, forward_time=0.036, loss=10.975, loss_att=7.563, loss_ctc=14.388, acc=0.772, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.934e-05, train_time=0.099 -[v019] 2022-01-31 03:08:23,746 (trainer:653) INFO: 12epoch:train:163-189batch: iter_time=1.029e-04, forward_time=0.036, loss=13.105, loss_att=8.972, loss_ctc=17.239, acc=0.745, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.955e-05, train_time=0.100 -[v019] 2022-01-31 03:08:26,395 (trainer:653) INFO: 12epoch:train:190-216batch: iter_time=1.013e-04, forward_time=0.035, loss=10.713, loss_att=7.316, loss_ctc=14.110, acc=0.773, backward_time=0.023, optim_step_time=0.021, optim0_lr0=4.977e-05, train_time=0.098 -[v019] 2022-01-31 03:08:29,080 (trainer:653) INFO: 12epoch:train:217-243batch: iter_time=1.027e-04, forward_time=0.036, loss=9.701, loss_att=6.521, loss_ctc=12.882, acc=0.787, backward_time=0.024, optim_step_time=0.022, optim0_lr0=4.998e-05, train_time=0.099 -[v019] 2022-01-31 03:08:31,864 (trainer:653) INFO: 12epoch:train:244-270batch: iter_time=1.049e-04, forward_time=0.037, loss=14.321, loss_att=9.908, loss_ctc=18.734, acc=0.739, backward_time=0.024, optim_step_time=0.022, optim0_lr0=5.020e-05, train_time=0.103 -[v019] 2022-01-31 03:08:34,798 (trainer:653) INFO: 12epoch:train:271-297batch: iter_time=0.005, forward_time=0.037, loss=13.007, loss_att=9.030, loss_ctc=16.983, acc=0.764, backward_time=0.024, optim_step_time=0.022, optim0_lr0=5.042e-05, train_time=0.108 -[v019] 2022-01-31 03:08:37,571 (trainer:653) INFO: 12epoch:train:298-324batch: iter_time=1.055e-04, forward_time=0.037, loss=12.623, loss_att=8.671, loss_ctc=16.576, acc=0.764, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.063e-05, train_time=0.102 -[v019] 2022-01-31 03:08:40,354 (trainer:653) INFO: 12epoch:train:325-351batch: iter_time=1.056e-04, forward_time=0.037, loss=13.348, loss_att=9.315, loss_ctc=17.381, acc=0.748, backward_time=0.024, optim_step_time=0.022, optim0_lr0=5.085e-05, train_time=0.103 -[v019] 2022-01-31 03:08:43,078 (trainer:653) INFO: 12epoch:train:352-378batch: iter_time=1.029e-04, forward_time=0.036, 
loss=11.601, loss_att=7.965, loss_ctc=15.236, acc=0.762, backward_time=0.024, optim_step_time=0.022, optim0_lr0=5.106e-05, train_time=0.101 -[v019] 2022-01-31 03:08:45,817 (trainer:653) INFO: 12epoch:train:379-405batch: iter_time=1.025e-04, forward_time=0.037, loss=13.553, loss_att=9.293, loss_ctc=17.813, acc=0.751, backward_time=0.023, optim_step_time=0.022, optim0_lr0=5.128e-05, train_time=0.101 -[v019] 2022-01-31 03:08:48,499 (trainer:653) INFO: 12epoch:train:406-432batch: iter_time=1.015e-04, forward_time=0.036, loss=11.288, loss_att=7.673, loss_ctc=14.902, acc=0.766, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.150e-05, train_time=0.099 -[v019] 2022-01-31 03:08:51,269 (trainer:653) INFO: 12epoch:train:433-459batch: iter_time=1.050e-04, forward_time=0.037, loss=13.851, loss_att=9.664, loss_ctc=18.039, acc=0.736, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.171e-05, train_time=0.102 -[v019] 2022-01-31 03:08:54,046 (trainer:653) INFO: 12epoch:train:460-486batch: iter_time=1.162e-04, forward_time=0.037, loss=13.197, loss_att=9.206, loss_ctc=17.188, acc=0.732, backward_time=0.024, optim_step_time=0.022, optim0_lr0=5.193e-05, train_time=0.103 -[v019] 2022-01-31 03:08:56,747 (trainer:653) INFO: 12epoch:train:487-513batch: iter_time=1.082e-04, forward_time=0.036, loss=13.399, loss_att=9.368, loss_ctc=17.430, acc=0.734, backward_time=0.023, optim_step_time=0.022, optim0_lr0=5.214e-05, train_time=0.100 -[v019] 2022-01-31 03:08:59,480 (trainer:653) INFO: 12epoch:train:514-540batch: iter_time=0.002, forward_time=0.036, loss=12.436, loss_att=8.360, loss_ctc=16.512, acc=0.756, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.236e-05, train_time=0.101 -[v019] 2022-01-31 03:09:37,112 (trainer:328) INFO: 12epoch results: [train] iter_time=5.653e-04, forward_time=0.036, loss=12.523, loss_att=8.630, loss_ctc=16.416, acc=0.756, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.034e-05, train_time=0.102, time=55.9 seconds, total_count=6564, gpu_max_cached_mem_GB=5.016, [valid] loss=12.682, loss_att=9.021, loss_ctc=16.343, acc=0.764, cer=0.328, wer=0.756, cer_ctc=0.475, time=6.5 seconds, total_count=2136, gpu_max_cached_mem_GB=5.016, [att_plot] time=30.37 seconds, total_count=0, gpu_max_cached_mem_GB=5.016 -[v019] 2022-01-31 03:09:39,381 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc -[v019] 2022-01-31 03:09:39,399 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/7epoch.pth -[v019] 2022-01-31 03:09:39,400 (trainer:261) INFO: 13/20epoch started. 
Estimated time to finish: 14 minutes and 1.28 seconds -[v019] 2022-01-31 03:09:42,253 (trainer:653) INFO: 13epoch:train:1-27batch: iter_time=0.003, forward_time=0.036, loss=11.191, loss_att=7.812, loss_ctc=14.570, acc=0.764, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.263e-05, train_time=0.105 -[v019] 2022-01-31 03:09:45,022 (trainer:653) INFO: 13epoch:train:28-54batch: iter_time=1.144e-04, forward_time=0.037, loss=13.916, loss_att=9.607, loss_ctc=18.225, acc=0.744, backward_time=0.024, optim_step_time=0.022, optim0_lr0=5.285e-05, train_time=0.102 -[v019] 2022-01-31 03:09:47,753 (trainer:653) INFO: 13epoch:train:55-81batch: iter_time=1.024e-04, forward_time=0.036, loss=11.977, loss_att=8.251, loss_ctc=15.703, acc=0.745, backward_time=0.024, optim_step_time=0.022, optim0_lr0=5.306e-05, train_time=0.101 -[v019] 2022-01-31 03:09:50,459 (trainer:653) INFO: 13epoch:train:82-108batch: iter_time=1.156e-04, forward_time=0.036, loss=11.402, loss_att=7.684, loss_ctc=15.120, acc=0.784, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.328e-05, train_time=0.100 -[v019] 2022-01-31 03:09:53,170 (trainer:653) INFO: 13epoch:train:109-135batch: iter_time=1.078e-04, forward_time=0.036, loss=11.284, loss_att=7.665, loss_ctc=14.903, acc=0.760, backward_time=0.023, optim_step_time=0.022, optim0_lr0=5.350e-05, train_time=0.100 -[v019] 2022-01-31 03:09:55,908 (trainer:653) INFO: 13epoch:train:136-162batch: iter_time=1.034e-04, forward_time=0.036, loss=12.697, loss_att=8.729, loss_ctc=16.665, acc=0.752, backward_time=0.024, optim_step_time=0.022, optim0_lr0=5.371e-05, train_time=0.101 -[v019] 2022-01-31 03:09:58,639 (trainer:653) INFO: 13epoch:train:163-189batch: iter_time=1.081e-04, forward_time=0.036, loss=12.305, loss_att=8.529, loss_ctc=16.082, acc=0.749, backward_time=0.024, optim_step_time=0.022, optim0_lr0=5.393e-05, train_time=0.101 -[v019] 2022-01-31 03:10:01,314 (trainer:653) INFO: 13epoch:train:190-216batch: iter_time=1.096e-04, forward_time=0.036, loss=9.259, loss_att=6.045, loss_ctc=12.473, acc=0.791, backward_time=0.024, optim_step_time=0.022, optim0_lr0=5.414e-05, train_time=0.099 -[v019] 2022-01-31 03:10:04,032 (trainer:653) INFO: 13epoch:train:217-243batch: iter_time=1.090e-04, forward_time=0.036, loss=10.743, loss_att=7.184, loss_ctc=14.302, acc=0.781, backward_time=0.023, optim_step_time=0.022, optim0_lr0=5.436e-05, train_time=0.100 -[v019] 2022-01-31 03:10:07,176 (trainer:653) INFO: 13epoch:train:244-270batch: iter_time=0.016, forward_time=0.036, loss=12.975, loss_att=8.874, loss_ctc=17.076, acc=0.754, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.458e-05, train_time=0.116 -[v019] 2022-01-31 03:10:09,920 (trainer:653) INFO: 13epoch:train:271-297batch: iter_time=1.091e-04, forward_time=0.036, loss=13.121, loss_att=9.024, loss_ctc=17.219, acc=0.738, backward_time=0.023, optim_step_time=0.022, optim0_lr0=5.479e-05, train_time=0.101 -[v019] 2022-01-31 03:10:12,723 (trainer:653) INFO: 13epoch:train:298-324batch: iter_time=1.078e-04, forward_time=0.037, loss=14.799, loss_att=10.034, loss_ctc=19.563, acc=0.762, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.501e-05, train_time=0.104 -[v019] 2022-01-31 03:10:15,525 (trainer:653) INFO: 13epoch:train:325-351batch: iter_time=1.003e-04, forward_time=0.037, loss=13.770, loss_att=9.208, loss_ctc=18.332, acc=0.768, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.522e-05, train_time=0.104 -[v019] 2022-01-31 03:10:18,267 (trainer:653) INFO: 13epoch:train:352-378batch: iter_time=1.013e-04, forward_time=0.037, 
loss=13.339, loss_att=8.985, loss_ctc=17.692, acc=0.756, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.544e-05, train_time=0.101 -[v019] 2022-01-31 03:10:21,011 (trainer:653) INFO: 13epoch:train:379-405batch: iter_time=1.043e-04, forward_time=0.036, loss=11.979, loss_att=8.114, loss_ctc=15.844, acc=0.770, backward_time=0.023, optim_step_time=0.022, optim0_lr0=5.566e-05, train_time=0.101 -[v019] 2022-01-31 03:10:23,774 (trainer:653) INFO: 13epoch:train:406-432batch: iter_time=1.072e-04, forward_time=0.037, loss=14.299, loss_att=9.987, loss_ctc=18.610, acc=0.741, backward_time=0.023, optim_step_time=0.022, optim0_lr0=5.587e-05, train_time=0.102 -[v019] 2022-01-31 03:10:26,480 (trainer:653) INFO: 13epoch:train:433-459batch: iter_time=1.053e-04, forward_time=0.036, loss=10.645, loss_att=7.107, loss_ctc=14.184, acc=0.786, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.609e-05, train_time=0.100 -[v019] 2022-01-31 03:10:29,191 (trainer:653) INFO: 13epoch:train:460-486batch: iter_time=1.052e-04, forward_time=0.036, loss=11.602, loss_att=7.716, loss_ctc=15.487, acc=0.770, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.630e-05, train_time=0.100 -[v019] 2022-01-31 03:10:31,851 (trainer:653) INFO: 13epoch:train:487-513batch: iter_time=1.062e-04, forward_time=0.035, loss=10.108, loss_att=6.754, loss_ctc=13.463, acc=0.784, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.652e-05, train_time=0.098 -[v019] 2022-01-31 03:10:34,609 (trainer:653) INFO: 13epoch:train:514-540batch: iter_time=1.074e-04, forward_time=0.036, loss=10.363, loss_att=6.891, loss_ctc=13.836, acc=0.785, backward_time=0.024, optim_step_time=0.022, optim0_lr0=5.674e-05, train_time=0.102 -[v019] 2022-01-31 03:11:14,330 (trainer:328) INFO: 13epoch results: [train] iter_time=0.001, forward_time=0.036, loss=12.086, loss_att=8.207, loss_ctc=15.965, acc=0.764, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.471e-05, train_time=0.102, time=55.96 seconds, total_count=7111, gpu_max_cached_mem_GB=5.016, [valid] loss=12.146, loss_att=8.616, loss_ctc=15.675, acc=0.775, cer=0.301, wer=0.750, cer_ctc=0.447, time=6.77 seconds, total_count=2314, gpu_max_cached_mem_GB=5.016, [att_plot] time=32.2 seconds, total_count=0, gpu_max_cached_mem_GB=5.016 -[v019] 2022-01-31 03:11:16,449 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc -[v019] 2022-01-31 03:11:16,467 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/8epoch.pth -[v019] 2022-01-31 03:11:16,467 (trainer:261) INFO: 14/20epoch started. 
Estimated time to finish: 12 minutes and 10.97 seconds -[v019] 2022-01-31 03:11:19,302 (trainer:653) INFO: 14epoch:train:1-27batch: iter_time=0.004, forward_time=0.036, loss=11.972, loss_att=8.009, loss_ctc=15.936, acc=0.764, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.701e-05, train_time=0.105 -[v019] 2022-01-31 03:11:22,088 (trainer:653) INFO: 14epoch:train:28-54batch: iter_time=1.089e-04, forward_time=0.037, loss=14.603, loss_att=9.889, loss_ctc=19.318, acc=0.745, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.722e-05, train_time=0.103 -[v019] 2022-01-31 03:11:24,731 (trainer:653) INFO: 14epoch:train:55-81batch: iter_time=1.049e-04, forward_time=0.035, loss=9.290, loss_att=6.098, loss_ctc=12.483, acc=0.797, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.744e-05, train_time=0.098 -[v019] 2022-01-31 03:11:27,415 (trainer:653) INFO: 14epoch:train:82-108batch: iter_time=1.042e-04, forward_time=0.036, loss=10.603, loss_att=7.215, loss_ctc=13.991, acc=0.772, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.766e-05, train_time=0.099 -[v019] 2022-01-31 03:11:30,151 (trainer:653) INFO: 14epoch:train:109-135batch: iter_time=1.021e-04, forward_time=0.036, loss=12.388, loss_att=8.315, loss_ctc=16.462, acc=0.777, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.787e-05, train_time=0.101 -[v019] 2022-01-31 03:11:32,896 (trainer:653) INFO: 14epoch:train:136-162batch: iter_time=1.104e-04, forward_time=0.036, loss=11.666, loss_att=8.132, loss_ctc=15.200, acc=0.770, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.809e-05, train_time=0.101 -[v019] 2022-01-31 03:11:35,612 (trainer:653) INFO: 14epoch:train:163-189batch: iter_time=1.074e-04, forward_time=0.035, loss=10.579, loss_att=7.009, loss_ctc=14.149, acc=0.785, backward_time=0.023, optim_step_time=0.022, optim0_lr0=5.830e-05, train_time=0.100 -[v019] 2022-01-31 03:11:38,362 (trainer:653) INFO: 14epoch:train:190-216batch: iter_time=1.045e-04, forward_time=0.036, loss=14.291, loss_att=9.678, loss_ctc=18.904, acc=0.742, backward_time=0.024, optim_step_time=0.022, optim0_lr0=5.852e-05, train_time=0.102 -[v019] 2022-01-31 03:11:41,083 (trainer:653) INFO: 14epoch:train:217-243batch: iter_time=1.244e-04, forward_time=0.036, loss=13.010, loss_att=8.607, loss_ctc=17.413, acc=0.772, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.874e-05, train_time=0.101 -[v019] 2022-01-31 03:11:43,805 (trainer:653) INFO: 14epoch:train:244-270batch: iter_time=1.024e-04, forward_time=0.036, loss=11.589, loss_att=7.491, loss_ctc=15.686, acc=0.786, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.895e-05, train_time=0.101 -[v019] 2022-01-31 03:11:46,527 (trainer:653) INFO: 14epoch:train:271-297batch: iter_time=1.098e-04, forward_time=0.036, loss=10.425, loss_att=7.060, loss_ctc=13.790, acc=0.790, backward_time=0.023, optim_step_time=0.022, optim0_lr0=5.917e-05, train_time=0.101 -[v019] 2022-01-31 03:11:49,250 (trainer:653) INFO: 14epoch:train:298-324batch: iter_time=1.062e-04, forward_time=0.037, loss=11.411, loss_att=7.577, loss_ctc=15.245, acc=0.763, backward_time=0.023, optim_step_time=0.022, optim0_lr0=5.938e-05, train_time=0.101 -[v019] 2022-01-31 03:11:52,013 (trainer:653) INFO: 14epoch:train:325-351batch: iter_time=1.046e-04, forward_time=0.036, loss=11.202, loss_att=7.338, loss_ctc=15.066, acc=0.792, backward_time=0.023, optim_step_time=0.022, optim0_lr0=5.960e-05, train_time=0.102 -[v019] 2022-01-31 03:11:54,687 (trainer:653) INFO: 14epoch:train:352-378batch: iter_time=1.095e-04, 
forward_time=0.036, loss=9.749, loss_att=6.414, loss_ctc=13.083, acc=0.801, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.982e-05, train_time=0.099 -[v019] 2022-01-31 03:11:57,413 (trainer:653) INFO: 14epoch:train:379-405batch: iter_time=1.112e-04, forward_time=0.036, loss=12.681, loss_att=8.420, loss_ctc=16.942, acc=0.749, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.003e-05, train_time=0.101 -[v019] 2022-01-31 03:12:00,144 (trainer:653) INFO: 14epoch:train:406-432batch: iter_time=1.119e-04, forward_time=0.036, loss=12.669, loss_att=8.423, loss_ctc=16.915, acc=0.764, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.025e-05, train_time=0.101 -[v019] 2022-01-31 03:12:02,850 (trainer:653) INFO: 14epoch:train:433-459batch: iter_time=1.073e-04, forward_time=0.036, loss=11.965, loss_att=8.062, loss_ctc=15.869, acc=0.761, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.046e-05, train_time=0.100 -[v019] 2022-01-31 03:12:05,545 (trainer:653) INFO: 14epoch:train:460-486batch: iter_time=1.046e-04, forward_time=0.036, loss=9.821, loss_att=6.663, loss_ctc=12.980, acc=0.793, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.068e-05, train_time=0.100 -[v019] 2022-01-31 03:12:08,261 (trainer:653) INFO: 14epoch:train:487-513batch: iter_time=1.046e-04, forward_time=0.036, loss=11.261, loss_att=7.667, loss_ctc=14.856, acc=0.770, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.090e-05, train_time=0.100 -[v019] 2022-01-31 03:12:11,058 (trainer:653) INFO: 14epoch:train:514-540batch: iter_time=0.002, forward_time=0.036, loss=13.500, loss_att=9.049, loss_ctc=17.952, acc=0.763, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.111e-05, train_time=0.103 -[v019] 2022-01-31 03:12:48,528 (trainer:328) INFO: 14epoch results: [train] iter_time=3.870e-04, forward_time=0.036, loss=11.671, loss_att=7.812, loss_ctc=15.530, acc=0.773, backward_time=0.023, optim_step_time=0.021, optim0_lr0=5.909e-05, train_time=0.101, time=55.32 seconds, total_count=7658, gpu_max_cached_mem_GB=5.016, [valid] loss=11.757, loss_att=8.273, loss_ctc=15.241, acc=0.782, cer=0.294, wer=0.740, cer_ctc=0.432, time=6.08 seconds, total_count=2492, gpu_max_cached_mem_GB=5.016, [att_plot] time=30.66 seconds, total_count=0, gpu_max_cached_mem_GB=5.016 -[v019] 2022-01-31 03:12:50,713 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc -[v019] 2022-01-31 03:12:50,732 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/9epoch.pth -[v019] 2022-01-31 03:12:50,733 (trainer:261) INFO: 15/20epoch started. 
Estimated time to finish: 10 minutes and 21.47 seconds -[v019] 2022-01-31 03:12:53,593 (trainer:653) INFO: 15epoch:train:1-27batch: iter_time=0.004, forward_time=0.037, loss=12.206, loss_att=8.163, loss_ctc=16.249, acc=0.769, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.138e-05, train_time=0.106 -[v019] 2022-01-31 03:12:56,377 (trainer:653) INFO: 15epoch:train:28-54batch: iter_time=1.231e-04, forward_time=0.037, loss=11.174, loss_att=7.319, loss_ctc=15.029, acc=0.784, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.160e-05, train_time=0.103 -[v019] 2022-01-31 03:12:59,128 (trainer:653) INFO: 15epoch:train:55-81batch: iter_time=1.084e-04, forward_time=0.036, loss=10.798, loss_att=7.230, loss_ctc=14.366, acc=0.777, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.182e-05, train_time=0.102 -[v019] 2022-01-31 03:13:01,919 (trainer:653) INFO: 15epoch:train:82-108batch: iter_time=1.058e-04, forward_time=0.037, loss=12.423, loss_att=8.152, loss_ctc=16.694, acc=0.783, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.203e-05, train_time=0.103 -[v019] 2022-01-31 03:13:04,659 (trainer:653) INFO: 15epoch:train:109-135batch: iter_time=1.062e-04, forward_time=0.037, loss=11.412, loss_att=7.590, loss_ctc=15.233, acc=0.769, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.225e-05, train_time=0.101 -[v019] 2022-01-31 03:13:07,374 (trainer:653) INFO: 15epoch:train:136-162batch: iter_time=1.062e-04, forward_time=0.036, loss=10.201, loss_att=7.000, loss_ctc=13.403, acc=0.787, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.246e-05, train_time=0.100 -[v019] 2022-01-31 03:13:10,143 (trainer:653) INFO: 15epoch:train:163-189batch: iter_time=1.116e-04, forward_time=0.037, loss=12.196, loss_att=7.949, loss_ctc=16.443, acc=0.772, backward_time=0.024, optim_step_time=0.022, optim0_lr0=6.268e-05, train_time=0.102 -[v019] 2022-01-31 03:13:12,809 (trainer:653) INFO: 15epoch:train:190-216batch: iter_time=1.134e-04, forward_time=0.035, loss=10.448, loss_att=6.948, loss_ctc=13.947, acc=0.779, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.290e-05, train_time=0.099 -[v019] 2022-01-31 03:13:15,536 (trainer:653) INFO: 15epoch:train:217-243batch: iter_time=0.001, forward_time=0.035, loss=10.715, loss_att=7.088, loss_ctc=14.341, acc=0.780, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.311e-05, train_time=0.101 -[v019] 2022-01-31 03:13:18,223 (trainer:653) INFO: 15epoch:train:244-270batch: iter_time=1.048e-04, forward_time=0.035, loss=11.598, loss_att=7.761, loss_ctc=15.434, acc=0.767, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.333e-05, train_time=0.099 -[v019] 2022-01-31 03:13:20,953 (trainer:653) INFO: 15epoch:train:271-297batch: iter_time=1.023e-04, forward_time=0.036, loss=12.644, loss_att=8.416, loss_ctc=16.872, acc=0.767, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.354e-05, train_time=0.101 -[v019] 2022-01-31 03:13:23,600 (trainer:653) INFO: 15epoch:train:298-324batch: iter_time=1.044e-04, forward_time=0.035, loss=10.836, loss_att=7.151, loss_ctc=14.521, acc=0.770, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.376e-05, train_time=0.098 -[v019] 2022-01-31 03:13:26,313 (trainer:653) INFO: 15epoch:train:325-351batch: iter_time=1.136e-04, forward_time=0.036, loss=11.051, loss_att=7.519, loss_ctc=14.582, acc=0.775, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.398e-05, train_time=0.100 -[v019] 2022-01-31 03:13:29,027 (trainer:653) INFO: 15epoch:train:352-378batch: iter_time=1.093e-04, forward_time=0.036, 
loss=9.583, loss_att=6.158, loss_ctc=13.007, acc=0.811, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.419e-05, train_time=0.100 -[v019] 2022-01-31 03:13:31,751 (trainer:653) INFO: 15epoch:train:379-405batch: iter_time=1.069e-04, forward_time=0.036, loss=10.700, loss_att=7.020, loss_ctc=14.380, acc=0.801, backward_time=0.024, optim_step_time=0.022, optim0_lr0=6.441e-05, train_time=0.101 -[v019] 2022-01-31 03:13:34,497 (trainer:653) INFO: 15epoch:train:406-432batch: iter_time=1.147e-04, forward_time=0.036, loss=9.651, loss_att=6.348, loss_ctc=12.955, acc=0.799, backward_time=0.024, optim_step_time=0.022, optim0_lr0=6.462e-05, train_time=0.101 -[v019] 2022-01-31 03:13:37,222 (trainer:653) INFO: 15epoch:train:433-459batch: iter_time=1.144e-04, forward_time=0.036, loss=11.904, loss_att=7.699, loss_ctc=16.108, acc=0.769, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.484e-05, train_time=0.101 -[v019] 2022-01-31 03:13:39,973 (trainer:653) INFO: 15epoch:train:460-486batch: iter_time=1.106e-04, forward_time=0.036, loss=10.451, loss_att=6.579, loss_ctc=14.324, acc=0.798, backward_time=0.024, optim_step_time=0.022, optim0_lr0=6.506e-05, train_time=0.102 -[v019] 2022-01-31 03:13:42,759 (trainer:653) INFO: 15epoch:train:487-513batch: iter_time=1.120e-04, forward_time=0.036, loss=11.922, loss_att=8.011, loss_ctc=15.832, acc=0.787, backward_time=0.024, optim_step_time=0.022, optim0_lr0=6.527e-05, train_time=0.103 -[v019] 2022-01-31 03:13:45,789 (trainer:653) INFO: 15epoch:train:514-540batch: iter_time=0.007, forward_time=0.037, loss=13.175, loss_att=8.522, loss_ctc=17.828, acc=0.779, backward_time=0.024, optim_step_time=0.022, optim0_lr0=6.549e-05, train_time=0.112 -[v019] 2022-01-31 03:14:23,742 (trainer:328) INFO: 15epoch results: [train] iter_time=6.587e-04, forward_time=0.036, loss=11.294, loss_att=7.456, loss_ctc=15.133, acc=0.781, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.346e-05, train_time=0.102, time=55.84 seconds, total_count=8205, gpu_max_cached_mem_GB=5.016, [valid] loss=11.403, loss_att=7.958, loss_ctc=14.847, acc=0.789, cer=0.279, wer=0.731, cer_ctc=0.432, time=6.06 seconds, total_count=2670, gpu_max_cached_mem_GB=5.016, [att_plot] time=31.11 seconds, total_count=0, gpu_max_cached_mem_GB=5.016 -[v019] 2022-01-31 03:14:25,943 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc -[v019] 2022-01-31 03:14:25,963 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/10epoch.pth -[v019] 2022-01-31 03:14:25,964 (trainer:261) INFO: 16/20epoch started. 
Estimated time to finish: 8 minutes and 34.68 seconds -[v019] 2022-01-31 03:14:28,908 (trainer:653) INFO: 16epoch:train:1-27batch: iter_time=0.004, forward_time=0.038, loss=11.276, loss_att=7.541, loss_ctc=15.010, acc=0.788, backward_time=0.024, optim_step_time=0.022, optim0_lr0=6.576e-05, train_time=0.109 -[v019] 2022-01-31 03:14:31,645 (trainer:653) INFO: 16epoch:train:28-54batch: iter_time=1.156e-04, forward_time=0.036, loss=10.751, loss_att=7.035, loss_ctc=14.468, acc=0.790, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.598e-05, train_time=0.101 -[v019] 2022-01-31 03:14:34,363 (trainer:653) INFO: 16epoch:train:55-81batch: iter_time=1.299e-04, forward_time=0.036, loss=9.514, loss_att=6.326, loss_ctc=12.703, acc=0.796, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.619e-05, train_time=0.100 -[v019] 2022-01-31 03:14:37,040 (trainer:653) INFO: 16epoch:train:82-108batch: iter_time=1.115e-04, forward_time=0.036, loss=9.681, loss_att=6.419, loss_ctc=12.944, acc=0.787, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.641e-05, train_time=0.099 -[v019] 2022-01-31 03:14:39,795 (trainer:653) INFO: 16epoch:train:109-135batch: iter_time=1.056e-04, forward_time=0.036, loss=9.750, loss_att=6.384, loss_ctc=13.116, acc=0.805, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.662e-05, train_time=0.102 -[v019] 2022-01-31 03:14:42,543 (trainer:653) INFO: 16epoch:train:136-162batch: iter_time=1.078e-04, forward_time=0.036, loss=12.512, loss_att=8.265, loss_ctc=16.759, acc=0.769, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.684e-05, train_time=0.102 -[v019] 2022-01-31 03:14:45,300 (trainer:653) INFO: 16epoch:train:163-189batch: iter_time=1.080e-04, forward_time=0.037, loss=10.989, loss_att=7.187, loss_ctc=14.792, acc=0.786, backward_time=0.024, optim_step_time=0.022, optim0_lr0=6.706e-05, train_time=0.102 -[v019] 2022-01-31 03:14:48,048 (trainer:653) INFO: 16epoch:train:190-216batch: iter_time=0.001, forward_time=0.036, loss=9.972, loss_att=6.386, loss_ctc=13.559, acc=0.793, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.727e-05, train_time=0.102 -[v019] 2022-01-31 03:14:50,881 (trainer:653) INFO: 16epoch:train:217-243batch: iter_time=1.065e-04, forward_time=0.037, loss=11.943, loss_att=7.915, loss_ctc=15.971, acc=0.793, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.749e-05, train_time=0.105 -[v019] 2022-01-31 03:14:53,644 (trainer:653) INFO: 16epoch:train:244-270batch: iter_time=1.068e-04, forward_time=0.037, loss=11.344, loss_att=7.493, loss_ctc=15.196, acc=0.775, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.770e-05, train_time=0.102 -[v019] 2022-01-31 03:14:56,434 (trainer:653) INFO: 16epoch:train:271-297batch: iter_time=1.088e-04, forward_time=0.037, loss=12.147, loss_att=7.708, loss_ctc=16.587, acc=0.792, backward_time=0.024, optim_step_time=0.022, optim0_lr0=6.792e-05, train_time=0.103 -[v019] 2022-01-31 03:14:59,181 (trainer:653) INFO: 16epoch:train:298-324batch: iter_time=1.061e-04, forward_time=0.036, loss=11.230, loss_att=7.200, loss_ctc=15.260, acc=0.797, backward_time=0.024, optim_step_time=0.022, optim0_lr0=6.814e-05, train_time=0.101 -[v019] 2022-01-31 03:15:01,928 (trainer:653) INFO: 16epoch:train:325-351batch: iter_time=1.071e-04, forward_time=0.036, loss=12.202, loss_att=7.836, loss_ctc=16.568, acc=0.773, backward_time=0.024, optim_step_time=0.022, optim0_lr0=6.835e-05, train_time=0.102 -[v019] 2022-01-31 03:15:04,726 (trainer:653) INFO: 16epoch:train:352-378batch: iter_time=1.076e-04, forward_time=0.037, 
loss=12.862, loss_att=8.320, loss_ctc=17.404, acc=0.772, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.857e-05, train_time=0.103 -[v019] 2022-01-31 03:15:07,434 (trainer:653) INFO: 16epoch:train:379-405batch: iter_time=1.054e-04, forward_time=0.036, loss=12.715, loss_att=8.423, loss_ctc=17.007, acc=0.758, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.878e-05, train_time=0.100 -[v019] 2022-01-31 03:15:10,111 (trainer:653) INFO: 16epoch:train:406-432batch: iter_time=1.070e-04, forward_time=0.035, loss=10.666, loss_att=6.887, loss_ctc=14.444, acc=0.788, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.900e-05, train_time=0.099 -[v019] 2022-01-31 03:15:12,740 (trainer:653) INFO: 16epoch:train:433-459batch: iter_time=1.032e-04, forward_time=0.035, loss=10.076, loss_att=6.227, loss_ctc=13.926, acc=0.788, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.922e-05, train_time=0.097 -[v019] 2022-01-31 03:15:15,448 (trainer:653) INFO: 16epoch:train:460-486batch: iter_time=1.026e-04, forward_time=0.036, loss=10.599, loss_att=7.015, loss_ctc=14.183, acc=0.797, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.943e-05, train_time=0.100 -[v019] 2022-01-31 03:15:18,124 (trainer:653) INFO: 16epoch:train:487-513batch: iter_time=1.147e-04, forward_time=0.035, loss=9.636, loss_att=6.211, loss_ctc=13.061, acc=0.811, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.965e-05, train_time=0.099 -[v019] 2022-01-31 03:15:20,807 (trainer:653) INFO: 16epoch:train:514-540batch: iter_time=1.085e-04, forward_time=0.036, loss=10.331, loss_att=6.697, loss_ctc=13.965, acc=0.784, backward_time=0.023, optim_step_time=0.021, optim0_lr0=6.986e-05, train_time=0.099 -[v019] 2022-01-31 03:15:57,830 (trainer:328) INFO: 16epoch results: [train] iter_time=3.532e-04, forward_time=0.036, loss=10.973, loss_att=7.150, loss_ctc=14.796, acc=0.787, backward_time=0.023, optim_step_time=0.022, optim0_lr0=6.784e-05, train_time=0.101, time=55.59 seconds, total_count=8752, gpu_max_cached_mem_GB=5.016, [valid] loss=11.333, loss_att=7.742, loss_ctc=14.923, acc=0.794, cer=0.268, wer=0.721, cer_ctc=0.416, time=6.1 seconds, total_count=2848, gpu_max_cached_mem_GB=5.016, [att_plot] time=30.18 seconds, total_count=0, gpu_max_cached_mem_GB=5.016 -[v019] 2022-01-31 03:16:00,076 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc -[v019] 2022-01-31 03:16:00,079 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/11epoch.pth -[v019] 2022-01-31 03:16:00,079 (trainer:261) INFO: 17/20epoch started. 
Estimated time to finish: 6 minutes and 49.22 seconds
-[v019] 2022-01-31 03:16:02,950 (trainer:653) INFO: 17epoch:train:1-27batch: iter_time=0.004, forward_time=0.037, loss=9.824, loss_att=6.411, loss_ctc=13.238, acc=0.801, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.014e-05, train_time=0.106
-[v019] 2022-01-31 03:16:05,614 (trainer:653) INFO: 17epoch:train:28-54batch: iter_time=1.141e-04, forward_time=0.036, loss=9.162, loss_att=6.103, loss_ctc=12.220, acc=0.793, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.035e-05, train_time=0.098
-[v019] 2022-01-31 03:16:08,331 (trainer:653) INFO: 17epoch:train:55-81batch: iter_time=1.041e-04, forward_time=0.036, loss=11.545, loss_att=7.458, loss_ctc=15.633, acc=0.784, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.057e-05, train_time=0.100
-[v019] 2022-01-31 03:16:10,973 (trainer:653) INFO: 17epoch:train:82-108batch: iter_time=1.032e-04, forward_time=0.035, loss=8.096, loss_att=5.265, loss_ctc=10.927, acc=0.816, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.078e-05, train_time=0.098
-[v019] 2022-01-31 03:16:13,716 (trainer:653) INFO: 17epoch:train:109-135batch: iter_time=1.040e-04, forward_time=0.037, loss=12.792, loss_att=8.397, loss_ctc=17.187, acc=0.775, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.100e-05, train_time=0.101
-[v019] 2022-01-31 03:16:16,469 (trainer:653) INFO: 17epoch:train:136-162batch: iter_time=1.049e-04, forward_time=0.037, loss=12.011, loss_att=7.628, loss_ctc=16.393, acc=0.782, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.122e-05, train_time=0.102
-[v019] 2022-01-31 03:16:19,167 (trainer:653) INFO: 17epoch:train:163-189batch: iter_time=1.007e-04, forward_time=0.036, loss=11.274, loss_att=7.319, loss_ctc=15.230, acc=0.779, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.143e-05, train_time=0.100
-[v019] 2022-01-31 03:16:21,828 (trainer:653) INFO: 17epoch:train:190-216batch: iter_time=1.034e-04, forward_time=0.035, loss=11.708, loss_att=7.608, loss_ctc=15.808, acc=0.778, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.165e-05, train_time=0.098
-[v019] 2022-01-31 03:16:24,418 (trainer:653) INFO: 17epoch:train:217-243batch: iter_time=1.042e-04, forward_time=0.034, loss=8.954, loss_att=5.739, loss_ctc=12.169, acc=0.802, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.186e-05, train_time=0.096
-[v019] 2022-01-31 03:16:27,040 (trainer:653) INFO: 17epoch:train:244-270batch: iter_time=9.890e-05, forward_time=0.035, loss=10.451, loss_att=6.679, loss_ctc=14.224, acc=0.805, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.208e-05, train_time=0.097
-[v019] 2022-01-31 03:16:29,710 (trainer:653) INFO: 17epoch:train:271-297batch: iter_time=9.471e-05, forward_time=0.036, loss=12.209, loss_att=7.729, loss_ctc=16.690, acc=0.794, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.230e-05, train_time=0.099
-[v019] 2022-01-31 03:16:32,354 (trainer:653) INFO: 17epoch:train:298-324batch: iter_time=1.071e-04, forward_time=0.035, loss=9.639, loss_att=6.254, loss_ctc=13.024, acc=0.797, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.251e-05, train_time=0.098
-[v019] 2022-01-31 03:16:35,025 (trainer:653) INFO: 17epoch:train:325-351batch: iter_time=1.126e-04, forward_time=0.036, loss=11.684, loss_att=7.676, loss_ctc=15.693, acc=0.773, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.273e-05, train_time=0.099
-[v019] 2022-01-31 03:16:37,709 (trainer:653) INFO: 17epoch:train:352-378batch: iter_time=1.224e-04, forward_time=0.036, loss=10.979, loss_att=7.003, loss_ctc=14.955, acc=0.797, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.294e-05, train_time=0.099
-[v019] 2022-01-31 03:16:40,475 (trainer:653) INFO: 17epoch:train:379-405batch: iter_time=1.172e-04, forward_time=0.037, loss=11.097, loss_att=6.929, loss_ctc=15.266, acc=0.792, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.316e-05, train_time=0.102
-[v019] 2022-01-31 03:16:43,134 (trainer:653) INFO: 17epoch:train:406-432batch: iter_time=1.019e-04, forward_time=0.036, loss=8.503, loss_att=5.547, loss_ctc=11.458, acc=0.809, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.338e-05, train_time=0.098
-[v019] 2022-01-31 03:16:45,804 (trainer:653) INFO: 17epoch:train:433-459batch: iter_time=1.118e-04, forward_time=0.036, loss=10.098, loss_att=6.386, loss_ctc=13.810, acc=0.799, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.359e-05, train_time=0.099
-[v019] 2022-01-31 03:16:48,544 (trainer:653) INFO: 17epoch:train:460-486batch: iter_time=1.029e-04, forward_time=0.037, loss=10.210, loss_att=6.491, loss_ctc=13.928, acc=0.815, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.381e-05, train_time=0.101
-[v019] 2022-01-31 03:16:51,180 (trainer:653) INFO: 17epoch:train:487-513batch: iter_time=9.815e-05, forward_time=0.035, loss=10.347, loss_att=6.742, loss_ctc=13.952, acc=0.797, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.402e-05, train_time=0.097
-[v019] 2022-01-31 03:16:54,189 (trainer:653) INFO: 17epoch:train:514-540batch: iter_time=0.009, forward_time=0.036, loss=12.634, loss_att=8.218, loss_ctc=17.050, acc=0.795, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.424e-05, train_time=0.111
-[v019] 2022-01-31 03:17:30,425 (trainer:328) INFO: 17epoch results: [train] iter_time=7.347e-04, forward_time=0.036, loss=10.654, loss_att=6.873, loss_ctc=14.435, acc=0.794, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.222e-05, train_time=0.100, time=54.86 seconds, total_count=9299, gpu_max_cached_mem_GB=5.016, [valid] loss=10.843, loss_att=7.484, loss_ctc=14.201, acc=0.803, cer=0.272, wer=0.696, cer_ctc=0.397, time=5.96 seconds, total_count=3026, gpu_max_cached_mem_GB=5.016, [att_plot] time=29.52 seconds, total_count=0, gpu_max_cached_mem_GB=5.016
-[v019] 2022-01-31 03:17:32,690 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc
-[v019] 2022-01-31 03:17:32,691 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/12epoch.pth
-[v019] 2022-01-31 03:17:32,691 (trainer:261) INFO: 18/20epoch started. Estimated time to finish: 5 minutes and 4.98 seconds
-[v019] 2022-01-31 03:17:35,664 (trainer:653) INFO: 18epoch:train:1-27batch: iter_time=0.003, forward_time=0.038, loss=12.328, loss_att=7.828, loss_ctc=16.828, acc=0.798, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.451e-05, train_time=0.110
-[v019] 2022-01-31 03:17:38,324 (trainer:653) INFO: 18epoch:train:28-54batch: iter_time=1.098e-04, forward_time=0.036, loss=9.738, loss_att=6.282, loss_ctc=13.194, acc=0.801, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.473e-05, train_time=0.098
-[v019] 2022-01-31 03:17:40,988 (trainer:653) INFO: 18epoch:train:55-81batch: iter_time=1.063e-04, forward_time=0.036, loss=11.310, loss_att=7.397, loss_ctc=15.223, acc=0.775, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.494e-05, train_time=0.098
-[v019] 2022-01-31 03:17:43,737 (trainer:653) INFO: 18epoch:train:82-108batch: iter_time=1.042e-04, forward_time=0.036, loss=11.247, loss_att=7.282, loss_ctc=15.213, acc=0.797, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.516e-05, train_time=0.102
-[v019] 2022-01-31 03:17:46,454 (trainer:653) INFO: 18epoch:train:109-135batch: iter_time=1.021e-04, forward_time=0.036, loss=11.209, loss_att=7.296, loss_ctc=15.122, acc=0.798, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.538e-05, train_time=0.100
-[v019] 2022-01-31 03:17:49,080 (trainer:653) INFO: 18epoch:train:136-162batch: iter_time=1.041e-04, forward_time=0.035, loss=9.689, loss_att=6.118, loss_ctc=13.260, acc=0.814, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.559e-05, train_time=0.097
-[v019] 2022-01-31 03:17:51,717 (trainer:653) INFO: 18epoch:train:163-189batch: iter_time=9.758e-05, forward_time=0.036, loss=9.756, loss_att=6.483, loss_ctc=13.030, acc=0.796, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.581e-05, train_time=0.097
-[v019] 2022-01-31 03:17:54,541 (trainer:653) INFO: 18epoch:train:190-216batch: iter_time=0.009, forward_time=0.034, loss=8.654, loss_att=5.564, loss_ctc=11.745, acc=0.806, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.602e-05, train_time=0.104
-[v019] 2022-01-31 03:17:57,160 (trainer:653) INFO: 18epoch:train:217-243batch: iter_time=9.732e-05, forward_time=0.035, loss=10.079, loss_att=6.505, loss_ctc=13.654, acc=0.798, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.624e-05, train_time=0.097
-[v019] 2022-01-31 03:17:59,813 (trainer:653) INFO: 18epoch:train:244-270batch: iter_time=1.036e-04, forward_time=0.035, loss=11.169, loss_att=7.143, loss_ctc=15.194, acc=0.805, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.646e-05, train_time=0.098
-[v019] 2022-01-31 03:18:02,374 (trainer:653) INFO: 18epoch:train:271-297batch: iter_time=9.836e-05, forward_time=0.034, loss=8.317, loss_att=5.340, loss_ctc=11.295, acc=0.811, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.667e-05, train_time=0.095
-[v019] 2022-01-31 03:18:05,052 (trainer:653) INFO: 18epoch:train:298-324batch: iter_time=9.882e-05, forward_time=0.036, loss=11.470, loss_att=7.305, loss_ctc=15.635, acc=0.794, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.689e-05, train_time=0.099
-[v019] 2022-01-31 03:18:07,647 (trainer:653) INFO: 18epoch:train:325-351batch: iter_time=9.888e-05, forward_time=0.035, loss=8.305, loss_att=5.354, loss_ctc=11.256, acc=0.814, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.710e-05, train_time=0.096
-[v019] 2022-01-31 03:18:10,306 (trainer:653) INFO: 18epoch:train:352-378batch: iter_time=1.017e-04, forward_time=0.035, loss=11.286, loss_att=7.273, loss_ctc=15.299, acc=0.790, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.732e-05, train_time=0.098
-[v019] 2022-01-31 03:18:12,972 (trainer:653) INFO: 18epoch:train:379-405batch: iter_time=9.919e-05, forward_time=0.035, loss=10.579, loss_att=6.416, loss_ctc=14.742, acc=0.816, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.754e-05, train_time=0.098
-[v019] 2022-01-31 03:18:15,606 (trainer:653) INFO: 18epoch:train:406-432batch: iter_time=1.046e-04, forward_time=0.035, loss=9.404, loss_att=6.026, loss_ctc=12.783, acc=0.805, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.775e-05, train_time=0.097
-[v019] 2022-01-31 03:18:18,324 (trainer:653) INFO: 18epoch:train:433-459batch: iter_time=1.008e-04, forward_time=0.037, loss=12.144, loss_att=7.782, loss_ctc=16.506, acc=0.784, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.797e-05, train_time=0.100
-[v019] 2022-01-31 03:18:21,034 (trainer:653) INFO: 18epoch:train:460-486batch: iter_time=9.993e-05, forward_time=0.036, loss=11.861, loss_att=7.677, loss_ctc=16.045, acc=0.783, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.818e-05, train_time=0.100
-[v019] 2022-01-31 03:18:23,657 (trainer:653) INFO: 18epoch:train:487-513batch: iter_time=1.012e-04, forward_time=0.035, loss=9.360, loss_att=6.051, loss_ctc=12.670, acc=0.791, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.840e-05, train_time=0.097
-[v019] 2022-01-31 03:18:26,273 (trainer:653) INFO: 18epoch:train:514-540batch: iter_time=1.006e-04, forward_time=0.035, loss=8.319, loss_att=5.249, loss_ctc=11.389, acc=0.823, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.862e-05, train_time=0.097
-[v019] 2022-01-31 03:19:02,253 (trainer:328) INFO: 18epoch results: [train] iter_time=6.986e-04, forward_time=0.036, loss=10.306, loss_att=6.616, loss_ctc=13.995, acc=0.800, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.659e-05, train_time=0.099, time=54.3 seconds, total_count=9846, gpu_max_cached_mem_GB=5.016, [valid] loss=10.461, loss_att=7.202, loss_ctc=13.719, acc=0.810, cer=0.249, wer=0.693, cer_ctc=0.390, time=6 seconds, total_count=3204, gpu_max_cached_mem_GB=5.016, [att_plot] time=29.26 seconds, total_count=0, gpu_max_cached_mem_GB=5.016
-[v019] 2022-01-31 03:19:04,528 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc
-[v019] 2022-01-31 03:19:04,529 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/13epoch.pth
-[v019] 2022-01-31 03:19:04,529 (trainer:261) INFO: 19/20epoch started. Estimated time to finish: 3 minutes and 22.09 seconds
-[v019] 2022-01-31 03:19:07,379 (trainer:653) INFO: 19epoch:train:1-27batch: iter_time=0.003, forward_time=0.037, loss=11.399, loss_att=7.236, loss_ctc=15.562, acc=0.799, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.889e-05, train_time=0.105
-[v019] 2022-01-31 03:19:10,007 (trainer:653) INFO: 19epoch:train:28-54batch: iter_time=1.033e-04, forward_time=0.035, loss=9.351, loss_att=5.840, loss_ctc=12.862, acc=0.823, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.910e-05, train_time=0.097
-[v019] 2022-01-31 03:19:12,596 (trainer:653) INFO: 19epoch:train:55-81batch: iter_time=9.712e-05, forward_time=0.035, loss=9.681, loss_att=6.200, loss_ctc=13.161, acc=0.797, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.932e-05, train_time=0.096
-[v019] 2022-01-31 03:19:15,305 (trainer:653) INFO: 19epoch:train:82-108batch: iter_time=1.015e-04, forward_time=0.036, loss=11.288, loss_att=6.979, loss_ctc=15.598, acc=0.807, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.954e-05, train_time=0.100
-[v019] 2022-01-31 03:19:18,050 (trainer:653) INFO: 19epoch:train:109-135batch: iter_time=9.867e-05, forward_time=0.037, loss=12.503, loss_att=7.590, loss_ctc=17.417, acc=0.793, backward_time=0.022, optim_step_time=0.021, optim0_lr0=7.975e-05, train_time=0.101
-[v019] 2022-01-31 03:19:20,659 (trainer:653) INFO: 19epoch:train:136-162batch: iter_time=9.917e-05, forward_time=0.035, loss=8.409, loss_att=5.400, loss_ctc=11.418, acc=0.810, backward_time=0.023, optim_step_time=0.021, optim0_lr0=7.997e-05, train_time=0.096
-[v019] 2022-01-31 03:19:23,364 (trainer:653) INFO: 19epoch:train:163-189batch: iter_time=1.055e-04, forward_time=0.036, loss=10.472, loss_att=6.678, loss_ctc=14.266, acc=0.803, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.018e-05, train_time=0.100
-[v019] 2022-01-31 03:19:26,030 (trainer:653) INFO: 19epoch:train:190-216batch: iter_time=1.026e-04, forward_time=0.036, loss=9.100, loss_att=5.953, loss_ctc=12.248, acc=0.809, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.040e-05, train_time=0.098
-[v019] 2022-01-31 03:19:29,342 (trainer:653) INFO: 19epoch:train:217-243batch: iter_time=0.024, forward_time=0.035, loss=10.613, loss_att=6.722, loss_ctc=14.504, acc=0.804, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.062e-05, train_time=0.122
-[v019] 2022-01-31 03:19:31,912 (trainer:653) INFO: 19epoch:train:244-270batch: iter_time=1.014e-04, forward_time=0.035, loss=8.293, loss_att=5.320, loss_ctc=11.267, acc=0.817, backward_time=0.022, optim_step_time=0.021, optim0_lr0=8.083e-05, train_time=0.095
-[v019] 2022-01-31 03:19:34,591 (trainer:653) INFO: 19epoch:train:271-297batch: iter_time=9.774e-05, forward_time=0.036, loss=11.428, loss_att=7.314, loss_ctc=15.541, acc=0.786, backward_time=0.022, optim_step_time=0.021, optim0_lr0=8.105e-05, train_time=0.099
-[v019] 2022-01-31 03:19:37,210 (trainer:653) INFO: 19epoch:train:298-324batch: iter_time=1.000e-04, forward_time=0.035, loss=8.244, loss_att=5.334, loss_ctc=11.154, acc=0.818, backward_time=0.022, optim_step_time=0.021, optim0_lr0=8.126e-05, train_time=0.097
-[v019] 2022-01-31 03:19:39,860 (trainer:653) INFO: 19epoch:train:325-351batch: iter_time=9.995e-05, forward_time=0.035, loss=10.118, loss_att=6.405, loss_ctc=13.831, acc=0.812, backward_time=0.022, optim_step_time=0.021, optim0_lr0=8.148e-05, train_time=0.098
-[v019] 2022-01-31 03:19:42,487 (trainer:653) INFO: 19epoch:train:352-378batch: iter_time=9.434e-05, forward_time=0.035, loss=11.628, loss_att=7.492, loss_ctc=15.764, acc=0.800, backward_time=0.022, optim_step_time=0.021, optim0_lr0=8.170e-05, train_time=0.097
-[v019] 2022-01-31 03:19:45,119 (trainer:653) INFO: 19epoch:train:379-405batch: iter_time=9.692e-05, forward_time=0.035, loss=9.290, loss_att=5.826, loss_ctc=12.754, acc=0.819, backward_time=0.022, optim_step_time=0.021, optim0_lr0=8.191e-05, train_time=0.097
-[v019] 2022-01-31 03:19:47,811 (trainer:653) INFO: 19epoch:train:406-432batch: iter_time=1.047e-04, forward_time=0.036, loss=10.577, loss_att=6.756, loss_ctc=14.398, acc=0.809, backward_time=0.022, optim_step_time=0.021, optim0_lr0=8.213e-05, train_time=0.099
-[v019] 2022-01-31 03:19:50,483 (trainer:653) INFO: 19epoch:train:433-459batch: iter_time=1.083e-04, forward_time=0.035, loss=9.589, loss_att=6.229, loss_ctc=12.948, acc=0.813, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.234e-05, train_time=0.099
-[v019] 2022-01-31 03:19:53,110 (trainer:653) INFO: 19epoch:train:460-486batch: iter_time=1.065e-04, forward_time=0.035, loss=9.969, loss_att=6.262, loss_ctc=13.675, acc=0.802, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.256e-05, train_time=0.097
-[v019] 2022-01-31 03:19:55,752 (trainer:653) INFO: 19epoch:train:487-513batch: iter_time=1.008e-04, forward_time=0.036, loss=10.146, loss_att=6.464, loss_ctc=13.829, acc=0.798, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.278e-05, train_time=0.098
-[v019] 2022-01-31 03:19:58,702 (trainer:653) INFO: 19epoch:train:514-540batch: iter_time=0.012, forward_time=0.035, loss=8.103, loss_att=5.133, loss_ctc=11.072, acc=0.820, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.299e-05, train_time=0.109
-[v019] 2022-01-31 03:20:34,831 (trainer:328) INFO: 19epoch results: [train] iter_time=0.002, forward_time=0.035, loss=9.993, loss_att=6.347, loss_ctc=13.640, acc=0.807, backward_time=0.022, optim_step_time=0.021, optim0_lr0=8.097e-05, train_time=0.100, time=55 seconds, total_count=10393, gpu_max_cached_mem_GB=5.016, [valid] loss=10.306, loss_att=7.040, loss_ctc=13.573, acc=0.814, cer=0.227, wer=0.680, cer_ctc=0.362, time=6 seconds, total_count=3382, gpu_max_cached_mem_GB=5.016, [att_plot] time=29.3 seconds, total_count=0, gpu_max_cached_mem_GB=5.016
-[v019] 2022-01-31 03:20:37,069 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc
-[v019] 2022-01-31 03:20:37,071 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/14epoch.pth
-[v019] 2022-01-31 03:20:37,071 (trainer:261) INFO: 20/20epoch started. Estimated time to finish: 1 minute and 40.55 seconds
-[v019] 2022-01-31 03:20:39,939 (trainer:653) INFO: 20epoch:train:1-27batch: iter_time=0.003, forward_time=0.037, loss=11.857, loss_att=7.618, loss_ctc=16.097, acc=0.790, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.326e-05, train_time=0.106
-[v019] 2022-01-31 03:20:42,554 (trainer:653) INFO: 20epoch:train:28-54batch: iter_time=1.068e-04, forward_time=0.035, loss=8.743, loss_att=5.574, loss_ctc=11.912, acc=0.821, backward_time=0.022, optim_step_time=0.021, optim0_lr0=8.348e-05, train_time=0.097
-[v019] 2022-01-31 03:20:45,215 (trainer:653) INFO: 20epoch:train:55-81batch: iter_time=9.621e-05, forward_time=0.036, loss=10.303, loss_att=6.452, loss_ctc=14.155, acc=0.806, backward_time=0.022, optim_step_time=0.021, optim0_lr0=8.370e-05, train_time=0.098
-[v019] 2022-01-31 03:20:47,964 (trainer:653) INFO: 20epoch:train:82-108batch: iter_time=9.649e-05, forward_time=0.037, loss=11.834, loss_att=7.367, loss_ctc=16.301, acc=0.798, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.391e-05, train_time=0.102
-[v019] 2022-01-31 03:20:50,678 (trainer:653) INFO: 20epoch:train:109-135batch: iter_time=1.029e-04, forward_time=0.036, loss=9.592, loss_att=6.207, loss_ctc=12.976, acc=0.799, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.413e-05, train_time=0.100
-[v019] 2022-01-31 03:20:53,322 (trainer:653) INFO: 20epoch:train:136-162batch: iter_time=1.114e-04, forward_time=0.036, loss=9.077, loss_att=5.706, loss_ctc=12.448, acc=0.823, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.434e-05, train_time=0.098
-[v019] 2022-01-31 03:20:55,964 (trainer:653) INFO: 20epoch:train:163-189batch: iter_time=1.009e-04, forward_time=0.035, loss=7.755, loss_att=4.925, loss_ctc=10.585, acc=0.840, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.456e-05, train_time=0.098
-[v019] 2022-01-31 03:20:58,644 (trainer:653) INFO: 20epoch:train:190-216batch: iter_time=1.029e-04, forward_time=0.036, loss=9.417, loss_att=5.776, loss_ctc=13.057, acc=0.824, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.478e-05, train_time=0.099
-[v019] 2022-01-31 03:21:01,496 (trainer:653) INFO: 20epoch:train:217-243batch: iter_time=0.004, forward_time=0.036, loss=10.331, loss_att=6.616, loss_ctc=14.045, acc=0.803, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.499e-05, train_time=0.105
-[v019] 2022-01-31 03:21:04,196 (trainer:653) INFO: 20epoch:train:244-270batch: iter_time=0.001, forward_time=0.036, loss=10.218, loss_att=6.321, loss_ctc=14.115, acc=0.810, backward_time=0.022, optim_step_time=0.021, optim0_lr0=8.521e-05, train_time=0.100
-[v019] 2022-01-31 03:21:06,784 (trainer:653) INFO: 20epoch:train:271-297batch: iter_time=1.119e-04, forward_time=0.035, loss=8.429, loss_att=5.255, loss_ctc=11.602, acc=0.813, backward_time=0.022, optim_step_time=0.021, optim0_lr0=8.542e-05, train_time=0.096
-[v019] 2022-01-31 03:21:09,437 (trainer:653) INFO: 20epoch:train:298-324batch: iter_time=1.125e-04, forward_time=0.035, loss=10.222, loss_att=6.461, loss_ctc=13.982, acc=0.808, backward_time=0.022, optim_step_time=0.021, optim0_lr0=8.564e-05, train_time=0.098
-[v019] 2022-01-31 03:21:12,070 (trainer:653) INFO: 20epoch:train:325-351batch: iter_time=9.721e-05, forward_time=0.035, loss=8.509, loss_att=5.312, loss_ctc=11.707, acc=0.827, backward_time=0.022, optim_step_time=0.021, optim0_lr0=8.586e-05, train_time=0.097
-[v019] 2022-01-31 03:21:14,756 (trainer:653) INFO: 20epoch:train:352-378batch: iter_time=1.026e-04, forward_time=0.036, loss=11.340, loss_att=7.037, loss_ctc=15.644, acc=0.798, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.607e-05, train_time=0.099
-[v019] 2022-01-31 03:21:17,444 (trainer:653) INFO: 20epoch:train:379-405batch: iter_time=1.009e-04, forward_time=0.036, loss=11.300, loss_att=7.176, loss_ctc=15.424, acc=0.799, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.629e-05, train_time=0.099
-[v019] 2022-01-31 03:21:20,083 (trainer:653) INFO: 20epoch:train:406-432batch: iter_time=9.940e-05, forward_time=0.035, loss=9.280, loss_att=5.917, loss_ctc=12.643, acc=0.816, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.650e-05, train_time=0.098
-[v019] 2022-01-31 03:21:22,721 (trainer:653) INFO: 20epoch:train:433-459batch: iter_time=9.967e-05, forward_time=0.035, loss=9.326, loss_att=5.915, loss_ctc=12.738, acc=0.807, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.672e-05, train_time=0.097
-[v019] 2022-01-31 03:21:25,346 (trainer:653) INFO: 20epoch:train:460-486batch: iter_time=9.990e-05, forward_time=0.036, loss=7.765, loss_att=5.030, loss_ctc=10.500, acc=0.820, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.694e-05, train_time=0.097
-[v019] 2022-01-31 03:21:28,001 (trainer:653) INFO: 20epoch:train:487-513batch: iter_time=1.009e-04, forward_time=0.036, loss=9.778, loss_att=6.110, loss_ctc=13.446, acc=0.809, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.715e-05, train_time=0.098
-[v019] 2022-01-31 03:21:30,673 (trainer:653) INFO: 20epoch:train:514-540batch: iter_time=9.970e-05, forward_time=0.036, loss=9.048, loss_att=5.797, loss_ctc=12.300, acc=0.811, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.737e-05, train_time=0.099
-[v019] 2022-01-31 03:22:06,790 (trainer:328) INFO: 20epoch results: [train] iter_time=5.309e-04, forward_time=0.036, loss=9.652, loss_att=6.094, loss_ctc=13.210, acc=0.812, backward_time=0.023, optim_step_time=0.021, optim0_lr0=8.534e-05, train_time=0.099, time=54.32 seconds, total_count=10940, gpu_max_cached_mem_GB=5.016, [valid] loss=10.128, loss_att=6.871, loss_ctc=13.385, acc=0.819, cer=0.226, wer=0.664, cer_ctc=0.352, time=6.42 seconds, total_count=3560, gpu_max_cached_mem_GB=5.016, [att_plot] time=28.98 seconds, total_count=0, gpu_max_cached_mem_GB=5.016
-[v019] 2022-01-31 03:22:08,820 (trainer:375) INFO: The best model has been updated: train.loss, valid.loss, train.acc, valid.acc
-[v019] 2022-01-31 03:22:08,822 (trainer:413) INFO: The model files were removed: exp/asr_train_asr_raw_en_word/15epoch.pth
-[v019] 2022-01-31 03:22:08,822 (trainer:431) INFO: The training was finished at 20 epochs
-[v019] 2022-01-31 03:22:08,845 (average_nbest_models:64) INFO: Averaging 5best models: criterion="train.loss": exp/asr_train_asr_raw_en_word/train.loss.ave_5best.pth
-[v019] 2022-01-31 03:22:10,161 (average_nbest_models:64) INFO: Averaging 5best models: criterion="valid.loss": exp/asr_train_asr_raw_en_word/valid.loss.ave_5best.pth
-[v019] 2022-01-31 03:22:10,447 (average_nbest_models:64) INFO: Averaging 5best models: criterion="train.acc": exp/asr_train_asr_raw_en_word/train.acc.ave_5best.pth
-[v019] 2022-01-31 03:22:10,723 (average_nbest_models:64) INFO: Averaging 5best models: criterion="valid.acc": exp/asr_train_asr_raw_en_word/valid.acc.ave_5best.pth
-# Accounting: begin_time=1643615463
-# Accounting: end_time=1643617332
-# Accounting: time=1869 threads=1
-# Finished at Mon Jan 31 03:22:12 EST 2022 with status 0