# Running on r099.ib.bridges2.psc.edu
# Started at Tue Dec 21 22:24:57 EST 2021
# SLURMD_NODENAME=r099
# SLURM_ARRAY_JOB_ID=5730432
# SLURM_ARRAY_TASK_COUNT=32
# SLURM_ARRAY_TASK_ID=11
# SLURM_ARRAY_TASK_MAX=32
# SLURM_ARRAY_TASK_MIN=1
# SLURM_ARRAY_TASK_STEP=1
# SLURM_CLUSTER_NAME=bridges2
# SLURM_CONF=/var/spool/slurm/d/conf-cache/slurm.conf
# SLURM_CPUS_ON_NODE=1
# SLURM_EXPORT_ENV=PATH
# SLURM_GET_USER_ENV=1
# SLURM_GTIDS=0
# SLURM_JOBID=5730464
# SLURM_JOB_ACCOUNT=cis210027p
# SLURM_JOB_CPUS_PER_NODE=1
# SLURM_JOB_GID=24886
# SLURM_JOB_ID=5730464
# SLURM_JOB_NAME=stats.sh
# SLURM_JOB_NODELIST=r099
# SLURM_JOB_NUM_NODES=1
# SLURM_JOB_PARTITION=RM-shared
# SLURM_JOB_QOS=rm
# SLURM_JOB_UID=82326
# SLURM_JOB_USER=ganesank
# SLURM_LOCALID=0
# SLURM_MEM_PER_CPU=2000
# SLURM_NNODES=1
# SLURM_NODEID=0
# SLURM_NODELIST=r099
# SLURM_NODE_ALIASES='(null)'
# SLURM_OPEN_MODE=a
# SLURM_PRIO_PROCESS=0
# SLURM_PROCID=0
# SLURM_SUBMIT_DIR=/ocean/projects/cis210027p/ganesank/karthik_new/espnet/egs2/sinhala/asr1
# SLURM_SUBMIT_HOST=br012.ib.bridges2.psc.edu
# SLURM_TASKS_PER_NODE=1
# SLURM_TASK_PID=6420
# SLURM_TOPOLOGY_ADDR=r099
# SLURM_TOPOLOGY_ADDR_PATTERN=node
# SLURM_WORKING_CLUSTER=bridges2:br003:6814:9216:109
# python3 -m espnet2.bin.asr_train --collect_stats true --use_preprocessor true --bpemodel none --token_type word --token_list data/en_token_list/word/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --train_data_path_and_name_and_type dump/raw/train/wav.scp,speech,sound --train_data_path_and_name_and_type dump/raw/train/text,text,text --valid_data_path_and_name_and_type dump/raw/valid/wav.scp,speech,sound --valid_data_path_and_name_and_type dump/raw/valid/text,text,text --train_shape_file exp/asr_stats_raw_en_word/logdir/train.11.scp --valid_shape_file exp/asr_stats_raw_en_word/logdir/valid.11.scp --output_dir exp/asr_stats_raw_en_word/logdir/stats.11 --config conf/train_asr.yaml --frontend_conf fs=16k
/ocean/projects/cis210027p/ganesank/karthik_new/espnet/tools/venv/bin/python3 /ocean/projects/cis210027p/ganesank/karthik_new/espnet/espnet2/bin/asr_train.py --collect_stats true --use_preprocessor true --bpemodel none --token_type word --token_list data/en_token_list/word/tokens.txt --non_linguistic_symbols none --cleaner none --g2p none --train_data_path_and_name_and_type dump/raw/train/wav.scp,speech,sound --train_data_path_and_name_and_type dump/raw/train/text,text,text --valid_data_path_and_name_and_type dump/raw/valid/wav.scp,speech,sound --valid_data_path_and_name_and_type dump/raw/valid/text,text,text --train_shape_file exp/asr_stats_raw_en_word/logdir/train.11.scp --valid_shape_file exp/asr_stats_raw_en_word/logdir/valid.11.scp --output_dir exp/asr_stats_raw_en_word/logdir/stats.11 --config conf/train_asr.yaml --frontend_conf fs=16k
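This is array task 11 of 32: each task receives one shard of the scp lists (here train.11.scp and valid.11.scp) and writes its statistics under stats.11, which the recipe aggregates afterwards. ESPnet's asr.sh produces these shards with its own split utilities before submission; the snippet below is only a sketch of the idea (a contiguous split; the split_scp function and file names are illustrative, not the recipe's actual code):

```python
# Sketch: shard an scp file for an N-task SLURM array (illustrative only;
# ESPnet's asr.sh uses its own split utilities, not this function).
from pathlib import Path

def split_scp(scp_path: str, out_dir: str, num_shards: int = 32) -> None:
    """Write contiguous shards train.1.scp ... train.N.scp of scp_path."""
    lines = Path(scp_path).read_text().splitlines()
    per = -(-len(lines) // num_shards)            # ceil(len / num_shards)
    for task_id in range(1, num_shards + 1):      # matches 1-based SLURM_ARRAY_TASK_ID
        shard = lines[(task_id - 1) * per : task_id * per]
        out = Path(out_dir) / f"train.{task_id}.scp"   # e.g. train.11.scp for this task
        out.write_text("\n".join(shard) + "\n")
```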
[r099] 2021-12-21 22:25:08,042 (asr:382) INFO: Vocabulary size: 40
[r099] 2021-12-21 22:25:09,019 (abs_task:1132) INFO: pytorch.version=1.8.1+cu102, cuda.available=False, cudnn.version=7605, cudnn.benchmark=False, cudnn.deterministic=True
[r099] 2021-12-21 22:25:09,024 (abs_task:1133) INFO: Model structure:
ESPnetASRModel(
  (frontend): DefaultFrontend(
    (stft): Stft(n_fft=512, win_length=512, hop_length=128, center=True, normalized=False, onesided=True)
    (frontend): Frontend()
    (logmel): LogMel(sr=16000, n_fft=512, n_mels=80, fmin=0, fmax=8000.0, htk=False)
  )
  (specaug): SpecAug(
    (time_warp): TimeWarp(window=5, mode=bicubic)
    (freq_mask): MaskAlongAxis(mask_width_range=[0, 30], num_mask=2, axis=freq)
    (time_mask): MaskAlongAxis(mask_width_range=[0, 40], num_mask=2, axis=time)
  )
  (normalize): UtteranceMVN(norm_means=True, norm_vars=False)
  (encoder): TransformerEncoder(
    (embed): Conv2dSubsampling(
      (conv): Sequential(
        (0): Conv2d(1, 256, kernel_size=(3, 3), stride=(2, 2))
        (1): ReLU()
        (2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2))
        (3): ReLU()
      )
      (out): Sequential(
        (0): Linear(in_features=4864, out_features=256, bias=True)
        (1): PositionalEncoding(
          (dropout): Dropout(p=0.1, inplace=False)
        )
      )
    )
    (encoders): MultiSequential(
      (0): EncoderLayer(
        (self_attn): MultiHeadedAttention(
          (linear_q): Linear(in_features=256, out_features=256, bias=True)
          (linear_k): Linear(in_features=256, out_features=256, bias=True)
          (linear_v): Linear(in_features=256, out_features=256, bias=True)
          (linear_out): Linear(in_features=256, out_features=256, bias=True)
          (dropout): Dropout(p=0.0, inplace=False)
        )
        (feed_forward): PositionwiseFeedForward(
          (w_1): Linear(in_features=256, out_features=2048, bias=True)
          (w_2): Linear(in_features=2048, out_features=256, bias=True)
          (dropout): Dropout(p=0.1, inplace=False)
          (activation): ReLU()
        )
        (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True)
        (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True)
        (dropout): Dropout(p=0.1, inplace=False)
      )
      (1) through (11): 11 further EncoderLayer blocks, identical in structure to (0)
    )
    (after_norm): LayerNorm((256,), eps=1e-12, elementwise_affine=True)
  )
  (decoder): TransformerDecoder(
    (embed): Sequential(
      (0): Embedding(40, 256)
      (1): PositionalEncoding(
        (dropout): Dropout(p=0.1, inplace=False)
      )
    )
    (after_norm): LayerNorm((256,), eps=1e-12, elementwise_affine=True)
    (output_layer): Linear(in_features=256, out_features=40, bias=True)
    (decoders): MultiSequential(
      (0): DecoderLayer(
        (self_attn): MultiHeadedAttention(
          (linear_q): Linear(in_features=256, out_features=256, bias=True)
          (linear_k): Linear(in_features=256, out_features=256, bias=True)
          (linear_v): Linear(in_features=256, out_features=256, bias=True)
          (linear_out): Linear(in_features=256, out_features=256, bias=True)
          (dropout): Dropout(p=0.0, inplace=False)
        )
        (src_attn): MultiHeadedAttention(
          (linear_q): Linear(in_features=256, out_features=256, bias=True)
          (linear_k): Linear(in_features=256, out_features=256, bias=True)
          (linear_v): Linear(in_features=256, out_features=256, bias=True)
          (linear_out): Linear(in_features=256, out_features=256, bias=True)
          (dropout): Dropout(p=0.0, inplace=False)
        )
        (feed_forward): PositionwiseFeedForward(
          (w_1): Linear(in_features=256, out_features=2048, bias=True)
          (w_2): Linear(in_features=2048, out_features=256, bias=True)
          (dropout): Dropout(p=0.1, inplace=False)
          (activation): ReLU()
        )
        (norm1): LayerNorm((256,), eps=1e-12, elementwise_affine=True)
        (norm2): LayerNorm((256,), eps=1e-12, elementwise_affine=True)
        (norm3): LayerNorm((256,), eps=1e-12, elementwise_affine=True)
        (dropout): Dropout(p=0.1, inplace=False)
      )
      (1) through (5): 5 further DecoderLayer blocks, identical in structure to (0)
    )
  )
  (ctc): CTC(
    (ctc_lo): Linear(in_features=256, out_features=40, bias=True)
    (ctc_loss): CTCLoss()
  )
  (criterion_att): LabelSmoothingLoss(
    (criterion): KLDivLoss()
  )
)
Model summary:
    Class Name: ESPnetASRModel
    Total Number of model parameters: 27.12 M
    Number of trainable parameters: 27.12 M (100.0%)
    Size: 108.49 MB
    Type: torch.float32
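The summary's 27.12 M parameters and 108.49 MB (float32) can be re-derived from the dimensions printed above. The back-of-the-envelope count below is a sketch that tallies only the weight and bias tensors listed in the dump (positional encodings carry no parameters); it reproduces both figures:

```python
# Back-of-the-envelope parameter count from the dimensions in the model dump.
d, ff, vocab = 256, 2048, 40

def linear(n_in, n_out):                     # weight + bias
    return n_in * n_out + n_out

attn = 4 * linear(d, d)                      # linear_q/k/v/out
ffn = linear(d, ff) + linear(ff, d)          # w_1 + w_2
norm = 2 * d                                 # LayerNorm weight + bias

enc_embed = (1 * d * 9 + d) + (d * d * 9 + d) + linear(4864, d)  # two 3x3 convs + out
enc = enc_embed + 12 * (attn + ffn + 2 * norm) + norm            # 12 layers + after_norm
dec = vocab * d + 6 * (2 * attn + ffn + 3 * norm) + norm + linear(d, vocab)
ctc = linear(d, vocab)

total = enc + dec + ctc
print(total, f"{total / 1e6:.2f} M", f"{total * 4 / 1e6:.2f} MB")
# -> 27123280  27.12 M  108.49 MB   (matches the log's model summary)
```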
[r099] 2021-12-21 22:25:09,024 (abs_task:1136) INFO: Optimizer:
Adam (
Parameter Group 0
    amsgrad: False
    betas: (0.9, 0.999)
    eps: 1e-08
    initial_lr: 0.0002
    lr: 8e-09
    weight_decay: 0
)
[r099] 2021-12-21 22:25:09,024 (abs_task:1137) INFO: Scheduler: WarmupLR(warmup_steps=25000)
[r099] 2021-12-21 22:25:09,027 (abs_task:1146) INFO: Saving the configuration in exp/asr_stats_raw_en_word/logdir/stats.11/config.yaml
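The reported lr: 8e-09 is not a typo: under the WarmupLR scheduler the optimizer starts at base_lr / warmup_steps and ramps up to base_lr at step 25000. A quick check, assuming the Noam-style formula used by ESPnet2's WarmupLR, lr(step) = base_lr * warmup^0.5 * min(step^-0.5, step * warmup^-1.5):

```python
# WarmupLR (Noam-style) schedule check, using the values from the log.
base_lr, warmup = 0.0002, 25000

def lr(step: int) -> float:
    return base_lr * warmup**0.5 * min(step**-0.5, step * warmup**-1.5)

print(lr(1))       # ~8e-09: the 'lr' reported for the optimizer at step 1
print(lr(25000))   # 0.0002: peak equals initial_lr at the end of warmup
print(lr(100000))  # 1e-04: decays as step**-0.5 afterwards
```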
[r099] 2021-12-21 22:25:09,037 (abs_task:1157) INFO: Namespace(
    config='conf/train_asr.yaml', print_config=False, log_level='INFO', dry_run=False, iterator_type='sequence',
    output_dir='exp/asr_stats_raw_en_word/logdir/stats.11', ngpu=0, seed=0, num_workers=1, num_att_plot=3,
    dist_backend='nccl', dist_init_method='env://', dist_world_size=None, dist_rank=None, local_rank=None,
    dist_master_addr=None, dist_master_port=None, dist_launcher=None, multiprocessing_distributed=False,
    unused_parameters=False, sharded_ddp=False, cudnn_enabled=True, cudnn_benchmark=False, cudnn_deterministic=True,
    collect_stats=True, write_collected_feats=False, max_epoch=50, patience=None,
    val_scheduler_criterion=('valid', 'loss'), early_stopping_criterion=('valid', 'loss', 'min'),
    best_model_criterion=[('train', 'loss', 'min'), ('valid', 'loss', 'min'), ('train', 'acc', 'max'), ('valid', 'acc', 'max')],
    keep_nbest_models=5, grad_clip=5.0, grad_clip_type=2.0, grad_noise=False, accum_grad=1, no_forward_run=False,
    resume=False, train_dtype='float32', use_amp=False, log_interval=None, use_tensorboard=True, use_wandb=False,
    wandb_project=None, wandb_id=None, wandb_entity=None, wandb_name=None, wandb_model_log_interval=-1,
    detect_anomaly=False, pretrain_path=None, init_param=[], ignore_init_mismatch=False, freeze_param=[],
    num_iters_per_epoch=None, batch_size=20, valid_batch_size=None, batch_bins=1000000, valid_batch_bins=None,
    train_shape_file=['exp/asr_stats_raw_en_word/logdir/train.11.scp'],
    valid_shape_file=['exp/asr_stats_raw_en_word/logdir/valid.11.scp'],
    batch_type='folded', valid_batch_type=None, fold_length=[], sort_in_batch='descending', sort_batch='descending',
    multiple_iterator=False, chunk_length=500, chunk_shift_ratio=0.5, num_cache_chunks=1024,
    train_data_path_and_name_and_type=[('dump/raw/train/wav.scp', 'speech', 'sound'), ('dump/raw/train/text', 'text', 'text')],
    valid_data_path_and_name_and_type=[('dump/raw/valid/wav.scp', 'speech', 'sound'), ('dump/raw/valid/text', 'text', 'text')],
    allow_variable_data_keys=False, max_cache_size=0.0, max_cache_fd=32, valid_max_cache_size=None,
    optim='adam', optim_conf={'lr': 0.0002}, scheduler='warmuplr', scheduler_conf={'warmup_steps': 25000},
    token_list=['<blank>', '<unk>', '්', 'න', 'ම', 'ක', 'ල', 'ි', 'ු', 'ග', 'ේ', 'ර', 'ත', 'ද', 'ව', 'ට', 'ඕ', 'ී', 'ප', 'ය', 'ෙ', 'ස', 'ණ', 'ා', 'ැ', 'RequestAcc.balance', 'Moneywithdraw', 'Moneydeposit', 'Moneytransfer', 'Billpayments', 'බ', 'ඉ', 'ශ', 'ෂ', 'ඩ', 'Creditcardpayments', 'එ', '\u200d', 'හ', '<sos/eos>'],
    init=None, input_size=None,
    ctc_conf={'dropout_rate': 0.0, 'ctc_type': 'builtin', 'reduce': True, 'ignore_nan_grad': True},
    model_conf={'ctc_weight': 0.5, 'ignore_id': -1, 'lsm_weight': 0.0, 'length_normalized_loss': False, 'report_cer': True, 'report_wer': True, 'sym_space': '<space>', 'sym_blank': '<blank>', 'extract_feats_in_collect_stats': True},
    use_preprocessor=True, token_type='word', bpemodel=None, non_linguistic_symbols=None, cleaner=None, g2p=None,
    speech_volume_normalize=None, rir_scp=None, rir_apply_prob=1.0, noise_scp=None, noise_apply_prob=1.0, noise_db_range='13_15',
    frontend='default', frontend_conf={'fs': '16k'},
    specaug='specaug', specaug_conf={'apply_time_warp': True, 'time_warp_window': 5, 'time_warp_mode': 'bicubic', 'apply_freq_mask': True, 'freq_mask_width_range': [0, 30], 'num_freq_mask': 2, 'apply_time_mask': True, 'time_mask_width_range': [0, 40], 'num_time_mask': 2},
    normalize='utterance_mvn', normalize_conf={}, preencoder=None, preencoder_conf={},
    encoder='transformer', encoder_conf={'output_size': 256, 'attention_heads': 4, 'linear_units': 2048, 'num_blocks': 12, 'dropout_rate': 0.1, 'positional_dropout_rate': 0.1, 'attention_dropout_rate': 0.0, 'input_layer': 'conv2d', 'normalize_before': True},
    postencoder=None, postencoder_conf={},
    decoder='transformer', decoder_conf={'attention_heads': 4, 'linear_units': 2048, 'num_blocks': 6, 'dropout_rate': 0.1, 'positional_dropout_rate': 0.1, 'self_attention_dropout_rate': 0.0, 'src_attention_dropout_rate': 0.0},
    required=['output_dir', 'token_list'], version='0.10.3a3', distributed=False)
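For reference, the DefaultFrontend above (Stft followed by LogMel, with frontend_conf fs=16k) is an ordinary 80-dimensional log-mel pipeline. A rough torchaudio stand-in that mirrors the dump's parameters is sketched below; it is not bit-identical to ESPnet's implementation (windowing, power, and flooring details differ), only the same shape of computation:

```python
# Rough stand-in for DefaultFrontend (Stft -> LogMel); parameters mirror the dump.
import torch
import torchaudio

melspec = torchaudio.transforms.MelSpectrogram(
    sample_rate=16000, n_fft=512, win_length=512, hop_length=128,
    f_min=0.0, f_max=8000.0, n_mels=80,
    mel_scale="slaney", norm="slaney",    # approximates htk=False in the dump
)
wav = torch.randn(1, 16000)               # 1 s of dummy audio at 16 kHz
feats = torch.log(melspec(wav) + 1e-10)   # (1, 80, frames) log-mel features
print(feats.shape)                        # -> torch.Size([1, 80, 126])
```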
# Accounting: begin_time=1640143497
# Accounting: end_time=1640143519
# Accounting: time=22 threads=1
# Finished at Tue Dec 21 22:25:19 EST 2021 with status 0