diff --git "a/exp/log/log-train-2024-01-15-11-07-41-0" "b/exp/log/log-train-2024-01-15-11-07-41-0" new file mode 100644--- /dev/null +++ "b/exp/log/log-train-2024-01-15-11-07-41-0" @@ -0,0 +1,4902 @@ +2024-01-15 11:07:41,462 INFO [train.py:1062] (0/2) Training started +2024-01-15 11:07:41,475 INFO [train.py:1072] (0/2) Device: cuda:0 +2024-01-15 11:07:41,478 INFO [train.py:1081] (0/2) {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '2989b0b1186fa6022932804f5b39fbb2781ebf42', 'k2-git-date': 'Fri Nov 24 11:34:10 2023', 'lhotse-version': '1.19.0.dev+git.d1ae9c05.dirty', 'torch-version': '1.11.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.9', 'icefall-git-branch': 'dev/aishell-zipformer-bbpe', 'icefall-git-sha1': 'bce81394-clean', 'icefall-git-date': 'Thu Jan 11 09:56:01 2024', 'icefall-path': '/star-home/jinzengrui/lib/miniconda3/envs/dev39/lib/python3.9/site-packages/icefall-1.0-py3.9.egg', 'k2-path': '/star-home/jinzengrui/lib/miniconda3/envs/dev39/lib/python3.9/site-packages/k2-1.24.4.dev20231207+cuda10.2.torch1.11.0-py3.9-linux-x86_64.egg/k2/__init__.py', 'lhotse-path': '/star-home/jinzengrui/lib/miniconda3/envs/dev39/lib/python3.9/site-packages/lhotse-1.19.0.dev0+git.d1ae9c05.dirty-py3.9.egg/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-1-1207150822-75498b8c5f-55j4z', 'IP address': '10.177.74.211'}, 'world_size': 2, 'master_port': 12354, 'tensorboard': True, 'num_epochs': 40, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan'), 'bpe_model': 'data/lang_bbpe_500/bbpe.model', 'base_lr': 0.045, 'lr_batches': 7500, 'lr_epochs': 10.0, 'ref_duration': 600, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 4000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 1000, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 20, 'enable_musan': True, 'blank_id': 0, 'vocab_size': 500} +2024-01-15 11:07:41,478 INFO [train.py:1083] (0/2) About to create model +2024-01-15 11:07:41,998 INFO [train.py:1087] (0/2) Number of model parameters: 65549011 +2024-01-15 11:07:46,531 INFO [train.py:1102] (0/2) Using DDP +2024-01-15 11:07:46,830 INFO [asr_datamodule.py:363] (0/2) About to get train cuts +2024-01-15 11:07:46,847 INFO [asr_datamodule.py:371] (0/2) About to get dev cuts +2024-01-15 11:07:46,849 
INFO [asr_datamodule.py:194] (0/2) About to get Musan cuts +2024-01-15 11:07:49,110 INFO [asr_datamodule.py:199] (0/2) Enable MUSAN +2024-01-15 11:07:49,110 INFO [asr_datamodule.py:222] (0/2) Enable SpecAugment +2024-01-15 11:07:49,110 INFO [asr_datamodule.py:223] (0/2) Time warp factor: 20 +2024-01-15 11:07:49,110 INFO [asr_datamodule.py:233] (0/2) Num frame mask: 10 +2024-01-15 11:07:49,111 INFO [asr_datamodule.py:246] (0/2) About to create train dataset +2024-01-15 11:07:49,111 INFO [asr_datamodule.py:272] (0/2) Using DynamicBucketingSampler. +2024-01-15 11:07:53,399 INFO [asr_datamodule.py:287] (0/2) About to create train dataloader +2024-01-15 11:07:53,400 INFO [asr_datamodule.py:312] (0/2) About to create dev dataset +2024-01-15 11:07:54,125 INFO [asr_datamodule.py:329] (0/2) About to create dev dataloader +2024-01-15 11:08:17,810 INFO [train.py:994] (0/2) Epoch 1, batch 0, loss[loss=7.352, simple_loss=6.692, pruned_loss=6.591, over 24025.00 frames. ], tot_loss[loss=7.352, simple_loss=6.692, pruned_loss=6.591, over 24025.00 frames. ], batch size: 131, lr: 2.25e-02, grad_scale: 1.0 +2024-01-15 11:08:17,811 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 11:08:38,147 INFO [train.py:1026] (0/2) Epoch 1, validation: loss=7.38, simple_loss=6.719, pruned_loss=6.6, over 1622729.00 frames. +2024-01-15 11:08:38,147 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 13745MB +2024-01-15 11:08:42,104 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=0.0, ans=0.9 +2024-01-15 11:08:44,515 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=0.0, ans=0.2 +2024-01-15 11:08:48,456 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=0.0, ans=0.3 +2024-01-15 11:08:52,465 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=33.333333333333336, ans=0.09925 +2024-01-15 11:08:54,740 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 4.492e+03 4.857e+03 5.193e+03 6.740e+03 7.683e+03, threshold=2.077e+04, percent-clipped=0.0 +2024-01-15 11:09:01,908 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=33.333333333333336, ans=0.19875 +2024-01-15 11:09:02,413 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.25 vs. limit=3.005 +2024-01-15 11:09:09,681 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 5.412e+02 1.511e+03 4.492e+03 5.717e+03 7.711e+03, threshold=1.797e+04, percent-clipped=0.0 +2024-01-15 11:09:11,430 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=66.66666666666667, ans=0.496875 +2024-01-15 11:09:14,804 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=260.30 vs. limit=5.033333333333333 +2024-01-15 11:09:21,430 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=299.18 vs. 
limit=7.5375 +2024-01-15 11:09:26,567 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=258.22 vs. limit=7.5375 +2024-01-15 11:09:38,478 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 5.412e+02 9.988e+02 1.994e+03 4.935e+03 9.600e+03, threshold=7.974e+03, percent-clipped=0.0 +2024-01-15 11:09:41,546 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=133.33333333333334, ans=0.195 +2024-01-15 11:09:48,215 INFO [train.py:994] (0/2) Epoch 1, batch 50, loss[loss=1.266, simple_loss=1.139, pruned_loss=1.148, over 24424.00 frames. ], tot_loss[loss=3.058, simple_loss=2.816, pruned_loss=2.369, over 1082004.28 frames. ], batch size: 250, lr: 2.48e-02, grad_scale: 0.25 +2024-01-15 11:09:50,023 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=103.97 vs. limit=7.5625 +2024-01-15 11:09:51,640 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=126.58 vs. limit=7.5625 +2024-01-15 11:09:59,424 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.23 vs. limit=7.625 +2024-01-15 11:10:08,078 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=256.75 vs. limit=7.65 +2024-01-15 11:10:24,254 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=82.24 vs. limit=7.5875 +2024-01-15 11:10:24,951 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=233.33333333333334, ans=0.4890625 +2024-01-15 11:10:36,635 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=266.6666666666667, ans=0.4875 +2024-01-15 11:10:38,739 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=248.74 vs. limit=5.133333333333334 +2024-01-15 11:10:41,321 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=132.58 vs. limit=5.133333333333334 +2024-01-15 11:10:49,719 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=93.14 vs. limit=7.725 +2024-01-15 11:10:54,074 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=300.0, ans=0.098125 +2024-01-15 11:10:55,337 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=300.0, ans=0.4859375 +2024-01-15 11:10:55,457 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=300.0, ans=0.09325 +2024-01-15 11:11:01,207 INFO [train.py:994] (0/2) Epoch 1, batch 100, loss[loss=0.9803, simple_loss=0.8548, pruned_loss=1.011, over 24188.00 frames. ], tot_loss[loss=1.998, simple_loss=1.817, pruned_loss=1.681, over 1896641.06 frames. 
], batch size: 140, lr: 2.70e-02, grad_scale: 0.5 +2024-01-15 11:11:05,535 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 7.447e+01 1.465e+02 3.596e+02 1.658e+03 9.600e+03, threshold=7.192e+02, percent-clipped=2.0 +2024-01-15 11:11:24,660 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=366.6666666666667, ans=5.229166666666667 +2024-01-15 11:11:28,356 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=112.34 vs. limit=5.183333333333334 +2024-01-15 11:11:42,713 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=7.46 vs. limit=4.16 +2024-01-15 11:11:49,849 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=37.02 vs. limit=7.6625 +2024-01-15 11:11:55,068 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=223.81 vs. limit=7.6625 +2024-01-15 11:11:57,638 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.09 vs. limit=7.825 +2024-01-15 11:12:06,790 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=41.53 vs. limit=7.85 +2024-01-15 11:12:07,705 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=466.6666666666667, ans=0.478125 +2024-01-15 11:12:10,351 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=466.6666666666667, ans=0.478125 +2024-01-15 11:12:11,063 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=42.28 vs. limit=5.116666666666666 +2024-01-15 11:12:14,357 INFO [train.py:994] (0/2) Epoch 1, batch 150, loss[loss=0.9048, simple_loss=0.775, pruned_loss=0.9468, over 24497.00 frames. ], tot_loss[loss=1.561, simple_loss=1.401, pruned_loss=1.392, over 2536059.96 frames. ], batch size: 165, lr: 2.93e-02, grad_scale: 0.5 +2024-01-15 11:12:15,016 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=35.10 vs. limit=7.875 +2024-01-15 11:12:16,471 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=88.77 vs. limit=7.6875 +2024-01-15 11:12:22,149 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=6.28 vs. limit=4.2 +2024-01-15 11:12:24,226 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=24.99 vs. 
limit=7.875 +2024-01-15 11:12:26,861 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=4.538e+00 +2024-01-15 11:12:30,426 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=533.3333333333334, ans=0.09666666666666668 +2024-01-15 11:12:32,505 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=312.48 vs. limit=5.266666666666667 +2024-01-15 11:12:43,553 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=15.57 vs. limit=5.283333333333333 +2024-01-15 11:12:43,668 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=7.33 vs. limit=4.226666666666667 +2024-01-15 11:12:51,824 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=58.04 vs. limit=7.7125 +2024-01-15 11:12:53,639 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=265.14 vs. limit=5.283333333333333 +2024-01-15 11:12:57,377 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=125.80 vs. limit=7.7125 +2024-01-15 11:12:59,231 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=17.59 vs. limit=7.95 +2024-01-15 11:12:59,252 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=201.26 vs. limit=5.3 +2024-01-15 11:13:10,870 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=29.70 vs. limit=7.725 +2024-01-15 11:13:15,070 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.90 vs. limit=7.975 +2024-01-15 11:13:16,517 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=359.38 vs. limit=5.316666666666666 +2024-01-15 11:13:19,008 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten.whitening_limit, batch_count=633.3333333333334, ans=7.7375 +2024-01-15 11:13:25,101 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=633.3333333333334, ans=0.20950000000000002 +2024-01-15 11:13:30,000 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=10.49 vs. limit=8.0 +2024-01-15 11:13:30,347 INFO [train.py:994] (0/2) Epoch 1, batch 200, loss[loss=0.8627, simple_loss=0.7363, pruned_loss=0.8579, over 24538.00 frames. ], tot_loss[loss=1.318, simple_loss=1.169, pruned_loss=1.216, over 3047037.11 frames. ], batch size: 236, lr: 3.15e-02, grad_scale: 1.0 +2024-01-15 11:13:31,472 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=11.64 vs. 
limit=4.266666666666667 +2024-01-15 11:13:34,537 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 7.780e+01 1.010e+02 1.260e+02 1.654e+02 3.214e+02, threshold=2.519e+02, percent-clipped=0.0 +2024-01-15 11:13:39,722 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=76.00 vs. limit=7.75 +2024-01-15 11:13:41,848 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 11:14:01,200 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=733.3333333333334, ans=0.465625 +2024-01-15 11:14:04,633 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten.whitening_limit, batch_count=733.3333333333334, ans=8.05 +2024-01-15 11:14:08,664 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.94 vs. limit=8.05 +2024-01-15 11:14:08,910 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten.whitening_limit, batch_count=733.3333333333334, ans=8.05 +2024-01-15 11:14:11,571 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=136.56 vs. limit=7.775 +2024-01-15 11:14:18,327 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.92 vs. limit=8.075 +2024-01-15 11:14:18,394 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=13.07 vs. limit=7.7875 +2024-01-15 11:14:18,710 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=340.47 vs. limit=5.383333333333333 +2024-01-15 11:14:22,864 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.55 vs. limit=5.191666666666666 +2024-01-15 11:14:26,273 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=12.83 vs. limit=7.7875 +2024-01-15 11:14:42,638 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=139.16 vs. limit=7.8125 +2024-01-15 11:14:43,002 INFO [train.py:994] (0/2) Epoch 1, batch 250, loss[loss=0.8531, simple_loss=0.724, pruned_loss=0.8199, over 24623.00 frames. ], tot_loss[loss=1.167, simple_loss=1.026, pruned_loss=1.089, over 3437447.47 frames. ], batch size: 199, lr: 3.38e-02, grad_scale: 1.0 +2024-01-15 11:14:47,871 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=10.78 vs. 
limit=5.416666666666667 +2024-01-15 11:14:48,954 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=833.3333333333334, ans=0.4609375 +2024-01-15 11:14:50,334 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=833.3333333333334, ans=0.4609375 +2024-01-15 11:15:06,071 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=26.79 vs. limit=7.825 +2024-01-15 11:15:09,062 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=25.99 vs. limit=7.825 +2024-01-15 11:15:09,758 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=866.6666666666666, ans=0.0805 +2024-01-15 11:15:17,136 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.70 vs. limit=8.175 +2024-01-15 11:15:22,529 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=13.30 vs. limit=7.8375 +2024-01-15 11:15:22,676 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=24.14 vs. limit=7.8375 +2024-01-15 11:15:24,312 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=900.0, ans=0.4578125 +2024-01-15 11:15:28,492 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=933.3333333333334, ans=0.165 +2024-01-15 11:15:29,145 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=22.30 vs. limit=7.85 +2024-01-15 11:15:29,928 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=933.3333333333334, ans=0.8673333333333334 +2024-01-15 11:15:33,403 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=933.3333333333334, ans=0.45625 +2024-01-15 11:15:36,397 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=17.62 vs. limit=8.2 +2024-01-15 11:15:36,834 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=11.59 vs. limit=5.233333333333333 +2024-01-15 11:15:39,155 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.38 vs. limit=8.2 +2024-01-15 11:15:39,663 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=67.95 vs. limit=7.85 +2024-01-15 11:15:44,740 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=23.81 vs. limit=7.8625 +2024-01-15 11:15:49,213 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=56.55 vs. 
limit=7.8625 +2024-01-15 11:15:50,007 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=966.6666666666666, ans=0.29033333333333333 +2024-01-15 11:15:50,439 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten.whitening_limit, batch_count=966.6666666666666, ans=7.8625 +2024-01-15 11:15:50,463 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=8.28 vs. limit=5.241666666666666 +2024-01-15 11:15:55,982 INFO [train.py:994] (0/2) Epoch 1, batch 300, loss[loss=0.8083, simple_loss=0.6798, pruned_loss=0.7632, over 24335.00 frames. ], tot_loss[loss=1.066, simple_loss=0.9298, pruned_loss=0.9965, over 3740171.16 frames. ], batch size: 153, lr: 3.60e-02, grad_scale: 2.0 +2024-01-15 11:15:56,364 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer_na.min_abs, batch_count=1000.0, ans=0.008 +2024-01-15 11:15:57,684 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=1000.0, ans=0.453125 +2024-01-15 11:16:00,790 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 9.498e+01 1.343e+02 1.625e+02 2.207e+02 3.846e+02, threshold=3.251e+02, percent-clipped=10.0 +2024-01-15 11:16:04,016 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=1000.0, ans=0.375 +2024-01-15 11:16:08,572 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=28.08 vs. limit=7.875 +2024-01-15 11:16:10,798 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1033.3333333333333, ans=0.2896666666666667 +2024-01-15 11:16:14,304 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=14.02 vs. limit=5.516666666666667 +2024-01-15 11:16:17,134 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=13.09 vs. limit=7.8875 +2024-01-15 11:16:18,059 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.65 vs. limit=8.275 +2024-01-15 11:16:19,262 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=1033.3333333333333, ans=0.23966666666666667 +2024-01-15 11:16:24,807 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.58 vs. limit=8.3 +2024-01-15 11:16:25,960 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.44 vs. limit=8.3 +2024-01-15 11:16:29,652 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.19 vs. limit=7.9 +2024-01-15 11:16:30,834 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=7.91 vs. 
limit=4.426666666666667 +2024-01-15 11:16:34,969 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=4.53 vs. limit=4.426666666666667 +2024-01-15 11:16:38,891 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=1100.0, ans=0.4484375 +2024-01-15 11:16:39,321 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.44 vs. limit=5.275 +2024-01-15 11:16:41,676 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=1100.0, ans=0.0465625 +2024-01-15 11:16:47,778 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.71 vs. limit=5.275 +2024-01-15 11:16:50,358 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.02 vs. limit=5.55 +2024-01-15 11:16:54,426 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.11 vs. limit=8.35 +2024-01-15 11:17:01,751 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=9.86 vs. limit=7.925 +2024-01-15 11:17:09,380 INFO [train.py:994] (0/2) Epoch 1, batch 350, loss[loss=0.835, simple_loss=0.6991, pruned_loss=0.7647, over 24520.00 frames. ], tot_loss[loss=0.994, simple_loss=0.8604, pruned_loss=0.9275, over 3974339.43 frames. ], batch size: 243, lr: 3.83e-02, grad_scale: 2.0 +2024-01-15 11:17:10,175 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=11.69 vs. limit=7.9375 +2024-01-15 11:17:13,884 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=1166.6666666666667, ans=0.4453125 +2024-01-15 11:17:18,450 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=28.17 vs. limit=7.9375 +2024-01-15 11:17:18,621 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=23.63 vs. limit=7.9375 +2024-01-15 11:17:21,652 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=38.90 vs. limit=7.9375 +2024-01-15 11:17:30,899 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=14.50 vs. limit=7.95 +2024-01-15 11:17:31,004 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=28.76 vs. limit=7.95 +2024-01-15 11:17:31,244 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=9.71 vs. limit=5.6 +2024-01-15 11:17:49,634 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=21.10 vs. 
limit=7.9625 +2024-01-15 11:17:53,687 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=26.31 vs. limit=7.975 +2024-01-15 11:18:07,817 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1300.0, ans=0.287 +2024-01-15 11:18:07,818 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=1300.0, ans=0.8545 +2024-01-15 11:18:09,618 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=16.12 vs. limit=8.475 +2024-01-15 11:18:22,122 INFO [train.py:994] (0/2) Epoch 1, batch 400, loss[loss=0.8239, simple_loss=0.6852, pruned_loss=0.7395, over 24452.00 frames. ], tot_loss[loss=0.9438, simple_loss=0.8102, pruned_loss=0.8762, over 4159671.80 frames. ], batch size: 267, lr: 4.05e-02, grad_scale: 4.0 +2024-01-15 11:18:26,105 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.116e+02 1.629e+02 2.109e+02 2.635e+02 4.747e+02, threshold=4.218e+02, percent-clipped=14.0 +2024-01-15 11:18:39,769 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=1366.6666666666667, ans=0.06925 +2024-01-15 11:18:50,000 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn2.whiten.whitening_limit, batch_count=1400.0, ans=8.55 +2024-01-15 11:18:51,230 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=21.21 vs. limit=8.025 +2024-01-15 11:19:00,864 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=25.24 vs. limit=8.025 +2024-01-15 11:19:00,938 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.66 vs. limit=4.5600000000000005 +2024-01-15 11:19:08,837 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=1433.3333333333333, ans=5.895833333333333 +2024-01-15 11:19:08,857 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=1433.3333333333333, ans=0.7643333333333333 +2024-01-15 11:19:21,356 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=1466.6666666666667, ans=0.43125 +2024-01-15 11:19:21,853 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=6.40 vs. limit=4.586666666666667 +2024-01-15 11:19:34,315 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=107.56 vs. limit=5.75 +2024-01-15 11:19:34,829 INFO [train.py:994] (0/2) Epoch 1, batch 450, loss[loss=0.7888, simple_loss=0.6476, pruned_loss=0.707, over 24283.00 frames. ], tot_loss[loss=0.9065, simple_loss=0.7717, pruned_loss=0.8354, over 4310503.86 frames. ], batch size: 147, lr: 4.28e-02, grad_scale: 4.0 +2024-01-15 11:19:38,240 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=20.31 vs. 
limit=8.0625 +2024-01-15 11:19:43,076 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=1500.0, ans=0.4296875 +2024-01-15 11:19:53,406 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=1533.3333333333333, ans=0.223 +2024-01-15 11:19:53,907 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=33.40 vs. limit=8.075 +2024-01-15 11:19:55,637 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=13.46 vs. limit=8.075 +2024-01-15 11:19:58,109 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.69 vs. limit=8.075 +2024-01-15 11:20:03,701 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=1566.6666666666667, ans=0.2235 +2024-01-15 11:20:05,488 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=15.39 vs. limit=8.0875 +2024-01-15 11:20:12,218 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=62.30 vs. limit=8.0875 +2024-01-15 11:20:13,231 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=1566.6666666666667, ans=0.4265625 +2024-01-15 11:20:14,521 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1566.6666666666667, ans=0.2843333333333333 +2024-01-15 11:20:15,316 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.94 vs. limit=5.783333333333333 +2024-01-15 11:20:16,547 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.54 vs. limit=5.8 +2024-01-15 11:20:19,581 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.39 vs. limit=8.1 +2024-01-15 11:20:20,077 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=17.70 vs. limit=8.1 +2024-01-15 11:20:21,147 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=46.59 vs. limit=8.1 +2024-01-15 11:20:29,939 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=57.04 vs. limit=8.1 +2024-01-15 11:20:35,472 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=23.62 vs. limit=8.1125 +2024-01-15 11:20:42,285 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=21.78 vs. 
limit=8.1125 +2024-01-15 11:20:45,778 INFO [train.py:994] (0/2) Epoch 1, batch 500, loss[loss=0.7798, simple_loss=0.6414, pruned_loss=0.6723, over 24333.00 frames. ], tot_loss[loss=0.8801, simple_loss=0.7433, pruned_loss=0.8033, over 4427856.60 frames. ], batch size: 285, lr: 4.49e-02, grad_scale: 8.0 +2024-01-15 11:20:49,588 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=14.54 vs. limit=8.125 +2024-01-15 11:20:49,728 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=4.54 vs. limit=4.666666666666667 +2024-01-15 11:20:50,436 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.250e+02 1.671e+02 2.007e+02 2.442e+02 4.981e+02, threshold=4.014e+02, percent-clipped=2.0 +2024-01-15 11:20:59,762 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=1700.0, ans=0.4203125 +2024-01-15 11:21:07,050 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=19.58 vs. limit=8.1375 +2024-01-15 11:21:15,023 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1733.3333333333333, ans=0.2826666666666667 +2024-01-15 11:21:15,456 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=22.62 vs. limit=8.15 +2024-01-15 11:21:25,159 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=1733.3333333333333, ans=0.41875 +2024-01-15 11:21:29,211 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=116.85 vs. limit=8.1625 +2024-01-15 11:21:33,054 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=21.99 vs. limit=8.1625 +2024-01-15 11:21:35,422 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=1.768e+01 +2024-01-15 11:21:37,133 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=18.75 vs. limit=8.1625 +2024-01-15 11:21:38,407 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=19.83 vs. limit=8.1625 +2024-01-15 11:21:39,897 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.40 vs. limit=5.441666666666666 +2024-01-15 11:21:52,752 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=37.46 vs. limit=8.175 +2024-01-15 11:21:53,911 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=1800.0, ans=0.415625 +2024-01-15 11:21:57,919 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=16.17 vs. 
limit=8.1875 +2024-01-15 11:21:58,270 INFO [train.py:994] (0/2) Epoch 1, batch 550, loss[loss=0.7879, simple_loss=0.6417, pruned_loss=0.6741, over 24518.00 frames. ], tot_loss[loss=0.8571, simple_loss=0.7187, pruned_loss=0.7733, over 4503567.44 frames. ], batch size: 187, lr: 4.49e-02, grad_scale: 8.0 +2024-01-15 11:22:11,051 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=1866.6666666666667, ans=0.13 +2024-01-15 11:22:12,589 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=9.13 vs. limit=8.2 +2024-01-15 11:22:17,154 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=1866.6666666666667, ans=0.2813333333333333 +2024-01-15 11:22:28,801 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=1900.0, ans=0.8335 +2024-01-15 11:22:31,642 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=1900.0, ans=0.4109375 +2024-01-15 11:22:32,104 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.29 vs. limit=8.925 +2024-01-15 11:22:41,430 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=24.76 vs. limit=8.95 +2024-01-15 11:22:48,789 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=8.03 vs. limit=5.966666666666667 +2024-01-15 11:22:49,014 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.37 vs. limit=4.773333333333333 +2024-01-15 11:22:51,391 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=1933.3333333333333, ans=0.409375 +2024-01-15 11:22:51,991 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=11.26 vs. limit=8.95 +2024-01-15 11:22:52,767 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=1933.3333333333333, ans=0.8323333333333334 +2024-01-15 11:22:56,238 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=1966.6666666666667, ans=0.12625 +2024-01-15 11:23:10,026 INFO [train.py:994] (0/2) Epoch 1, batch 600, loss[loss=0.7984, simple_loss=0.6497, pruned_loss=0.6648, over 24483.00 frames. ], tot_loss[loss=0.8407, simple_loss=0.7004, pruned_loss=0.7477, over 4564706.63 frames. ], batch size: 216, lr: 4.49e-02, grad_scale: 8.0 +2024-01-15 11:23:15,123 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.379e+02 2.269e+02 2.746e+02 3.463e+02 7.914e+02, threshold=5.491e+02, percent-clipped=16.0 +2024-01-15 11:23:20,024 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=13.78 vs. limit=9.0 +2024-01-15 11:23:20,255 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=8.14 vs. 
limit=8.25 +2024-01-15 11:23:31,523 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=119.84 vs. limit=8.2625 +2024-01-15 11:23:41,975 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=2066.6666666666665, ans=0.053500000000000006 +2024-01-15 11:23:41,978 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=2066.6666666666665, ans=0.053500000000000006 +2024-01-15 11:23:43,885 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=2066.6666666666665, ans=0.403125 +2024-01-15 11:23:45,526 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=6.74 vs. limit=4.826666666666666 +2024-01-15 11:23:55,354 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=2100.0, ans=0.4015625 +2024-01-15 11:24:03,838 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=37.76 vs. limit=8.2875 +2024-01-15 11:24:03,927 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=6.56 vs. limit=4.84 +2024-01-15 11:24:14,488 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.56 vs. limit=5.533333333333333 +2024-01-15 11:24:20,046 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.75 vs. limit=9.1 +2024-01-15 11:24:20,095 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten.whitening_limit, batch_count=2133.3333333333335, ans=9.1 +2024-01-15 11:24:20,159 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=10.27 vs. limit=9.1 +2024-01-15 11:24:22,093 INFO [train.py:994] (0/2) Epoch 1, batch 650, loss[loss=0.7564, simple_loss=0.6205, pruned_loss=0.6029, over 24537.00 frames. ], tot_loss[loss=0.8234, simple_loss=0.683, pruned_loss=0.7196, over 4605132.59 frames. ], batch size: 176, lr: 4.49e-02, grad_scale: 8.0 +2024-01-15 11:24:42,699 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=12.54 vs. 
limit=8.325 +2024-01-15 11:24:47,707 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=2200.0, ans=0.17965 +2024-01-15 11:24:49,181 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=2233.3333333333335, ans=0.2776666666666667 +2024-01-15 11:25:07,521 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=2266.6666666666665, ans=0.7726666666666666 +2024-01-15 11:25:10,988 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=2266.6666666666665, ans=0.39375 +2024-01-15 11:25:11,373 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=16.07 vs. limit=8.35 +2024-01-15 11:25:27,192 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=10.46 vs. limit=9.225 +2024-01-15 11:25:33,017 INFO [train.py:994] (0/2) Epoch 1, batch 700, loss[loss=0.7375, simple_loss=0.6112, pruned_loss=0.5617, over 23843.00 frames. ], tot_loss[loss=0.8041, simple_loss=0.6662, pruned_loss=0.6864, over 4657886.08 frames. ], batch size: 328, lr: 4.49e-02, grad_scale: 8.0 +2024-01-15 11:25:36,079 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=2333.3333333333335, ans=0.390625 +2024-01-15 11:25:37,151 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.567e+02 2.797e+02 3.404e+02 4.140e+02 8.487e+02, threshold=6.807e+02, percent-clipped=8.0 +2024-01-15 11:25:46,400 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.46 vs. limit=8.3875 +2024-01-15 11:25:49,584 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=2366.6666666666665, ans=0.11125 +2024-01-15 11:25:57,819 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.71 vs. limit=9.275 +2024-01-15 11:25:57,936 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.01 vs. limit=5.591666666666667 +2024-01-15 11:26:11,837 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=15.22 vs. limit=8.4 +2024-01-15 11:26:13,608 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.12 vs. limit=8.4 +2024-01-15 11:26:17,096 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=2433.3333333333335, ans=0.1958333333333333 +2024-01-15 11:26:24,849 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.15 vs. 
limit=4.973333333333334 +2024-01-15 11:26:40,581 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=2466.6666666666665, ans=0.384375 +2024-01-15 11:26:44,527 INFO [train.py:994] (0/2) Epoch 1, batch 750, loss[loss=0.6884, simple_loss=0.5795, pruned_loss=0.4971, over 24482.00 frames. ], tot_loss[loss=0.779, simple_loss=0.6466, pruned_loss=0.6468, over 4698817.68 frames. ], batch size: 267, lr: 4.49e-02, grad_scale: 8.0 +2024-01-15 11:26:50,291 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=9.64 vs. limit=6.25 +2024-01-15 11:27:06,863 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=2533.3333333333335, ans=0.105 +2024-01-15 11:27:09,957 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=9.90 vs. limit=9.4 +2024-01-15 11:27:16,358 INFO [scaling.py:1022] (0/2) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=5.07 vs. limit=5.0 +2024-01-15 11:27:18,519 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.18 vs. limit=8.4625 +2024-01-15 11:27:20,758 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=2566.6666666666665, ans=0.3796875 +2024-01-15 11:27:23,802 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.67 vs. limit=5.641666666666667 +2024-01-15 11:27:30,290 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.55 vs. limit=8.475 +2024-01-15 11:27:48,579 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=5.33 vs. limit=5.053333333333334 +2024-01-15 11:27:49,287 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=2633.3333333333335, ans=0.8078333333333334 +2024-01-15 11:27:53,392 INFO [train.py:994] (0/2) Epoch 1, batch 800, loss[loss=0.6355, simple_loss=0.5463, pruned_loss=0.4313, over 24517.00 frames. ], tot_loss[loss=0.7494, simple_loss=0.6247, pruned_loss=0.6038, over 4723313.46 frames. ], batch size: 243, lr: 4.49e-02, grad_scale: 16.0 +2024-01-15 11:27:56,243 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=2666.6666666666665, ans=0.2733333333333333 +2024-01-15 11:27:57,229 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.772e+02 2.597e+02 3.607e+02 4.454e+02 7.869e+02, threshold=7.214e+02, percent-clipped=4.0 +2024-01-15 11:28:01,939 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=2666.6666666666665, ans=0.09999999999999999 +2024-01-15 11:28:12,123 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=2700.0, ans=0.8055 +2024-01-15 11:28:12,714 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=9.79 vs. 
limit=9.525 +2024-01-15 11:28:13,290 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=2700.0, ans=0.3734375 +2024-01-15 11:28:19,544 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=2733.3333333333335, ans=0.371875 +2024-01-15 11:28:47,218 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.66 vs. limit=6.4 +2024-01-15 11:28:48,977 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-1.pt +2024-01-15 11:29:12,819 INFO [train.py:994] (0/2) Epoch 2, batch 0, loss[loss=0.6026, simple_loss=0.5231, pruned_loss=0.3957, over 24323.00 frames. ], tot_loss[loss=0.6026, simple_loss=0.5231, pruned_loss=0.3957, over 24323.00 frames. ], batch size: 285, lr: 4.47e-02, grad_scale: 32.0 +2024-01-15 11:29:12,820 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 11:29:23,473 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.2.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([2.7458, 2.5847, 2.5647, 2.3975], device='cuda:0') +2024-01-15 11:29:32,736 INFO [train.py:1026] (0/2) Epoch 2, validation: loss=0.5319, simple_loss=0.4791, pruned_loss=0.3207, over 1622729.00 frames. +2024-01-15 11:29:32,737 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 11:29:51,749 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=2843.3333333333335, ans=0.09337499999999999 +2024-01-15 11:30:00,837 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=2876.6666666666665, ans=0.7993166666666667 +2024-01-15 11:30:02,029 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=2876.6666666666665, ans=0.1404166666666667 +2024-01-15 11:30:10,878 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.94 vs. limit=5.719166666666666 +2024-01-15 11:30:11,284 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.11 vs. limit=5.719166666666666 +2024-01-15 11:30:13,570 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=9.53 vs. limit=9.682500000000001 +2024-01-15 11:30:19,160 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=2910.0, ans=0.36359375 +2024-01-15 11:30:32,618 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=2943.3333333333335, ans=0.36203125 +2024-01-15 11:30:38,960 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=10.07 vs. limit=9.7075 +2024-01-15 11:30:42,321 INFO [train.py:994] (0/2) Epoch 2, batch 50, loss[loss=0.4923, simple_loss=0.432, pruned_loss=0.3122, over 23532.00 frames. ], tot_loss[loss=0.5806, simple_loss=0.505, pruned_loss=0.3774, over 1082683.36 frames. 
], batch size: 119, lr: 4.47e-02, grad_scale: 32.0 +2024-01-15 11:30:45,722 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=10.68 vs. limit=9.7325 +2024-01-15 11:30:49,933 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=2976.6666666666665, ans=0.36046875 +2024-01-15 11:30:56,085 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.919e+02 2.574e+02 2.992e+02 3.662e+02 7.050e+02, threshold=5.985e+02, percent-clipped=0.0 +2024-01-15 11:30:58,177 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.17 vs. limit=8.62875 +2024-01-15 11:31:06,084 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=3010.0, ans=0.12375000000000003 +2024-01-15 11:31:13,745 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=7.56 vs. limit=6.5216666666666665 +2024-01-15 11:31:15,861 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=3043.3333333333335, ans=0.35734374999999996 +2024-01-15 11:31:16,358 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=10.06 vs. limit=9.7825 +2024-01-15 11:31:18,646 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=3043.3333333333335, ans=0.2695666666666667 +2024-01-15 11:31:32,794 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=3076.6666666666665, ans=0.35578125 +2024-01-15 11:31:44,001 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.08 vs. limit=5.7775 +2024-01-15 11:31:51,173 INFO [train.py:994] (0/2) Epoch 2, batch 100, loss[loss=0.4881, simple_loss=0.4209, pruned_loss=0.3173, over 17474.00 frames. ], tot_loss[loss=0.5664, simple_loss=0.4965, pruned_loss=0.3601, over 1914151.52 frames. ], batch size: 75, lr: 4.47e-02, grad_scale: 16.0 +2024-01-15 11:31:51,426 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 11:31:54,099 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=3143.3333333333335, ans=0.040177083333333335 +2024-01-15 11:31:57,464 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=3143.3333333333335, ans=0.35265625 +2024-01-15 11:32:05,344 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=3176.6666666666665, ans=0.040072916666666666 +2024-01-15 11:32:29,237 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.11 vs. 
limit=5.8025 +2024-01-15 11:32:31,387 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=3243.3333333333335, ans=0.039864583333333335 +2024-01-15 11:33:00,190 INFO [train.py:994] (0/2) Epoch 2, batch 150, loss[loss=0.5201, simple_loss=0.4664, pruned_loss=0.3099, over 24368.00 frames. ], tot_loss[loss=0.5509, simple_loss=0.4871, pruned_loss=0.3422, over 2570549.50 frames. ], batch size: 275, lr: 4.47e-02, grad_scale: 16.0 +2024-01-15 11:33:00,456 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=3310.0, ans=0.34484375 +2024-01-15 11:33:03,553 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=10.82 vs. limit=9.9825 +2024-01-15 11:33:05,065 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=7.22 vs. limit=8.74125 +2024-01-15 11:33:15,456 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.094e+02 2.592e+02 3.146e+02 4.123e+02 9.007e+02, threshold=6.292e+02, percent-clipped=3.0 +2024-01-15 11:33:24,319 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=3343.3333333333335, ans=0.02477499999999999 +2024-01-15 11:33:30,820 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=3376.6666666666665, ans=0.2662333333333333 +2024-01-15 11:33:39,067 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=3376.6666666666665, ans=0.2662333333333333 +2024-01-15 11:33:53,244 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.11 vs. limit=5.8525 +2024-01-15 11:33:57,004 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.03 vs. limit=5.860833333333334 +2024-01-15 11:34:02,234 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.11 vs. limit=5.860833333333334 +2024-01-15 11:34:08,858 INFO [train.py:994] (0/2) Epoch 2, batch 200, loss[loss=0.5259, simple_loss=0.4759, pruned_loss=0.3057, over 24491.00 frames. ], tot_loss[loss=0.5363, simple_loss=0.4774, pruned_loss=0.327, over 3064662.47 frames. ], batch size: 165, lr: 4.47e-02, grad_scale: 16.0 +2024-01-15 11:34:32,309 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=3510.0, ans=0.021025000000000002 +2024-01-15 11:34:56,808 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=5.32 vs. limit=5.430666666666666 +2024-01-15 11:34:59,573 INFO [scaling.py:1022] (0/2) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=4.58 vs. limit=4.715333333333334 +2024-01-15 11:35:03,775 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=10.16 vs. 
limit=10.2075 +2024-01-15 11:35:13,422 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.84 vs. limit=8.85375 +2024-01-15 11:35:16,676 INFO [train.py:994] (0/2) Epoch 2, batch 250, loss[loss=0.5034, simple_loss=0.4598, pruned_loss=0.286, over 24454.00 frames. ], tot_loss[loss=0.5225, simple_loss=0.469, pruned_loss=0.3121, over 3447947.93 frames. ], batch size: 222, lr: 4.47e-02, grad_scale: 16.0 +2024-01-15 11:35:31,784 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.028e+02 2.695e+02 3.175e+02 3.911e+02 8.566e+02, threshold=6.350e+02, percent-clipped=6.0 +2024-01-15 11:35:34,691 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=3676.6666666666665, ans=0.04949747468305833 +2024-01-15 11:35:39,945 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.06 vs. limit=5.9191666666666665 +2024-01-15 11:35:45,620 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.max_abs, batch_count=3710.0, ans=7.31875 +2024-01-15 11:35:54,002 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.60 vs. limit=3.5564999999999998 +2024-01-15 11:35:59,222 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=3743.3333333333335, ans=7.339583333333334 +2024-01-15 11:36:05,354 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=5.85 vs. limit=8.90375 +2024-01-15 11:36:09,634 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=12.28 vs. limit=10.307500000000001 +2024-01-15 11:36:22,207 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=3776.6666666666665, ans=0.05837499999999998 +2024-01-15 11:36:25,184 INFO [train.py:994] (0/2) Epoch 2, batch 300, loss[loss=0.4793, simple_loss=0.4425, pruned_loss=0.2657, over 24380.00 frames. ], tot_loss[loss=0.5082, simple_loss=0.4598, pruned_loss=0.2975, over 3753371.62 frames. ], batch size: 275, lr: 4.46e-02, grad_scale: 16.0 +2024-01-15 11:36:56,955 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=6.27 vs. limit=5.969166666666666 +2024-01-15 11:37:04,067 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=3910.0, ans=0.31671875 +2024-01-15 11:37:16,487 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=3910.0, ans=0.2609 +2024-01-15 11:37:23,581 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=3943.3333333333335, ans=0.31515625 +2024-01-15 11:37:32,404 INFO [train.py:994] (0/2) Epoch 2, batch 350, loss[loss=0.4546, simple_loss=0.4305, pruned_loss=0.2394, over 24595.00 frames. ], tot_loss[loss=0.4947, simple_loss=0.4516, pruned_loss=0.2839, over 3995337.34 frames. 
], batch size: 199, lr: 4.46e-02, grad_scale: 16.0 +2024-01-15 11:37:32,733 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=3976.6666666666665, ans=0.07514583333333334 +2024-01-15 11:37:33,102 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.16 vs. limit=8.99125 +2024-01-15 11:37:37,356 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=3976.6666666666665, ans=0.2602333333333333 +2024-01-15 11:37:44,301 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=10.76 vs. limit=10.4825 +2024-01-15 11:37:47,492 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.985e+02 2.596e+02 3.097e+02 3.679e+02 8.153e+02, threshold=6.193e+02, percent-clipped=3.0 +2024-01-15 11:37:54,862 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=4010.0, ans=0.009997826086956521 +2024-01-15 11:38:12,144 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=11.34 vs. limit=10.5325 +2024-01-15 11:38:22,834 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=4076.6666666666665, ans=0.009983333333333334 +2024-01-15 11:38:30,228 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1.whitening_limit, batch_count=4110.0, ans=6.0275 +2024-01-15 11:38:32,458 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=4110.0, ans=0.7911 +2024-01-15 11:38:36,561 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=7.24 vs. limit=7.055 +2024-01-15 11:38:39,133 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=11.06 vs. limit=10.5825 +2024-01-15 11:38:40,137 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=4143.333333333333, ans=0.009968840579710146 +2024-01-15 11:38:41,086 INFO [train.py:994] (0/2) Epoch 2, batch 400, loss[loss=0.4244, simple_loss=0.3987, pruned_loss=0.2269, over 24022.00 frames. ], tot_loss[loss=0.48, simple_loss=0.4422, pruned_loss=0.27, over 4185404.75 frames. ], batch size: 131, lr: 4.46e-02, grad_scale: 32.0 +2024-01-15 11:38:42,949 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.09 vs. limit=9.053749999999999 +2024-01-15 11:38:55,307 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=4176.666666666667, ans=0.30421875 +2024-01-15 11:39:00,067 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=4176.666666666667, ans=7.6104166666666675 +2024-01-15 11:39:02,897 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.04 vs. 
limit=9.06625 +2024-01-15 11:39:12,162 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=8.65 vs. limit=7.105 +2024-01-15 11:39:16,415 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.22 vs. limit=6.0525 +2024-01-15 11:39:30,677 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=4243.333333333333, ans=0.07 +2024-01-15 11:39:44,424 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_positive, batch_count=4276.666666666667, ans=0.07327083333333334 +2024-01-15 11:39:49,297 INFO [train.py:994] (0/2) Epoch 2, batch 450, loss[loss=0.4625, simple_loss=0.4356, pruned_loss=0.2459, over 24404.00 frames. ], tot_loss[loss=0.4682, simple_loss=0.4347, pruned_loss=0.2588, over 4315581.60 frames. ], batch size: 159, lr: 4.46e-02, grad_scale: 16.0 +2024-01-15 11:39:53,265 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.11 vs. limit=10.7325 +2024-01-15 11:40:05,380 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.007e+02 2.923e+02 3.521e+02 4.109e+02 6.335e+02, threshold=7.042e+02, percent-clipped=2.0 +2024-01-15 11:40:06,942 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=4343.333333333333, ans=0.0 +2024-01-15 11:40:07,000 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=4343.333333333333, ans=0.04856944444444445 +2024-01-15 11:40:15,569 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=12.21 vs. limit=10.7825 +2024-01-15 11:40:21,053 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=1.097e+00 +2024-01-15 11:40:32,482 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=4410.0, ans=0.07 +2024-01-15 11:40:38,490 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=4410.0, ans=0.74565 +2024-01-15 11:40:50,193 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=12.78 vs. limit=10.8325 +2024-01-15 11:40:50,334 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=11.98 vs. limit=10.8325 +2024-01-15 11:40:56,886 INFO [train.py:994] (0/2) Epoch 2, batch 500, loss[loss=0.3904, simple_loss=0.3846, pruned_loss=0.1915, over 24522.00 frames. ], tot_loss[loss=0.4539, simple_loss=0.4257, pruned_loss=0.246, over 4434827.41 frames. 
], batch size: 204, lr: 4.45e-02, grad_scale: 16.0 +2024-01-15 11:41:08,183 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=4476.666666666667, ans=0.009896376811594203 +2024-01-15 11:41:21,644 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 11:41:22,176 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.86 vs. limit=9.19125 +2024-01-15 11:41:35,937 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=4576.666666666667, ans=0.28546875 +2024-01-15 11:41:43,134 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=4576.666666666667, ans=0.28546875 +2024-01-15 11:41:46,701 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=4576.666666666667, ans=0.2542333333333333 +2024-01-15 11:41:46,716 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=4576.666666666667, ans=0.009874637681159421 +2024-01-15 11:41:55,518 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=4610.0, ans=0.04745833333333334 +2024-01-15 11:42:03,702 INFO [train.py:994] (0/2) Epoch 2, batch 550, loss[loss=0.4377, simple_loss=0.419, pruned_loss=0.2265, over 22242.00 frames. ], tot_loss[loss=0.4436, simple_loss=0.4197, pruned_loss=0.2364, over 4514148.61 frames. ], batch size: 357, lr: 4.45e-02, grad_scale: 16.0 +2024-01-15 11:42:19,794 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.137e+02 2.755e+02 3.155e+02 3.951e+02 7.770e+02, threshold=6.310e+02, percent-clipped=2.0 +2024-01-15 11:42:40,326 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=4710.0, ans=0.04704166666666667 +2024-01-15 11:42:41,480 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=4710.0, ans=0.0705625 +2024-01-15 11:42:49,592 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.70 vs. limit=5.897333333333333 +2024-01-15 11:43:03,637 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=8.76 vs. limit=9.29125 +2024-01-15 11:43:10,442 INFO [train.py:994] (0/2) Epoch 2, batch 600, loss[loss=0.3982, simple_loss=0.3955, pruned_loss=0.1941, over 24620.00 frames. ], tot_loss[loss=0.4319, simple_loss=0.4124, pruned_loss=0.2264, over 4569931.48 frames. ], batch size: 199, lr: 4.45e-02, grad_scale: 16.0 +2024-01-15 11:43:10,722 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=4810.0, ans=0.27453125 +2024-01-15 11:43:28,601 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.69 vs. 
limit=3.7265 +2024-01-15 11:43:32,204 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=4843.333333333333, ans=0.27296875 +2024-01-15 11:43:38,608 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=4876.666666666667, ans=0.27140624999999996 +2024-01-15 11:43:43,766 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 11:43:55,780 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=4910.0, ans=0.26984375 +2024-01-15 11:44:16,690 INFO [train.py:994] (0/2) Epoch 2, batch 650, loss[loss=0.3812, simple_loss=0.3857, pruned_loss=0.1807, over 24362.00 frames. ], tot_loss[loss=0.4221, simple_loss=0.407, pruned_loss=0.2175, over 4633902.55 frames. ], batch size: 275, lr: 4.45e-02, grad_scale: 16.0 +2024-01-15 11:44:29,958 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=10.98 vs. limit=11.2575 +2024-01-15 11:44:33,078 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.957e+02 2.608e+02 3.015e+02 3.605e+02 6.995e+02, threshold=6.031e+02, percent-clipped=1.0 +2024-01-15 11:44:54,754 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=5043.333333333333, ans=0.26359374999999996 +2024-01-15 11:45:02,164 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=5076.666666666667, ans=0.26203125 +2024-01-15 11:45:03,949 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=12.33 vs. limit=11.307500000000001 +2024-01-15 11:45:06,148 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=5076.666666666667, ans=0.04551388888888889 +2024-01-15 11:45:16,735 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=5110.0, ans=0.045375000000000006 +2024-01-15 11:45:23,476 INFO [train.py:994] (0/2) Epoch 2, batch 700, loss[loss=0.4339, simple_loss=0.4218, pruned_loss=0.2201, over 22139.00 frames. ], tot_loss[loss=0.4123, simple_loss=0.4012, pruned_loss=0.2095, over 4668044.54 frames. ], batch size: 357, lr: 4.44e-02, grad_scale: 16.0 +2024-01-15 11:45:31,551 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=5143.333333333333, ans=0.03392708333333333 +2024-01-15 11:45:36,483 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=5176.666666666667, ans=0.0 +2024-01-15 11:45:47,220 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.51 vs. limit=9.44125 +2024-01-15 11:46:06,481 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=5243.333333333333, ans=0.25421875 +2024-01-15 11:46:28,220 INFO [train.py:994] (0/2) Epoch 2, batch 750, loss[loss=0.3577, simple_loss=0.3673, pruned_loss=0.1674, over 24443.00 frames. 
], tot_loss[loss=0.4032, simple_loss=0.3958, pruned_loss=0.2021, over 4704077.10 frames. ], batch size: 250, lr: 4.44e-02, grad_scale: 16.0 +2024-01-15 11:46:28,468 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=5310.0, ans=0.25109375 +2024-01-15 11:46:43,734 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.46 vs. limit=9.50375 +2024-01-15 11:46:44,355 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.125e+02 2.817e+02 3.394e+02 3.988e+02 1.067e+03, threshold=6.787e+02, percent-clipped=3.0 +2024-01-15 11:46:44,779 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=5343.333333333333, ans=0.044402777777777784 +2024-01-15 11:47:00,952 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=5376.666666666667, ans=0.04426388888888889 +2024-01-15 11:47:14,645 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=5410.0, ans=0.71065 +2024-01-15 11:47:26,069 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=8.95 vs. limit=9.54125 +2024-01-15 11:47:26,837 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=5443.333333333333, ans=0.032989583333333336 +2024-01-15 11:47:31,255 INFO [train.py:994] (0/2) Epoch 2, batch 800, loss[loss=0.3631, simple_loss=0.3781, pruned_loss=0.1672, over 24495.00 frames. ], tot_loss[loss=0.3944, simple_loss=0.3906, pruned_loss=0.1953, over 4726222.45 frames. ], batch size: 229, lr: 4.44e-02, grad_scale: 32.0 +2024-01-15 11:47:38,607 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=5476.666666666667, ans=0.24328125 +2024-01-15 11:47:56,743 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.52 vs. limit=6.217333333333333 +2024-01-15 11:48:12,480 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.00 vs. limit=11.682500000000001 +2024-01-15 11:48:19,841 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.53 vs. limit=7.805 +2024-01-15 11:48:21,433 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=5610.0, ans=0.2439 +2024-01-15 11:48:23,592 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-2.pt +2024-01-15 11:48:46,759 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=5620.0, ans=0.2438 +2024-01-15 11:48:47,767 INFO [train.py:994] (0/2) Epoch 3, batch 0, loss[loss=0.3324, simple_loss=0.3465, pruned_loss=0.1537, over 24057.00 frames. ], tot_loss[loss=0.3324, simple_loss=0.3465, pruned_loss=0.1537, over 24057.00 frames. 
], batch size: 131, lr: 4.40e-02, grad_scale: 32.0 +2024-01-15 11:48:47,767 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 11:49:07,717 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.0.layers.1.self_attn_weights, attn_weights_entropy = tensor([5.0816, 4.7679, 4.8115, 4.5483], device='cuda:0') +2024-01-15 11:49:08,288 INFO [train.py:1026] (0/2) Epoch 3, validation: loss=0.2688, simple_loss=0.3253, pruned_loss=0.09388, over 1622729.00 frames. +2024-01-15 11:49:08,289 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 11:49:26,921 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=5653.333333333333, ans=0.235 +2024-01-15 11:49:31,523 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=5653.333333333333, ans=0.009640579710144927 +2024-01-15 11:49:32,345 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.086e+02 2.950e+02 3.385e+02 4.084e+02 7.457e+02, threshold=6.770e+02, percent-clipped=2.0 +2024-01-15 11:49:51,514 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=5720.0, ans=0.6998 +2024-01-15 11:49:52,705 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=5720.0, ans=0.231875 +2024-01-15 11:49:58,477 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=5753.333333333333, ans=0.23031249999999998 +2024-01-15 11:50:12,656 INFO [train.py:994] (0/2) Epoch 3, batch 50, loss[loss=0.3479, simple_loss=0.3674, pruned_loss=0.1589, over 24478.00 frames. ], tot_loss[loss=0.3507, simple_loss=0.3652, pruned_loss=0.1629, over 1088933.28 frames. ], batch size: 181, lr: 4.40e-02, grad_scale: 32.0 +2024-01-15 11:50:28,435 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.83 vs. limit=9.682500000000001 +2024-01-15 11:50:29,389 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=5820.0, ans=0.09899494936611666 +2024-01-15 11:50:34,212 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=5820.0, ans=0.0 +2024-01-15 11:50:47,819 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.79 vs. limit=11.89 +2024-01-15 11:51:15,943 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=12.09 vs. limit=11.940000000000001 +2024-01-15 11:51:17,789 INFO [train.py:994] (0/2) Epoch 3, batch 100, loss[loss=0.2791, simple_loss=0.2877, pruned_loss=0.1326, over 18366.00 frames. ], tot_loss[loss=0.3482, simple_loss=0.364, pruned_loss=0.1615, over 1914659.16 frames. 
], batch size: 80, lr: 4.40e-02, grad_scale: 32.0 +2024-01-15 11:51:19,212 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 11:51:42,954 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.145e+02 2.875e+02 3.360e+02 3.960e+02 7.959e+02, threshold=6.719e+02, percent-clipped=3.0 +2024-01-15 11:51:44,478 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=6020.0, ans=0.23979999999999999 +2024-01-15 11:51:47,107 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=6020.0, ans=0.6893 +2024-01-15 11:52:23,038 INFO [train.py:994] (0/2) Epoch 3, batch 150, loss[loss=0.346, simple_loss=0.3682, pruned_loss=0.1585, over 24483.00 frames. ], tot_loss[loss=0.3476, simple_loss=0.3648, pruned_loss=0.161, over 2555678.48 frames. ], batch size: 165, lr: 4.39e-02, grad_scale: 32.0 +2024-01-15 11:52:23,551 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=12.20 vs. limit=12.09 +2024-01-15 11:52:50,360 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=6186.666666666667, ans=0.23813333333333334 +2024-01-15 11:53:02,947 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=6220.0, ans=0.2084375 +2024-01-15 11:53:06,653 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=6220.0, ans=0.23779999999999998 +2024-01-15 11:53:08,300 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=16.74 vs. limit=12.165 +2024-01-15 11:53:27,239 INFO [train.py:994] (0/2) Epoch 3, batch 200, loss[loss=0.3146, simple_loss=0.3524, pruned_loss=0.1353, over 24521.00 frames. ], tot_loss[loss=0.3443, simple_loss=0.3633, pruned_loss=0.1589, over 3055310.64 frames. ], batch size: 193, lr: 4.39e-02, grad_scale: 32.0 +2024-01-15 11:53:31,311 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=6286.666666666667, ans=0.2053125 +2024-01-15 11:53:51,512 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.145e+02 2.732e+02 3.081e+02 3.484e+02 6.309e+02, threshold=6.162e+02, percent-clipped=0.0 +2024-01-15 11:54:04,153 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=6386.666666666667, ans=0.04005555555555555 +2024-01-15 11:54:10,580 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=8.56 vs. limit=9.895 +2024-01-15 11:54:15,205 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=6.47 vs. limit=6.596666666666667 +2024-01-15 11:54:18,280 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=8.65 vs. limit=9.9075 +2024-01-15 11:54:20,542 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=13.84 vs. 
limit=12.315000000000001 +2024-01-15 11:54:31,587 INFO [train.py:994] (0/2) Epoch 3, batch 250, loss[loss=0.3514, simple_loss=0.3778, pruned_loss=0.1612, over 24427.00 frames. ], tot_loss[loss=0.342, simple_loss=0.3628, pruned_loss=0.1575, over 3439317.85 frames. ], batch size: 258, lr: 4.38e-02, grad_scale: 32.0 +2024-01-15 11:54:45,262 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.10 vs. limit=12.365 +2024-01-15 11:54:56,196 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=6520.0, ans=0.6718000000000001 +2024-01-15 11:54:59,294 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=6520.0, ans=0.6718000000000001 +2024-01-15 11:54:59,351 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 11:55:18,788 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.46 vs. limit=9.9575 +2024-01-15 11:55:19,393 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=6553.333333333333, ans=0.1928125 +2024-01-15 11:55:23,795 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=6586.666666666667, ans=0.03922222222222223 +2024-01-15 11:55:29,792 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=6586.666666666667, ans=0.00943768115942029 +2024-01-15 11:55:35,396 INFO [train.py:994] (0/2) Epoch 3, batch 300, loss[loss=0.3143, simple_loss=0.3307, pruned_loss=0.1487, over 23592.00 frames. ], tot_loss[loss=0.338, simple_loss=0.3608, pruned_loss=0.1551, over 3745731.26 frames. ], batch size: 119, lr: 4.38e-02, grad_scale: 32.0 +2024-01-15 11:55:35,727 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=6620.0, ans=0.6683 +2024-01-15 11:55:47,555 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=6653.333333333333, ans=0.6671333333333334 +2024-01-15 11:55:47,628 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=6653.333333333333, ans=0.188125 +2024-01-15 11:55:53,742 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=12.65 vs. limit=12.49 +2024-01-15 11:55:56,434 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.99 vs. limit=6.661333333333333 +2024-01-15 11:55:58,415 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=6653.333333333333, ans=0.188125 +2024-01-15 11:56:00,474 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.241e+02 2.796e+02 3.225e+02 3.857e+02 7.668e+02, threshold=6.450e+02, percent-clipped=1.0 +2024-01-15 11:56:10,548 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.23 vs. 
limit=10.0075 +2024-01-15 11:56:24,182 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=6720.0, ans=0.03866666666666667 +2024-01-15 11:56:25,416 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=6720.0, ans=0.185 +2024-01-15 11:56:40,160 INFO [train.py:994] (0/2) Epoch 3, batch 350, loss[loss=0.327, simple_loss=0.3618, pruned_loss=0.1461, over 24483.00 frames. ], tot_loss[loss=0.3349, simple_loss=0.3594, pruned_loss=0.1534, over 3978252.59 frames. ], batch size: 229, lr: 4.38e-02, grad_scale: 32.0 +2024-01-15 11:56:50,683 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=6786.666666666667, ans=0.6624666666666666 +2024-01-15 11:56:51,871 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=6820.0, ans=0.1803125 +2024-01-15 11:56:54,900 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=6820.0, ans=0.0 +2024-01-15 11:56:56,182 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=6820.0, ans=0.1803125 +2024-01-15 11:56:58,845 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.67 vs. limit=6.705 +2024-01-15 11:57:00,201 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.22 vs. limit=10.057500000000001 +2024-01-15 11:57:21,653 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=6886.666666666667, ans=0.03797222222222223 +2024-01-15 11:57:26,889 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=12.43 vs. limit=12.665 +2024-01-15 11:57:44,167 INFO [train.py:994] (0/2) Epoch 3, batch 400, loss[loss=0.3388, simple_loss=0.3708, pruned_loss=0.1535, over 24468.00 frames. ], tot_loss[loss=0.3303, simple_loss=0.3563, pruned_loss=0.1508, over 4151182.55 frames. ], batch size: 222, lr: 4.37e-02, grad_scale: 32.0 +2024-01-15 11:57:44,493 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=6953.333333333333, ans=0.23046666666666665 +2024-01-15 11:58:07,839 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.317e+02 2.802e+02 3.170e+02 3.769e+02 6.505e+02, threshold=6.340e+02, percent-clipped=1.0 +2024-01-15 11:58:11,681 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=7020.0, ans=0.6543 +2024-01-15 11:58:13,012 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=7020.0, ans=0.17093750000000002 +2024-01-15 11:58:15,991 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.22 vs. 
limit=10.1325 +2024-01-15 11:58:23,931 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=7053.333333333333, ans=0.3058 +2024-01-15 11:58:23,949 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=7053.333333333333, ans=0.169375 +2024-01-15 11:58:33,089 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.89 vs. limit=4.058 +2024-01-15 11:58:35,896 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=8.95 vs. limit=10.1575 +2024-01-15 11:58:45,271 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=7086.666666666667, ans=0.07 +2024-01-15 11:58:47,420 INFO [train.py:994] (0/2) Epoch 3, batch 450, loss[loss=0.2968, simple_loss=0.3348, pruned_loss=0.1294, over 24204.00 frames. ], tot_loss[loss=0.328, simple_loss=0.3557, pruned_loss=0.1492, over 4297688.87 frames. ], batch size: 140, lr: 4.37e-02, grad_scale: 32.0 +2024-01-15 11:58:56,609 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=13.12 vs. limit=12.84 +2024-01-15 11:59:10,943 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=7153.333333333333, ans=0.22846666666666665 +2024-01-15 11:59:26,057 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=7220.0, ans=0.1615625 +2024-01-15 11:59:33,464 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=7220.0, ans=0.1615625 +2024-01-15 11:59:50,143 INFO [train.py:994] (0/2) Epoch 3, batch 500, loss[loss=0.2858, simple_loss=0.3263, pruned_loss=0.1226, over 24319.00 frames. ], tot_loss[loss=0.3255, simple_loss=0.3546, pruned_loss=0.1475, over 4414872.70 frames. ], batch size: 147, lr: 4.37e-02, grad_scale: 32.0 +2024-01-15 12:00:14,612 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.102e+02 2.884e+02 3.254e+02 3.878e+02 6.671e+02, threshold=6.507e+02, percent-clipped=1.0 +2024-01-15 12:00:28,388 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=7386.666666666667, ans=0.15375 +2024-01-15 12:00:55,032 INFO [train.py:994] (0/2) Epoch 3, batch 550, loss[loss=0.3383, simple_loss=0.3705, pruned_loss=0.1531, over 23868.00 frames. ], tot_loss[loss=0.3227, simple_loss=0.3531, pruned_loss=0.1455, over 4494032.70 frames. ], batch size: 328, lr: 4.36e-02, grad_scale: 32.0 +2024-01-15 12:00:56,784 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.02 vs. limit=10.295 +2024-01-15 12:01:05,168 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.79 vs. 
limit=8.726666666666667 +2024-01-15 12:01:15,127 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=7486.666666666667, ans=0.035472222222222224 +2024-01-15 12:01:18,813 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=7520.0, ans=0.035333333333333335 +2024-01-15 12:01:34,578 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=7553.333333333333, ans=0.009227536231884059 +2024-01-15 12:01:57,155 INFO [train.py:994] (0/2) Epoch 3, batch 600, loss[loss=0.3403, simple_loss=0.3758, pruned_loss=0.1524, over 24529.00 frames. ], tot_loss[loss=0.3201, simple_loss=0.3518, pruned_loss=0.1437, over 4568440.98 frames. ], batch size: 165, lr: 4.36e-02, grad_scale: 32.0 +2024-01-15 12:02:06,552 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=7620.0, ans=0.04949747468305833 +2024-01-15 12:02:20,714 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.115e+02 2.650e+02 3.022e+02 3.630e+02 6.228e+02, threshold=6.043e+02, percent-clipped=0.0 +2024-01-15 12:02:53,267 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=7753.333333333333, ans=0.13656249999999998 +2024-01-15 12:02:54,833 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.27 vs. limit=10.4075 +2024-01-15 12:02:59,060 INFO [train.py:994] (0/2) Epoch 3, batch 650, loss[loss=0.2936, simple_loss=0.3388, pruned_loss=0.1242, over 24524.00 frames. ], tot_loss[loss=0.3172, simple_loss=0.3504, pruned_loss=0.1417, over 4631598.02 frames. ], batch size: 210, lr: 4.35e-02, grad_scale: 32.0 +2024-01-15 12:03:06,624 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=7786.666666666667, ans=0.135 +2024-01-15 12:03:07,704 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=7786.666666666667, ans=0.22213333333333332 +2024-01-15 12:03:08,853 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=7786.666666666667, ans=0.009176811594202899 +2024-01-15 12:03:21,129 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=7820.0, ans=0.03408333333333334 +2024-01-15 12:03:26,039 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=7853.333333333333, ans=0.13187500000000002 +2024-01-15 12:03:32,228 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=7853.333333333333, ans=0.025458333333333336 +2024-01-15 12:04:01,249 INFO [train.py:994] (0/2) Epoch 3, batch 700, loss[loss=0.29, simple_loss=0.3353, pruned_loss=0.1223, over 24519.00 frames. ], tot_loss[loss=0.315, simple_loss=0.3494, pruned_loss=0.14, over 4674808.99 frames. 
], batch size: 165, lr: 4.35e-02, grad_scale: 32.0 +2024-01-15 12:04:01,617 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=7953.333333333333, ans=9.970833333333333 +2024-01-15 12:04:25,973 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.212e+02 2.889e+02 3.295e+02 3.888e+02 5.755e+02, threshold=6.589e+02, percent-clipped=0.0 +2024-01-15 12:04:26,273 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=8020.0, ans=0.03325 +2024-01-15 12:04:27,584 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=8020.0, ans=0.2198 +2024-01-15 12:04:32,200 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=8020.0, ans=0.0 +2024-01-15 12:05:05,427 INFO [train.py:994] (0/2) Epoch 3, batch 750, loss[loss=0.3014, simple_loss=0.3468, pruned_loss=0.1279, over 24518.00 frames. ], tot_loss[loss=0.313, simple_loss=0.3484, pruned_loss=0.1386, over 4704993.47 frames. ], batch size: 187, lr: 4.35e-02, grad_scale: 32.0 +2024-01-15 12:05:05,793 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=8120.0, ans=0.009104347826086956 +2024-01-15 12:05:09,381 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=8120.0, ans=0.0 +2024-01-15 12:05:11,836 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=8120.0, ans=0.2188 +2024-01-15 12:05:16,673 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=8153.333333333333, ans=0.125 +2024-01-15 12:05:19,088 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=8153.333333333333, ans=0.6146333333333334 +2024-01-15 12:05:23,692 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=8153.333333333333, ans=0.0 +2024-01-15 12:05:37,223 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.99 vs. limit=7.046666666666667 +2024-01-15 12:05:38,079 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 12:05:39,673 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.83 vs. limit=10.57 +2024-01-15 12:05:54,444 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=13.37 vs. limit=13.690000000000001 +2024-01-15 12:05:55,406 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.18 vs. limit=10.595 +2024-01-15 12:06:05,759 INFO [train.py:994] (0/2) Epoch 3, batch 800, loss[loss=0.3003, simple_loss=0.3417, pruned_loss=0.1295, over 24410.00 frames. ], tot_loss[loss=0.3109, simple_loss=0.3475, pruned_loss=0.137, over 4737380.16 frames. 
], batch size: 159, lr: 4.34e-02, grad_scale: 32.0 +2024-01-15 12:06:20,030 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=8320.0, ans=0.125 +2024-01-15 12:06:27,761 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.950e+02 2.927e+02 3.165e+02 3.748e+02 6.702e+02, threshold=6.330e+02, percent-clipped=0.0 +2024-01-15 12:06:31,449 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=8353.333333333334, ans=0.125 +2024-01-15 12:06:53,417 INFO [scaling.py:1022] (0/2) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=5.59 vs. limit=5.684 +2024-01-15 12:06:54,868 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-3.pt +2024-01-15 12:07:17,341 INFO [train.py:994] (0/2) Epoch 4, batch 0, loss[loss=0.2845, simple_loss=0.3305, pruned_loss=0.1193, over 24436.00 frames. ], tot_loss[loss=0.2845, simple_loss=0.3305, pruned_loss=0.1193, over 24436.00 frames. ], batch size: 258, lr: 4.29e-02, grad_scale: 32.0 +2024-01-15 12:07:17,342 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 12:07:37,228 INFO [train.py:1026] (0/2) Epoch 4, validation: loss=0.222, simple_loss=0.3027, pruned_loss=0.07067, over 1622729.00 frames. +2024-01-15 12:07:37,229 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 12:08:01,899 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=8496.666666666666, ans=0.125 +2024-01-15 12:08:02,188 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.66 vs. limit=10.68625 +2024-01-15 12:08:07,749 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=8496.666666666666, ans=0.009022463768115942 +2024-01-15 12:08:10,674 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=8496.666666666666, ans=0.21503333333333335 +2024-01-15 12:08:17,835 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=8530.0, ans=0.125 +2024-01-15 12:08:39,856 INFO [train.py:994] (0/2) Epoch 4, batch 50, loss[loss=0.3007, simple_loss=0.3418, pruned_loss=0.1298, over 24548.00 frames. ], tot_loss[loss=0.2964, simple_loss=0.3375, pruned_loss=0.1276, over 1076772.14 frames. ], batch size: 193, lr: 4.28e-02, grad_scale: 32.0 +2024-01-15 12:09:12,567 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.074e+02 2.645e+02 2.860e+02 3.274e+02 6.285e+02, threshold=5.719e+02, percent-clipped=1.0 +2024-01-15 12:09:20,691 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=8696.666666666666, ans=0.09899494936611666 +2024-01-15 12:09:27,071 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=14.72 vs. 
limit=14.022499999999999 +2024-01-15 12:09:28,245 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=8696.666666666666, ans=0.05 +2024-01-15 12:09:31,933 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=8730.0, ans=0.030291666666666668 +2024-01-15 12:09:39,931 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=8730.0, ans=0.008971739130434782 +2024-01-15 12:09:42,904 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.45 vs. limit=14.0725 +2024-01-15 12:09:43,153 INFO [train.py:994] (0/2) Epoch 4, batch 100, loss[loss=0.2792, simple_loss=0.3273, pruned_loss=0.1156, over 24418.00 frames. ], tot_loss[loss=0.2975, simple_loss=0.339, pruned_loss=0.128, over 1910265.62 frames. ], batch size: 159, lr: 4.28e-02, grad_scale: 32.0 +2024-01-15 12:09:44,544 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=8763.333333333334, ans=0.035 +2024-01-15 12:09:45,785 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=8763.333333333334, ans=0.125 +2024-01-15 12:09:48,272 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=8763.333333333334, ans=0.125 +2024-01-15 12:09:56,238 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=8796.666666666666, ans=0.09899494936611666 +2024-01-15 12:10:15,176 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=8830.0, ans=0.025 +2024-01-15 12:10:15,682 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=9.46 vs. limit=10.81125 +2024-01-15 12:10:46,967 INFO [train.py:994] (0/2) Epoch 4, batch 150, loss[loss=0.3029, simple_loss=0.3467, pruned_loss=0.1295, over 24376.00 frames. ], tot_loss[loss=0.2974, simple_loss=0.3393, pruned_loss=0.1277, over 2550591.35 frames. ], batch size: 275, lr: 4.27e-02, grad_scale: 32.0 +2024-01-15 12:10:50,886 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=8930.0, ans=0.125 +2024-01-15 12:10:51,241 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=15.02 vs. limit=14.1975 +2024-01-15 12:11:02,201 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=8963.333333333334, ans=0.125 +2024-01-15 12:11:10,761 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=8996.666666666666, ans=0.125 +2024-01-15 12:11:19,236 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.225e+02 2.688e+02 3.071e+02 4.061e+02 8.177e+02, threshold=6.142e+02, percent-clipped=7.0 +2024-01-15 12:11:32,606 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.00 vs. 
limit=10.88625 +2024-01-15 12:11:37,118 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=9063.333333333334, ans=0.5827833333333334 +2024-01-15 12:11:42,552 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=9063.333333333334, ans=0.125 +2024-01-15 12:11:42,569 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=9063.333333333334, ans=0.125 +2024-01-15 12:11:50,083 INFO [train.py:994] (0/2) Epoch 4, batch 200, loss[loss=0.2891, simple_loss=0.3363, pruned_loss=0.121, over 24289.00 frames. ], tot_loss[loss=0.2968, simple_loss=0.3393, pruned_loss=0.1271, over 3059583.22 frames. ], batch size: 147, lr: 4.27e-02, grad_scale: 32.0 +2024-01-15 12:11:52,761 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=9096.666666666666, ans=0.125 +2024-01-15 12:11:57,505 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=9096.666666666666, ans=0.5816166666666667 +2024-01-15 12:11:58,752 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=9096.666666666666, ans=0.5816166666666667 +2024-01-15 12:12:00,329 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.03 vs. limit=9.548333333333332 +2024-01-15 12:12:25,153 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=9163.333333333334, ans=0.20836666666666664 +2024-01-15 12:12:45,658 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=9230.0, ans=0.028208333333333335 +2024-01-15 12:12:53,778 INFO [train.py:994] (0/2) Epoch 4, batch 250, loss[loss=0.2912, simple_loss=0.3265, pruned_loss=0.1279, over 23944.00 frames. ], tot_loss[loss=0.2955, simple_loss=0.3381, pruned_loss=0.1265, over 3439598.77 frames. ], batch size: 131, lr: 4.26e-02, grad_scale: 32.0 +2024-01-15 12:13:16,582 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=9296.666666666666, ans=0.008848550724637682 +2024-01-15 12:13:17,736 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=9296.666666666666, ans=0.09899494936611666 +2024-01-15 12:13:19,262 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.19 vs. limit=10.99875 +2024-01-15 12:13:26,689 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=13.28 vs. limit=14.497499999999999 +2024-01-15 12:13:28,412 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.153e+02 2.732e+02 3.101e+02 3.691e+02 6.166e+02, threshold=6.201e+02, percent-clipped=1.0 +2024-01-15 12:13:36,230 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=9363.333333333334, ans=0.125 +2024-01-15 12:13:59,083 INFO [train.py:994] (0/2) Epoch 4, batch 300, loss[loss=0.2903, simple_loss=0.3306, pruned_loss=0.125, over 24509.00 frames. 
], tot_loss[loss=0.2942, simple_loss=0.337, pruned_loss=0.1257, over 3744497.19 frames. ], batch size: 229, lr: 4.26e-02, grad_scale: 32.0 +2024-01-15 12:14:03,857 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=9430.0, ans=0.125 +2024-01-15 12:14:39,099 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer_na.min_abs, batch_count=9530.0, ans=0.02 +2024-01-15 12:14:41,680 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=9530.0, ans=0.125 +2024-01-15 12:14:42,855 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=9530.0, ans=0.125 +2024-01-15 12:15:03,279 INFO [train.py:994] (0/2) Epoch 4, batch 350, loss[loss=0.2724, simple_loss=0.3205, pruned_loss=0.1122, over 24346.00 frames. ], tot_loss[loss=0.2944, simple_loss=0.3379, pruned_loss=0.1254, over 3989056.37 frames. ], batch size: 153, lr: 4.26e-02, grad_scale: 32.0 +2024-01-15 12:15:08,385 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=9596.666666666666, ans=0.125 +2024-01-15 12:15:08,486 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=9596.666666666666, ans=0.02668055555555556 +2024-01-15 12:15:20,649 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=9630.0, ans=0.07 +2024-01-15 12:15:28,212 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.16 vs. limit=14.747499999999999 +2024-01-15 12:15:35,762 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.316e+02 2.827e+02 3.214e+02 3.907e+02 6.387e+02, threshold=6.428e+02, percent-clipped=1.0 +2024-01-15 12:15:39,933 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=7.54 vs. limit=7.4158333333333335 +2024-01-15 12:16:06,347 INFO [train.py:994] (0/2) Epoch 4, batch 400, loss[loss=0.2682, simple_loss=0.3209, pruned_loss=0.1078, over 24331.00 frames. ], tot_loss[loss=0.293, simple_loss=0.337, pruned_loss=0.1245, over 4172830.94 frames. ], batch size: 147, lr: 4.25e-02, grad_scale: 32.0 +2024-01-15 12:16:32,421 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=4.73 vs. limit=9.915 +2024-01-15 12:16:42,726 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=9830.0, ans=0.0 +2024-01-15 12:16:51,221 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=9863.333333333334, ans=0.0 +2024-01-15 12:17:10,519 INFO [train.py:994] (0/2) Epoch 4, batch 450, loss[loss=0.2978, simple_loss=0.347, pruned_loss=0.1243, over 23907.00 frames. ], tot_loss[loss=0.2912, simple_loss=0.3358, pruned_loss=0.1233, over 4325461.02 frames. 
], batch size: 328, lr: 4.25e-02, grad_scale: 32.0 +2024-01-15 12:17:43,443 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.280e+02 2.708e+02 3.043e+02 3.598e+02 6.128e+02, threshold=6.087e+02, percent-clipped=0.0 +2024-01-15 12:17:43,745 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=9996.666666666666, ans=0.008696376811594204 +2024-01-15 12:17:51,914 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=10030.0, ans=0.1997 +2024-01-15 12:18:10,548 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 12:18:13,762 INFO [train.py:994] (0/2) Epoch 4, batch 500, loss[loss=0.2885, simple_loss=0.3366, pruned_loss=0.1202, over 24497.00 frames. ], tot_loss[loss=0.2898, simple_loss=0.3347, pruned_loss=0.1224, over 4437713.71 frames. ], batch size: 181, lr: 4.24e-02, grad_scale: 32.0 +2024-01-15 12:18:25,118 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=10096.666666666666, ans=0.008674637681159421 +2024-01-15 12:18:33,671 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=10130.0, ans=0.125 +2024-01-15 12:18:45,416 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.31 vs. limit=4.5245 +2024-01-15 12:19:17,908 INFO [train.py:994] (0/2) Epoch 4, batch 550, loss[loss=0.234, simple_loss=0.2645, pruned_loss=0.1017, over 17840.00 frames. ], tot_loss[loss=0.2877, simple_loss=0.3329, pruned_loss=0.1213, over 4496320.81 frames. ], batch size: 77, lr: 4.24e-02, grad_scale: 32.0 +2024-01-15 12:19:26,644 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=10263.333333333334, ans=0.008638405797101449 +2024-01-15 12:19:50,115 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.66 vs. limit=4.5495 +2024-01-15 12:19:50,510 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.184e+02 2.675e+02 3.062e+02 3.666e+02 5.960e+02, threshold=6.124e+02, percent-clipped=0.0 +2024-01-15 12:20:02,316 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=10363.333333333334, ans=0.19636666666666666 +2024-01-15 12:20:02,755 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=18.98 vs. limit=15.2725 +2024-01-15 12:20:07,281 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10396.666666666666, ans=0.19603333333333334 +2024-01-15 12:20:17,603 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=10396.666666666666, ans=0.125 +2024-01-15 12:20:20,944 INFO [train.py:994] (0/2) Epoch 4, batch 600, loss[loss=0.292, simple_loss=0.3381, pruned_loss=0.123, over 24306.00 frames. ], tot_loss[loss=0.2864, simple_loss=0.332, pruned_loss=0.1204, over 4562335.62 frames. 
], batch size: 285, lr: 4.23e-02, grad_scale: 32.0 +2024-01-15 12:20:36,050 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=10463.333333333334, ans=0.125 +2024-01-15 12:20:37,291 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=10463.333333333334, ans=0.5337833333333334 +2024-01-15 12:20:56,826 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.68 vs. limit=4.5745000000000005 +2024-01-15 12:20:58,836 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=10530.0, ans=0.125 +2024-01-15 12:21:18,977 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=8.31 vs. limit=11.46125 +2024-01-15 12:21:24,612 INFO [train.py:994] (0/2) Epoch 4, batch 650, loss[loss=0.2775, simple_loss=0.3225, pruned_loss=0.1163, over 24348.00 frames. ], tot_loss[loss=0.286, simple_loss=0.3318, pruned_loss=0.1201, over 4604066.11 frames. ], batch size: 153, lr: 4.23e-02, grad_scale: 32.0 +2024-01-15 12:21:50,045 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.82 vs. limit=11.49875 +2024-01-15 12:21:52,866 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=7.65 vs. limit=7.6658333333333335 +2024-01-15 12:21:58,959 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.008e+02 2.685e+02 3.057e+02 3.561e+02 6.746e+02, threshold=6.115e+02, percent-clipped=1.0 +2024-01-15 12:21:59,296 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10663.333333333334, ans=0.19336666666666666 +2024-01-15 12:22:29,230 INFO [train.py:994] (0/2) Epoch 4, batch 700, loss[loss=0.2471, simple_loss=0.305, pruned_loss=0.09457, over 24154.00 frames. ], tot_loss[loss=0.2839, simple_loss=0.3305, pruned_loss=0.1186, over 4651626.09 frames. ], batch size: 140, lr: 4.22e-02, grad_scale: 32.0 +2024-01-15 12:22:30,829 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=10763.333333333334, ans=0.09899494936611666 +2024-01-15 12:23:02,074 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=10830.0, ans=0.125 +2024-01-15 12:23:04,525 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=10830.0, ans=0.19169999999999998 +2024-01-15 12:23:12,443 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=10863.333333333334, ans=0.021402777777777774 +2024-01-15 12:23:31,297 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=10930.0, ans=0.125 +2024-01-15 12:23:32,152 INFO [train.py:994] (0/2) Epoch 4, batch 750, loss[loss=0.2641, simple_loss=0.3199, pruned_loss=0.1042, over 24545.00 frames. ], tot_loss[loss=0.2823, simple_loss=0.3296, pruned_loss=0.1175, over 4690283.46 frames. 
], batch size: 193, lr: 4.22e-02, grad_scale: 64.0 +2024-01-15 12:23:54,361 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=10963.333333333334, ans=0.19036666666666666 +2024-01-15 12:24:01,114 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.36 vs. limit=11.623750000000001 +2024-01-15 12:24:04,139 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.122e+02 2.570e+02 2.957e+02 3.374e+02 5.779e+02, threshold=5.914e+02, percent-clipped=0.0 +2024-01-15 12:24:10,115 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=11030.0, ans=0.020708333333333336 +2024-01-15 12:24:18,638 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=11030.0, ans=0.5139500000000001 +2024-01-15 12:24:31,906 INFO [train.py:994] (0/2) Epoch 4, batch 800, loss[loss=0.2761, simple_loss=0.328, pruned_loss=0.1121, over 24538.00 frames. ], tot_loss[loss=0.2806, simple_loss=0.328, pruned_loss=0.1166, over 4707941.85 frames. ], batch size: 165, lr: 4.21e-02, grad_scale: 64.0 +2024-01-15 12:24:38,363 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.max_positive, batch_count=11096.666666666666, ans=0.8609666666666667 +2024-01-15 12:24:46,348 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=11130.0, ans=0.125 +2024-01-15 12:25:05,148 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=9.45 vs. limit=10.581666666666667 +2024-01-15 12:25:05,698 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=11196.666666666666, ans=0.020013888888888894 +2024-01-15 12:25:21,436 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-4.pt +2024-01-15 12:25:45,406 INFO [train.py:994] (0/2) Epoch 5, batch 0, loss[loss=0.2651, simple_loss=0.3195, pruned_loss=0.1054, over 24437.00 frames. ], tot_loss[loss=0.2651, simple_loss=0.3195, pruned_loss=0.1054, over 24437.00 frames. ], batch size: 258, lr: 4.14e-02, grad_scale: 64.0 +2024-01-15 12:25:45,407 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 12:26:05,014 INFO [train.py:1026] (0/2) Epoch 5, validation: loss=0.2095, simple_loss=0.2939, pruned_loss=0.06254, over 1622729.00 frames. +2024-01-15 12:26:05,014 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 12:26:21,618 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=9.90 vs. limit=10.636666666666667 +2024-01-15 12:26:23,287 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=11273.333333333334, ans=0.01969444444444444 +2024-01-15 12:26:36,161 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=16.11 vs. 
limit=15.98 +2024-01-15 12:26:41,726 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=11306.666666666666, ans=0.00841159420289855 +2024-01-15 12:26:47,271 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.272e+02 2.795e+02 3.154e+02 3.646e+02 6.070e+02, threshold=6.308e+02, percent-clipped=1.0 +2024-01-15 12:26:52,684 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.81 vs. limit=10.67 +2024-01-15 12:27:08,763 INFO [train.py:994] (0/2) Epoch 5, batch 50, loss[loss=0.2806, simple_loss=0.3247, pruned_loss=0.1183, over 24531.00 frames. ], tot_loss[loss=0.2712, simple_loss=0.3217, pruned_loss=0.1104, over 1101507.24 frames. ], batch size: 210, lr: 4.14e-02, grad_scale: 64.0 +2024-01-15 12:27:09,022 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=11406.666666666666, ans=0.00838985507246377 +2024-01-15 12:27:31,045 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=17.10 vs. limit=16.08 +2024-01-15 12:27:41,967 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.53 vs. limit=4.721 +2024-01-15 12:27:49,970 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=11506.666666666666, ans=0.125 +2024-01-15 12:27:52,347 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=11506.666666666666, ans=0.125 +2024-01-15 12:27:55,420 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=11506.666666666666, ans=0.018722222222222223 +2024-01-15 12:28:09,356 INFO [train.py:994] (0/2) Epoch 5, batch 100, loss[loss=0.2547, simple_loss=0.31, pruned_loss=0.09974, over 24522.00 frames. ], tot_loss[loss=0.2719, simple_loss=0.3224, pruned_loss=0.1107, over 1933565.25 frames. 
], batch size: 243, lr: 4.13e-02, grad_scale: 32.0 +2024-01-15 12:28:10,812 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=11573.333333333334, ans=0.125 +2024-01-15 12:28:29,398 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=11606.666666666666, ans=0.01830555555555556 +2024-01-15 12:28:33,089 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=11640.0, ans=0.07 +2024-01-15 12:28:38,888 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=11640.0, ans=0.18359999999999999 +2024-01-15 12:28:51,705 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.131e+02 2.763e+02 3.191e+02 3.657e+02 4.783e+02, threshold=6.382e+02, percent-clipped=0.0 +2024-01-15 12:28:54,423 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=11673.333333333334, ans=0.018027777777777775 +2024-01-15 12:28:55,626 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=11673.333333333334, ans=0.4914333333333334 +2024-01-15 12:28:59,340 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.67 vs. limit=4.756 +2024-01-15 12:29:11,264 INFO [train.py:994] (0/2) Epoch 5, batch 150, loss[loss=0.2829, simple_loss=0.3312, pruned_loss=0.1173, over 24517.00 frames. ], tot_loss[loss=0.2724, simple_loss=0.3231, pruned_loss=0.1109, over 2568515.69 frames. ], batch size: 187, lr: 4.13e-02, grad_scale: 32.0 +2024-01-15 12:29:32,409 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=11773.333333333334, ans=0.18226666666666666 +2024-01-15 12:29:38,366 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.12 vs. limit=11.9275 +2024-01-15 12:29:54,927 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=11840.0, ans=0.05 +2024-01-15 12:29:54,947 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=11840.0, ans=0.48560000000000003 +2024-01-15 12:30:13,338 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=10.11 vs. limit=11.965 +2024-01-15 12:30:13,649 INFO [train.py:994] (0/2) Epoch 5, batch 200, loss[loss=0.264, simple_loss=0.3256, pruned_loss=0.1012, over 24522.00 frames. ], tot_loss[loss=0.2719, simple_loss=0.3226, pruned_loss=0.1105, over 3070030.25 frames. 
], batch size: 229, lr: 4.12e-02, grad_scale: 32.0 +2024-01-15 12:30:17,437 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=11906.666666666666, ans=0.18093333333333333 +2024-01-15 12:30:26,446 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=11940.0, ans=0.125 +2024-01-15 12:30:26,496 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 12:30:28,960 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=11940.0, ans=0.00827391304347826 +2024-01-15 12:30:35,566 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=11940.0, ans=0.125 +2024-01-15 12:30:40,305 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=11973.333333333334, ans=0.18026666666666666 +2024-01-15 12:30:56,392 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.019e+02 2.655e+02 3.060e+02 3.593e+02 5.323e+02, threshold=6.121e+02, percent-clipped=0.0 +2024-01-15 12:31:16,481 INFO [train.py:994] (0/2) Epoch 5, batch 250, loss[loss=0.2951, simple_loss=0.3423, pruned_loss=0.1239, over 23927.00 frames. ], tot_loss[loss=0.2707, simple_loss=0.3218, pruned_loss=0.1097, over 3453540.92 frames. ], batch size: 328, lr: 4.11e-02, grad_scale: 32.0 +2024-01-15 12:32:03,106 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=12.02 vs. limit=12.065000000000001 +2024-01-15 12:32:05,206 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=22.12 vs. limit=16.655 +2024-01-15 12:32:13,793 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=12206.666666666666, ans=0.008215942028985507 +2024-01-15 12:32:18,796 INFO [train.py:994] (0/2) Epoch 5, batch 300, loss[loss=0.2843, simple_loss=0.3391, pruned_loss=0.1148, over 24510.00 frames. ], tot_loss[loss=0.2703, simple_loss=0.3214, pruned_loss=0.1096, over 3742088.91 frames. ], batch size: 216, lr: 4.11e-02, grad_scale: 32.0 +2024-01-15 12:32:31,431 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=16.32 vs. limit=16.705 +2024-01-15 12:32:48,995 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=12306.666666666666, ans=0.015388888888888896 +2024-01-15 12:33:01,032 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.998e+02 2.817e+02 3.255e+02 3.789e+02 6.761e+02, threshold=6.509e+02, percent-clipped=1.0 +2024-01-15 12:33:20,946 INFO [train.py:994] (0/2) Epoch 5, batch 350, loss[loss=0.2481, simple_loss=0.3037, pruned_loss=0.09627, over 24480.00 frames. ], tot_loss[loss=0.2698, simple_loss=0.3213, pruned_loss=0.1092, over 3992492.56 frames. 
], batch size: 210, lr: 4.10e-02, grad_scale: 32.0 +2024-01-15 12:33:32,005 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=12406.666666666666, ans=0.0 +2024-01-15 12:33:47,851 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=12473.333333333334, ans=0.125 +2024-01-15 12:33:52,764 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=12473.333333333334, ans=0.125 +2024-01-15 12:33:56,276 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=12473.333333333334, ans=0.17526666666666668 +2024-01-15 12:34:03,607 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=12506.666666666666, ans=0.00815072463768116 +2024-01-15 12:34:05,578 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=12506.666666666666, ans=0.125 +2024-01-15 12:34:07,210 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.23 vs. limit=4.8759999999999994 +2024-01-15 12:34:15,917 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=12540.0, ans=0.0 +2024-01-15 12:34:25,154 INFO [train.py:994] (0/2) Epoch 5, batch 400, loss[loss=0.2885, simple_loss=0.3389, pruned_loss=0.119, over 24214.00 frames. ], tot_loss[loss=0.2692, simple_loss=0.3207, pruned_loss=0.1088, over 4168536.47 frames. ], batch size: 311, lr: 4.10e-02, grad_scale: 32.0 +2024-01-15 12:34:46,375 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.50 vs. limit=12.2275 +2024-01-15 12:34:49,587 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=12640.0, ans=0.125 +2024-01-15 12:35:06,588 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=12673.333333333334, ans=0.125 +2024-01-15 12:35:08,720 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.026e+02 2.507e+02 2.821e+02 3.302e+02 6.163e+02, threshold=5.641e+02, percent-clipped=0.0 +2024-01-15 12:35:25,014 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=12706.666666666666, ans=0.125 +2024-01-15 12:35:28,850 INFO [train.py:994] (0/2) Epoch 5, batch 450, loss[loss=0.2772, simple_loss=0.3331, pruned_loss=0.1106, over 24327.00 frames. ], tot_loss[loss=0.2685, simple_loss=0.3201, pruned_loss=0.1084, over 4305964.11 frames. ], batch size: 298, lr: 4.09e-02, grad_scale: 32.0 +2024-01-15 12:35:37,187 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.42 vs. 
limit=12.2775 +2024-01-15 12:35:42,981 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=12773.333333333334, ans=0.125 +2024-01-15 12:35:59,129 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=12806.666666666666, ans=0.07 +2024-01-15 12:36:08,894 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=12840.0, ans=0.125 +2024-01-15 12:36:10,020 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 12:36:20,411 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=12873.333333333334, ans=0.125 +2024-01-15 12:36:32,501 INFO [train.py:994] (0/2) Epoch 5, batch 500, loss[loss=0.2394, simple_loss=0.2854, pruned_loss=0.09673, over 23566.00 frames. ], tot_loss[loss=0.2678, simple_loss=0.3195, pruned_loss=0.108, over 4416086.58 frames. ], batch size: 119, lr: 4.09e-02, grad_scale: 32.0 +2024-01-15 12:36:33,044 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.36 vs. limit=17.18 +2024-01-15 12:36:40,593 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.42 vs. limit=12.34 +2024-01-15 12:36:43,716 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=12940.0, ans=0.4471 +2024-01-15 12:36:56,292 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.23 vs. limit=12.3525 +2024-01-15 12:36:59,675 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=12973.333333333334, ans=0.008049275362318841 +2024-01-15 12:37:09,206 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=13006.666666666666, ans=0.0 +2024-01-15 12:37:15,669 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.000e+02 2.645e+02 3.056e+02 3.686e+02 5.447e+02, threshold=6.112e+02, percent-clipped=0.0 +2024-01-15 12:37:25,206 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=5.74 vs. limit=11.52 +2024-01-15 12:37:32,373 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=13040.0, ans=0.04949747468305833 +2024-01-15 12:37:35,490 INFO [train.py:994] (0/2) Epoch 5, batch 550, loss[loss=0.2702, simple_loss=0.3256, pruned_loss=0.1074, over 24510.00 frames. ], tot_loss[loss=0.2679, simple_loss=0.3196, pruned_loss=0.108, over 4492974.04 frames. 
], batch size: 229, lr: 4.08e-02, grad_scale: 32.0 +2024-01-15 12:37:38,885 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=13073.333333333334, ans=0.04949747468305833 +2024-01-15 12:37:47,983 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=13106.666666666666, ans=0.125 +2024-01-15 12:37:49,162 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=13106.666666666666, ans=0.125 +2024-01-15 12:38:03,945 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=18.76 vs. limit=17.355 +2024-01-15 12:38:16,981 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=13173.333333333334, ans=0.125 +2024-01-15 12:38:18,275 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=13173.333333333334, ans=0.125 +2024-01-15 12:38:39,368 INFO [train.py:994] (0/2) Epoch 5, batch 600, loss[loss=0.2355, simple_loss=0.264, pruned_loss=0.1035, over 18024.00 frames. ], tot_loss[loss=0.267, simple_loss=0.3191, pruned_loss=0.1075, over 4556519.37 frames. ], batch size: 76, lr: 4.08e-02, grad_scale: 32.0 +2024-01-15 12:38:52,177 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.09 vs. limit=17.455 +2024-01-15 12:39:01,183 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.94 vs. limit=8.318333333333333 +2024-01-15 12:39:13,309 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/checkpoint-4000.pt +2024-01-15 12:39:16,643 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=13306.666666666666, ans=0.007976811594202899 +2024-01-15 12:39:22,731 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=13340.0, ans=0.43310000000000004 +2024-01-15 12:39:23,502 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.093e+02 2.526e+02 2.863e+02 3.334e+02 5.005e+02, threshold=5.725e+02, percent-clipped=0.0 +2024-01-15 12:39:30,003 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=13340.0, ans=0.05 +2024-01-15 12:39:37,232 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=13373.333333333334, ans=0.00796231884057971 +2024-01-15 12:39:44,245 INFO [train.py:994] (0/2) Epoch 5, batch 650, loss[loss=0.2601, simple_loss=0.3155, pruned_loss=0.1023, over 24468.00 frames. ], tot_loss[loss=0.2664, simple_loss=0.3189, pruned_loss=0.1069, over 4615722.45 frames. ], batch size: 170, lr: 4.07e-02, grad_scale: 32.0 +2024-01-15 12:39:51,276 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=13406.666666666666, ans=0.125 +2024-01-15 12:39:52,983 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=22.44 vs. 
limit=11.703333333333333 +2024-01-15 12:40:00,564 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.75 vs. limit=17.58 +2024-01-15 12:40:04,988 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 12:40:05,013 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=13440.0, ans=0.125 +2024-01-15 12:40:14,488 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=13473.333333333334, ans=0.42843333333333333 +2024-01-15 12:40:32,164 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=12.55 vs. limit=17.630000000000003 +2024-01-15 12:40:35,295 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=13540.0, ans=0.125 +2024-01-15 12:40:48,232 INFO [train.py:994] (0/2) Epoch 5, batch 700, loss[loss=0.2935, simple_loss=0.3446, pruned_loss=0.1212, over 23895.00 frames. ], tot_loss[loss=0.2655, simple_loss=0.3185, pruned_loss=0.1063, over 4661484.62 frames. ], batch size: 328, lr: 4.06e-02, grad_scale: 32.0 +2024-01-15 12:40:52,546 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=10.24 vs. limit=12.59 +2024-01-15 12:41:00,509 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=13606.666666666666, ans=0.16393333333333335 +2024-01-15 12:41:16,287 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=13640.0, ans=0.125 +2024-01-15 12:41:17,866 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.20 vs. limit=12.615 +2024-01-15 12:41:19,807 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=13640.0, ans=0.125 +2024-01-15 12:41:30,114 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.027e+02 2.828e+02 3.267e+02 3.789e+02 7.603e+02, threshold=6.534e+02, percent-clipped=3.0 +2024-01-15 12:41:32,117 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=11.89 vs. limit=12.627500000000001 +2024-01-15 12:41:50,772 INFO [train.py:994] (0/2) Epoch 5, batch 750, loss[loss=0.2876, simple_loss=0.345, pruned_loss=0.1151, over 22195.00 frames. ], tot_loss[loss=0.2647, simple_loss=0.3177, pruned_loss=0.1058, over 4683916.40 frames. 
], batch size: 357, lr: 4.06e-02, grad_scale: 32.0 +2024-01-15 12:41:51,151 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=13740.0, ans=0.125 +2024-01-15 12:42:01,273 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=13740.0, ans=0.125 +2024-01-15 12:42:44,094 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=13873.333333333334, ans=0.125 +2024-01-15 12:42:48,640 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=13873.333333333334, ans=0.09899494936611666 +2024-01-15 12:42:49,789 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=13873.333333333334, ans=0.4144333333333333 +2024-01-15 12:42:51,801 INFO [train.py:994] (0/2) Epoch 5, batch 800, loss[loss=0.2579, simple_loss=0.3181, pruned_loss=0.09886, over 24353.00 frames. ], tot_loss[loss=0.2635, simple_loss=0.3168, pruned_loss=0.1051, over 4706527.02 frames. ], batch size: 298, lr: 4.05e-02, grad_scale: 32.0 +2024-01-15 12:43:13,944 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=13973.333333333334, ans=0.007831884057971014 +2024-01-15 12:43:15,094 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=13973.333333333334, ans=0.16026666666666667 +2024-01-15 12:43:31,535 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.918e+02 2.625e+02 2.874e+02 3.417e+02 5.842e+02, threshold=5.748e+02, percent-clipped=0.0 +2024-01-15 12:43:31,821 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=14006.666666666666, ans=0.15993333333333334 +2024-01-15 12:43:34,657 INFO [scaling.py:1022] (0/2) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.25 vs. limit=5.0 +2024-01-15 12:43:41,428 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-5.pt +2024-01-15 12:44:05,334 INFO [train.py:994] (0/2) Epoch 6, batch 0, loss[loss=0.2669, simple_loss=0.3238, pruned_loss=0.105, over 24525.00 frames. ], tot_loss[loss=0.2669, simple_loss=0.3238, pruned_loss=0.105, over 24525.00 frames. ], batch size: 181, lr: 3.97e-02, grad_scale: 32.0 +2024-01-15 12:44:05,336 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 12:44:25,848 INFO [train.py:1026] (0/2) Epoch 6, validation: loss=0.2018, simple_loss=0.2869, pruned_loss=0.05834, over 1622729.00 frames. +2024-01-15 12:44:25,849 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 12:44:30,659 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.45 vs. 
limit=12.76875 +2024-01-15 12:44:41,148 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=14083.333333333334, ans=0.125 +2024-01-15 12:44:57,967 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=14116.666666666666, ans=0.035 +2024-01-15 12:45:00,723 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.75 vs. limit=5.1175 +2024-01-15 12:45:05,045 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=14150.0, ans=0.1585 +2024-01-15 12:45:20,543 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=14183.333333333334, ans=0.125 +2024-01-15 12:45:28,488 INFO [train.py:994] (0/2) Epoch 6, batch 50, loss[loss=0.2562, simple_loss=0.3097, pruned_loss=0.1014, over 24451.00 frames. ], tot_loss[loss=0.2552, simple_loss=0.3112, pruned_loss=0.09961, over 1088362.25 frames. ], batch size: 181, lr: 3.97e-02, grad_scale: 32.0 +2024-01-15 12:45:45,870 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=18.52 vs. limit=18.1875 +2024-01-15 12:45:51,268 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=14250.0, ans=0.125 +2024-01-15 12:45:57,325 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=14283.333333333334, ans=0.15716666666666668 +2024-01-15 12:46:01,882 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=14283.333333333334, ans=0.007152777777777772 +2024-01-15 12:46:08,475 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=14316.666666666666, ans=0.125 +2024-01-15 12:46:16,166 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=14316.666666666666, ans=0.125 +2024-01-15 12:46:19,406 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.144e+02 2.532e+02 2.872e+02 3.378e+02 9.523e+02, threshold=5.745e+02, percent-clipped=4.0 +2024-01-15 12:46:20,839 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=14350.0, ans=10.0 +2024-01-15 12:46:29,989 INFO [train.py:994] (0/2) Epoch 6, batch 100, loss[loss=0.2934, simple_loss=0.3431, pruned_loss=0.1218, over 22465.00 frames. ], tot_loss[loss=0.2547, simple_loss=0.3101, pruned_loss=0.09965, over 1911865.04 frames. ], batch size: 357, lr: 3.96e-02, grad_scale: 32.0 +2024-01-15 12:46:31,425 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=14383.333333333334, ans=0.007742753623188405 +2024-01-15 12:46:50,840 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=14416.666666666666, ans=0.39541666666666675 +2024-01-15 12:46:53,372 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=17.09 vs. 
limit=18.3125 +2024-01-15 12:47:15,634 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=14483.333333333334, ans=0.15516666666666667 +2024-01-15 12:47:16,939 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=14483.333333333334, ans=0.00631944444444444 +2024-01-15 12:47:18,015 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.max_abs, batch_count=14483.333333333334, ans=10.0 +2024-01-15 12:47:31,269 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=14516.666666666666, ans=0.125 +2024-01-15 12:47:33,313 INFO [train.py:994] (0/2) Epoch 6, batch 150, loss[loss=0.2659, simple_loss=0.317, pruned_loss=0.1073, over 24451.00 frames. ], tot_loss[loss=0.2548, simple_loss=0.3106, pruned_loss=0.09947, over 2563356.57 frames. ], batch size: 170, lr: 3.96e-02, grad_scale: 32.0 +2024-01-15 12:48:18,823 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 12:48:24,979 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.141e+02 2.520e+02 2.957e+02 3.469e+02 5.327e+02, threshold=5.915e+02, percent-clipped=0.0 +2024-01-15 12:48:35,508 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.29 vs. limit=5.2075 +2024-01-15 12:48:35,716 INFO [train.py:994] (0/2) Epoch 6, batch 200, loss[loss=0.2492, simple_loss=0.3072, pruned_loss=0.09558, over 24413.00 frames. ], tot_loss[loss=0.2556, simple_loss=0.3108, pruned_loss=0.1002, over 3046351.26 frames. ], batch size: 258, lr: 3.95e-02, grad_scale: 32.0 +2024-01-15 12:48:36,414 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.75 vs. limit=13.01875 +2024-01-15 12:48:45,074 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=14716.666666666666, ans=0.0076702898550724645 +2024-01-15 12:48:56,439 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=14750.0, ans=0.005208333333333336 +2024-01-15 12:48:56,891 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.81 vs. 
limit=13.03125 +2024-01-15 12:49:05,698 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=14783.333333333334, ans=0.125 +2024-01-15 12:49:06,904 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=14783.333333333334, ans=0.005069444444444439 +2024-01-15 12:49:15,475 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=14816.666666666666, ans=0.007648550724637682 +2024-01-15 12:49:16,764 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=14816.666666666666, ans=0.38141666666666674 +2024-01-15 12:49:33,864 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=14850.0, ans=0.15150000000000002 +2024-01-15 12:49:36,901 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=14850.0, ans=10.0 +2024-01-15 12:49:39,792 INFO [train.py:994] (0/2) Epoch 6, batch 250, loss[loss=0.2599, simple_loss=0.3153, pruned_loss=0.1023, over 24398.00 frames. ], tot_loss[loss=0.2553, simple_loss=0.3109, pruned_loss=0.09983, over 3434907.49 frames. ], batch size: 153, lr: 3.94e-02, grad_scale: 16.0 +2024-01-15 12:49:55,649 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=14916.666666666666, ans=0.15083333333333335 +2024-01-15 12:50:09,880 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=14950.0, ans=0.125 +2024-01-15 12:50:24,301 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=14983.333333333334, ans=10.0 +2024-01-15 12:50:31,385 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=15016.666666666666, ans=0.125 +2024-01-15 12:50:31,395 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=15016.666666666666, ans=0.125 +2024-01-15 12:50:32,176 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.898e+02 2.584e+02 2.958e+02 3.363e+02 5.470e+02, threshold=5.917e+02, percent-clipped=0.0 +2024-01-15 12:50:40,885 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=15050.0, ans=0.125 +2024-01-15 12:50:41,687 INFO [train.py:994] (0/2) Epoch 6, batch 300, loss[loss=0.2673, simple_loss=0.3252, pruned_loss=0.1047, over 24526.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3118, pruned_loss=0.09986, over 3750877.53 frames. ], batch size: 204, lr: 3.94e-02, grad_scale: 16.0 +2024-01-15 12:50:47,957 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=15050.0, ans=0.0 +2024-01-15 12:51:26,055 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=15150.0, ans=0.125 +2024-01-15 12:51:44,445 INFO [train.py:994] (0/2) Epoch 6, batch 350, loss[loss=0.2469, simple_loss=0.3055, pruned_loss=0.0942, over 24499.00 frames. ], tot_loss[loss=0.2558, simple_loss=0.3117, pruned_loss=0.09995, over 3979517.40 frames. 
], batch size: 210, lr: 3.93e-02, grad_scale: 16.0 +2024-01-15 12:51:47,904 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=15216.666666666666, ans=0.14783333333333334 +2024-01-15 12:51:53,715 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 12:52:01,274 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.13 vs. limit=13.21875 +2024-01-15 12:52:10,166 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=15283.333333333334, ans=0.125 +2024-01-15 12:52:27,717 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=15316.666666666666, ans=0.36391666666666667 +2024-01-15 12:52:37,736 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.145e+02 2.642e+02 3.065e+02 3.528e+02 6.043e+02, threshold=6.131e+02, percent-clipped=0.0 +2024-01-15 12:52:39,152 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=15350.0, ans=0.125 +2024-01-15 12:52:45,583 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_positive, batch_count=15350.0, ans=0.05 +2024-01-15 12:52:47,772 INFO [train.py:994] (0/2) Epoch 6, batch 400, loss[loss=0.2414, simple_loss=0.3044, pruned_loss=0.08921, over 24472.00 frames. ], tot_loss[loss=0.2557, simple_loss=0.3117, pruned_loss=0.09987, over 4171276.55 frames. ], batch size: 222, lr: 3.93e-02, grad_scale: 32.0 +2024-01-15 12:52:53,235 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.30 vs. limit=19.0375 +2024-01-15 12:52:56,323 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=15383.333333333334, ans=0.125 +2024-01-15 12:53:10,962 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=15416.666666666666, ans=0.125 +2024-01-15 12:53:29,735 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=15483.333333333334, ans=0.125 +2024-01-15 12:53:33,495 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.36 vs. limit=13.30625 +2024-01-15 12:53:50,316 INFO [train.py:994] (0/2) Epoch 6, batch 450, loss[loss=0.2512, simple_loss=0.3034, pruned_loss=0.09944, over 24156.00 frames. ], tot_loss[loss=0.2537, simple_loss=0.3098, pruned_loss=0.09875, over 4306770.29 frames. ], batch size: 140, lr: 3.92e-02, grad_scale: 32.0 +2024-01-15 12:53:50,651 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=15550.0, ans=0.007489130434782609 +2024-01-15 12:54:29,814 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.85 vs. 
limit=5.3475 +2024-01-15 12:54:32,941 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=15650.0, ans=0.125 +2024-01-15 12:54:41,177 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=15683.333333333334, ans=0.9068333333333333 +2024-01-15 12:54:43,351 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.107e+02 2.574e+02 3.083e+02 3.661e+02 5.434e+02, threshold=6.165e+02, percent-clipped=0.0 +2024-01-15 12:54:54,286 INFO [train.py:994] (0/2) Epoch 6, batch 500, loss[loss=0.2488, simple_loss=0.3055, pruned_loss=0.09601, over 24371.00 frames. ], tot_loss[loss=0.253, simple_loss=0.3098, pruned_loss=0.09816, over 4431076.81 frames. ], batch size: 275, lr: 3.92e-02, grad_scale: 32.0 +2024-01-15 12:54:54,592 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=15716.666666666666, ans=0.125 +2024-01-15 12:55:06,560 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=15750.0, ans=0.34875 +2024-01-15 12:55:44,869 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=15850.0, ans=0.125 +2024-01-15 12:55:48,278 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=15850.0, ans=0.14150000000000001 +2024-01-15 12:55:56,331 INFO [train.py:994] (0/2) Epoch 6, batch 550, loss[loss=0.2522, simple_loss=0.3076, pruned_loss=0.09845, over 24465.00 frames. ], tot_loss[loss=0.2522, simple_loss=0.3092, pruned_loss=0.09764, over 4522679.45 frames. ], batch size: 276, lr: 3.91e-02, grad_scale: 32.0 +2024-01-15 12:55:56,673 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=15883.333333333334, ans=0.125 +2024-01-15 12:56:24,185 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=15950.0, ans=0.125 +2024-01-15 12:56:50,480 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.044e+02 2.391e+02 2.809e+02 3.171e+02 7.983e+02, threshold=5.617e+02, percent-clipped=2.0 +2024-01-15 12:56:59,326 INFO [train.py:994] (0/2) Epoch 6, batch 600, loss[loss=0.2509, simple_loss=0.3039, pruned_loss=0.09894, over 24511.00 frames. ], tot_loss[loss=0.252, simple_loss=0.3091, pruned_loss=0.09747, over 4591531.55 frames. 
], batch size: 204, lr: 3.90e-02, grad_scale: 16.0 +2024-01-15 12:57:14,664 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=16083.333333333334, ans=0.125 +2024-01-15 12:57:15,769 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 12:57:33,660 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=16116.666666666666, ans=0.0 +2024-01-15 12:57:40,930 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=16150.0, ans=0.0 +2024-01-15 12:57:49,750 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=16183.333333333334, ans=0.13816666666666666 +2024-01-15 12:57:52,236 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=16183.333333333334, ans=0.13816666666666666 +2024-01-15 12:57:53,412 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=16183.333333333334, ans=0.125 +2024-01-15 12:58:01,232 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=16216.666666666666, ans=0.0 +2024-01-15 12:58:02,122 INFO [train.py:994] (0/2) Epoch 6, batch 650, loss[loss=0.2443, simple_loss=0.3054, pruned_loss=0.09158, over 24432.00 frames. ], tot_loss[loss=0.2516, simple_loss=0.3086, pruned_loss=0.09728, over 4612494.74 frames. ], batch size: 250, lr: 3.90e-02, grad_scale: 16.0 +2024-01-15 12:58:08,653 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.50 vs. limit=13.108333333333333 +2024-01-15 12:58:11,102 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.02 vs. limit=9.054166666666667 +2024-01-15 12:58:11,736 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=16216.666666666666, ans=0.125 +2024-01-15 12:58:15,893 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=16250.0, ans=0.0875 +2024-01-15 12:58:19,674 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=16.82 vs. limit=19.6875 +2024-01-15 12:58:55,716 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.004e+02 2.577e+02 2.996e+02 4.100e+02 6.936e+02, threshold=5.993e+02, percent-clipped=2.0 +2024-01-15 12:59:04,783 INFO [train.py:994] (0/2) Epoch 6, batch 700, loss[loss=0.2438, simple_loss=0.301, pruned_loss=0.09328, over 24388.00 frames. ], tot_loss[loss=0.252, simple_loss=0.3095, pruned_loss=0.09725, over 4667674.96 frames. ], batch size: 159, lr: 3.89e-02, grad_scale: 16.0 +2024-01-15 12:59:09,767 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=16383.333333333334, ans=0.0 +2024-01-15 12:59:13,247 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.88 vs. 
limit=13.64375 +2024-01-15 12:59:19,360 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=10.64 vs. limit=13.65625 +2024-01-15 12:59:40,477 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=16450.0, ans=0.125 +2024-01-15 12:59:43,991 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=16483.333333333332, ans=0.125 +2024-01-15 13:00:06,855 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=16550.0, ans=0.0 +2024-01-15 13:00:07,661 INFO [train.py:994] (0/2) Epoch 6, batch 750, loss[loss=0.2404, simple_loss=0.3035, pruned_loss=0.08863, over 24383.00 frames. ], tot_loss[loss=0.2515, simple_loss=0.309, pruned_loss=0.09696, over 4700646.75 frames. ], batch size: 159, lr: 3.89e-02, grad_scale: 16.0 +2024-01-15 13:00:25,382 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=16583.333333333332, ans=0.007264492753623189 +2024-01-15 13:00:46,826 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=16650.0, ans=0.0 +2024-01-15 13:00:48,026 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=16650.0, ans=0.1335 +2024-01-15 13:00:50,673 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=16650.0, ans=0.00725 +2024-01-15 13:01:00,508 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.978e+02 2.440e+02 2.798e+02 3.396e+02 5.464e+02, threshold=5.595e+02, percent-clipped=0.0 +2024-01-15 13:01:08,938 INFO [train.py:994] (0/2) Epoch 6, batch 800, loss[loss=0.2393, simple_loss=0.3005, pruned_loss=0.0891, over 24168.00 frames. ], tot_loss[loss=0.2504, simple_loss=0.3081, pruned_loss=0.09632, over 4717134.95 frames. ], batch size: 140, lr: 3.88e-02, grad_scale: 32.0 +2024-01-15 13:01:11,884 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=5.38 vs. limit=13.76875 +2024-01-15 13:01:51,493 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=16816.666666666668, ans=0.0 +2024-01-15 13:01:58,204 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-6.pt +2024-01-15 13:02:20,520 INFO [train.py:994] (0/2) Epoch 7, batch 0, loss[loss=0.2446, simple_loss=0.2999, pruned_loss=0.09468, over 24330.00 frames. ], tot_loss[loss=0.2446, simple_loss=0.2999, pruned_loss=0.09468, over 24330.00 frames. ], batch size: 147, lr: 3.79e-02, grad_scale: 32.0 +2024-01-15 13:02:20,521 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 13:02:40,091 INFO [train.py:1026] (0/2) Epoch 7, validation: loss=0.1997, simple_loss=0.2857, pruned_loss=0.05682, over 1622729.00 frames. 
+2024-01-15 13:02:40,092 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 13:02:43,398 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=16860.0, ans=0.125 +2024-01-15 13:02:54,514 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=16893.333333333332, ans=0.125 +2024-01-15 13:03:13,594 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=16926.666666666668, ans=0.007189855072463768 +2024-01-15 13:03:15,005 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.91 vs. limit=13.8475 +2024-01-15 13:03:42,736 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.863e+02 2.614e+02 2.980e+02 3.791e+02 6.611e+02, threshold=5.960e+02, percent-clipped=2.0 +2024-01-15 13:03:42,763 INFO [train.py:994] (0/2) Epoch 7, batch 50, loss[loss=0.2587, simple_loss=0.3179, pruned_loss=0.09977, over 24530.00 frames. ], tot_loss[loss=0.2456, simple_loss=0.3031, pruned_loss=0.09399, over 1079337.57 frames. ], batch size: 193, lr: 3.79e-02, grad_scale: 32.0 +2024-01-15 13:03:44,258 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=17026.666666666668, ans=0.07 +2024-01-15 13:03:50,827 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=17026.666666666668, ans=0.125 +2024-01-15 13:04:01,098 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.24 vs. limit=9.265 +2024-01-15 13:04:04,634 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.44 vs. limit=13.8975 +2024-01-15 13:04:21,588 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=17126.666666666668, ans=0.125 +2024-01-15 13:04:44,717 INFO [train.py:994] (0/2) Epoch 7, batch 100, loss[loss=0.2295, simple_loss=0.293, pruned_loss=0.08304, over 24308.00 frames. ], tot_loss[loss=0.2448, simple_loss=0.3038, pruned_loss=0.09293, over 1917273.21 frames. ], batch size: 147, lr: 3.78e-02, grad_scale: 16.0 +2024-01-15 13:05:00,278 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.49 vs. limit=13.96 +2024-01-15 13:05:05,334 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=17226.666666666668, ans=0.1277333333333333 +2024-01-15 13:05:34,291 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=17326.666666666668, ans=0.45990000000000003 +2024-01-15 13:05:39,676 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=20.49 vs. 
limit=20.495 +2024-01-15 13:05:42,364 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=17326.666666666668, ans=0.125 +2024-01-15 13:05:48,238 INFO [train.py:994] (0/2) Epoch 7, batch 150, loss[loss=0.2524, simple_loss=0.3107, pruned_loss=0.09699, over 24346.00 frames. ], tot_loss[loss=0.2435, simple_loss=0.3028, pruned_loss=0.09215, over 2556707.14 frames. ], batch size: 153, lr: 3.78e-02, grad_scale: 16.0 +2024-01-15 13:05:49,392 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.035e+02 2.480e+02 2.785e+02 3.316e+02 4.883e+02, threshold=5.571e+02, percent-clipped=0.0 +2024-01-15 13:05:56,716 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=17360.0, ans=0.1264 +2024-01-15 13:06:03,931 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=17393.333333333332, ans=0.125 +2024-01-15 13:06:11,669 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=17393.333333333332, ans=0.125 +2024-01-15 13:06:12,111 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=20.26 vs. limit=20.544999999999998 +2024-01-15 13:06:16,908 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.67 vs. limit=9.356666666666667 +2024-01-15 13:06:26,989 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.66 vs. limit=20.595 +2024-01-15 13:06:44,254 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=17493.333333333332, ans=0.2877333333333334 +2024-01-15 13:06:51,732 INFO [train.py:994] (0/2) Epoch 7, batch 200, loss[loss=0.2529, simple_loss=0.3088, pruned_loss=0.09852, over 24522.00 frames. ], tot_loss[loss=0.244, simple_loss=0.3032, pruned_loss=0.09237, over 3048329.07 frames. ], batch size: 236, lr: 3.77e-02, grad_scale: 16.0 +2024-01-15 13:07:00,857 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.min_positive, batch_count=17526.666666666668, ans=0.025 +2024-01-15 13:07:30,624 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=17626.666666666668, ans=0.46440000000000003 +2024-01-15 13:07:43,275 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=17660.0, ans=0.125 +2024-01-15 13:07:45,660 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=17660.0, ans=0.125 +2024-01-15 13:07:54,130 INFO [train.py:994] (0/2) Epoch 7, batch 250, loss[loss=0.2271, simple_loss=0.2892, pruned_loss=0.08255, over 24436.00 frames. ], tot_loss[loss=0.2439, simple_loss=0.3031, pruned_loss=0.09232, over 3442031.26 frames. 
], batch size: 250, lr: 3.76e-02, grad_scale: 16.0 +2024-01-15 13:07:55,377 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.023e+02 2.392e+02 2.709e+02 3.243e+02 5.140e+02, threshold=5.418e+02, percent-clipped=0.0 +2024-01-15 13:08:03,361 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.98 vs. limit=20.77 +2024-01-15 13:08:11,177 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.61 vs. limit=14.1475 +2024-01-15 13:08:42,238 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=17793.333333333332, ans=0.12206666666666668 +2024-01-15 13:08:49,299 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=17826.666666666668, ans=0.125 +2024-01-15 13:08:52,730 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=17826.666666666668, ans=0.0 +2024-01-15 13:08:55,958 INFO [train.py:994] (0/2) Epoch 7, batch 300, loss[loss=0.224, simple_loss=0.2924, pruned_loss=0.0778, over 24536.00 frames. ], tot_loss[loss=0.245, simple_loss=0.3041, pruned_loss=0.09298, over 3744978.26 frames. ], batch size: 236, lr: 3.76e-02, grad_scale: 8.0 +2024-01-15 13:08:59,846 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 13:09:11,501 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=17893.333333333332, ans=0.125 +2024-01-15 13:09:13,218 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.96 vs. limit=20.92 +2024-01-15 13:09:16,254 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=17893.333333333332, ans=0.125 +2024-01-15 13:09:29,953 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.min_abs, batch_count=17926.666666666668, ans=0.4689 +2024-01-15 13:09:35,296 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=17960.0, ans=0.0 +2024-01-15 13:09:55,183 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=17993.333333333332, ans=0.006957971014492754 +2024-01-15 13:09:58,909 INFO [train.py:994] (0/2) Epoch 7, batch 350, loss[loss=0.2466, simple_loss=0.3071, pruned_loss=0.09304, over 24313.00 frames. ], tot_loss[loss=0.2441, simple_loss=0.3033, pruned_loss=0.09243, over 3971502.85 frames. ], batch size: 285, lr: 3.75e-02, grad_scale: 8.0 +2024-01-15 13:10:00,486 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=18026.666666666668, ans=0.2690666666666667 +2024-01-15 13:10:01,282 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.851e+02 2.448e+02 2.766e+02 3.076e+02 2.159e+03, threshold=5.532e+02, percent-clipped=4.0 +2024-01-15 13:10:07,735 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.12 vs. 
limit=14.26 +2024-01-15 13:10:16,785 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=18060.0, ans=0.1194 +2024-01-15 13:11:00,523 INFO [train.py:994] (0/2) Epoch 7, batch 400, loss[loss=0.2109, simple_loss=0.2762, pruned_loss=0.07275, over 24547.00 frames. ], tot_loss[loss=0.2431, simple_loss=0.3025, pruned_loss=0.09184, over 4154479.21 frames. ], batch size: 193, lr: 3.75e-02, grad_scale: 16.0 +2024-01-15 13:11:06,810 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=18193.333333333332, ans=0.0 +2024-01-15 13:11:08,068 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=18193.333333333332, ans=0.0 +2024-01-15 13:11:09,230 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=18193.333333333332, ans=0.125 +2024-01-15 13:11:25,088 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.29 vs. limit=14.3475 +2024-01-15 13:11:28,101 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=18260.0, ans=0.125 +2024-01-15 13:11:33,041 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.39 vs. limit=9.565000000000001 +2024-01-15 13:11:37,971 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_positive, batch_count=18293.333333333332, ans=0.05 +2024-01-15 13:11:49,322 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=18326.666666666668, ans=0.0 +2024-01-15 13:11:58,723 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=18326.666666666668, ans=0.0 +2024-01-15 13:12:01,692 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.00 vs. limit=14.385 +2024-01-15 13:12:02,130 INFO [train.py:994] (0/2) Epoch 7, batch 450, loss[loss=0.2398, simple_loss=0.2948, pruned_loss=0.09243, over 24467.00 frames. ], tot_loss[loss=0.2419, simple_loss=0.3014, pruned_loss=0.09116, over 4301444.73 frames. ], batch size: 170, lr: 3.74e-02, grad_scale: 16.0 +2024-01-15 13:12:05,167 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.955e+02 2.388e+02 2.626e+02 2.940e+02 4.634e+02, threshold=5.253e+02, percent-clipped=0.0 +2024-01-15 13:12:14,592 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=18393.333333333332, ans=10.0 +2024-01-15 13:12:16,969 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=18393.333333333332, ans=0.4759 +2024-01-15 13:12:23,363 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.82 vs. 
limit=21.295 +2024-01-15 13:12:24,292 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 13:12:31,849 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=18426.666666666668, ans=0.125 +2024-01-15 13:12:33,639 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=18426.666666666668, ans=0.125 +2024-01-15 13:12:41,780 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=18460.0, ans=0.0 +2024-01-15 13:12:43,010 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=18460.0, ans=0.006856521739130435 +2024-01-15 13:13:05,240 INFO [train.py:994] (0/2) Epoch 7, batch 500, loss[loss=0.2303, simple_loss=0.2912, pruned_loss=0.08465, over 24471.00 frames. ], tot_loss[loss=0.2417, simple_loss=0.3014, pruned_loss=0.09099, over 4418403.11 frames. ], batch size: 170, lr: 3.73e-02, grad_scale: 16.0 +2024-01-15 13:13:10,180 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=18526.666666666668, ans=0.125 +2024-01-15 13:13:12,650 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=18526.666666666668, ans=0.125 +2024-01-15 13:13:21,853 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.80 vs. limit=21.42 +2024-01-15 13:13:26,798 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=18560.0, ans=0.1144 +2024-01-15 13:13:32,656 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=18593.333333333332, ans=0.11406666666666668 +2024-01-15 13:13:36,169 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=18593.333333333332, ans=0.125 +2024-01-15 13:14:06,842 INFO [train.py:994] (0/2) Epoch 7, batch 550, loss[loss=0.2462, simple_loss=0.3075, pruned_loss=0.09245, over 24489.00 frames. ], tot_loss[loss=0.242, simple_loss=0.3021, pruned_loss=0.09099, over 4516116.19 frames. 
], batch size: 216, lr: 3.73e-02, grad_scale: 16.0 +2024-01-15 13:14:09,604 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.822e+02 2.418e+02 2.722e+02 3.262e+02 6.493e+02, threshold=5.445e+02, percent-clipped=3.0 +2024-01-15 13:14:12,336 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=18693.333333333332, ans=0.48039999999999994 +2024-01-15 13:14:21,344 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=18726.666666666668, ans=0.0 +2024-01-15 13:14:22,688 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=18726.666666666668, ans=0.125 +2024-01-15 13:14:28,448 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=18726.666666666668, ans=0.2445666666666667 +2024-01-15 13:14:35,239 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=18760.0, ans=0.125 +2024-01-15 13:15:09,672 INFO [train.py:994] (0/2) Epoch 7, batch 600, loss[loss=0.2494, simple_loss=0.306, pruned_loss=0.09636, over 24516.00 frames. ], tot_loss[loss=0.2412, simple_loss=0.3014, pruned_loss=0.09051, over 4576198.92 frames. ], batch size: 236, lr: 3.72e-02, grad_scale: 16.0 +2024-01-15 13:15:14,805 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer_ff3.min_abs, batch_count=18860.0, ans=0.2 +2024-01-15 13:15:23,083 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=18893.333333333332, ans=0.125 +2024-01-15 13:15:24,830 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 13:15:26,087 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=18893.333333333332, ans=0.125 +2024-01-15 13:15:29,711 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=18893.333333333332, ans=0.125 +2024-01-15 13:15:36,607 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 13:15:40,708 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.16 vs. limit=5.839 +2024-01-15 13:15:43,661 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=18926.666666666668, ans=0.125 +2024-01-15 13:16:11,528 INFO [train.py:994] (0/2) Epoch 7, batch 650, loss[loss=0.239, simple_loss=0.298, pruned_loss=0.09001, over 24413.00 frames. ], tot_loss[loss=0.2408, simple_loss=0.3012, pruned_loss=0.09018, over 4639296.22 frames. 
], batch size: 258, lr: 3.72e-02, grad_scale: 16.0 +2024-01-15 13:16:13,841 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.002e+02 2.481e+02 2.819e+02 3.403e+02 1.032e+03, threshold=5.637e+02, percent-clipped=2.0 +2024-01-15 13:16:40,300 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=19093.333333333332, ans=0.23173333333333346 +2024-01-15 13:17:13,787 INFO [train.py:994] (0/2) Epoch 7, batch 700, loss[loss=0.2142, simple_loss=0.2744, pruned_loss=0.07704, over 23972.00 frames. ], tot_loss[loss=0.2398, simple_loss=0.3, pruned_loss=0.08979, over 4655300.50 frames. ], batch size: 131, lr: 3.71e-02, grad_scale: 16.0 +2024-01-15 13:17:51,635 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=19293.333333333332, ans=0.22473333333333345 +2024-01-15 13:17:52,818 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=19293.333333333332, ans=0.125 +2024-01-15 13:17:55,136 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=19293.333333333332, ans=0.07 +2024-01-15 13:18:00,159 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.35 vs. limit=11.717333333333332 +2024-01-15 13:18:16,526 INFO [train.py:994] (0/2) Epoch 7, batch 750, loss[loss=0.2401, simple_loss=0.2997, pruned_loss=0.09023, over 24534.00 frames. ], tot_loss[loss=0.2388, simple_loss=0.2991, pruned_loss=0.08923, over 4687897.82 frames. ], batch size: 243, lr: 3.71e-02, grad_scale: 16.0 +2024-01-15 13:18:18,840 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.030e+02 2.484e+02 2.733e+02 3.222e+02 4.395e+02, threshold=5.467e+02, percent-clipped=0.0 +2024-01-15 13:18:31,966 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=15.81 vs. limit=22.045 +2024-01-15 13:18:46,319 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=19426.666666666668, ans=0.125 +2024-01-15 13:18:48,089 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn2.whiten.whitening_limit, batch_count=19426.666666666668, ans=22.07 +2024-01-15 13:19:15,985 INFO [train.py:994] (0/2) Epoch 7, batch 800, loss[loss=0.2483, simple_loss=0.3097, pruned_loss=0.09341, over 24354.00 frames. ], tot_loss[loss=0.2385, simple_loss=0.2987, pruned_loss=0.0892, over 4704389.41 frames. ], batch size: 275, lr: 3.70e-02, grad_scale: 32.0 +2024-01-15 13:19:20,171 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=19526.666666666668, ans=0.0 +2024-01-15 13:19:20,505 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.08 vs. 
limit=5.929 +2024-01-15 13:19:22,330 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 13:19:24,559 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=19526.666666666668, ans=0.10473333333333334 +2024-01-15 13:19:35,148 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=19560.0, ans=0.125 +2024-01-15 13:20:05,126 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-7.pt +2024-01-15 13:20:28,050 INFO [train.py:994] (0/2) Epoch 8, batch 0, loss[loss=0.2482, simple_loss=0.3038, pruned_loss=0.0963, over 24545.00 frames. ], tot_loss[loss=0.2482, simple_loss=0.3038, pruned_loss=0.0963, over 24545.00 frames. ], batch size: 236, lr: 3.61e-02, grad_scale: 32.0 +2024-01-15 13:20:28,051 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 13:20:44,941 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.2.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([1.3812, 2.0345, 1.8563, 3.1600], device='cuda:0') +2024-01-15 13:20:45,715 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.2.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([1.4035, 2.1873, 2.0037, 3.4994], device='cuda:0') +2024-01-15 13:20:48,228 INFO [train.py:1026] (0/2) Epoch 8, validation: loss=0.1914, simple_loss=0.2786, pruned_loss=0.05211, over 1622729.00 frames. +2024-01-15 13:20:48,228 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 13:20:59,657 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.013e+02 2.481e+02 2.846e+02 3.388e+02 4.975e+02, threshold=5.693e+02, percent-clipped=0.0 +2024-01-15 13:21:22,999 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=19736.666666666668, ans=0.125 +2024-01-15 13:21:31,204 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.41 vs. limit=5.9655000000000005 +2024-01-15 13:21:46,665 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=19803.333333333332, ans=0.006564492753623188 +2024-01-15 13:21:49,909 INFO [train.py:994] (0/2) Epoch 8, batch 50, loss[loss=0.2153, simple_loss=0.2773, pruned_loss=0.07659, over 24007.00 frames. ], tot_loss[loss=0.2328, simple_loss=0.2945, pruned_loss=0.08557, over 1093440.73 frames. ], batch size: 131, lr: 3.60e-02, grad_scale: 32.0 +2024-01-15 13:22:03,992 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.58 vs. limit=14.95125 +2024-01-15 13:22:32,242 INFO [scaling.py:1022] (0/2) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=5.78 vs. 
limit=5.0 +2024-01-15 13:22:35,086 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=19936.666666666668, ans=0.125 +2024-01-15 13:22:51,594 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=20003.333333333332, ans=0.1 +2024-01-15 13:22:52,461 INFO [train.py:994] (0/2) Epoch 8, batch 100, loss[loss=0.2334, simple_loss=0.2976, pruned_loss=0.08462, over 24370.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.2946, pruned_loss=0.08543, over 1909187.26 frames. ], batch size: 153, lr: 3.60e-02, grad_scale: 32.0 +2024-01-15 13:23:03,533 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.852e+02 2.423e+02 2.800e+02 3.261e+02 6.993e+02, threshold=5.599e+02, percent-clipped=2.0 +2024-01-15 13:23:31,679 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=9.51 vs. limit=15.0 +2024-01-15 13:23:36,798 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=20103.333333333332, ans=0.125 +2024-01-15 13:23:43,796 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=20136.666666666668, ans=0.025 +2024-01-15 13:23:51,565 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=20136.666666666668, ans=0.125 +2024-01-15 13:23:54,928 INFO [train.py:994] (0/2) Epoch 8, batch 150, loss[loss=0.2374, simple_loss=0.3023, pruned_loss=0.08626, over 24369.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.2951, pruned_loss=0.08556, over 2554600.84 frames. ], batch size: 275, lr: 3.59e-02, grad_scale: 32.0 +2024-01-15 13:24:12,881 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=20203.333333333332, ans=0.125 +2024-01-15 13:24:36,072 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=20270.0, ans=0.125 +2024-01-15 13:24:46,342 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=20303.333333333332, ans=0.09899494936611666 +2024-01-15 13:24:49,940 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=20303.333333333332, ans=0.125 +2024-01-15 13:24:51,069 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=20303.333333333332, ans=0.04949747468305833 +2024-01-15 13:24:57,372 INFO [train.py:994] (0/2) Epoch 8, batch 200, loss[loss=0.2484, simple_loss=0.3083, pruned_loss=0.09427, over 24470.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.2942, pruned_loss=0.0855, over 3036006.76 frames. ], batch size: 181, lr: 3.59e-02, grad_scale: 32.0 +2024-01-15 13:24:57,871 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.73 vs. limit=15.0 +2024-01-15 13:24:59,212 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=20.86 vs. 
limit=22.5 +2024-01-15 13:25:08,732 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.990e+02 2.315e+02 2.435e+02 2.815e+02 4.055e+02, threshold=4.871e+02, percent-clipped=0.0 +2024-01-15 13:25:10,308 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=20370.0, ans=0.1 +2024-01-15 13:25:16,005 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=20370.0, ans=0.125 +2024-01-15 13:25:16,126 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=20370.0, ans=0.125 +2024-01-15 13:25:31,772 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=20403.333333333332, ans=0.0 +2024-01-15 13:25:36,453 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=20436.666666666668, ans=0.5 +2024-01-15 13:25:36,473 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=20436.666666666668, ans=0.125 +2024-01-15 13:25:40,338 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.88 vs. limit=15.0 +2024-01-15 13:25:59,670 INFO [train.py:994] (0/2) Epoch 8, batch 250, loss[loss=0.2088, simple_loss=0.2529, pruned_loss=0.08238, over 18425.00 frames. ], tot_loss[loss=0.2335, simple_loss=0.2952, pruned_loss=0.08588, over 3427541.51 frames. ], batch size: 79, lr: 3.58e-02, grad_scale: 32.0 +2024-01-15 13:26:12,338 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=20536.666666666668, ans=0.2 +2024-01-15 13:26:39,572 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=20603.333333333332, ans=0.2 +2024-01-15 13:26:49,761 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=20636.666666666668, ans=0.2 +2024-01-15 13:26:50,934 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=20636.666666666668, ans=0.0 +2024-01-15 13:26:54,607 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=20636.666666666668, ans=0.2 +2024-01-15 13:26:59,648 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=20636.666666666668, ans=0.125 +2024-01-15 13:27:01,722 INFO [train.py:994] (0/2) Epoch 8, batch 300, loss[loss=0.2493, simple_loss=0.3105, pruned_loss=0.0941, over 24556.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.296, pruned_loss=0.08586, over 3733098.80 frames. 
], batch size: 243, lr: 3.58e-02, grad_scale: 32.0 +2024-01-15 13:27:07,412 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=20670.0, ans=0.05 +2024-01-15 13:27:13,456 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.844e+02 2.481e+02 2.795e+02 3.167e+02 5.927e+02, threshold=5.590e+02, percent-clipped=2.0 +2024-01-15 13:27:44,663 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=20770.0, ans=0.125 +2024-01-15 13:27:45,806 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=20770.0, ans=0.0 +2024-01-15 13:28:04,383 INFO [train.py:994] (0/2) Epoch 8, batch 350, loss[loss=0.2442, simple_loss=0.3146, pruned_loss=0.08688, over 23891.00 frames. ], tot_loss[loss=0.2338, simple_loss=0.2957, pruned_loss=0.08595, over 3970840.97 frames. ], batch size: 328, lr: 3.57e-02, grad_scale: 32.0 +2024-01-15 13:28:32,354 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=20903.333333333332, ans=10.0 +2024-01-15 13:28:32,382 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=20903.333333333332, ans=0.125 +2024-01-15 13:29:04,374 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=10.90 vs. limit=15.0 +2024-01-15 13:29:05,933 INFO [train.py:994] (0/2) Epoch 8, batch 400, loss[loss=0.2465, simple_loss=0.3073, pruned_loss=0.09282, over 24493.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.2952, pruned_loss=0.08533, over 4158888.06 frames. ], batch size: 204, lr: 3.56e-02, grad_scale: 32.0 +2024-01-15 13:29:06,182 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=21003.333333333332, ans=0.2 +2024-01-15 13:29:14,180 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=21003.333333333332, ans=0.125 +2024-01-15 13:29:17,211 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.964e+02 2.300e+02 2.666e+02 3.173e+02 4.906e+02, threshold=5.331e+02, percent-clipped=0.0 +2024-01-15 13:29:23,014 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=21036.666666666668, ans=0.0 +2024-01-15 13:29:23,342 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.51 vs. limit=10.0 +2024-01-15 13:29:40,570 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=21070.0, ans=0.07 +2024-01-15 13:29:42,889 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=21103.333333333332, ans=0.1 +2024-01-15 13:29:52,973 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=21103.333333333332, ans=0.025 +2024-01-15 13:30:08,885 INFO [train.py:994] (0/2) Epoch 8, batch 450, loss[loss=0.2413, simple_loss=0.2992, pruned_loss=0.09176, over 24377.00 frames. ], tot_loss[loss=0.2326, simple_loss=0.295, pruned_loss=0.08514, over 4299149.98 frames. 
], batch size: 153, lr: 3.56e-02, grad_scale: 32.0 +2024-01-15 13:31:06,399 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=21303.333333333332, ans=0.2 +2024-01-15 13:31:06,440 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=21303.333333333332, ans=0.125 +2024-01-15 13:31:11,020 INFO [train.py:994] (0/2) Epoch 8, batch 500, loss[loss=0.233, simple_loss=0.3, pruned_loss=0.08304, over 24516.00 frames. ], tot_loss[loss=0.2327, simple_loss=0.2952, pruned_loss=0.08512, over 4420731.68 frames. ], batch size: 236, lr: 3.55e-02, grad_scale: 32.0 +2024-01-15 13:31:22,112 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.874e+02 2.351e+02 2.729e+02 3.218e+02 7.359e+02, threshold=5.457e+02, percent-clipped=2.0 +2024-01-15 13:31:37,239 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=21403.333333333332, ans=0.1 +2024-01-15 13:32:00,959 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=21470.0, ans=0.125 +2024-01-15 13:32:13,222 INFO [train.py:994] (0/2) Epoch 8, batch 550, loss[loss=0.2272, simple_loss=0.2897, pruned_loss=0.08237, over 24310.00 frames. ], tot_loss[loss=0.2331, simple_loss=0.2957, pruned_loss=0.08531, over 4506632.18 frames. ], batch size: 298, lr: 3.55e-02, grad_scale: 32.0 +2024-01-15 13:32:13,521 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=21503.333333333332, ans=0.2 +2024-01-15 13:32:41,952 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=21570.0, ans=0.0 +2024-01-15 13:33:15,472 INFO [train.py:994] (0/2) Epoch 8, batch 600, loss[loss=0.2395, simple_loss=0.302, pruned_loss=0.08854, over 24491.00 frames. ], tot_loss[loss=0.2329, simple_loss=0.2956, pruned_loss=0.08509, over 4572683.21 frames. ], batch size: 267, lr: 3.54e-02, grad_scale: 32.0 +2024-01-15 13:33:25,943 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.032e+02 2.417e+02 2.801e+02 3.609e+02 5.540e+02, threshold=5.603e+02, percent-clipped=1.0 +2024-01-15 13:33:26,723 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.83 vs. limit=15.0 +2024-01-15 13:33:28,693 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=21703.333333333332, ans=0.0061514492753623196 +2024-01-15 13:33:52,523 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=21770.0, ans=0.125 +2024-01-15 13:34:17,509 INFO [train.py:994] (0/2) Epoch 8, batch 650, loss[loss=0.245, simple_loss=0.302, pruned_loss=0.09398, over 24518.00 frames. ], tot_loss[loss=0.2325, simple_loss=0.2952, pruned_loss=0.08497, over 4621960.75 frames. ], batch size: 236, lr: 3.53e-02, grad_scale: 32.0 +2024-01-15 13:34:23,295 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=21836.666666666668, ans=0.006122463768115942 +2024-01-15 13:34:42,964 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.67 vs. 
limit=15.0 +2024-01-15 13:35:19,823 INFO [train.py:994] (0/2) Epoch 8, batch 700, loss[loss=0.2387, simple_loss=0.3018, pruned_loss=0.08783, over 24212.00 frames. ], tot_loss[loss=0.2319, simple_loss=0.2947, pruned_loss=0.08451, over 4666551.31 frames. ], batch size: 311, lr: 3.53e-02, grad_scale: 32.0 +2024-01-15 13:35:23,060 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=22003.333333333332, ans=0.07 +2024-01-15 13:35:28,941 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=22003.333333333332, ans=0.125 +2024-01-15 13:35:30,135 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 13:35:30,862 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.817e+02 2.323e+02 2.652e+02 3.070e+02 7.446e+02, threshold=5.305e+02, percent-clipped=1.0 +2024-01-15 13:35:35,803 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=22036.666666666668, ans=0.5 +2024-01-15 13:36:21,546 INFO [train.py:994] (0/2) Epoch 8, batch 750, loss[loss=0.1748, simple_loss=0.2269, pruned_loss=0.06134, over 17635.00 frames. ], tot_loss[loss=0.2311, simple_loss=0.2937, pruned_loss=0.08426, over 4679042.93 frames. ], batch size: 75, lr: 3.52e-02, grad_scale: 32.0 +2024-01-15 13:36:33,042 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=22203.333333333332, ans=0.125 +2024-01-15 13:36:43,248 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=20.93 vs. limit=22.5 +2024-01-15 13:36:45,410 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=22236.666666666668, ans=0.125 +2024-01-15 13:36:52,440 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=22236.666666666668, ans=0.0 +2024-01-15 13:36:55,900 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=22236.666666666668, ans=0.125 +2024-01-15 13:37:20,732 INFO [train.py:994] (0/2) Epoch 8, batch 800, loss[loss=0.2268, simple_loss=0.2898, pruned_loss=0.08188, over 24487.00 frames. ], tot_loss[loss=0.23, simple_loss=0.2927, pruned_loss=0.08362, over 4713518.26 frames. ], batch size: 222, lr: 3.52e-02, grad_scale: 32.0 +2024-01-15 13:37:30,748 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.814e+02 2.312e+02 2.631e+02 3.113e+02 5.409e+02, threshold=5.262e+02, percent-clipped=1.0 +2024-01-15 13:37:37,856 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=22370.0, ans=0.0 +2024-01-15 13:38:01,265 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=22436.666666666668, ans=0.1 +2024-01-15 13:38:09,947 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-8.pt +2024-01-15 13:38:32,542 INFO [train.py:994] (0/2) Epoch 9, batch 0, loss[loss=0.2238, simple_loss=0.2908, pruned_loss=0.07839, over 24421.00 frames. 
], tot_loss[loss=0.2238, simple_loss=0.2908, pruned_loss=0.07839, over 24421.00 frames. ], batch size: 258, lr: 3.43e-02, grad_scale: 32.0 +2024-01-15 13:38:32,543 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 13:38:52,990 INFO [train.py:1026] (0/2) Epoch 9, validation: loss=0.1878, simple_loss=0.275, pruned_loss=0.05027, over 1622729.00 frames. +2024-01-15 13:38:52,990 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 13:38:56,392 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=22480.0, ans=0.0 +2024-01-15 13:39:05,296 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.82 vs. limit=15.0 +2024-01-15 13:39:15,207 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=22513.333333333332, ans=0.0 +2024-01-15 13:39:26,550 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=22546.666666666668, ans=0.125 +2024-01-15 13:39:37,628 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=16.04 vs. limit=15.0 +2024-01-15 13:39:50,418 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=22613.333333333332, ans=0.07 +2024-01-15 13:39:53,824 INFO [train.py:994] (0/2) Epoch 9, batch 50, loss[loss=0.2299, simple_loss=0.2962, pruned_loss=0.08179, over 24492.00 frames. ], tot_loss[loss=0.2277, simple_loss=0.2909, pruned_loss=0.08228, over 1086861.14 frames. ], batch size: 165, lr: 3.42e-02, grad_scale: 32.0 +2024-01-15 13:39:55,320 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 13:40:14,529 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.991e+02 2.374e+02 2.681e+02 3.103e+02 5.812e+02, threshold=5.362e+02, percent-clipped=2.0 +2024-01-15 13:40:17,247 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 13:40:23,156 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=22713.333333333332, ans=0.125 +2024-01-15 13:40:29,168 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=22713.333333333332, ans=10.0 +2024-01-15 13:40:44,504 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=22780.0, ans=0.0 +2024-01-15 13:40:48,372 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.55 vs. limit=15.0 +2024-01-15 13:40:55,831 INFO [train.py:994] (0/2) Epoch 9, batch 100, loss[loss=0.2337, simple_loss=0.2973, pruned_loss=0.08502, over 24556.00 frames. ], tot_loss[loss=0.2275, simple_loss=0.2905, pruned_loss=0.08229, over 1912486.86 frames. 
], batch size: 176, lr: 3.42e-02, grad_scale: 32.0 +2024-01-15 13:40:58,459 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=22813.333333333332, ans=0.125 +2024-01-15 13:41:05,667 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=22813.333333333332, ans=0.05 +2024-01-15 13:41:09,116 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=22846.666666666668, ans=0.04949747468305833 +2024-01-15 13:41:29,589 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=22880.0, ans=0.0 +2024-01-15 13:41:31,845 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=22880.0, ans=0.1 +2024-01-15 13:41:34,142 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=22913.333333333332, ans=0.125 +2024-01-15 13:41:35,789 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=4.38 vs. limit=15.0 +2024-01-15 13:41:36,461 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=22913.333333333332, ans=0.0 +2024-01-15 13:41:46,424 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=11.45 vs. limit=15.0 +2024-01-15 13:41:51,886 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=22946.666666666668, ans=0.125 +2024-01-15 13:41:59,125 INFO [train.py:994] (0/2) Epoch 9, batch 150, loss[loss=0.2312, simple_loss=0.2893, pruned_loss=0.08653, over 24489.00 frames. ], tot_loss[loss=0.2264, simple_loss=0.2901, pruned_loss=0.08131, over 2568181.12 frames. ], batch size: 187, lr: 3.41e-02, grad_scale: 32.0 +2024-01-15 13:42:06,665 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.52 vs. limit=6.0 +2024-01-15 13:42:12,511 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=23013.333333333332, ans=0.95 +2024-01-15 13:42:18,649 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.883e+02 2.295e+02 2.541e+02 2.854e+02 4.375e+02, threshold=5.082e+02, percent-clipped=0.0 +2024-01-15 13:42:25,367 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.max_abs, batch_count=23046.666666666668, ans=10.0 +2024-01-15 13:42:41,252 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.55 vs. limit=6.0 +2024-01-15 13:43:00,461 INFO [train.py:994] (0/2) Epoch 9, batch 200, loss[loss=0.2357, simple_loss=0.2976, pruned_loss=0.08691, over 24355.00 frames. ], tot_loss[loss=0.2271, simple_loss=0.2907, pruned_loss=0.08178, over 3070107.12 frames. 
], batch size: 153, lr: 3.41e-02, grad_scale: 32.0 +2024-01-15 13:43:06,639 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=23146.666666666668, ans=0.07 +2024-01-15 13:43:12,020 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=23180.0, ans=0.0058304347826086955 +2024-01-15 13:43:30,140 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=23213.333333333332, ans=0.2 +2024-01-15 13:43:47,446 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=23246.666666666668, ans=0.05 +2024-01-15 13:44:03,118 INFO [train.py:994] (0/2) Epoch 9, batch 250, loss[loss=0.2365, simple_loss=0.303, pruned_loss=0.085, over 22452.00 frames. ], tot_loss[loss=0.2265, simple_loss=0.2901, pruned_loss=0.08145, over 3444519.71 frames. ], batch size: 357, lr: 3.40e-02, grad_scale: 32.0 +2024-01-15 13:44:23,378 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.928e+02 2.470e+02 2.801e+02 3.237e+02 5.591e+02, threshold=5.602e+02, percent-clipped=1.0 +2024-01-15 13:44:31,314 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=23380.0, ans=0.2 +2024-01-15 13:44:34,392 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=23380.0, ans=0.09899494936611666 +2024-01-15 13:44:39,528 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=14.62 vs. limit=15.0 +2024-01-15 13:44:47,515 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=20.08 vs. limit=22.5 +2024-01-15 13:44:50,695 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=23413.333333333332, ans=0.125 +2024-01-15 13:44:53,934 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=23446.666666666668, ans=0.125 +2024-01-15 13:45:04,046 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=23446.666666666668, ans=0.125 +2024-01-15 13:45:06,124 INFO [train.py:994] (0/2) Epoch 9, batch 300, loss[loss=0.2108, simple_loss=0.2801, pruned_loss=0.07073, over 24131.00 frames. ], tot_loss[loss=0.2262, simple_loss=0.2899, pruned_loss=0.08122, over 3744485.39 frames. 
], batch size: 140, lr: 3.39e-02, grad_scale: 32.0 +2024-01-15 13:45:15,763 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=23480.0, ans=0.005765217391304348 +2024-01-15 13:45:32,584 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=23546.666666666668, ans=0.125 +2024-01-15 13:45:40,693 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=23546.666666666668, ans=0.0 +2024-01-15 13:45:48,429 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=23580.0, ans=0.05 +2024-01-15 13:45:57,409 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=23613.333333333332, ans=0.125 +2024-01-15 13:45:58,889 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.21 vs. limit=15.0 +2024-01-15 13:46:03,200 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=23613.333333333332, ans=0.125 +2024-01-15 13:46:06,107 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.78 vs. limit=15.0 +2024-01-15 13:46:08,250 INFO [train.py:994] (0/2) Epoch 9, batch 350, loss[loss=0.2305, simple_loss=0.2979, pruned_loss=0.0816, over 24495.00 frames. ], tot_loss[loss=0.2263, simple_loss=0.29, pruned_loss=0.08126, over 3970896.51 frames. ], batch size: 216, lr: 3.39e-02, grad_scale: 32.0 +2024-01-15 13:46:11,086 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.64 vs. limit=22.5 +2024-01-15 13:46:27,748 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.923e+02 2.389e+02 2.601e+02 3.203e+02 4.963e+02, threshold=5.202e+02, percent-clipped=0.0 +2024-01-15 13:46:30,329 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=23680.0, ans=0.125 +2024-01-15 13:46:37,994 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=23713.333333333332, ans=0.1 +2024-01-15 13:46:46,865 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=23746.666666666668, ans=0.1 +2024-01-15 13:46:49,191 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 13:46:59,512 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=16.46 vs. limit=15.0 +2024-01-15 13:47:01,550 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=23780.0, ans=0.1 +2024-01-15 13:47:05,813 INFO [scaling.py:1022] (0/2) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.42 vs. limit=8.0 +2024-01-15 13:47:09,687 INFO [train.py:994] (0/2) Epoch 9, batch 400, loss[loss=0.2107, simple_loss=0.2764, pruned_loss=0.07249, over 24501.00 frames. 
], tot_loss[loss=0.2264, simple_loss=0.2904, pruned_loss=0.08119, over 4168200.25 frames. ], batch size: 187, lr: 3.38e-02, grad_scale: 32.0 +2024-01-15 13:47:13,088 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=23813.333333333332, ans=0.2 +2024-01-15 13:47:25,726 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=23846.666666666668, ans=0.0 +2024-01-15 13:47:26,990 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=23846.666666666668, ans=0.0 +2024-01-15 13:47:30,695 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=23846.666666666668, ans=0.2 +2024-01-15 13:47:30,835 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.07 vs. limit=6.0 +2024-01-15 13:47:47,182 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.73 vs. limit=15.0 +2024-01-15 13:48:12,628 INFO [train.py:994] (0/2) Epoch 9, batch 450, loss[loss=0.2079, simple_loss=0.2783, pruned_loss=0.06878, over 24532.00 frames. ], tot_loss[loss=0.2261, simple_loss=0.2904, pruned_loss=0.0809, over 4317272.53 frames. ], batch size: 236, lr: 3.38e-02, grad_scale: 32.0 +2024-01-15 13:48:15,238 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.min_positive, batch_count=23980.0, ans=0.05 +2024-01-15 13:48:15,405 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.16 vs. limit=15.0 +2024-01-15 13:48:28,557 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 13:48:28,622 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=24013.333333333332, ans=0.125 +2024-01-15 13:48:29,164 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.77 vs. limit=22.5 +2024-01-15 13:48:33,062 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.903e+02 2.345e+02 2.628e+02 3.128e+02 5.089e+02, threshold=5.255e+02, percent-clipped=0.0 +2024-01-15 13:48:33,684 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.80 vs. limit=15.0 +2024-01-15 13:48:59,688 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=24080.0, ans=0.125 +2024-01-15 13:49:15,884 INFO [train.py:994] (0/2) Epoch 9, batch 500, loss[loss=0.213, simple_loss=0.2807, pruned_loss=0.07262, over 24484.00 frames. ], tot_loss[loss=0.2254, simple_loss=0.2899, pruned_loss=0.08043, over 4430473.10 frames. 
], batch size: 267, lr: 3.37e-02, grad_scale: 32.0 +2024-01-15 13:49:21,690 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=24146.666666666668, ans=0.125 +2024-01-15 13:49:23,898 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=24146.666666666668, ans=0.125 +2024-01-15 13:49:25,118 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=24146.666666666668, ans=0.125 +2024-01-15 13:49:31,145 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=24180.0, ans=0.0 +2024-01-15 13:49:32,652 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=22.11 vs. limit=22.5 +2024-01-15 13:49:46,800 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=24213.333333333332, ans=0.005605797101449276 +2024-01-15 13:50:09,464 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=14.22 vs. limit=22.5 +2024-01-15 13:50:18,528 INFO [train.py:994] (0/2) Epoch 9, batch 550, loss[loss=0.2092, simple_loss=0.2777, pruned_loss=0.07033, over 24330.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2897, pruned_loss=0.08021, over 4522649.65 frames. ], batch size: 147, lr: 3.37e-02, grad_scale: 32.0 +2024-01-15 13:50:32,393 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=24346.666666666668, ans=0.2 +2024-01-15 13:50:35,030 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=19.77 vs. limit=22.5 +2024-01-15 13:50:38,519 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.063e+02 2.333e+02 2.583e+02 2.991e+02 4.085e+02, threshold=5.165e+02, percent-clipped=0.0 +2024-01-15 13:50:50,990 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=24380.0, ans=0.005569565217391304 +2024-01-15 13:51:08,867 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=24446.666666666668, ans=0.125 +2024-01-15 13:51:13,496 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=24446.666666666668, ans=0.0 +2024-01-15 13:51:20,803 INFO [train.py:994] (0/2) Epoch 9, batch 600, loss[loss=0.2183, simple_loss=0.2848, pruned_loss=0.07591, over 24423.00 frames. ], tot_loss[loss=0.2255, simple_loss=0.2903, pruned_loss=0.08029, over 4595313.53 frames. ], batch size: 258, lr: 3.36e-02, grad_scale: 64.0 +2024-01-15 13:51:23,755 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.23 vs. limit=15.0 +2024-01-15 13:51:25,722 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=24480.0, ans=0.0 +2024-01-15 13:51:32,701 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.73 vs. 
limit=15.0 +2024-01-15 13:51:47,762 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.29 vs. limit=15.0 +2024-01-15 13:52:06,931 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=24580.0, ans=0.125 +2024-01-15 13:52:14,766 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=24613.333333333332, ans=0.04949747468305833 +2024-01-15 13:52:16,732 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=24613.333333333332, ans=0.125 +2024-01-15 13:52:23,555 INFO [train.py:994] (0/2) Epoch 9, batch 650, loss[loss=0.2223, simple_loss=0.2833, pruned_loss=0.08066, over 24541.00 frames. ], tot_loss[loss=0.2251, simple_loss=0.2899, pruned_loss=0.08009, over 4646737.87 frames. ], batch size: 236, lr: 3.36e-02, grad_scale: 64.0 +2024-01-15 13:52:39,387 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.00 vs. limit=15.0 +2024-01-15 13:52:43,603 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.958e+02 2.423e+02 2.648e+02 3.293e+02 5.337e+02, threshold=5.296e+02, percent-clipped=1.0 +2024-01-15 13:52:51,108 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=5.40 vs. limit=15.0 +2024-01-15 13:53:26,553 INFO [train.py:994] (0/2) Epoch 9, batch 700, loss[loss=0.2289, simple_loss=0.2974, pruned_loss=0.08023, over 24224.00 frames. ], tot_loss[loss=0.2238, simple_loss=0.2889, pruned_loss=0.07939, over 4686414.65 frames. ], batch size: 311, lr: 3.35e-02, grad_scale: 64.0 +2024-01-15 13:53:28,103 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=24813.333333333332, ans=0.00547536231884058 +2024-01-15 13:53:54,868 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.04 vs. limit=15.0 +2024-01-15 13:54:02,737 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=24913.333333333332, ans=0.125 +2024-01-15 13:54:02,799 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 13:54:13,133 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=24913.333333333332, ans=0.07 +2024-01-15 13:54:19,440 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=24946.666666666668, ans=0.125 +2024-01-15 13:54:22,514 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=13.15 vs. limit=22.5 +2024-01-15 13:54:28,929 INFO [train.py:994] (0/2) Epoch 9, batch 750, loss[loss=0.2262, simple_loss=0.2939, pruned_loss=0.07925, over 24499.00 frames. ], tot_loss[loss=0.2235, simple_loss=0.2885, pruned_loss=0.07927, over 4704080.66 frames. 
], batch size: 222, lr: 3.34e-02, grad_scale: 32.0 +2024-01-15 13:54:47,291 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=25013.333333333332, ans=0.0 +2024-01-15 13:54:48,355 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=25013.333333333332, ans=0.005431884057971015 +2024-01-15 13:54:50,485 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.978e+02 2.521e+02 2.840e+02 3.310e+02 7.138e+02, threshold=5.680e+02, percent-clipped=2.0 +2024-01-15 13:55:02,144 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=25046.666666666668, ans=0.00542463768115942 +2024-01-15 13:55:13,978 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=25080.0, ans=0.125 +2024-01-15 13:55:16,089 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=25080.0, ans=0.125 +2024-01-15 13:55:29,291 INFO [train.py:994] (0/2) Epoch 9, batch 800, loss[loss=0.2186, simple_loss=0.2889, pruned_loss=0.07418, over 24603.00 frames. ], tot_loss[loss=0.2226, simple_loss=0.2875, pruned_loss=0.07881, over 4722667.50 frames. ], batch size: 199, lr: 3.34e-02, grad_scale: 32.0 +2024-01-15 13:55:44,690 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=25180.0, ans=0.125 +2024-01-15 13:56:13,168 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=25246.666666666668, ans=0.125 +2024-01-15 13:56:18,488 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-9.pt +2024-01-15 13:56:41,985 INFO [train.py:994] (0/2) Epoch 10, batch 0, loss[loss=0.2312, simple_loss=0.2916, pruned_loss=0.08537, over 24497.00 frames. ], tot_loss[loss=0.2312, simple_loss=0.2916, pruned_loss=0.08537, over 24497.00 frames. ], batch size: 210, lr: 3.25e-02, grad_scale: 32.0 +2024-01-15 13:56:41,986 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 13:57:01,733 INFO [train.py:1026] (0/2) Epoch 10, validation: loss=0.1857, simple_loss=0.2729, pruned_loss=0.04922, over 1622729.00 frames. 
+2024-01-15 13:57:01,733 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 13:57:23,272 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=25323.333333333332, ans=0.125 +2024-01-15 13:57:23,408 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=25323.333333333332, ans=0.1 +2024-01-15 13:57:33,276 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.980e+02 2.286e+02 2.610e+02 2.905e+02 4.355e+02, threshold=5.219e+02, percent-clipped=0.0 +2024-01-15 13:57:34,748 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=25356.666666666668, ans=0.125 +2024-01-15 13:57:53,890 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=25423.333333333332, ans=0.005342753623188407 +2024-01-15 13:57:54,975 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=25423.333333333332, ans=0.125 +2024-01-15 13:58:03,489 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=19.12 vs. limit=22.5 +2024-01-15 13:58:05,150 INFO [train.py:994] (0/2) Epoch 10, batch 50, loss[loss=0.2392, simple_loss=0.3029, pruned_loss=0.08774, over 24462.00 frames. ], tot_loss[loss=0.219, simple_loss=0.2833, pruned_loss=0.07736, over 1088742.31 frames. ], batch size: 187, lr: 3.25e-02, grad_scale: 32.0 +2024-01-15 13:58:27,931 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=25490.0, ans=0.0 +2024-01-15 13:58:59,901 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 13:59:07,245 INFO [train.py:994] (0/2) Epoch 10, batch 100, loss[loss=0.2207, simple_loss=0.2858, pruned_loss=0.07787, over 24458.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2837, pruned_loss=0.07659, over 1918366.02 frames. ], batch size: 222, lr: 3.24e-02, grad_scale: 32.0 +2024-01-15 13:59:18,788 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.max_positive, batch_count=25656.666666666668, ans=0.95 +2024-01-15 13:59:21,326 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=25656.666666666668, ans=0.0 +2024-01-15 13:59:28,794 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.78 vs. limit=12.0 +2024-01-15 13:59:31,271 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=25690.0, ans=0.125 +2024-01-15 13:59:37,407 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.922e+02 2.364e+02 2.776e+02 3.219e+02 6.201e+02, threshold=5.552e+02, percent-clipped=1.0 +2024-01-15 13:59:53,178 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=25723.333333333332, ans=0.0 +2024-01-15 14:00:09,784 INFO [train.py:994] (0/2) Epoch 10, batch 150, loss[loss=0.2193, simple_loss=0.2881, pruned_loss=0.07527, over 23902.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.2846, pruned_loss=0.07662, over 2559766.01 frames. 
], batch size: 328, lr: 3.24e-02, grad_scale: 32.0 +2024-01-15 14:00:49,926 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=25890.0, ans=0.125 +2024-01-15 14:01:10,590 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=25956.666666666668, ans=0.1 +2024-01-15 14:01:11,347 INFO [train.py:994] (0/2) Epoch 10, batch 200, loss[loss=0.2324, simple_loss=0.29, pruned_loss=0.08742, over 24563.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2849, pruned_loss=0.0766, over 3057037.87 frames. ], batch size: 176, lr: 3.23e-02, grad_scale: 32.0 +2024-01-15 14:01:11,622 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=25956.666666666668, ans=0.5 +2024-01-15 14:01:21,469 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=25956.666666666668, ans=0.025 +2024-01-15 14:01:27,405 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=25990.0, ans=0.1 +2024-01-15 14:01:27,436 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=25990.0, ans=0.005219565217391304 +2024-01-15 14:01:36,961 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=26023.333333333332, ans=0.1 +2024-01-15 14:01:38,127 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=26023.333333333332, ans=0.1 +2024-01-15 14:01:40,515 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=26023.333333333332, ans=0.125 +2024-01-15 14:01:41,184 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.154e+02 2.425e+02 2.905e+02 5.459e+02, threshold=4.850e+02, percent-clipped=0.0 +2024-01-15 14:01:43,441 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=26023.333333333332, ans=0.09899494936611666 +2024-01-15 14:01:51,753 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=26056.666666666668, ans=0.0 +2024-01-15 14:02:13,343 INFO [train.py:994] (0/2) Epoch 10, batch 250, loss[loss=0.2297, simple_loss=0.2957, pruned_loss=0.08179, over 24320.00 frames. ], tot_loss[loss=0.2191, simple_loss=0.2847, pruned_loss=0.07674, over 3426492.34 frames. 
], batch size: 285, lr: 3.23e-02, grad_scale: 16.0 +2024-01-15 14:02:17,105 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=26123.333333333332, ans=0.2 +2024-01-15 14:02:25,366 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=26156.666666666668, ans=0.125 +2024-01-15 14:02:34,349 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=26156.666666666668, ans=0.04949747468305833 +2024-01-15 14:02:38,532 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=26190.0, ans=0.0 +2024-01-15 14:02:44,550 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=26190.0, ans=0.1 +2024-01-15 14:02:44,552 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=26190.0, ans=0.005176086956521739 +2024-01-15 14:02:56,231 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=26223.333333333332, ans=0.125 +2024-01-15 14:03:06,894 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=26256.666666666668, ans=0.0 +2024-01-15 14:03:14,810 INFO [train.py:994] (0/2) Epoch 10, batch 300, loss[loss=0.2178, simple_loss=0.2881, pruned_loss=0.07374, over 24479.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.284, pruned_loss=0.07636, over 3727915.22 frames. ], batch size: 181, lr: 3.22e-02, grad_scale: 16.0 +2024-01-15 14:03:30,527 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=26323.333333333332, ans=0.0 +2024-01-15 14:03:37,610 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=26323.333333333332, ans=0.0 +2024-01-15 14:03:44,444 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=26356.666666666668, ans=0.005139855072463768 +2024-01-15 14:03:45,318 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.872e+02 2.343e+02 2.638e+02 3.055e+02 5.073e+02, threshold=5.275e+02, percent-clipped=1.0 +2024-01-15 14:03:47,158 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.72 vs. limit=10.0 +2024-01-15 14:04:11,589 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=26423.333333333332, ans=0.0 +2024-01-15 14:04:16,077 INFO [train.py:994] (0/2) Epoch 10, batch 350, loss[loss=0.1951, simple_loss=0.2385, pruned_loss=0.07588, over 15797.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2836, pruned_loss=0.07624, over 3955064.20 frames. 
], batch size: 67, lr: 3.22e-02, grad_scale: 16.0 +2024-01-15 14:04:18,975 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=26456.666666666668, ans=0.1 +2024-01-15 14:04:37,915 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=26490.0, ans=0.125 +2024-01-15 14:04:54,812 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=26556.666666666668, ans=0.1 +2024-01-15 14:05:19,114 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.33 vs. limit=15.0 +2024-01-15 14:05:19,580 INFO [train.py:994] (0/2) Epoch 10, batch 400, loss[loss=0.2199, simple_loss=0.28, pruned_loss=0.07997, over 24392.00 frames. ], tot_loss[loss=0.2184, simple_loss=0.2842, pruned_loss=0.07631, over 4149848.40 frames. ], batch size: 153, lr: 3.21e-02, grad_scale: 32.0 +2024-01-15 14:05:30,708 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer1.prob, batch_count=26656.666666666668, ans=0.125 +2024-01-15 14:05:35,011 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/checkpoint-8000.pt +2024-01-15 14:05:41,110 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.36 vs. limit=15.0 +2024-01-15 14:05:46,650 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=26690.0, ans=0.0050673913043478265 +2024-01-15 14:05:46,715 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=26690.0, ans=0.0 +2024-01-15 14:05:47,891 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=26690.0, ans=0.1 +2024-01-15 14:05:53,442 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.894e+02 2.342e+02 2.602e+02 2.965e+02 5.895e+02, threshold=5.203e+02, percent-clipped=1.0 +2024-01-15 14:06:11,855 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=26756.666666666668, ans=0.125 +2024-01-15 14:06:15,390 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=26756.666666666668, ans=0.125 +2024-01-15 14:06:24,556 INFO [train.py:994] (0/2) Epoch 10, batch 450, loss[loss=0.2295, simple_loss=0.2934, pruned_loss=0.08279, over 24612.00 frames. ], tot_loss[loss=0.2183, simple_loss=0.2844, pruned_loss=0.0761, over 4296270.15 frames. ], batch size: 199, lr: 3.20e-02, grad_scale: 32.0 +2024-01-15 14:06:29,639 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=26790.0, ans=0.0 +2024-01-15 14:06:47,725 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.13 vs. 
limit=15.0 +2024-01-15 14:07:01,752 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=26890.0, ans=0.1 +2024-01-15 14:07:07,840 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=26890.0, ans=0.2 +2024-01-15 14:07:27,532 INFO [train.py:994] (0/2) Epoch 10, batch 500, loss[loss=0.2205, simple_loss=0.2854, pruned_loss=0.07782, over 24494.00 frames. ], tot_loss[loss=0.2189, simple_loss=0.285, pruned_loss=0.07641, over 4417721.85 frames. ], batch size: 229, lr: 3.20e-02, grad_scale: 16.0 +2024-01-15 14:07:36,251 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=26956.666666666668, ans=0.005009420289855072 +2024-01-15 14:07:46,222 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=26990.0, ans=0.005002173913043478 +2024-01-15 14:07:49,592 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=26990.0, ans=0.2 +2024-01-15 14:07:57,373 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=8.44 vs. limit=15.0 +2024-01-15 14:07:58,748 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.904e+02 2.338e+02 2.675e+02 3.125e+02 5.410e+02, threshold=5.349e+02, percent-clipped=1.0 +2024-01-15 14:08:14,020 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=27056.666666666668, ans=0.125 +2024-01-15 14:08:17,957 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=4.22 vs. limit=12.0 +2024-01-15 14:08:20,047 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=27090.0, ans=0.125 +2024-01-15 14:08:28,008 INFO [train.py:994] (0/2) Epoch 10, batch 550, loss[loss=0.2169, simple_loss=0.2892, pruned_loss=0.07236, over 24607.00 frames. ], tot_loss[loss=0.2181, simple_loss=0.2844, pruned_loss=0.07592, over 4504722.63 frames. ], batch size: 199, lr: 3.19e-02, grad_scale: 16.0 +2024-01-15 14:08:33,095 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=27123.333333333332, ans=0.2 +2024-01-15 14:08:43,238 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=27156.666666666668, ans=0.0 +2024-01-15 14:08:51,506 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=27190.0, ans=0.125 +2024-01-15 14:08:57,555 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=27190.0, ans=0.125 +2024-01-15 14:09:14,606 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=4.18 vs. limit=15.0 +2024-01-15 14:09:24,332 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=27256.666666666668, ans=0.125 +2024-01-15 14:09:30,617 INFO [train.py:994] (0/2) Epoch 10, batch 600, loss[loss=0.2325, simple_loss=0.2936, pruned_loss=0.08565, over 24489.00 frames. 
], tot_loss[loss=0.2178, simple_loss=0.2839, pruned_loss=0.07592, over 4560249.13 frames. ], batch size: 222, lr: 3.19e-02, grad_scale: 16.0 +2024-01-15 14:09:42,844 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=27323.333333333332, ans=0.125 +2024-01-15 14:09:56,490 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.66 vs. limit=15.0 +2024-01-15 14:10:03,147 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.869e+02 2.330e+02 2.553e+02 2.956e+02 5.056e+02, threshold=5.106e+02, percent-clipped=0.0 +2024-01-15 14:10:10,620 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=27390.0, ans=0.1 +2024-01-15 14:10:15,123 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=27390.0, ans=0.125 +2024-01-15 14:10:32,634 INFO [train.py:994] (0/2) Epoch 10, batch 650, loss[loss=0.2173, simple_loss=0.2835, pruned_loss=0.07561, over 24497.00 frames. ], tot_loss[loss=0.2169, simple_loss=0.2833, pruned_loss=0.07524, over 4624335.48 frames. ], batch size: 229, lr: 3.18e-02, grad_scale: 16.0 +2024-01-15 14:10:32,904 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=27456.666666666668, ans=0.125 +2024-01-15 14:10:40,238 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=27456.666666666668, ans=0.0 +2024-01-15 14:11:10,027 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=27556.666666666668, ans=0.05 +2024-01-15 14:11:30,900 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=7.02 vs. limit=10.0 +2024-01-15 14:11:35,422 INFO [train.py:994] (0/2) Epoch 10, batch 700, loss[loss=0.1837, simple_loss=0.2578, pruned_loss=0.05477, over 24230.00 frames. ], tot_loss[loss=0.2164, simple_loss=0.2827, pruned_loss=0.07503, over 4661937.52 frames. ], batch size: 140, lr: 3.18e-02, grad_scale: 16.0 +2024-01-15 14:11:41,780 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.17 vs. limit=10.0 +2024-01-15 14:11:47,231 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=27656.666666666668, ans=0.125 +2024-01-15 14:11:49,663 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.30 vs. 
limit=10.0 +2024-01-15 14:12:08,168 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.856e+02 2.316e+02 2.694e+02 3.351e+02 5.251e+02, threshold=5.388e+02, percent-clipped=2.0 +2024-01-15 14:12:21,445 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=27723.333333333332, ans=0.0 +2024-01-15 14:12:24,978 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 14:12:32,305 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=27756.666666666668, ans=0.1 +2024-01-15 14:12:37,759 INFO [train.py:994] (0/2) Epoch 10, batch 750, loss[loss=0.2339, simple_loss=0.2933, pruned_loss=0.08723, over 24483.00 frames. ], tot_loss[loss=0.216, simple_loss=0.2824, pruned_loss=0.07482, over 4685149.96 frames. ], batch size: 165, lr: 3.17e-02, grad_scale: 16.0 +2024-01-15 14:12:38,077 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=27790.0, ans=0.125 +2024-01-15 14:12:40,436 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=27790.0, ans=0.125 +2024-01-15 14:12:55,424 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=27823.333333333332, ans=0.125 +2024-01-15 14:13:11,958 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=27856.666666666668, ans=0.0 +2024-01-15 14:13:15,302 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=27890.0, ans=0.125 +2024-01-15 14:13:27,940 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=27923.333333333332, ans=0.125 +2024-01-15 14:13:37,403 INFO [train.py:994] (0/2) Epoch 10, batch 800, loss[loss=0.2154, simple_loss=0.286, pruned_loss=0.07244, over 24370.00 frames. ], tot_loss[loss=0.2161, simple_loss=0.2824, pruned_loss=0.07483, over 4717580.71 frames. ], batch size: 275, lr: 3.17e-02, grad_scale: 32.0 +2024-01-15 14:14:07,569 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.048e+02 2.323e+02 2.666e+02 3.032e+02 5.624e+02, threshold=5.332e+02, percent-clipped=1.0 +2024-01-15 14:14:21,725 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=28056.666666666668, ans=0.0 +2024-01-15 14:14:23,767 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=28090.0, ans=0.125 +2024-01-15 14:14:27,168 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-10.pt +2024-01-15 14:14:51,274 INFO [train.py:994] (0/2) Epoch 11, batch 0, loss[loss=0.2103, simple_loss=0.2785, pruned_loss=0.07107, over 24489.00 frames. ], tot_loss[loss=0.2103, simple_loss=0.2785, pruned_loss=0.07107, over 24489.00 frames. 
], batch size: 187, lr: 3.08e-02, grad_scale: 32.0 +2024-01-15 14:14:51,275 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 14:15:04,631 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.4.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([2.2550, 3.4421, 3.5077, 3.0431], device='cuda:0') +2024-01-15 14:15:11,999 INFO [train.py:1026] (0/2) Epoch 11, validation: loss=0.182, simple_loss=0.2695, pruned_loss=0.04721, over 1622729.00 frames. +2024-01-15 14:15:12,000 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 14:15:37,568 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=28166.666666666668, ans=0.0 +2024-01-15 14:15:47,099 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.min_abs, batch_count=28166.666666666668, ans=0.5 +2024-01-15 14:15:53,852 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=28200.0, ans=0.2 +2024-01-15 14:16:00,921 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=28233.333333333332, ans=0.2 +2024-01-15 14:16:03,865 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 14:16:14,610 INFO [train.py:994] (0/2) Epoch 11, batch 50, loss[loss=0.2144, simple_loss=0.282, pruned_loss=0.07337, over 24590.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2795, pruned_loss=0.07228, over 1093834.30 frames. ], batch size: 199, lr: 3.08e-02, grad_scale: 32.0 +2024-01-15 14:16:18,311 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=28266.666666666668, ans=0.125 +2024-01-15 14:16:20,550 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=28266.666666666668, ans=0.125 +2024-01-15 14:16:21,743 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=28266.666666666668, ans=0.125 +2024-01-15 14:16:26,500 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=28300.0, ans=0.125 +2024-01-15 14:16:34,476 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=28300.0, ans=0.1 +2024-01-15 14:16:38,512 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.21 vs. limit=10.0 +2024-01-15 14:16:53,052 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=4.14 vs. 
limit=15.0 +2024-01-15 14:16:54,953 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 2.193e+02 2.497e+02 2.820e+02 5.572e+02, threshold=4.995e+02, percent-clipped=1.0 +2024-01-15 14:17:03,094 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=28400.0, ans=0.2 +2024-01-15 14:17:03,099 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=28400.0, ans=0.0046956521739130435 +2024-01-15 14:17:16,532 INFO [train.py:994] (0/2) Epoch 11, batch 100, loss[loss=0.1985, simple_loss=0.2691, pruned_loss=0.06397, over 24429.00 frames. ], tot_loss[loss=0.2125, simple_loss=0.2794, pruned_loss=0.07275, over 1911784.45 frames. ], batch size: 159, lr: 3.07e-02, grad_scale: 32.0 +2024-01-15 14:17:36,738 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=28466.666666666668, ans=0.125 +2024-01-15 14:17:57,697 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=28533.333333333332, ans=0.1 +2024-01-15 14:18:18,821 INFO [train.py:994] (0/2) Epoch 11, batch 150, loss[loss=0.221, simple_loss=0.2883, pruned_loss=0.07687, over 24474.00 frames. ], tot_loss[loss=0.2128, simple_loss=0.2801, pruned_loss=0.07275, over 2555315.05 frames. ], batch size: 170, lr: 3.07e-02, grad_scale: 32.0 +2024-01-15 14:18:19,080 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=28600.0, ans=0.004652173913043478 +2024-01-15 14:18:38,327 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.37 vs. limit=15.0 +2024-01-15 14:19:00,354 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.849e+02 2.381e+02 2.888e+02 3.320e+02 5.016e+02, threshold=5.776e+02, percent-clipped=3.0 +2024-01-15 14:19:01,925 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=28700.0, ans=0.2 +2024-01-15 14:19:04,404 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=28700.0, ans=0.2 +2024-01-15 14:19:15,663 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=28733.333333333332, ans=0.1 +2024-01-15 14:19:15,965 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.17 vs. limit=15.0 +2024-01-15 14:19:16,083 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.76 vs. limit=15.0 +2024-01-15 14:19:18,806 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=28733.333333333332, ans=0.0 +2024-01-15 14:19:22,148 INFO [train.py:994] (0/2) Epoch 11, batch 200, loss[loss=0.2017, simple_loss=0.272, pruned_loss=0.06569, over 24522.00 frames. ], tot_loss[loss=0.2122, simple_loss=0.2794, pruned_loss=0.07252, over 3046603.35 frames. 
], batch size: 243, lr: 3.06e-02, grad_scale: 32.0 +2024-01-15 14:19:50,321 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=28833.333333333332, ans=0.2 +2024-01-15 14:19:56,772 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=28833.333333333332, ans=0.0 +2024-01-15 14:19:57,975 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=28833.333333333332, ans=0.125 +2024-01-15 14:19:59,236 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=28866.666666666668, ans=0.1 +2024-01-15 14:20:06,563 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=28866.666666666668, ans=22.5 +2024-01-15 14:20:24,841 INFO [train.py:994] (0/2) Epoch 11, batch 250, loss[loss=0.2231, simple_loss=0.2911, pruned_loss=0.07755, over 24471.00 frames. ], tot_loss[loss=0.2133, simple_loss=0.2807, pruned_loss=0.07298, over 3440214.91 frames. ], batch size: 222, lr: 3.06e-02, grad_scale: 32.0 +2024-01-15 14:20:25,142 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=28933.333333333332, ans=0.0 +2024-01-15 14:20:31,177 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=28933.333333333332, ans=0.125 +2024-01-15 14:20:46,950 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=28966.666666666668, ans=0.125 +2024-01-15 14:20:48,031 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=28966.666666666668, ans=0.015 +2024-01-15 14:20:49,366 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=29000.0, ans=0.125 +2024-01-15 14:20:50,561 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=29000.0, ans=0.125 +2024-01-15 14:20:53,108 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=29000.0, ans=0.125 +2024-01-15 14:20:57,589 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=29000.0, ans=0.0 +2024-01-15 14:21:02,937 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=29033.333333333332, ans=0.004557971014492754 +2024-01-15 14:21:05,722 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.54 vs. limit=15.0 +2024-01-15 14:21:06,189 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.905e+02 2.235e+02 2.512e+02 2.878e+02 4.566e+02, threshold=5.025e+02, percent-clipped=0.0 +2024-01-15 14:21:18,577 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=7.21 vs. 
limit=15.0 +2024-01-15 14:21:25,657 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=29066.666666666668, ans=0.125 +2024-01-15 14:21:27,745 INFO [train.py:994] (0/2) Epoch 11, batch 300, loss[loss=0.2094, simple_loss=0.2792, pruned_loss=0.06975, over 24420.00 frames. ], tot_loss[loss=0.2132, simple_loss=0.2807, pruned_loss=0.07283, over 3734695.76 frames. ], batch size: 258, lr: 3.05e-02, grad_scale: 32.0 +2024-01-15 14:21:29,138 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=29100.0, ans=0.2 +2024-01-15 14:22:29,673 INFO [train.py:994] (0/2) Epoch 11, batch 350, loss[loss=0.2289, simple_loss=0.2919, pruned_loss=0.08299, over 24494.00 frames. ], tot_loss[loss=0.2126, simple_loss=0.2802, pruned_loss=0.0725, over 3979805.85 frames. ], batch size: 204, lr: 3.05e-02, grad_scale: 32.0 +2024-01-15 14:22:45,923 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=29300.0, ans=0.2 +2024-01-15 14:23:01,252 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=29333.333333333332, ans=0.0 +2024-01-15 14:23:06,019 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=29366.666666666668, ans=0.125 +2024-01-15 14:23:11,097 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 2.002e+02 2.304e+02 2.500e+02 2.961e+02 4.886e+02, threshold=5.000e+02, percent-clipped=0.0 +2024-01-15 14:23:15,424 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=4.13 vs. limit=12.0 +2024-01-15 14:23:25,061 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=29400.0, ans=0.125 +2024-01-15 14:23:28,695 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=29400.0, ans=0.0 +2024-01-15 14:23:31,982 INFO [train.py:994] (0/2) Epoch 11, batch 400, loss[loss=0.2233, simple_loss=0.2846, pruned_loss=0.08095, over 24476.00 frames. ], tot_loss[loss=0.213, simple_loss=0.2806, pruned_loss=0.07275, over 4169636.15 frames. ], batch size: 181, lr: 3.04e-02, grad_scale: 32.0 +2024-01-15 14:23:35,966 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=29433.333333333332, ans=0.0 +2024-01-15 14:23:44,777 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=29466.666666666668, ans=0.125 +2024-01-15 14:24:18,654 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=29533.333333333332, ans=0.2 +2024-01-15 14:24:34,449 INFO [train.py:994] (0/2) Epoch 11, batch 450, loss[loss=0.2109, simple_loss=0.2794, pruned_loss=0.07121, over 24346.00 frames. ], tot_loss[loss=0.212, simple_loss=0.2799, pruned_loss=0.07206, over 4316094.81 frames. ], batch size: 275, lr: 3.04e-02, grad_scale: 32.0 +2024-01-15 14:24:39,525 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=14.03 vs. 
limit=15.0 +2024-01-15 14:24:44,091 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.68 vs. limit=6.0 +2024-01-15 14:24:44,805 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 14:24:59,538 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=29666.666666666668, ans=0.125 +2024-01-15 14:25:15,322 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.891e+02 2.230e+02 2.451e+02 2.754e+02 3.898e+02, threshold=4.902e+02, percent-clipped=0.0 +2024-01-15 14:25:36,653 INFO [train.py:994] (0/2) Epoch 11, batch 500, loss[loss=0.217, simple_loss=0.2837, pruned_loss=0.07517, over 24505.00 frames. ], tot_loss[loss=0.2118, simple_loss=0.2794, pruned_loss=0.07204, over 4414654.81 frames. ], batch size: 181, lr: 3.03e-02, grad_scale: 32.0 +2024-01-15 14:25:37,283 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.13 vs. limit=15.0 +2024-01-15 14:25:38,092 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 14:26:32,538 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=29900.0, ans=0.0 +2024-01-15 14:26:38,806 INFO [train.py:994] (0/2) Epoch 11, batch 550, loss[loss=0.216, simple_loss=0.2775, pruned_loss=0.07722, over 24569.00 frames. ], tot_loss[loss=0.2119, simple_loss=0.2795, pruned_loss=0.07218, over 4499603.14 frames. ], batch size: 176, lr: 3.03e-02, grad_scale: 32.0 +2024-01-15 14:26:40,254 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=29933.333333333332, ans=0.1 +2024-01-15 14:26:56,042 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=29966.666666666668, ans=0.125 +2024-01-15 14:26:59,557 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=29966.666666666668, ans=0.2 +2024-01-15 14:26:59,614 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=29966.666666666668, ans=0.2 +2024-01-15 14:27:02,336 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=30000.0, ans=0.125 +2024-01-15 14:27:06,625 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=30000.0, ans=0.125 +2024-01-15 14:27:19,727 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.915e+02 2.557e+02 2.916e+02 3.459e+02 5.419e+02, threshold=5.831e+02, percent-clipped=1.0 +2024-01-15 14:27:41,196 INFO [train.py:994] (0/2) Epoch 11, batch 600, loss[loss=0.2128, simple_loss=0.2814, pruned_loss=0.07215, over 24461.00 frames. ], tot_loss[loss=0.212, simple_loss=0.28, pruned_loss=0.07201, over 4580815.95 frames. 
], batch size: 267, lr: 3.02e-02, grad_scale: 32.0 +2024-01-15 14:27:41,494 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=30100.0, ans=0.1 +2024-01-15 14:28:42,823 INFO [train.py:994] (0/2) Epoch 11, batch 650, loss[loss=0.2296, simple_loss=0.2991, pruned_loss=0.08006, over 24514.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.279, pruned_loss=0.07143, over 4632949.69 frames. ], batch size: 243, lr: 3.02e-02, grad_scale: 32.0 +2024-01-15 14:29:11,015 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=30333.333333333332, ans=0.125 +2024-01-15 14:29:24,992 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.788e+02 2.341e+02 2.706e+02 3.080e+02 4.003e+02, threshold=5.412e+02, percent-clipped=0.0 +2024-01-15 14:29:39,611 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=30400.0, ans=0.025 +2024-01-15 14:29:46,608 INFO [train.py:994] (0/2) Epoch 11, batch 700, loss[loss=0.1928, simple_loss=0.2655, pruned_loss=0.06002, over 24415.00 frames. ], tot_loss[loss=0.2112, simple_loss=0.2791, pruned_loss=0.07166, over 4669434.19 frames. ], batch size: 258, lr: 3.01e-02, grad_scale: 32.0 +2024-01-15 14:30:19,158 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=30500.0, ans=0.2 +2024-01-15 14:30:26,071 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=30533.333333333332, ans=0.004231884057971015 +2024-01-15 14:30:48,033 INFO [train.py:994] (0/2) Epoch 11, batch 750, loss[loss=0.1912, simple_loss=0.2557, pruned_loss=0.06336, over 23982.00 frames. ], tot_loss[loss=0.2111, simple_loss=0.2789, pruned_loss=0.07159, over 4700697.74 frames. ], batch size: 131, lr: 3.01e-02, grad_scale: 32.0 +2024-01-15 14:30:59,941 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.11 vs. 
limit=15.0 +2024-01-15 14:31:01,270 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=30633.333333333332, ans=0.125 +2024-01-15 14:31:01,357 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=30633.333333333332, ans=0.0 +2024-01-15 14:31:21,709 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=30666.666666666668, ans=0.125 +2024-01-15 14:31:28,160 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.837e+02 2.345e+02 2.684e+02 3.156e+02 5.957e+02, threshold=5.367e+02, percent-clipped=1.0 +2024-01-15 14:31:34,439 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=30700.0, ans=0.0 +2024-01-15 14:31:36,552 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=30733.333333333332, ans=0.2 +2024-01-15 14:31:43,151 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=30733.333333333332, ans=0.1 +2024-01-15 14:31:45,514 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=30733.333333333332, ans=0.1 +2024-01-15 14:31:47,492 INFO [train.py:994] (0/2) Epoch 11, batch 800, loss[loss=0.1932, simple_loss=0.2541, pruned_loss=0.06618, over 23474.00 frames. ], tot_loss[loss=0.2109, simple_loss=0.2787, pruned_loss=0.07154, over 4717666.51 frames. ], batch size: 119, lr: 3.00e-02, grad_scale: 32.0 +2024-01-15 14:31:56,157 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.13 vs. limit=10.0 +2024-01-15 14:31:59,099 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=30800.0, ans=0.2 +2024-01-15 14:32:01,318 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=30800.0, ans=0.2 +2024-01-15 14:32:12,462 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 14:32:18,026 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=30833.333333333332, ans=0.125 +2024-01-15 14:32:28,102 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=30866.666666666668, ans=0.125 +2024-01-15 14:32:36,607 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-11.pt +2024-01-15 14:33:00,632 INFO [train.py:994] (0/2) Epoch 12, batch 0, loss[loss=0.2054, simple_loss=0.2754, pruned_loss=0.06777, over 24487.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2754, pruned_loss=0.06777, over 24487.00 frames. 
], batch size: 210, lr: 2.93e-02, grad_scale: 32.0 +2024-01-15 14:33:00,633 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 14:33:14,006 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.3.encoder.layers.3.self_attn_weights, attn_weights_entropy = tensor([1.7691, 2.0255, 2.6091, 2.7192, 2.5685, 2.8200, 2.6004, 2.6934], + device='cuda:0') +2024-01-15 14:33:20,519 INFO [train.py:1026] (0/2) Epoch 12, validation: loss=0.1784, simple_loss=0.2662, pruned_loss=0.04527, over 1622729.00 frames. +2024-01-15 14:33:20,520 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 14:33:48,828 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=30976.666666666668, ans=0.95 +2024-01-15 14:33:54,159 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=30976.666666666668, ans=0.0041355072463768105 +2024-01-15 14:34:10,508 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.934e+02 2.453e+02 2.794e+02 3.264e+02 4.680e+02, threshold=5.589e+02, percent-clipped=0.0 +2024-01-15 14:34:11,193 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.09 vs. limit=6.0 +2024-01-15 14:34:23,792 INFO [train.py:994] (0/2) Epoch 12, batch 50, loss[loss=0.211, simple_loss=0.2745, pruned_loss=0.07377, over 24650.00 frames. ], tot_loss[loss=0.2083, simple_loss=0.2758, pruned_loss=0.07039, over 1084332.10 frames. ], batch size: 199, lr: 2.92e-02, grad_scale: 32.0 +2024-01-15 14:34:25,323 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=31076.666666666668, ans=0.09899494936611666 +2024-01-15 14:34:31,256 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=31076.666666666668, ans=0.2 +2024-01-15 14:34:33,608 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=31076.666666666668, ans=0.2 +2024-01-15 14:34:36,976 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=31110.0, ans=0.2 +2024-01-15 14:35:06,016 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=31176.666666666668, ans=0.0 +2024-01-15 14:35:25,479 INFO [train.py:994] (0/2) Epoch 12, batch 100, loss[loss=0.1859, simple_loss=0.259, pruned_loss=0.05638, over 24313.00 frames. ], tot_loss[loss=0.2081, simple_loss=0.2764, pruned_loss=0.0699, over 1916083.79 frames. 
], batch size: 147, lr: 2.92e-02, grad_scale: 32.0 +2024-01-15 14:35:29,222 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=31243.333333333332, ans=22.5 +2024-01-15 14:35:33,506 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=31243.333333333332, ans=0.2 +2024-01-15 14:36:09,061 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=31343.333333333332, ans=0.2 +2024-01-15 14:36:15,782 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.841e+02 2.234e+02 2.534e+02 2.829e+02 3.709e+02, threshold=5.069e+02, percent-clipped=0.0 +2024-01-15 14:36:21,737 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.85 vs. limit=10.0 +2024-01-15 14:36:23,580 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=31376.666666666668, ans=0.125 +2024-01-15 14:36:23,614 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=31376.666666666668, ans=0.125 +2024-01-15 14:36:23,677 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=31376.666666666668, ans=0.0 +2024-01-15 14:36:25,109 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=3.71 vs. limit=12.0 +2024-01-15 14:36:28,124 INFO [train.py:994] (0/2) Epoch 12, batch 150, loss[loss=0.1869, simple_loss=0.2594, pruned_loss=0.05723, over 24477.00 frames. ], tot_loss[loss=0.2065, simple_loss=0.2749, pruned_loss=0.06899, over 2550332.18 frames. ], batch size: 148, lr: 2.91e-02, grad_scale: 16.0 +2024-01-15 14:37:15,775 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=31510.0, ans=0.0 +2024-01-15 14:37:19,371 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=31543.333333333332, ans=0.0 +2024-01-15 14:37:30,130 INFO [train.py:994] (0/2) Epoch 12, batch 200, loss[loss=0.2003, simple_loss=0.2686, pruned_loss=0.06596, over 24533.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2754, pruned_loss=0.06905, over 3051423.43 frames. ], batch size: 176, lr: 2.91e-02, grad_scale: 16.0 +2024-01-15 14:37:46,546 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=31610.0, ans=0.2 +2024-01-15 14:37:58,630 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=9.56 vs. limit=15.0 +2024-01-15 14:38:21,334 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.969e+02 2.375e+02 2.614e+02 3.067e+02 4.543e+02, threshold=5.228e+02, percent-clipped=0.0 +2024-01-15 14:38:23,930 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=31710.0, ans=0.2 +2024-01-15 14:38:32,485 INFO [train.py:994] (0/2) Epoch 12, batch 250, loss[loss=0.1831, simple_loss=0.245, pruned_loss=0.06064, over 23530.00 frames. ], tot_loss[loss=0.2067, simple_loss=0.2752, pruned_loss=0.06908, over 3427800.58 frames. 
], batch size: 119, lr: 2.90e-02, grad_scale: 16.0 +2024-01-15 14:38:40,592 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=31743.333333333332, ans=0.1 +2024-01-15 14:38:42,902 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=31743.333333333332, ans=0.125 +2024-01-15 14:38:43,012 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=31743.333333333332, ans=0.125 +2024-01-15 14:39:17,357 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=31843.333333333332, ans=0.0 +2024-01-15 14:39:17,546 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=10.97 vs. limit=15.0 +2024-01-15 14:39:25,493 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.87 vs. limit=15.0 +2024-01-15 14:39:34,607 INFO [train.py:994] (0/2) Epoch 12, batch 300, loss[loss=0.2079, simple_loss=0.2801, pruned_loss=0.06786, over 24435.00 frames. ], tot_loss[loss=0.2073, simple_loss=0.2758, pruned_loss=0.06941, over 3733093.37 frames. ], batch size: 250, lr: 2.90e-02, grad_scale: 16.0 +2024-01-15 14:39:43,097 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=31910.0, ans=0.0 +2024-01-15 14:40:00,160 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.51 vs. limit=15.0 +2024-01-15 14:40:25,018 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.837e+02 2.325e+02 2.638e+02 3.044e+02 6.193e+02, threshold=5.276e+02, percent-clipped=1.0 +2024-01-15 14:40:30,762 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=32043.333333333332, ans=0.0 +2024-01-15 14:40:36,429 INFO [train.py:994] (0/2) Epoch 12, batch 350, loss[loss=0.1848, simple_loss=0.2342, pruned_loss=0.06768, over 17221.00 frames. ], tot_loss[loss=0.2072, simple_loss=0.2756, pruned_loss=0.06942, over 3956946.82 frames. ], batch size: 74, lr: 2.89e-02, grad_scale: 16.0 +2024-01-15 14:40:40,836 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=32076.666666666668, ans=0.0038963768115942024 +2024-01-15 14:40:46,576 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.scale_min, batch_count=32076.666666666668, ans=0.2 +2024-01-15 14:41:13,275 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.79 vs. limit=10.0 +2024-01-15 14:41:19,712 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.67 vs. limit=6.0 +2024-01-15 14:41:39,098 INFO [train.py:994] (0/2) Epoch 12, batch 400, loss[loss=0.2143, simple_loss=0.2853, pruned_loss=0.07165, over 24357.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.275, pruned_loss=0.06891, over 4146309.36 frames. 
], batch size: 298, lr: 2.89e-02, grad_scale: 32.0 +2024-01-15 14:41:41,815 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=32243.333333333332, ans=0.003860144927536232 +2024-01-15 14:41:51,822 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=32276.666666666668, ans=0.2 +2024-01-15 14:41:55,312 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=32276.666666666668, ans=0.0 +2024-01-15 14:41:57,487 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=3.49 vs. limit=12.0 +2024-01-15 14:42:12,596 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.07 vs. limit=15.0 +2024-01-15 14:42:30,262 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.805e+02 2.236e+02 2.460e+02 2.707e+02 3.564e+02, threshold=4.920e+02, percent-clipped=0.0 +2024-01-15 14:42:41,827 INFO [train.py:994] (0/2) Epoch 12, batch 450, loss[loss=0.1735, simple_loss=0.2475, pruned_loss=0.04975, over 24196.00 frames. ], tot_loss[loss=0.2064, simple_loss=0.2748, pruned_loss=0.069, over 4282037.59 frames. ], batch size: 140, lr: 2.88e-02, grad_scale: 32.0 +2024-01-15 14:42:52,187 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=32410.0, ans=0.1 +2024-01-15 14:42:53,285 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=32443.333333333332, ans=0.125 +2024-01-15 14:43:04,823 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.84 vs. limit=15.0 +2024-01-15 14:43:30,815 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.92 vs. limit=12.0 +2024-01-15 14:43:43,908 INFO [train.py:994] (0/2) Epoch 12, batch 500, loss[loss=0.198, simple_loss=0.266, pruned_loss=0.06494, over 24525.00 frames. ], tot_loss[loss=0.2061, simple_loss=0.2746, pruned_loss=0.06881, over 4398831.35 frames. ], batch size: 204, lr: 2.88e-02, grad_scale: 32.0 +2024-01-15 14:43:57,068 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.00 vs. 
limit=10.0 +2024-01-15 14:44:10,191 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=32643.333333333332, ans=0.2 +2024-01-15 14:44:20,869 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=32676.666666666668, ans=0.125 +2024-01-15 14:44:22,601 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=32676.666666666668, ans=0.0 +2024-01-15 14:44:22,722 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=32676.666666666668, ans=0.0037659420289855063 +2024-01-15 14:44:34,706 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.859e+02 2.314e+02 2.670e+02 3.200e+02 5.574e+02, threshold=5.340e+02, percent-clipped=2.0 +2024-01-15 14:44:45,427 INFO [train.py:994] (0/2) Epoch 12, batch 550, loss[loss=0.2018, simple_loss=0.2646, pruned_loss=0.06957, over 24405.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2742, pruned_loss=0.06831, over 4497095.82 frames. ], batch size: 153, lr: 2.88e-02, grad_scale: 32.0 +2024-01-15 14:45:19,414 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.48 vs. limit=22.5 +2024-01-15 14:45:27,258 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=32843.333333333336, ans=0.125 +2024-01-15 14:45:36,808 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=32876.666666666664, ans=0.125 +2024-01-15 14:45:44,060 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=32876.666666666664, ans=0.125 +2024-01-15 14:45:45,214 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=32876.666666666664, ans=0.0 +2024-01-15 14:45:45,466 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=3.02 vs. limit=10.0 +2024-01-15 14:45:48,556 INFO [train.py:994] (0/2) Epoch 12, batch 600, loss[loss=0.1963, simple_loss=0.2669, pruned_loss=0.06283, over 24465.00 frames. ], tot_loss[loss=0.2048, simple_loss=0.2737, pruned_loss=0.06795, over 4560203.35 frames. ], batch size: 222, lr: 2.87e-02, grad_scale: 32.0 +2024-01-15 14:45:50,464 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=9.52 vs. limit=15.0 +2024-01-15 14:45:51,767 INFO [scaling.py:1022] (0/2) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=6.88 vs. limit=8.0 +2024-01-15 14:46:25,758 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.82 vs. 
limit=15.0 +2024-01-15 14:46:31,953 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=33010.0, ans=0.5 +2024-01-15 14:46:33,012 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=33010.0, ans=0.125 +2024-01-15 14:46:40,526 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.867e+02 2.274e+02 2.530e+02 3.113e+02 5.260e+02, threshold=5.061e+02, percent-clipped=0.0 +2024-01-15 14:46:50,202 INFO [train.py:994] (0/2) Epoch 12, batch 650, loss[loss=0.211, simple_loss=0.2776, pruned_loss=0.0722, over 24514.00 frames. ], tot_loss[loss=0.2054, simple_loss=0.2744, pruned_loss=0.06817, over 4606460.91 frames. ], batch size: 204, lr: 2.87e-02, grad_scale: 16.0 +2024-01-15 14:46:57,692 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=33076.666666666664, ans=0.125 +2024-01-15 14:47:22,424 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.27 vs. limit=22.5 +2024-01-15 14:47:31,127 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=33176.666666666664, ans=0.125 +2024-01-15 14:47:36,190 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.26 vs. limit=15.0 +2024-01-15 14:47:41,629 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=33210.0, ans=0.0 +2024-01-15 14:47:44,122 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=33210.0, ans=0.05 +2024-01-15 14:47:53,431 INFO [train.py:994] (0/2) Epoch 12, batch 700, loss[loss=0.2014, simple_loss=0.2696, pruned_loss=0.06658, over 24378.00 frames. ], tot_loss[loss=0.2049, simple_loss=0.2739, pruned_loss=0.06791, over 4652514.37 frames. ], batch size: 153, lr: 2.86e-02, grad_scale: 16.0 +2024-01-15 14:47:56,541 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.26 vs. limit=15.0 +2024-01-15 14:48:01,924 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=33243.333333333336, ans=0.125 +2024-01-15 14:48:20,948 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.min_abs, batch_count=33310.0, ans=0.5 +2024-01-15 14:48:23,334 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=33310.0, ans=0.07 +2024-01-15 14:48:30,693 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=33343.333333333336, ans=0.2 +2024-01-15 14:48:45,949 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.833e+02 2.288e+02 2.551e+02 2.862e+02 5.398e+02, threshold=5.102e+02, percent-clipped=1.0 +2024-01-15 14:48:49,088 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.06 vs. 
limit=15.0 +2024-01-15 14:48:55,412 INFO [train.py:994] (0/2) Epoch 12, batch 750, loss[loss=0.208, simple_loss=0.2807, pruned_loss=0.06759, over 24430.00 frames. ], tot_loss[loss=0.205, simple_loss=0.2743, pruned_loss=0.06786, over 4688514.93 frames. ], batch size: 250, lr: 2.86e-02, grad_scale: 16.0 +2024-01-15 14:48:59,304 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=33410.0, ans=0.0 +2024-01-15 14:49:04,624 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=33410.0, ans=0.2 +2024-01-15 14:49:38,252 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=33510.0, ans=0.0 +2024-01-15 14:49:43,964 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=20.69 vs. limit=22.5 +2024-01-15 14:49:55,667 INFO [train.py:994] (0/2) Epoch 12, batch 800, loss[loss=0.2206, simple_loss=0.2833, pruned_loss=0.07891, over 24353.00 frames. ], tot_loss[loss=0.2042, simple_loss=0.2736, pruned_loss=0.06743, over 4718393.21 frames. ], batch size: 153, lr: 2.85e-02, grad_scale: 32.0 +2024-01-15 14:49:58,158 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=33576.666666666664, ans=0.125 +2024-01-15 14:50:02,585 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=33576.666666666664, ans=0.0 +2024-01-15 14:50:16,597 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=33610.0, ans=0.1 +2024-01-15 14:50:18,886 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=33643.333333333336, ans=0.125 +2024-01-15 14:50:26,280 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=33643.333333333336, ans=0.125 +2024-01-15 14:50:43,708 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.987e+02 2.226e+02 2.599e+02 3.021e+02 8.149e+02, threshold=5.199e+02, percent-clipped=1.0 +2024-01-15 14:50:44,883 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-12.pt +2024-01-15 14:51:08,438 INFO [train.py:994] (0/2) Epoch 13, batch 0, loss[loss=0.1988, simple_loss=0.2668, pruned_loss=0.06535, over 24520.00 frames. ], tot_loss[loss=0.1988, simple_loss=0.2668, pruned_loss=0.06535, over 24520.00 frames. ], batch size: 204, lr: 2.78e-02, grad_scale: 32.0 +2024-01-15 14:51:08,440 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 14:51:28,552 INFO [train.py:1026] (0/2) Epoch 13, validation: loss=0.1772, simple_loss=0.2649, pruned_loss=0.04477, over 1622729.00 frames. 
+2024-01-15 14:51:28,553 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 14:51:28,870 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=33720.0, ans=0.05 +2024-01-15 14:51:32,412 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=33720.0, ans=0.0 +2024-01-15 14:52:10,280 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.96 vs. limit=6.0 +2024-01-15 14:52:14,550 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=33820.0, ans=0.2 +2024-01-15 14:52:22,559 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=33853.333333333336, ans=0.2 +2024-01-15 14:52:32,579 INFO [train.py:994] (0/2) Epoch 13, batch 50, loss[loss=0.2031, simple_loss=0.2731, pruned_loss=0.06651, over 24389.00 frames. ], tot_loss[loss=0.2023, simple_loss=0.2715, pruned_loss=0.06661, over 1089187.01 frames. ], batch size: 286, lr: 2.77e-02, grad_scale: 32.0 +2024-01-15 14:52:36,452 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=33886.666666666664, ans=0.2 +2024-01-15 14:52:54,606 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 14:52:56,849 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=33953.333333333336, ans=0.0 +2024-01-15 14:53:00,775 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.73 vs. limit=15.0 +2024-01-15 14:53:05,158 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=33953.333333333336, ans=0.125 +2024-01-15 14:53:15,544 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=33986.666666666664, ans=0.125 +2024-01-15 14:53:32,692 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=34020.0, ans=0.1 +2024-01-15 14:53:34,167 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.857e+02 2.225e+02 2.592e+02 3.041e+02 4.021e+02, threshold=5.184e+02, percent-clipped=0.0 +2024-01-15 14:53:35,378 INFO [train.py:994] (0/2) Epoch 13, batch 100, loss[loss=0.2203, simple_loss=0.2856, pruned_loss=0.07746, over 24351.00 frames. ], tot_loss[loss=0.2017, simple_loss=0.2711, pruned_loss=0.06614, over 1915133.18 frames. 
], batch size: 298, lr: 2.77e-02, grad_scale: 32.0 +2024-01-15 14:53:36,783 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=34053.333333333336, ans=0.125 +2024-01-15 14:53:41,512 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=34053.333333333336, ans=0.95 +2024-01-15 14:53:46,298 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=34086.666666666664, ans=0.0034594202898550726 +2024-01-15 14:53:52,970 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=34086.666666666664, ans=0.125 +2024-01-15 14:53:57,783 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=34086.666666666664, ans=0.0 +2024-01-15 14:54:03,072 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=34120.0, ans=0.125 +2024-01-15 14:54:18,034 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=34153.333333333336, ans=0.125 +2024-01-15 14:54:23,954 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=34186.666666666664, ans=0.125 +2024-01-15 14:54:25,277 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=34186.666666666664, ans=0.2 +2024-01-15 14:54:25,361 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=34186.666666666664, ans=0.0 +2024-01-15 14:54:29,464 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=34186.666666666664, ans=0.00343768115942029 +2024-01-15 14:54:35,220 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=34186.666666666664, ans=0.125 +2024-01-15 14:54:37,327 INFO [train.py:994] (0/2) Epoch 13, batch 150, loss[loss=0.1963, simple_loss=0.2673, pruned_loss=0.06263, over 24496.00 frames. ], tot_loss[loss=0.2016, simple_loss=0.2718, pruned_loss=0.06565, over 2569097.32 frames. ], batch size: 243, lr: 2.77e-02, grad_scale: 32.0 +2024-01-15 14:54:39,573 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=34220.0, ans=0.125 +2024-01-15 14:54:45,274 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=34220.0, ans=0.09899494936611666 +2024-01-15 14:54:52,553 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=34253.333333333336, ans=0.04949747468305833 +2024-01-15 14:54:54,492 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=34253.333333333336, ans=0.125 +2024-01-15 14:55:00,879 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.10 vs. 
limit=6.0 +2024-01-15 14:55:13,630 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.77 vs. limit=15.0 +2024-01-15 14:55:19,784 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=34320.0, ans=0.1 +2024-01-15 14:55:22,201 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=34320.0, ans=0.0 +2024-01-15 14:55:39,353 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.689e+02 2.355e+02 2.666e+02 3.154e+02 5.036e+02, threshold=5.331e+02, percent-clipped=0.0 +2024-01-15 14:55:40,573 INFO [train.py:994] (0/2) Epoch 13, batch 200, loss[loss=0.1855, simple_loss=0.2521, pruned_loss=0.05943, over 23969.00 frames. ], tot_loss[loss=0.202, simple_loss=0.2724, pruned_loss=0.06581, over 3065169.45 frames. ], batch size: 131, lr: 2.76e-02, grad_scale: 32.0 +2024-01-15 14:55:53,330 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 14:56:08,044 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=34453.333333333336, ans=0.125 +2024-01-15 14:56:17,044 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=34486.666666666664, ans=0.5 +2024-01-15 14:56:19,286 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=34486.666666666664, ans=0.125 +2024-01-15 14:56:20,576 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=34486.666666666664, ans=0.2 +2024-01-15 14:56:21,189 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=11.66 vs. limit=15.0 +2024-01-15 14:56:43,202 INFO [train.py:994] (0/2) Epoch 13, batch 250, loss[loss=0.2085, simple_loss=0.282, pruned_loss=0.06747, over 24235.00 frames. ], tot_loss[loss=0.2013, simple_loss=0.272, pruned_loss=0.06534, over 3450553.22 frames. ], batch size: 311, lr: 2.76e-02, grad_scale: 32.0 +2024-01-15 14:56:56,041 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.59 vs. limit=12.0 +2024-01-15 14:57:02,368 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=34586.666666666664, ans=0.1 +2024-01-15 14:57:09,311 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=34620.0, ans=0.2 +2024-01-15 14:57:10,517 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=34620.0, ans=0.5 +2024-01-15 14:57:15,314 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=34620.0, ans=0.0 +2024-01-15 14:57:17,865 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.09 vs. 
limit=15.0 +2024-01-15 14:57:20,740 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=34653.333333333336, ans=0.1 +2024-01-15 14:57:26,810 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=34653.333333333336, ans=0.1 +2024-01-15 14:57:41,759 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=34686.666666666664, ans=0.0033289855072463764 +2024-01-15 14:57:41,818 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=34686.666666666664, ans=0.95 +2024-01-15 14:57:44,585 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.872e+02 2.193e+02 2.493e+02 2.832e+02 4.514e+02, threshold=4.985e+02, percent-clipped=0.0 +2024-01-15 14:57:45,790 INFO [train.py:994] (0/2) Epoch 13, batch 300, loss[loss=0.2068, simple_loss=0.2793, pruned_loss=0.06713, over 24334.00 frames. ], tot_loss[loss=0.2002, simple_loss=0.2711, pruned_loss=0.06463, over 3757416.93 frames. ], batch size: 298, lr: 2.75e-02, grad_scale: 32.0 +2024-01-15 14:57:47,269 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=34720.0, ans=0.0033217391304347834 +2024-01-15 14:57:50,832 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=34720.0, ans=0.125 +2024-01-15 14:58:12,423 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=34786.666666666664, ans=0.125 +2024-01-15 14:58:47,859 INFO [train.py:994] (0/2) Epoch 13, batch 350, loss[loss=0.2243, simple_loss=0.2922, pruned_loss=0.07819, over 24556.00 frames. ], tot_loss[loss=0.2007, simple_loss=0.2713, pruned_loss=0.06501, over 3988962.35 frames. ], batch size: 193, lr: 2.75e-02, grad_scale: 32.0 +2024-01-15 14:58:58,418 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.14 vs. limit=6.0 +2024-01-15 14:59:05,562 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_positive, batch_count=34920.0, ans=0.05 +2024-01-15 14:59:48,953 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.819e+02 2.352e+02 2.687e+02 3.129e+02 5.863e+02, threshold=5.374e+02, percent-clipped=1.0 +2024-01-15 14:59:49,355 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 14:59:50,224 INFO [train.py:994] (0/2) Epoch 13, batch 400, loss[loss=0.1888, simple_loss=0.2553, pruned_loss=0.06117, over 23922.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2707, pruned_loss=0.06469, over 4172728.48 frames. ], batch size: 131, lr: 2.74e-02, grad_scale: 32.0 +2024-01-15 15:00:01,387 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=35053.333333333336, ans=0.125 +2024-01-15 15:00:03,840 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=35086.666666666664, ans=0.0 +2024-01-15 15:00:10,146 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.44 vs. 
limit=22.5 +2024-01-15 15:00:25,218 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=35120.0, ans=0.0032347826086956523 +2024-01-15 15:00:31,002 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=35153.333333333336, ans=0.1 +2024-01-15 15:00:31,009 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=35153.333333333336, ans=0.125 +2024-01-15 15:00:31,030 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=35153.333333333336, ans=0.125 +2024-01-15 15:00:34,513 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=35153.333333333336, ans=0.1 +2024-01-15 15:00:53,429 INFO [train.py:994] (0/2) Epoch 13, batch 450, loss[loss=0.1978, simple_loss=0.2709, pruned_loss=0.06234, over 24482.00 frames. ], tot_loss[loss=0.2, simple_loss=0.2707, pruned_loss=0.06461, over 4315118.64 frames. ], batch size: 187, lr: 2.74e-02, grad_scale: 32.0 +2024-01-15 15:01:02,960 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=35220.0, ans=0.125 +2024-01-15 15:01:03,267 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.60 vs. limit=15.0 +2024-01-15 15:01:07,939 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=35253.333333333336, ans=0.0 +2024-01-15 15:01:09,954 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=35253.333333333336, ans=0.125 +2024-01-15 15:01:19,845 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=35286.666666666664, ans=0.003198550724637681 +2024-01-15 15:01:21,016 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=35286.666666666664, ans=0.125 +2024-01-15 15:01:44,907 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=35353.333333333336, ans=0.125 +2024-01-15 15:01:54,116 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.682e+02 2.235e+02 2.583e+02 3.206e+02 4.584e+02, threshold=5.166e+02, percent-clipped=0.0 +2024-01-15 15:01:54,441 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 15:01:55,318 INFO [train.py:994] (0/2) Epoch 13, batch 500, loss[loss=0.1954, simple_loss=0.2688, pruned_loss=0.06101, over 24404.00 frames. ], tot_loss[loss=0.1999, simple_loss=0.2706, pruned_loss=0.06462, over 4431934.34 frames. ], batch size: 275, lr: 2.73e-02, grad_scale: 32.0 +2024-01-15 15:02:04,768 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.85 vs. limit=6.0 +2024-01-15 15:02:08,846 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.72 vs. 
limit=15.0 +2024-01-15 15:02:11,365 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.96 vs. limit=15.0 +2024-01-15 15:02:26,670 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.09 vs. limit=15.0 +2024-01-15 15:02:46,565 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=35520.0, ans=0.07 +2024-01-15 15:02:50,065 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=35520.0, ans=0.0 +2024-01-15 15:02:58,366 INFO [train.py:994] (0/2) Epoch 13, batch 550, loss[loss=0.209, simple_loss=0.2779, pruned_loss=0.07009, over 24465.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2699, pruned_loss=0.06451, over 4516164.01 frames. ], batch size: 222, lr: 2.73e-02, grad_scale: 16.0 +2024-01-15 15:03:02,302 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=35553.333333333336, ans=0.2 +2024-01-15 15:03:02,382 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=35553.333333333336, ans=0.0031405797101449274 +2024-01-15 15:03:10,588 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=35586.666666666664, ans=0.125 +2024-01-15 15:03:19,070 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=35586.666666666664, ans=0.2 +2024-01-15 15:03:19,182 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=35586.666666666664, ans=0.125 +2024-01-15 15:03:26,196 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=35620.0, ans=0.0 +2024-01-15 15:03:39,265 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=35653.333333333336, ans=0.003118840579710145 +2024-01-15 15:04:00,210 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.854e+02 2.330e+02 2.832e+02 3.202e+02 4.606e+02, threshold=5.663e+02, percent-clipped=0.0 +2024-01-15 15:04:00,238 INFO [train.py:994] (0/2) Epoch 13, batch 600, loss[loss=0.1789, simple_loss=0.2523, pruned_loss=0.05274, over 24217.00 frames. ], tot_loss[loss=0.1997, simple_loss=0.27, pruned_loss=0.0647, over 4568877.05 frames. ], batch size: 140, lr: 2.73e-02, grad_scale: 16.0 +2024-01-15 15:04:27,467 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.54 vs. limit=15.0 +2024-01-15 15:04:54,503 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=35853.333333333336, ans=0.1 +2024-01-15 15:04:58,151 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=35853.333333333336, ans=0.125 +2024-01-15 15:05:01,304 INFO [train.py:994] (0/2) Epoch 13, batch 650, loss[loss=0.2122, simple_loss=0.288, pruned_loss=0.06819, over 23863.00 frames. ], tot_loss[loss=0.1991, simple_loss=0.2696, pruned_loss=0.0643, over 4629907.47 frames. 
], batch size: 328, lr: 2.72e-02, grad_scale: 16.0 +2024-01-15 15:05:05,704 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_abs, batch_count=35886.666666666664, ans=0.5 +2024-01-15 15:05:06,220 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.conv_module1.whiten, num_groups=1, num_channels=192, metric=8.42 vs. limit=15.0 +2024-01-15 15:05:11,159 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=35886.666666666664, ans=0.1 +2024-01-15 15:05:15,899 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=35920.0, ans=0.125 +2024-01-15 15:05:19,503 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=35920.0, ans=0.5 +2024-01-15 15:05:21,969 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=35920.0, ans=0.125 +2024-01-15 15:05:24,301 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=35920.0, ans=0.125 +2024-01-15 15:05:33,679 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=35953.333333333336, ans=0.125 +2024-01-15 15:05:43,523 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=35986.666666666664, ans=0.2 +2024-01-15 15:06:00,529 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=5.74 vs. limit=10.0 +2024-01-15 15:06:04,285 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.839e+02 2.223e+02 2.545e+02 2.957e+02 4.129e+02, threshold=5.090e+02, percent-clipped=0.0 +2024-01-15 15:06:04,319 INFO [train.py:994] (0/2) Epoch 13, batch 700, loss[loss=0.1753, simple_loss=0.2298, pruned_loss=0.06046, over 18649.00 frames. ], tot_loss[loss=0.1987, simple_loss=0.2695, pruned_loss=0.064, over 4668376.76 frames. ], batch size: 80, lr: 2.72e-02, grad_scale: 16.0 +2024-01-15 15:06:20,452 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=11.01 vs. limit=15.0 +2024-01-15 15:06:33,153 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.30 vs. limit=15.0 +2024-01-15 15:07:06,167 INFO [train.py:994] (0/2) Epoch 13, batch 750, loss[loss=0.2017, simple_loss=0.2724, pruned_loss=0.0655, over 24416.00 frames. ], tot_loss[loss=0.1981, simple_loss=0.269, pruned_loss=0.06359, over 4695863.86 frames. 
], batch size: 258, lr: 2.71e-02, grad_scale: 16.0 +2024-01-15 15:07:11,869 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=36220.0, ans=0.1 +2024-01-15 15:07:18,544 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=36253.333333333336, ans=0.125 +2024-01-15 15:07:24,125 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=36253.333333333336, ans=0.125 +2024-01-15 15:07:38,811 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=36286.666666666664, ans=0.125 +2024-01-15 15:07:39,818 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=36286.666666666664, ans=0.035 +2024-01-15 15:07:46,716 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=11.02 vs. limit=15.0 +2024-01-15 15:07:54,137 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=36353.333333333336, ans=0.1 +2024-01-15 15:08:00,921 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=36353.333333333336, ans=0.125 +2024-01-15 15:08:06,426 INFO [train.py:994] (0/2) Epoch 13, batch 800, loss[loss=0.219, simple_loss=0.2839, pruned_loss=0.07702, over 24378.00 frames. ], tot_loss[loss=0.1982, simple_loss=0.2691, pruned_loss=0.06369, over 4728940.80 frames. ], batch size: 153, lr: 2.71e-02, grad_scale: 16.0 +2024-01-15 15:08:07,599 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.913e+02 2.262e+02 2.496e+02 3.133e+02 5.825e+02, threshold=4.992e+02, percent-clipped=1.0 +2024-01-15 15:08:11,041 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=36386.666666666664, ans=0.1 +2024-01-15 15:08:20,783 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.26 vs. limit=15.0 +2024-01-15 15:08:40,220 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=36486.666666666664, ans=0.1 +2024-01-15 15:08:55,756 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-13.pt +2024-01-15 15:09:18,719 INFO [train.py:994] (0/2) Epoch 14, batch 0, loss[loss=0.1978, simple_loss=0.2712, pruned_loss=0.06222, over 24417.00 frames. ], tot_loss[loss=0.1978, simple_loss=0.2712, pruned_loss=0.06222, over 24417.00 frames. ], batch size: 250, lr: 2.64e-02, grad_scale: 32.0 +2024-01-15 15:09:18,720 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 15:09:38,583 INFO [train.py:1026] (0/2) Epoch 14, validation: loss=0.1766, simple_loss=0.2638, pruned_loss=0.04469, over 1622729.00 frames. 
+2024-01-15 15:09:38,583 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 15:09:50,597 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=36563.333333333336, ans=0.125 +2024-01-15 15:10:13,558 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=36596.666666666664, ans=0.0 +2024-01-15 15:10:38,270 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=36663.333333333336, ans=0.125 +2024-01-15 15:10:42,554 INFO [train.py:994] (0/2) Epoch 14, batch 50, loss[loss=0.2125, simple_loss=0.286, pruned_loss=0.06955, over 23919.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2659, pruned_loss=0.062, over 1085983.09 frames. ], batch size: 328, lr: 2.64e-02, grad_scale: 32.0 +2024-01-15 15:10:44,311 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.68 vs. limit=10.0 +2024-01-15 15:10:52,670 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.278e+02 2.632e+02 3.085e+02 4.590e+02, threshold=5.264e+02, percent-clipped=0.0 +2024-01-15 15:10:56,680 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=36730.0, ans=0.125 +2024-01-15 15:11:03,348 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=36730.0, ans=0.1 +2024-01-15 15:11:06,742 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=36763.333333333336, ans=0.125 +2024-01-15 15:11:12,088 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=36763.333333333336, ans=0.125 +2024-01-15 15:11:35,080 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=36830.0, ans=0.2 +2024-01-15 15:11:45,599 INFO [train.py:994] (0/2) Epoch 14, batch 100, loss[loss=0.1933, simple_loss=0.2609, pruned_loss=0.06286, over 24501.00 frames. ], tot_loss[loss=0.194, simple_loss=0.2654, pruned_loss=0.06128, over 1921367.15 frames. ], batch size: 243, lr: 2.63e-02, grad_scale: 32.0 +2024-01-15 15:11:59,634 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=36896.666666666664, ans=0.125 +2024-01-15 15:12:04,155 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.77 vs. limit=15.0 +2024-01-15 15:12:08,654 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=36896.666666666664, ans=0.0028485507246376824 +2024-01-15 15:12:13,843 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.45 vs. limit=15.0 +2024-01-15 15:12:44,340 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=36996.666666666664, ans=0.2 +2024-01-15 15:12:48,255 INFO [train.py:994] (0/2) Epoch 14, batch 150, loss[loss=0.2009, simple_loss=0.2722, pruned_loss=0.06478, over 24510.00 frames. 
], tot_loss[loss=0.1951, simple_loss=0.2666, pruned_loss=0.06184, over 2562233.72 frames. ], batch size: 229, lr: 2.63e-02, grad_scale: 32.0 +2024-01-15 15:12:50,364 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.48 vs. limit=15.0 +2024-01-15 15:12:55,226 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=37030.0, ans=0.125 +2024-01-15 15:12:56,451 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=37030.0, ans=0.0 +2024-01-15 15:12:58,305 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.946e+02 2.387e+02 2.863e+02 3.406e+02 4.847e+02, threshold=5.726e+02, percent-clipped=0.0 +2024-01-15 15:13:10,693 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=37063.333333333336, ans=0.0 +2024-01-15 15:13:16,079 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=37096.666666666664, ans=0.05 +2024-01-15 15:13:49,203 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=37163.333333333336, ans=0.2 +2024-01-15 15:13:51,281 INFO [train.py:994] (0/2) Epoch 14, batch 200, loss[loss=0.2053, simple_loss=0.2818, pruned_loss=0.06435, over 24603.00 frames. ], tot_loss[loss=0.195, simple_loss=0.2665, pruned_loss=0.06178, over 3046949.87 frames. ], batch size: 199, lr: 2.62e-02, grad_scale: 16.0 +2024-01-15 15:14:01,209 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=37196.666666666664, ans=0.125 +2024-01-15 15:14:15,609 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=37263.333333333336, ans=0.0 +2024-01-15 15:14:42,559 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=37330.0, ans=0.125 +2024-01-15 15:14:48,019 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=37330.0, ans=0.015 +2024-01-15 15:14:53,868 INFO [train.py:994] (0/2) Epoch 14, batch 250, loss[loss=0.1904, simple_loss=0.2625, pruned_loss=0.05915, over 24519.00 frames. ], tot_loss[loss=0.1936, simple_loss=0.2656, pruned_loss=0.06081, over 3441576.95 frames. 
], batch size: 243, lr: 2.62e-02, grad_scale: 16.0 +2024-01-15 15:14:56,510 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=37363.333333333336, ans=0.0 +2024-01-15 15:15:03,785 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=37363.333333333336, ans=0.125 +2024-01-15 15:15:05,908 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.918e+02 2.380e+02 2.749e+02 3.167e+02 5.253e+02, threshold=5.498e+02, percent-clipped=0.0 +2024-01-15 15:15:38,590 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=37463.333333333336, ans=0.025 +2024-01-15 15:15:41,104 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=37463.333333333336, ans=0.125 +2024-01-15 15:15:57,563 INFO [train.py:994] (0/2) Epoch 14, batch 300, loss[loss=0.225, simple_loss=0.299, pruned_loss=0.07551, over 22525.00 frames. ], tot_loss[loss=0.1943, simple_loss=0.2664, pruned_loss=0.06108, over 3755074.92 frames. ], batch size: 357, lr: 2.62e-02, grad_scale: 16.0 +2024-01-15 15:16:02,696 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=37530.0, ans=0.0 +2024-01-15 15:16:03,757 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=37530.0, ans=0.125 +2024-01-15 15:16:09,829 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=37563.333333333336, ans=0.0027036231884057967 +2024-01-15 15:16:12,221 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=37563.333333333336, ans=0.5 +2024-01-15 15:16:25,782 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.82 vs. limit=6.0 +2024-01-15 15:16:41,852 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=37630.0, ans=0.125 +2024-01-15 15:16:58,028 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=37663.333333333336, ans=0.0 +2024-01-15 15:17:00,067 INFO [train.py:994] (0/2) Epoch 14, batch 350, loss[loss=0.1987, simple_loss=0.276, pruned_loss=0.06073, over 24442.00 frames. ], tot_loss[loss=0.1949, simple_loss=0.2666, pruned_loss=0.06159, over 3991166.51 frames. ], batch size: 250, lr: 2.61e-02, grad_scale: 16.0 +2024-01-15 15:17:02,699 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=37696.666666666664, ans=0.125 +2024-01-15 15:17:12,200 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.842e+02 2.300e+02 2.540e+02 2.971e+02 4.745e+02, threshold=5.081e+02, percent-clipped=0.0 +2024-01-15 15:17:15,192 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.74 vs. limit=15.0 +2024-01-15 15:17:15,437 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=7.50 vs. 
limit=15.0 +2024-01-15 15:17:20,993 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=37730.0, ans=0.125 +2024-01-15 15:17:23,224 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=37730.0, ans=0.125 +2024-01-15 15:17:23,790 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.88 vs. limit=15.0 +2024-01-15 15:17:43,670 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=37796.666666666664, ans=0.125 +2024-01-15 15:17:49,029 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.46 vs. limit=22.5 +2024-01-15 15:17:49,710 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=37830.0, ans=0.125 +2024-01-15 15:17:53,364 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=37830.0, ans=0.2 +2024-01-15 15:17:54,508 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=37830.0, ans=0.125 +2024-01-15 15:17:56,876 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=37830.0, ans=0.1 +2024-01-15 15:18:03,269 INFO [train.py:994] (0/2) Epoch 14, batch 400, loss[loss=0.1954, simple_loss=0.27, pruned_loss=0.06043, over 24493.00 frames. ], tot_loss[loss=0.1947, simple_loss=0.2661, pruned_loss=0.06162, over 4153066.72 frames. ], batch size: 210, lr: 2.61e-02, grad_scale: 32.0 +2024-01-15 15:18:12,022 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.83 vs. limit=6.0 +2024-01-15 15:18:27,524 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=37930.0, ans=0.125 +2024-01-15 15:18:36,765 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=37930.0, ans=0.0 +2024-01-15 15:18:53,331 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=37996.666666666664, ans=0.0 +2024-01-15 15:19:07,465 INFO [train.py:994] (0/2) Epoch 14, batch 450, loss[loss=0.2022, simple_loss=0.2674, pruned_loss=0.06848, over 24489.00 frames. ], tot_loss[loss=0.1939, simple_loss=0.2653, pruned_loss=0.0612, over 4300255.15 frames. ], batch size: 222, lr: 2.60e-02, grad_scale: 32.0 +2024-01-15 15:19:12,528 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=38030.0, ans=0.1 +2024-01-15 15:19:18,091 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.50 vs. 
limit=22.5 +2024-01-15 15:19:18,667 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.836e+02 2.289e+02 2.540e+02 2.904e+02 4.197e+02, threshold=5.081e+02, percent-clipped=0.0 +2024-01-15 15:19:22,820 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=38063.333333333336, ans=0.0 +2024-01-15 15:19:24,587 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=38063.333333333336, ans=0.125 +2024-01-15 15:19:29,271 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=38063.333333333336, ans=0.0 +2024-01-15 15:19:45,307 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=38130.0, ans=0.5 +2024-01-15 15:19:48,305 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.23 vs. limit=6.0 +2024-01-15 15:20:09,861 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=16.29 vs. limit=22.5 +2024-01-15 15:20:10,160 INFO [train.py:994] (0/2) Epoch 14, batch 500, loss[loss=0.1973, simple_loss=0.2699, pruned_loss=0.06235, over 24486.00 frames. ], tot_loss[loss=0.1937, simple_loss=0.2651, pruned_loss=0.06112, over 4409409.94 frames. ], batch size: 229, lr: 2.60e-02, grad_scale: 32.0 +2024-01-15 15:20:34,114 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_abs, batch_count=38263.333333333336, ans=0.5 +2024-01-15 15:20:43,461 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=38263.333333333336, ans=0.1 +2024-01-15 15:21:11,935 INFO [train.py:994] (0/2) Epoch 14, batch 550, loss[loss=0.1934, simple_loss=0.2705, pruned_loss=0.05812, over 24492.00 frames. ], tot_loss[loss=0.1932, simple_loss=0.2646, pruned_loss=0.06092, over 4504850.81 frames. ], batch size: 216, lr: 2.60e-02, grad_scale: 32.0 +2024-01-15 15:21:23,921 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.859e+02 2.229e+02 2.560e+02 3.146e+02 5.169e+02, threshold=5.120e+02, percent-clipped=1.0 +2024-01-15 15:21:36,137 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=38430.0, ans=0.0 +2024-01-15 15:21:51,506 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=38463.333333333336, ans=0.025 +2024-01-15 15:21:51,542 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer_ff2.min_abs, batch_count=38463.333333333336, ans=0.1 +2024-01-15 15:22:01,512 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=6.66 vs. limit=15.0 +2024-01-15 15:22:14,822 INFO [train.py:994] (0/2) Epoch 14, batch 600, loss[loss=0.2036, simple_loss=0.2802, pruned_loss=0.06354, over 23797.00 frames. ], tot_loss[loss=0.1935, simple_loss=0.265, pruned_loss=0.06095, over 4555800.29 frames. 
], batch size: 328, lr: 2.59e-02, grad_scale: 32.0 +2024-01-15 15:22:25,244 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.82 vs. limit=6.0 +2024-01-15 15:22:35,270 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.39 vs. limit=15.0 +2024-01-15 15:22:42,833 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=38596.666666666664, ans=0.07 +2024-01-15 15:22:52,404 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=38630.0, ans=0.125 +2024-01-15 15:22:59,151 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=38630.0, ans=0.2 +2024-01-15 15:23:00,257 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=38630.0, ans=0.125 +2024-01-15 15:23:09,307 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=38663.333333333336, ans=0.04949747468305833 +2024-01-15 15:23:15,411 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=7.34 vs. limit=15.0 +2024-01-15 15:23:16,371 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=38696.666666666664, ans=0.1 +2024-01-15 15:23:17,315 INFO [train.py:994] (0/2) Epoch 14, batch 650, loss[loss=0.1918, simple_loss=0.2743, pruned_loss=0.05464, over 23877.00 frames. ], tot_loss[loss=0.193, simple_loss=0.2646, pruned_loss=0.06064, over 4598594.53 frames. ], batch size: 328, lr: 2.59e-02, grad_scale: 32.0 +2024-01-15 15:23:28,763 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.895e+02 2.204e+02 2.618e+02 3.440e+02 5.205e+02, threshold=5.237e+02, percent-clipped=1.0 +2024-01-15 15:23:34,495 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=38730.0, ans=0.0 +2024-01-15 15:24:20,107 INFO [train.py:994] (0/2) Epoch 14, batch 700, loss[loss=0.1815, simple_loss=0.2548, pruned_loss=0.05408, over 24511.00 frames. ], tot_loss[loss=0.1929, simple_loss=0.2645, pruned_loss=0.06065, over 4640539.73 frames. ], batch size: 243, lr: 2.58e-02, grad_scale: 32.0 +2024-01-15 15:24:25,858 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=38863.333333333336, ans=0.0024210144927536226 +2024-01-15 15:24:29,609 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.43 vs. 
limit=6.0 +2024-01-15 15:24:40,197 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=38896.666666666664, ans=0.1 +2024-01-15 15:24:41,327 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=38896.666666666664, ans=0.125 +2024-01-15 15:24:45,784 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=38930.0, ans=0.125 +2024-01-15 15:24:53,827 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=38930.0, ans=0.125 +2024-01-15 15:25:08,551 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=8.90 vs. limit=15.0 +2024-01-15 15:25:11,088 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=38996.666666666664, ans=0.1 +2024-01-15 15:25:19,786 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=38996.666666666664, ans=0.125 +2024-01-15 15:25:21,034 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=38996.666666666664, ans=0.2 +2024-01-15 15:25:23,097 INFO [train.py:994] (0/2) Epoch 14, batch 750, loss[loss=0.1999, simple_loss=0.276, pruned_loss=0.06186, over 24494.00 frames. ], tot_loss[loss=0.1935, simple_loss=0.2651, pruned_loss=0.06096, over 4674068.78 frames. ], batch size: 229, lr: 2.58e-02, grad_scale: 32.0 +2024-01-15 15:25:33,694 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.808e+02 2.296e+02 2.739e+02 3.023e+02 4.357e+02, threshold=5.479e+02, percent-clipped=0.0 +2024-01-15 15:25:34,010 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=39063.333333333336, ans=0.0 +2024-01-15 15:25:37,034 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=39063.333333333336, ans=0.0 +2024-01-15 15:25:50,774 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=39096.666666666664, ans=0.0 +2024-01-15 15:25:54,208 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=39096.666666666664, ans=0.125 +2024-01-15 15:26:03,364 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=39130.0, ans=0.0 +2024-01-15 15:26:08,032 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=39130.0, ans=0.0 +2024-01-15 15:26:11,958 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=39163.333333333336, ans=0.09899494936611666 +2024-01-15 15:26:12,964 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=39163.333333333336, ans=0.002355797101449274 +2024-01-15 15:26:23,570 INFO [train.py:994] (0/2) Epoch 14, batch 800, loss[loss=0.1917, simple_loss=0.264, pruned_loss=0.05972, over 24546.00 frames. ], tot_loss[loss=0.1936, simple_loss=0.2651, pruned_loss=0.06102, over 4697075.17 frames. 
], batch size: 193, lr: 2.58e-02, grad_scale: 32.0 +2024-01-15 15:26:35,992 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 15:26:44,449 INFO [scaling.py:1022] (0/2) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.71 vs. limit=5.0 +2024-01-15 15:26:44,886 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=39263.333333333336, ans=0.125 +2024-01-15 15:26:58,265 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=39296.666666666664, ans=0.125 +2024-01-15 15:27:00,790 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.23 vs. limit=10.0 +2024-01-15 15:27:13,118 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-14.pt +2024-01-15 15:27:37,317 INFO [train.py:994] (0/2) Epoch 15, batch 0, loss[loss=0.1934, simple_loss=0.2666, pruned_loss=0.06015, over 24360.00 frames. ], tot_loss[loss=0.1934, simple_loss=0.2666, pruned_loss=0.06015, over 24360.00 frames. ], batch size: 298, lr: 2.51e-02, grad_scale: 32.0 +2024-01-15 15:27:37,318 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 15:27:57,295 INFO [train.py:1026] (0/2) Epoch 15, validation: loss=0.1734, simple_loss=0.2606, pruned_loss=0.04308, over 1622729.00 frames. +2024-01-15 15:27:57,296 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 15:28:07,967 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=39340.0, ans=0.0 +2024-01-15 15:28:17,322 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.773e+02 2.162e+02 2.431e+02 2.731e+02 3.877e+02, threshold=4.862e+02, percent-clipped=0.0 +2024-01-15 15:28:21,367 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=39406.666666666664, ans=0.1 +2024-01-15 15:28:27,240 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=39406.666666666664, ans=0.002302898550724638 +2024-01-15 15:28:29,631 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=39406.666666666664, ans=0.125 +2024-01-15 15:28:40,333 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 15:28:41,616 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=39440.0, ans=0.0 +2024-01-15 15:28:55,894 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=39473.333333333336, ans=0.07 +2024-01-15 15:29:00,165 INFO [train.py:994] (0/2) Epoch 15, batch 50, loss[loss=0.1987, simple_loss=0.2707, pruned_loss=0.06336, over 24410.00 frames. ], tot_loss[loss=0.1902, simple_loss=0.2612, pruned_loss=0.05958, over 1084689.22 frames. 
], batch size: 258, lr: 2.51e-02, grad_scale: 32.0 +2024-01-15 15:29:00,494 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=39506.666666666664, ans=0.125 +2024-01-15 15:29:06,456 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=39506.666666666664, ans=0.125 +2024-01-15 15:29:24,787 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=39573.333333333336, ans=0.125 +2024-01-15 15:29:39,158 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.94 vs. limit=6.0 +2024-01-15 15:29:47,295 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=39606.666666666664, ans=0.125 +2024-01-15 15:29:48,416 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=39640.0, ans=0.1 +2024-01-15 15:30:00,919 INFO [train.py:994] (0/2) Epoch 15, batch 100, loss[loss=0.1919, simple_loss=0.2649, pruned_loss=0.05943, over 24453.00 frames. ], tot_loss[loss=0.19, simple_loss=0.262, pruned_loss=0.05904, over 1910204.24 frames. ], batch size: 250, lr: 2.50e-02, grad_scale: 32.0 +2024-01-15 15:30:09,553 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=39673.333333333336, ans=0.125 +2024-01-15 15:30:10,935 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=39673.333333333336, ans=0.0022449275362318843 +2024-01-15 15:30:21,379 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.911e+02 2.236e+02 2.514e+02 3.021e+02 4.488e+02, threshold=5.028e+02, percent-clipped=0.0 +2024-01-15 15:30:33,415 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.08 vs. limit=15.0 +2024-01-15 15:30:37,958 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=39773.333333333336, ans=0.0 +2024-01-15 15:31:02,888 INFO [train.py:994] (0/2) Epoch 15, batch 150, loss[loss=0.205, simple_loss=0.2781, pruned_loss=0.06598, over 22367.00 frames. ], tot_loss[loss=0.191, simple_loss=0.2633, pruned_loss=0.0593, over 2555181.61 frames. ], batch size: 357, lr: 2.50e-02, grad_scale: 32.0 +2024-01-15 15:31:18,592 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=39873.333333333336, ans=0.1 +2024-01-15 15:31:40,190 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=39940.0, ans=0.1 +2024-01-15 15:31:45,008 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=39940.0, ans=0.1 +2024-01-15 15:32:01,231 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/checkpoint-12000.pt +2024-01-15 15:32:07,631 INFO [train.py:994] (0/2) Epoch 15, batch 200, loss[loss=0.1849, simple_loss=0.2563, pruned_loss=0.05673, over 24498.00 frames. 
], tot_loss[loss=0.1916, simple_loss=0.2641, pruned_loss=0.05956, over 3054625.45 frames. ], batch size: 165, lr: 2.50e-02, grad_scale: 32.0 +2024-01-15 15:32:10,349 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.min_positive, batch_count=40006.666666666664, ans=0.025 +2024-01-15 15:32:27,425 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.756e+02 2.321e+02 2.884e+02 3.523e+02 5.620e+02, threshold=5.767e+02, percent-clipped=2.0 +2024-01-15 15:32:38,329 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=40073.333333333336, ans=0.09899494936611666 +2024-01-15 15:32:38,360 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=40073.333333333336, ans=0.125 +2024-01-15 15:32:56,110 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=5.22 vs. limit=12.0 +2024-01-15 15:33:08,704 INFO [train.py:994] (0/2) Epoch 15, batch 250, loss[loss=0.1891, simple_loss=0.2636, pruned_loss=0.05733, over 24464.00 frames. ], tot_loss[loss=0.1908, simple_loss=0.2633, pruned_loss=0.05912, over 3443517.67 frames. ], batch size: 181, lr: 2.49e-02, grad_scale: 32.0 +2024-01-15 15:33:16,316 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=40173.333333333336, ans=0.125 +2024-01-15 15:33:29,266 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=40206.666666666664, ans=0.125 +2024-01-15 15:33:52,020 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_positive, batch_count=40273.333333333336, ans=0.05 +2024-01-15 15:33:54,362 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=40273.333333333336, ans=0.125 +2024-01-15 15:34:07,677 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=40306.666666666664, ans=0.125 +2024-01-15 15:34:10,759 INFO [train.py:994] (0/2) Epoch 15, batch 300, loss[loss=0.1936, simple_loss=0.2714, pruned_loss=0.05791, over 24529.00 frames. ], tot_loss[loss=0.1909, simple_loss=0.2637, pruned_loss=0.05902, over 3759428.43 frames. ], batch size: 243, lr: 2.49e-02, grad_scale: 32.0 +2024-01-15 15:34:18,248 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer_ff2.min_abs, batch_count=40340.0, ans=0.1 +2024-01-15 15:34:20,600 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=40340.0, ans=0.125 +2024-01-15 15:34:27,314 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.59 vs. 
limit=10.0 +2024-01-15 15:34:29,052 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.min_positive, batch_count=40373.333333333336, ans=0.05 +2024-01-15 15:34:30,323 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=40373.333333333336, ans=0.07 +2024-01-15 15:34:31,009 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.955e+02 2.273e+02 2.449e+02 2.995e+02 5.556e+02, threshold=4.897e+02, percent-clipped=0.0 +2024-01-15 15:34:38,376 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=40406.666666666664, ans=0.125 +2024-01-15 15:35:12,591 INFO [train.py:994] (0/2) Epoch 15, batch 350, loss[loss=0.1857, simple_loss=0.26, pruned_loss=0.05574, over 24502.00 frames. ], tot_loss[loss=0.1906, simple_loss=0.2637, pruned_loss=0.0588, over 4006291.66 frames. ], batch size: 210, lr: 2.49e-02, grad_scale: 32.0 +2024-01-15 15:35:22,660 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=40506.666666666664, ans=0.125 +2024-01-15 15:35:34,353 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=40540.0, ans=0.125 +2024-01-15 15:35:48,780 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=40573.333333333336, ans=0.1 +2024-01-15 15:35:55,081 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.max_positive, batch_count=40606.666666666664, ans=0.95 +2024-01-15 15:35:56,326 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=40606.666666666664, ans=0.0020420289855072465 +2024-01-15 15:36:03,524 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=40640.0, ans=0.125 +2024-01-15 15:36:07,427 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=40640.0, ans=0.0 +2024-01-15 15:36:17,096 INFO [train.py:994] (0/2) Epoch 15, batch 400, loss[loss=0.2132, simple_loss=0.2829, pruned_loss=0.07177, over 24439.00 frames. ], tot_loss[loss=0.1897, simple_loss=0.2625, pruned_loss=0.05845, over 4167099.04 frames. ], batch size: 170, lr: 2.48e-02, grad_scale: 32.0 +2024-01-15 15:36:28,222 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=40706.666666666664, ans=0.125 +2024-01-15 15:36:37,122 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.804e+02 2.267e+02 2.564e+02 2.924e+02 4.452e+02, threshold=5.127e+02, percent-clipped=0.0 +2024-01-15 15:36:40,493 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=40706.666666666664, ans=0.0 +2024-01-15 15:36:47,684 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=40740.0, ans=0.1 +2024-01-15 15:37:19,334 INFO [train.py:994] (0/2) Epoch 15, batch 450, loss[loss=0.1968, simple_loss=0.2686, pruned_loss=0.06252, over 24492.00 frames. ], tot_loss[loss=0.1894, simple_loss=0.2624, pruned_loss=0.05825, over 4303585.40 frames. 
], batch size: 229, lr: 2.48e-02, grad_scale: 32.0 +2024-01-15 15:37:19,651 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=40840.0, ans=0.125 +2024-01-15 15:37:36,103 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=40873.333333333336, ans=0.1 +2024-01-15 15:37:45,995 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=40906.666666666664, ans=0.125 +2024-01-15 15:38:13,175 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=40973.333333333336, ans=0.09899494936611666 +2024-01-15 15:38:21,912 INFO [train.py:994] (0/2) Epoch 15, batch 500, loss[loss=0.188, simple_loss=0.2624, pruned_loss=0.05677, over 24523.00 frames. ], tot_loss[loss=0.1894, simple_loss=0.2621, pruned_loss=0.05838, over 4406203.18 frames. ], batch size: 243, lr: 2.48e-02, grad_scale: 32.0 +2024-01-15 15:38:42,159 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.855e+02 2.156e+02 2.340e+02 2.628e+02 4.759e+02, threshold=4.679e+02, percent-clipped=0.0 +2024-01-15 15:38:50,262 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=41073.333333333336, ans=0.125 +2024-01-15 15:38:57,177 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=41073.333333333336, ans=0.0 +2024-01-15 15:39:05,515 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=41106.666666666664, ans=0.0 +2024-01-15 15:39:10,770 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=41140.0, ans=0.015 +2024-01-15 15:39:11,364 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.13 vs. limit=15.0 +2024-01-15 15:39:24,337 INFO [train.py:994] (0/2) Epoch 15, batch 550, loss[loss=0.1864, simple_loss=0.2588, pruned_loss=0.05699, over 24466.00 frames. ], tot_loss[loss=0.1891, simple_loss=0.262, pruned_loss=0.05811, over 4504655.34 frames. ], batch size: 222, lr: 2.47e-02, grad_scale: 32.0 +2024-01-15 15:39:36,140 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=41206.666666666664, ans=0.1 +2024-01-15 15:39:44,082 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=41206.666666666664, ans=0.0 +2024-01-15 15:39:49,479 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.23 vs. limit=12.0 +2024-01-15 15:39:53,830 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=41240.0, ans=0.04949747468305833 +2024-01-15 15:40:16,445 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=41306.666666666664, ans=0.0 +2024-01-15 15:40:27,996 INFO [train.py:994] (0/2) Epoch 15, batch 600, loss[loss=0.1848, simple_loss=0.2549, pruned_loss=0.05735, over 24364.00 frames. ], tot_loss[loss=0.1893, simple_loss=0.262, pruned_loss=0.05833, over 4568758.45 frames. 
], batch size: 159, lr: 2.47e-02, grad_scale: 32.0 +2024-01-15 15:40:31,114 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.67 vs. limit=10.0 +2024-01-15 15:40:47,818 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.812e+02 2.181e+02 2.447e+02 2.810e+02 7.048e+02, threshold=4.893e+02, percent-clipped=1.0 +2024-01-15 15:40:48,110 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=41373.333333333336, ans=0.1 +2024-01-15 15:41:12,087 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=41440.0, ans=0.0 +2024-01-15 15:41:18,927 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=41473.333333333336, ans=0.2 +2024-01-15 15:41:31,177 INFO [train.py:994] (0/2) Epoch 15, batch 650, loss[loss=0.1901, simple_loss=0.2605, pruned_loss=0.05986, over 24413.00 frames. ], tot_loss[loss=0.1883, simple_loss=0.261, pruned_loss=0.05774, over 4623313.07 frames. ], batch size: 258, lr: 2.46e-02, grad_scale: 32.0 +2024-01-15 15:41:54,683 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 15:42:04,667 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=41573.333333333336, ans=0.2 +2024-01-15 15:42:09,951 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=41606.666666666664, ans=0.0 +2024-01-15 15:42:12,637 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.88 vs. limit=15.0 +2024-01-15 15:42:13,715 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=41606.666666666664, ans=0.2 +2024-01-15 15:42:17,311 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=41606.666666666664, ans=0.09899494936611666 +2024-01-15 15:42:23,966 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=41640.0, ans=0.0 +2024-01-15 15:42:32,943 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=41673.333333333336, ans=0.0 +2024-01-15 15:42:33,815 INFO [train.py:994] (0/2) Epoch 15, batch 700, loss[loss=0.1994, simple_loss=0.2712, pruned_loss=0.06385, over 24216.00 frames. ], tot_loss[loss=0.1887, simple_loss=0.2614, pruned_loss=0.05804, over 4671033.83 frames. 
], batch size: 311, lr: 2.46e-02, grad_scale: 32.0 +2024-01-15 15:42:53,562 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.804e+02 2.413e+02 2.693e+02 3.109e+02 4.635e+02, threshold=5.386e+02, percent-clipped=0.0 +2024-01-15 15:42:54,952 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=41706.666666666664, ans=0.125 +2024-01-15 15:43:19,822 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=41773.333333333336, ans=0.125 +2024-01-15 15:43:24,923 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=41806.666666666664, ans=0.2 +2024-01-15 15:43:27,467 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=41806.666666666664, ans=0.125 +2024-01-15 15:43:32,040 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=41806.666666666664, ans=0.5 +2024-01-15 15:43:32,098 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=41806.666666666664, ans=0.5 +2024-01-15 15:43:35,348 INFO [train.py:994] (0/2) Epoch 15, batch 750, loss[loss=0.1969, simple_loss=0.2752, pruned_loss=0.05928, over 24515.00 frames. ], tot_loss[loss=0.1889, simple_loss=0.2617, pruned_loss=0.05812, over 4707612.43 frames. ], batch size: 187, lr: 2.46e-02, grad_scale: 32.0 +2024-01-15 15:43:38,905 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=41840.0, ans=0.125 +2024-01-15 15:43:47,264 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=41873.333333333336, ans=0.2 +2024-01-15 15:44:02,059 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=41906.666666666664, ans=0.1 +2024-01-15 15:44:20,230 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=41940.0, ans=0.125 +2024-01-15 15:44:36,115 INFO [train.py:994] (0/2) Epoch 15, batch 800, loss[loss=0.1849, simple_loss=0.2544, pruned_loss=0.05765, over 24416.00 frames. ], tot_loss[loss=0.1886, simple_loss=0.2615, pruned_loss=0.05785, over 4742276.27 frames. ], batch size: 153, lr: 2.45e-02, grad_scale: 32.0 +2024-01-15 15:44:39,781 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=42006.666666666664, ans=0.1 +2024-01-15 15:44:54,407 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.842e+02 2.395e+02 2.798e+02 3.335e+02 4.587e+02, threshold=5.596e+02, percent-clipped=0.0 +2024-01-15 15:45:06,632 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=42073.333333333336, ans=0.125 +2024-01-15 15:45:25,124 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-15.pt +2024-01-15 15:45:49,555 INFO [train.py:994] (0/2) Epoch 16, batch 0, loss[loss=0.1783, simple_loss=0.2578, pruned_loss=0.04935, over 24454.00 frames. ], tot_loss[loss=0.1783, simple_loss=0.2578, pruned_loss=0.04935, over 24454.00 frames. 
], batch size: 267, lr: 2.39e-02, grad_scale: 32.0 +2024-01-15 15:45:49,556 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 15:46:09,916 INFO [train.py:1026] (0/2) Epoch 16, validation: loss=0.1735, simple_loss=0.26, pruned_loss=0.04348, over 1622729.00 frames. +2024-01-15 15:46:09,916 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 15:46:16,907 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=42150.0, ans=0.2 +2024-01-15 15:47:08,342 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=42283.333333333336, ans=0.125 +2024-01-15 15:47:12,898 INFO [train.py:994] (0/2) Epoch 16, batch 50, loss[loss=0.1899, simple_loss=0.2634, pruned_loss=0.0582, over 24479.00 frames. ], tot_loss[loss=0.1858, simple_loss=0.2582, pruned_loss=0.05665, over 1087444.58 frames. ], batch size: 267, lr: 2.39e-02, grad_scale: 32.0 +2024-01-15 15:47:14,438 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=42316.666666666664, ans=0.0 +2024-01-15 15:47:15,786 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=42316.666666666664, ans=0.125 +2024-01-15 15:47:41,132 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.795e+02 2.203e+02 2.392e+02 2.946e+02 6.436e+02, threshold=4.783e+02, percent-clipped=1.0 +2024-01-15 15:47:41,739 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.60 vs. limit=22.5 +2024-01-15 15:47:45,074 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=42383.333333333336, ans=0.2 +2024-01-15 15:48:02,951 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=42450.0, ans=0.001641304347826087 +2024-01-15 15:48:14,462 INFO [train.py:994] (0/2) Epoch 16, batch 100, loss[loss=0.1928, simple_loss=0.2679, pruned_loss=0.05883, over 24390.00 frames. ], tot_loss[loss=0.1858, simple_loss=0.2588, pruned_loss=0.05643, over 1908073.10 frames. ], batch size: 298, lr: 2.39e-02, grad_scale: 32.0 +2024-01-15 15:48:38,284 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=42550.0, ans=0.125 +2024-01-15 15:49:17,584 INFO [train.py:994] (0/2) Epoch 16, batch 150, loss[loss=0.1873, simple_loss=0.2644, pruned_loss=0.05511, over 24452.00 frames. ], tot_loss[loss=0.1847, simple_loss=0.258, pruned_loss=0.05572, over 2546638.09 frames. 
], batch size: 222, lr: 2.38e-02, grad_scale: 32.0 +2024-01-15 15:49:30,163 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=42683.333333333336, ans=0.125 +2024-01-15 15:49:31,308 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=42683.333333333336, ans=0.125 +2024-01-15 15:49:36,260 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=42683.333333333336, ans=0.125 +2024-01-15 15:49:40,694 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=42683.333333333336, ans=0.0 +2024-01-15 15:49:44,580 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.11 vs. limit=15.0 +2024-01-15 15:49:46,977 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.824e+02 2.275e+02 2.640e+02 3.236e+02 4.692e+02, threshold=5.280e+02, percent-clipped=0.0 +2024-01-15 15:49:53,259 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.min_abs, batch_count=42716.666666666664, ans=0.5 +2024-01-15 15:49:56,861 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 15:50:20,922 INFO [train.py:994] (0/2) Epoch 16, batch 200, loss[loss=0.1866, simple_loss=0.2626, pruned_loss=0.05534, over 24437.00 frames. ], tot_loss[loss=0.1854, simple_loss=0.2589, pruned_loss=0.0559, over 3064673.06 frames. ], batch size: 250, lr: 2.38e-02, grad_scale: 32.0 +2024-01-15 15:50:27,369 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=42816.666666666664, ans=0.1 +2024-01-15 15:51:04,580 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=42916.666666666664, ans=0.2 +2024-01-15 15:51:14,511 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.38 vs. limit=22.5 +2024-01-15 15:51:15,385 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=42950.0, ans=0.1 +2024-01-15 15:51:23,944 INFO [train.py:994] (0/2) Epoch 16, batch 250, loss[loss=0.1961, simple_loss=0.2706, pruned_loss=0.06082, over 24482.00 frames. ], tot_loss[loss=0.186, simple_loss=0.2597, pruned_loss=0.05613, over 3447266.59 frames. ], batch size: 181, lr: 2.38e-02, grad_scale: 32.0 +2024-01-15 15:51:25,375 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=42983.333333333336, ans=0.125 +2024-01-15 15:51:33,708 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.99 vs. 
limit=10.0 +2024-01-15 15:51:39,144 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=43016.666666666664, ans=0.125 +2024-01-15 15:51:46,447 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=43016.666666666664, ans=0.125 +2024-01-15 15:51:52,662 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.712e+02 2.217e+02 2.657e+02 3.438e+02 6.026e+02, threshold=5.313e+02, percent-clipped=3.0 +2024-01-15 15:52:06,194 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=43083.333333333336, ans=0.125 +2024-01-15 15:52:09,977 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.14 vs. limit=10.0 +2024-01-15 15:52:15,181 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=43116.666666666664, ans=0.125 +2024-01-15 15:52:19,987 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=43116.666666666664, ans=0.001496376811594204 +2024-01-15 15:52:21,143 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=43116.666666666664, ans=0.125 +2024-01-15 15:52:24,752 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=43150.0, ans=0.125 +2024-01-15 15:52:25,572 INFO [train.py:994] (0/2) Epoch 16, batch 300, loss[loss=0.1951, simple_loss=0.2744, pruned_loss=0.05797, over 22504.00 frames. ], tot_loss[loss=0.1864, simple_loss=0.2601, pruned_loss=0.05639, over 3747001.21 frames. ], batch size: 357, lr: 2.37e-02, grad_scale: 16.0 +2024-01-15 15:53:23,883 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=43283.333333333336, ans=0.125 +2024-01-15 15:53:27,088 INFO [train.py:994] (0/2) Epoch 16, batch 350, loss[loss=0.1935, simple_loss=0.267, pruned_loss=0.06003, over 24303.00 frames. ], tot_loss[loss=0.1856, simple_loss=0.2595, pruned_loss=0.05587, over 3993910.27 frames. ], batch size: 285, lr: 2.37e-02, grad_scale: 16.0 +2024-01-15 15:53:31,825 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.32 vs. limit=15.0 +2024-01-15 15:53:34,530 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=43316.666666666664, ans=0.0 +2024-01-15 15:53:44,258 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.76 vs. limit=15.0 +2024-01-15 15:53:56,849 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.814e+02 2.130e+02 2.322e+02 2.639e+02 6.815e+02, threshold=4.644e+02, percent-clipped=1.0 +2024-01-15 15:54:14,130 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=8.69 vs. 
limit=15.0 +2024-01-15 15:54:19,622 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=43450.0, ans=0.125 +2024-01-15 15:54:30,241 INFO [train.py:994] (0/2) Epoch 16, batch 400, loss[loss=0.1974, simple_loss=0.272, pruned_loss=0.06137, over 23882.00 frames. ], tot_loss[loss=0.1851, simple_loss=0.2589, pruned_loss=0.05565, over 4166090.29 frames. ], batch size: 328, lr: 2.37e-02, grad_scale: 32.0 +2024-01-15 15:54:34,207 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=43483.333333333336, ans=0.125 +2024-01-15 15:54:52,115 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=43516.666666666664, ans=0.0014094202898550737 +2024-01-15 15:55:07,976 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=43583.333333333336, ans=0.125 +2024-01-15 15:55:10,329 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 15:55:11,570 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=43583.333333333336, ans=0.125 +2024-01-15 15:55:20,102 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=43616.666666666664, ans=0.125 +2024-01-15 15:55:30,814 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=43616.666666666664, ans=0.0 +2024-01-15 15:55:30,835 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=43616.666666666664, ans=0.0013876811594202912 +2024-01-15 15:55:32,859 INFO [train.py:994] (0/2) Epoch 16, batch 450, loss[loss=0.1781, simple_loss=0.2574, pruned_loss=0.04936, over 24498.00 frames. ], tot_loss[loss=0.1848, simple_loss=0.2587, pruned_loss=0.05547, over 4311329.06 frames. ], batch size: 181, lr: 2.36e-02, grad_scale: 16.0 +2024-01-15 15:55:36,664 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=43650.0, ans=0.125 +2024-01-15 15:55:59,809 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.57 vs. limit=15.0 +2024-01-15 15:56:03,983 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.746e+02 2.141e+02 2.320e+02 2.673e+02 3.975e+02, threshold=4.639e+02, percent-clipped=0.0 +2024-01-15 15:56:16,329 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=43750.0, ans=0.125 +2024-01-15 15:56:22,413 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.10 vs. limit=22.5 +2024-01-15 15:56:35,178 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.39 vs. limit=15.0 +2024-01-15 15:56:35,844 INFO [train.py:994] (0/2) Epoch 16, batch 500, loss[loss=0.1829, simple_loss=0.2585, pruned_loss=0.05368, over 24612.00 frames. ], tot_loss[loss=0.1848, simple_loss=0.2588, pruned_loss=0.05533, over 4431446.02 frames. 
], batch size: 199, lr: 2.36e-02, grad_scale: 16.0 +2024-01-15 15:56:40,854 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.56 vs. limit=15.0 +2024-01-15 15:56:53,757 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten.whitening_limit, batch_count=43850.0, ans=22.5 +2024-01-15 15:56:59,446 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.57 vs. limit=22.5 +2024-01-15 15:57:10,348 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.min_positive, batch_count=43883.333333333336, ans=0.05 +2024-01-15 15:57:38,744 INFO [train.py:994] (0/2) Epoch 16, batch 550, loss[loss=0.1684, simple_loss=0.2417, pruned_loss=0.04755, over 24382.00 frames. ], tot_loss[loss=0.1847, simple_loss=0.2589, pruned_loss=0.05525, over 4527433.23 frames. ], batch size: 153, lr: 2.36e-02, grad_scale: 16.0 +2024-01-15 15:57:39,077 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=43983.333333333336, ans=0.1 +2024-01-15 15:57:39,081 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=43983.333333333336, ans=0.5 +2024-01-15 15:57:42,834 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.37 vs. limit=15.0 +2024-01-15 15:58:00,870 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=44016.666666666664, ans=0.1 +2024-01-15 15:58:04,807 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.24 vs. limit=15.0 +2024-01-15 15:58:10,241 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.806e+02 2.090e+02 2.410e+02 2.866e+02 4.101e+02, threshold=4.819e+02, percent-clipped=0.0 +2024-01-15 15:58:13,597 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=44050.0, ans=0.125 +2024-01-15 15:58:16,947 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=44083.333333333336, ans=0.125 +2024-01-15 15:58:26,871 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=44083.333333333336, ans=0.125 +2024-01-15 15:58:34,273 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=44116.666666666664, ans=0.2 +2024-01-15 15:58:37,184 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=44116.666666666664, ans=0.2 +2024-01-15 15:58:41,791 INFO [train.py:994] (0/2) Epoch 16, batch 600, loss[loss=0.1791, simple_loss=0.254, pruned_loss=0.05208, over 24375.00 frames. ], tot_loss[loss=0.1841, simple_loss=0.2585, pruned_loss=0.05486, over 4588723.78 frames. 
], batch size: 153, lr: 2.35e-02, grad_scale: 16.0 +2024-01-15 15:58:48,635 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=44150.0, ans=0.125 +2024-01-15 15:58:51,084 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=44150.0, ans=0.125 +2024-01-15 15:59:08,973 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.38 vs. limit=15.0 +2024-01-15 15:59:17,983 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=18.67 vs. limit=22.5 +2024-01-15 15:59:22,110 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=44250.0, ans=0.0012499999999999994 +2024-01-15 15:59:39,669 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=44283.333333333336, ans=0.125 +2024-01-15 15:59:41,977 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=44283.333333333336, ans=0.125 +2024-01-15 15:59:44,810 INFO [train.py:994] (0/2) Epoch 16, batch 650, loss[loss=0.1866, simple_loss=0.258, pruned_loss=0.05759, over 24356.00 frames. ], tot_loss[loss=0.1837, simple_loss=0.2579, pruned_loss=0.0547, over 4633528.99 frames. ], batch size: 159, lr: 2.35e-02, grad_scale: 16.0 +2024-01-15 15:59:49,947 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=44316.666666666664, ans=0.125 +2024-01-15 15:59:50,269 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.03 vs. limit=15.0 +2024-01-15 15:59:55,511 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:00:13,731 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:00:13,855 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=44383.333333333336, ans=0.09899494936611666 +2024-01-15 16:00:15,762 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.747e+02 2.324e+02 2.634e+02 2.982e+02 4.280e+02, threshold=5.267e+02, percent-clipped=0.0 +2024-01-15 16:00:26,082 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=44416.666666666664, ans=0.1 +2024-01-15 16:00:32,485 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=9.49 vs. limit=15.0 +2024-01-15 16:00:47,147 INFO [train.py:994] (0/2) Epoch 16, batch 700, loss[loss=0.1814, simple_loss=0.2584, pruned_loss=0.0522, over 24502.00 frames. ], tot_loss[loss=0.1837, simple_loss=0.2579, pruned_loss=0.05478, over 4675722.00 frames. 
], batch size: 187, lr: 2.35e-02, grad_scale: 16.0 +2024-01-15 16:00:55,578 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=44483.333333333336, ans=0.2 +2024-01-15 16:01:06,860 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=44516.666666666664, ans=0.125 +2024-01-15 16:01:12,481 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=44550.0, ans=0.125 +2024-01-15 16:01:31,978 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.17 vs. limit=15.0 +2024-01-15 16:01:48,821 INFO [train.py:994] (0/2) Epoch 16, batch 750, loss[loss=0.1738, simple_loss=0.256, pruned_loss=0.04579, over 24358.00 frames. ], tot_loss[loss=0.1837, simple_loss=0.2579, pruned_loss=0.0547, over 4712210.05 frames. ], batch size: 275, lr: 2.34e-02, grad_scale: 16.0 +2024-01-15 16:01:53,236 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=44650.0, ans=0.2 +2024-01-15 16:01:54,732 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=44650.0, ans=0.0 +2024-01-15 16:02:09,764 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=15.64 vs. limit=22.5 +2024-01-15 16:02:16,633 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=44716.666666666664, ans=0.2 +2024-01-15 16:02:20,416 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.844e+02 2.335e+02 2.612e+02 3.145e+02 7.520e+02, threshold=5.223e+02, percent-clipped=1.0 +2024-01-15 16:02:21,881 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=44716.666666666664, ans=0.025 +2024-01-15 16:02:21,927 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=44716.666666666664, ans=0.125 +2024-01-15 16:02:48,963 INFO [train.py:994] (0/2) Epoch 16, batch 800, loss[loss=0.184, simple_loss=0.261, pruned_loss=0.05347, over 24430.00 frames. ], tot_loss[loss=0.1832, simple_loss=0.2574, pruned_loss=0.05447, over 4730141.52 frames. ], batch size: 258, lr: 2.34e-02, grad_scale: 32.0 +2024-01-15 16:03:09,886 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=44850.0, ans=0.125 +2024-01-15 16:03:13,477 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=44883.333333333336, ans=0.05 +2024-01-15 16:03:28,530 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=44916.666666666664, ans=0.125 +2024-01-15 16:03:38,228 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-16.pt +2024-01-15 16:04:02,760 INFO [train.py:994] (0/2) Epoch 17, batch 0, loss[loss=0.1901, simple_loss=0.2634, pruned_loss=0.05835, over 24522.00 frames. ], tot_loss[loss=0.1901, simple_loss=0.2634, pruned_loss=0.05835, over 24522.00 frames. 
], batch size: 229, lr: 2.28e-02, grad_scale: 32.0 +2024-01-15 16:04:02,761 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 16:04:10,378 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.4.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([2.6429, 3.7059, 3.8161, 3.7968], device='cuda:0') +2024-01-15 16:04:22,811 INFO [train.py:1026] (0/2) Epoch 17, validation: loss=0.1728, simple_loss=0.2592, pruned_loss=0.04315, over 1622729.00 frames. +2024-01-15 16:04:22,812 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 16:04:33,702 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=44993.333333333336, ans=0.125 +2024-01-15 16:04:44,297 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=44993.333333333336, ans=0.125 +2024-01-15 16:04:45,499 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=45026.666666666664, ans=0.1 +2024-01-15 16:04:48,143 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=45026.666666666664, ans=0.1 +2024-01-15 16:05:02,233 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.297e+02 2.683e+02 3.319e+02 6.335e+02, threshold=5.366e+02, percent-clipped=3.0 +2024-01-15 16:05:03,947 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.22 vs. limit=22.5 +2024-01-15 16:05:07,384 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=45060.0, ans=0.125 +2024-01-15 16:05:08,663 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=45060.0, ans=0.0 +2024-01-15 16:05:25,076 INFO [train.py:994] (0/2) Epoch 17, batch 50, loss[loss=0.181, simple_loss=0.2578, pruned_loss=0.05213, over 24345.00 frames. ], tot_loss[loss=0.18, simple_loss=0.2534, pruned_loss=0.05325, over 1087246.90 frames. ], batch size: 285, lr: 2.28e-02, grad_scale: 32.0 +2024-01-15 16:05:26,616 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=45126.666666666664, ans=0.2 +2024-01-15 16:05:35,976 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=45160.0, ans=0.0 +2024-01-15 16:05:39,628 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=45160.0, ans=0.125 +2024-01-15 16:05:45,170 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=45160.0, ans=0.0010521739130434776 +2024-01-15 16:05:50,293 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=3.49 vs. 
limit=12.0 +2024-01-15 16:05:56,990 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=45193.333333333336, ans=0.1 +2024-01-15 16:05:57,004 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=45193.333333333336, ans=0.0 +2024-01-15 16:05:58,396 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.64 vs. limit=10.0 +2024-01-15 16:06:09,031 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=45226.666666666664, ans=0.1 +2024-01-15 16:06:18,407 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=45260.0, ans=0.1 +2024-01-15 16:06:26,381 INFO [train.py:994] (0/2) Epoch 17, batch 100, loss[loss=0.1755, simple_loss=0.2513, pruned_loss=0.04987, over 24520.00 frames. ], tot_loss[loss=0.1821, simple_loss=0.2557, pruned_loss=0.05426, over 1917860.55 frames. ], batch size: 229, lr: 2.28e-02, grad_scale: 32.0 +2024-01-15 16:06:42,299 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=45326.666666666664, ans=0.0 +2024-01-15 16:07:07,394 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.680e+02 2.239e+02 2.511e+02 2.975e+02 4.256e+02, threshold=5.022e+02, percent-clipped=0.0 +2024-01-15 16:07:20,798 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:07:31,164 INFO [train.py:994] (0/2) Epoch 17, batch 150, loss[loss=0.1818, simple_loss=0.2616, pruned_loss=0.05101, over 24478.00 frames. ], tot_loss[loss=0.1821, simple_loss=0.2558, pruned_loss=0.05419, over 2555783.55 frames. ], batch size: 187, lr: 2.27e-02, grad_scale: 32.0 +2024-01-15 16:07:38,504 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=45460.0, ans=0.00098695652173913 +2024-01-15 16:08:06,586 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.17 vs. limit=6.0 +2024-01-15 16:08:17,626 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=45560.0, ans=0.0009652173913043474 +2024-01-15 16:08:24,231 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=45593.333333333336, ans=0.05 +2024-01-15 16:08:26,724 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=45593.333333333336, ans=0.95 +2024-01-15 16:08:30,153 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=45593.333333333336, ans=0.0 +2024-01-15 16:08:33,428 INFO [train.py:994] (0/2) Epoch 17, batch 200, loss[loss=0.1869, simple_loss=0.2602, pruned_loss=0.05683, over 24322.00 frames. ], tot_loss[loss=0.1822, simple_loss=0.2564, pruned_loss=0.05396, over 3057727.22 frames. 
], batch size: 298, lr: 2.27e-02, grad_scale: 32.0 +2024-01-15 16:08:38,009 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=45626.666666666664, ans=0.125 +2024-01-15 16:08:47,973 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=45660.0, ans=0.95 +2024-01-15 16:08:49,219 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=45660.0, ans=0.2 +2024-01-15 16:08:50,365 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=45660.0, ans=0.0 +2024-01-15 16:09:05,492 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.46 vs. limit=15.0 +2024-01-15 16:09:12,514 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.37 vs. limit=15.0 +2024-01-15 16:09:14,143 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.236e+02 2.656e+02 3.265e+02 4.776e+02, threshold=5.312e+02, percent-clipped=0.0 +2024-01-15 16:09:14,544 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=45726.666666666664, ans=0.125 +2024-01-15 16:09:17,973 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=45726.666666666664, ans=0.035 +2024-01-15 16:09:31,801 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=45760.0, ans=0.125 +2024-01-15 16:09:35,434 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=45793.333333333336, ans=0.125 +2024-01-15 16:09:36,313 INFO [train.py:994] (0/2) Epoch 17, batch 250, loss[loss=0.1817, simple_loss=0.2591, pruned_loss=0.05214, over 24478.00 frames. ], tot_loss[loss=0.1824, simple_loss=0.2568, pruned_loss=0.05398, over 3447159.23 frames. ], batch size: 267, lr: 2.27e-02, grad_scale: 32.0 +2024-01-15 16:09:40,835 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=45793.333333333336, ans=0.1 +2024-01-15 16:10:28,681 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.37 vs. limit=10.0 +2024-01-15 16:10:39,418 INFO [train.py:994] (0/2) Epoch 17, batch 300, loss[loss=0.184, simple_loss=0.2607, pruned_loss=0.05362, over 23810.00 frames. ], tot_loss[loss=0.1817, simple_loss=0.2561, pruned_loss=0.05371, over 3750582.50 frames. 
], batch size: 328, lr: 2.26e-02, grad_scale: 32.0 +2024-01-15 16:10:44,397 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=45960.0, ans=0.0 +2024-01-15 16:10:55,940 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=45993.333333333336, ans=0.2 +2024-01-15 16:11:14,337 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=46026.666666666664, ans=0.125 +2024-01-15 16:11:19,285 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.788e+02 2.220e+02 2.502e+02 2.982e+02 4.452e+02, threshold=5.005e+02, percent-clipped=0.0 +2024-01-15 16:11:27,317 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=46060.0, ans=0.0 +2024-01-15 16:11:39,253 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=46093.333333333336, ans=0.2 +2024-01-15 16:11:42,062 INFO [train.py:994] (0/2) Epoch 17, batch 350, loss[loss=0.1925, simple_loss=0.2655, pruned_loss=0.05976, over 24232.00 frames. ], tot_loss[loss=0.1823, simple_loss=0.2568, pruned_loss=0.05388, over 3996573.32 frames. ], batch size: 311, lr: 2.26e-02, grad_scale: 32.0 +2024-01-15 16:11:47,219 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=46126.666666666664, ans=0.125 +2024-01-15 16:11:50,360 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=46126.666666666664, ans=0.0008420289855072469 +2024-01-15 16:11:59,890 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=46160.0, ans=0.0008347826086956521 +2024-01-15 16:12:08,807 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=46193.333333333336, ans=0.2 +2024-01-15 16:12:36,848 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=46260.0, ans=0.125 +2024-01-15 16:12:43,665 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=46293.333333333336, ans=0.1 +2024-01-15 16:12:45,164 INFO [train.py:994] (0/2) Epoch 17, batch 400, loss[loss=0.1556, simple_loss=0.2343, pruned_loss=0.03848, over 24188.00 frames. ], tot_loss[loss=0.1817, simple_loss=0.2564, pruned_loss=0.05352, over 4187097.10 frames. 
], batch size: 140, lr: 2.26e-02, grad_scale: 32.0 +2024-01-15 16:12:45,431 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:12:46,593 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=46293.333333333336, ans=0.1 +2024-01-15 16:12:54,943 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=46293.333333333336, ans=0.1 +2024-01-15 16:13:16,896 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=46360.0, ans=0.125 +2024-01-15 16:13:25,828 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.732e+02 2.173e+02 2.430e+02 2.709e+02 4.420e+02, threshold=4.860e+02, percent-clipped=0.0 +2024-01-15 16:13:26,143 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=46393.333333333336, ans=0.2 +2024-01-15 16:13:39,920 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=46426.666666666664, ans=0.125 +2024-01-15 16:13:48,339 INFO [train.py:994] (0/2) Epoch 17, batch 450, loss[loss=0.1518, simple_loss=0.2199, pruned_loss=0.04185, over 23568.00 frames. ], tot_loss[loss=0.1813, simple_loss=0.2557, pruned_loss=0.0534, over 4325424.51 frames. ], batch size: 119, lr: 2.26e-02, grad_scale: 32.0 +2024-01-15 16:13:52,429 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=8.45 vs. limit=15.0 +2024-01-15 16:13:59,271 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=46493.333333333336, ans=0.1 +2024-01-15 16:14:01,139 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=46493.333333333336, ans=0.1 +2024-01-15 16:14:14,992 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=46526.666666666664, ans=0.0 +2024-01-15 16:14:23,497 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.19 vs. limit=10.0 +2024-01-15 16:14:50,457 INFO [train.py:994] (0/2) Epoch 17, batch 500, loss[loss=0.1849, simple_loss=0.2618, pruned_loss=0.05397, over 24403.00 frames. ], tot_loss[loss=0.1814, simple_loss=0.2559, pruned_loss=0.05349, over 4437593.14 frames. ], batch size: 275, lr: 2.25e-02, grad_scale: 32.0 +2024-01-15 16:14:53,673 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=46626.666666666664, ans=0.0 +2024-01-15 16:15:03,308 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.63 vs. limit=15.0 +2024-01-15 16:15:07,505 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=46660.0, ans=0.0007260869565217393 +2024-01-15 16:15:16,190 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.03 vs. 
limit=12.0 +2024-01-15 16:15:28,471 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=46726.666666666664, ans=0.125 +2024-01-15 16:15:31,133 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.769e+02 2.099e+02 2.328e+02 2.632e+02 3.698e+02, threshold=4.655e+02, percent-clipped=0.0 +2024-01-15 16:15:38,258 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.14 vs. limit=6.0 +2024-01-15 16:15:41,184 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.balancer.prob, batch_count=46760.0, ans=0.125 +2024-01-15 16:15:53,386 INFO [train.py:994] (0/2) Epoch 17, batch 550, loss[loss=0.1909, simple_loss=0.2664, pruned_loss=0.0577, over 24525.00 frames. ], tot_loss[loss=0.1808, simple_loss=0.2552, pruned_loss=0.05318, over 4519359.65 frames. ], batch size: 193, lr: 2.25e-02, grad_scale: 32.0 +2024-01-15 16:15:59,045 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=46793.333333333336, ans=0.125 +2024-01-15 16:16:07,472 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=46826.666666666664, ans=10.0 +2024-01-15 16:16:18,797 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=46860.0, ans=0.2 +2024-01-15 16:16:26,538 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=46860.0, ans=0.0 +2024-01-15 16:16:37,137 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=46893.333333333336, ans=0.1 +2024-01-15 16:16:47,325 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.88 vs. limit=15.0 +2024-01-15 16:16:51,719 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=46926.666666666664, ans=0.0 +2024-01-15 16:16:56,180 INFO [train.py:994] (0/2) Epoch 17, batch 600, loss[loss=0.1733, simple_loss=0.2506, pruned_loss=0.04795, over 24445.00 frames. ], tot_loss[loss=0.1807, simple_loss=0.2553, pruned_loss=0.05307, over 4584182.69 frames. ], batch size: 250, lr: 2.25e-02, grad_scale: 32.0 +2024-01-15 16:17:06,632 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=46960.0, ans=0.125 +2024-01-15 16:17:12,459 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=46993.333333333336, ans=0.0006536231884057952 +2024-01-15 16:17:36,251 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.786e+02 2.170e+02 2.416e+02 2.985e+02 4.371e+02, threshold=4.832e+02, percent-clipped=0.0 +2024-01-15 16:17:49,297 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:17:58,329 INFO [train.py:994] (0/2) Epoch 17, batch 650, loss[loss=0.1925, simple_loss=0.2682, pruned_loss=0.05844, over 24345.00 frames. ], tot_loss[loss=0.1811, simple_loss=0.2556, pruned_loss=0.05327, over 4640944.00 frames. 
], batch size: 298, lr: 2.24e-02, grad_scale: 32.0 +2024-01-15 16:18:11,915 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=47160.0, ans=0.0006173913043478266 +2024-01-15 16:18:16,973 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.29 vs. limit=10.0 +2024-01-15 16:18:31,997 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=47193.333333333336, ans=0.125 +2024-01-15 16:19:01,098 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.26 vs. limit=6.0 +2024-01-15 16:19:01,747 INFO [train.py:994] (0/2) Epoch 17, batch 700, loss[loss=0.1835, simple_loss=0.2587, pruned_loss=0.05411, over 24491.00 frames. ], tot_loss[loss=0.1799, simple_loss=0.2544, pruned_loss=0.05272, over 4670611.53 frames. ], batch size: 181, lr: 2.24e-02, grad_scale: 32.0 +2024-01-15 16:19:02,073 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:19:10,422 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.5.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:19:16,870 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.25 vs. limit=15.0 +2024-01-15 16:19:34,363 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:19:41,207 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.873e+02 2.097e+02 2.365e+02 2.867e+02 5.331e+02, threshold=4.730e+02, percent-clipped=3.0 +2024-01-15 16:20:04,058 INFO [train.py:994] (0/2) Epoch 17, batch 750, loss[loss=0.182, simple_loss=0.2604, pruned_loss=0.05187, over 24533.00 frames. ], tot_loss[loss=0.1804, simple_loss=0.2549, pruned_loss=0.05293, over 4695041.66 frames. ], batch size: 193, lr: 2.24e-02, grad_scale: 32.0 +2024-01-15 16:20:04,355 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=47460.0, ans=0.09899494936611666 +2024-01-15 16:20:17,765 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=47493.333333333336, ans=0.0 +2024-01-15 16:20:24,702 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=47493.333333333336, ans=0.1 +2024-01-15 16:20:30,845 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=47526.666666666664, ans=0.1 +2024-01-15 16:21:04,613 INFO [train.py:994] (0/2) Epoch 17, batch 800, loss[loss=0.1929, simple_loss=0.2719, pruned_loss=0.05696, over 23882.00 frames. ], tot_loss[loss=0.1797, simple_loss=0.2544, pruned_loss=0.05254, over 4718998.73 frames. ], batch size: 328, lr: 2.23e-02, grad_scale: 32.0 +2024-01-15 16:21:09,589 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=12.13 vs. 
limit=15.0 +2024-01-15 16:21:39,799 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=47726.666666666664, ans=0.09899494936611666 +2024-01-15 16:21:42,981 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.821e+02 2.266e+02 2.660e+02 3.077e+02 4.335e+02, threshold=5.320e+02, percent-clipped=0.0 +2024-01-15 16:21:43,278 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=47726.666666666664, ans=0.125 +2024-01-15 16:21:53,873 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-17.pt +2024-01-15 16:22:17,930 INFO [train.py:994] (0/2) Epoch 18, batch 0, loss[loss=0.1995, simple_loss=0.2718, pruned_loss=0.06358, over 24620.00 frames. ], tot_loss[loss=0.1995, simple_loss=0.2718, pruned_loss=0.06358, over 24620.00 frames. ], batch size: 199, lr: 2.18e-02, grad_scale: 32.0 +2024-01-15 16:22:17,931 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 16:22:38,460 INFO [train.py:1026] (0/2) Epoch 18, validation: loss=0.1722, simple_loss=0.258, pruned_loss=0.04322, over 1622729.00 frames. +2024-01-15 16:22:38,460 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 16:22:42,399 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=47770.0, ans=0.125 +2024-01-15 16:22:51,796 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=47803.333333333336, ans=0.125 +2024-01-15 16:22:54,579 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.89 vs. limit=15.0 +2024-01-15 16:23:20,655 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=6.50 vs. limit=12.0 +2024-01-15 16:23:32,277 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=47903.333333333336, ans=0.2 +2024-01-15 16:23:34,096 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=11.66 vs. limit=15.0 +2024-01-15 16:23:34,607 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=47903.333333333336, ans=0.125 +2024-01-15 16:23:37,990 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=47903.333333333336, ans=0.125 +2024-01-15 16:23:40,130 INFO [train.py:994] (0/2) Epoch 18, batch 50, loss[loss=0.1838, simple_loss=0.259, pruned_loss=0.05436, over 24532.00 frames. ], tot_loss[loss=0.1748, simple_loss=0.2497, pruned_loss=0.04993, over 1088974.54 frames. 
], batch size: 193, lr: 2.18e-02, grad_scale: 32.0 +2024-01-15 16:23:59,594 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=47970.0, ans=0.0 +2024-01-15 16:24:04,842 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=48003.333333333336, ans=0.125 +2024-01-15 16:24:11,953 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=48003.333333333336, ans=0.0004340579710144926 +2024-01-15 16:24:22,696 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=48036.666666666664, ans=0.125 +2024-01-15 16:24:30,477 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.809e+02 2.223e+02 2.437e+02 2.877e+02 4.793e+02, threshold=4.874e+02, percent-clipped=0.0 +2024-01-15 16:24:33,600 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.22 vs. limit=15.0 +2024-01-15 16:24:42,527 INFO [train.py:994] (0/2) Epoch 18, batch 100, loss[loss=0.2037, simple_loss=0.2793, pruned_loss=0.06403, over 22576.00 frames. ], tot_loss[loss=0.1772, simple_loss=0.2519, pruned_loss=0.05128, over 1907812.23 frames. ], batch size: 357, lr: 2.18e-02, grad_scale: 32.0 +2024-01-15 16:24:44,110 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:24:57,495 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=48136.666666666664, ans=0.1 +2024-01-15 16:25:15,796 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=48170.0, ans=0.125 +2024-01-15 16:25:32,649 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.01 vs. limit=6.0 +2024-01-15 16:25:46,403 INFO [train.py:994] (0/2) Epoch 18, batch 150, loss[loss=0.179, simple_loss=0.256, pruned_loss=0.05105, over 24338.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.2519, pruned_loss=0.05095, over 2554332.09 frames. ], batch size: 285, lr: 2.17e-02, grad_scale: 32.0 +2024-01-15 16:25:56,886 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.13 vs. limit=22.5 +2024-01-15 16:26:02,094 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=48303.333333333336, ans=0.00036884057971014496 +2024-01-15 16:26:09,318 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=48303.333333333336, ans=0.2 +2024-01-15 16:26:12,958 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=48336.666666666664, ans=0.0003615942028985502 +2024-01-15 16:26:14,431 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.53 vs. limit=15.0 +2024-01-15 16:26:15,796 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.02 vs. 
limit=15.0 +2024-01-15 16:26:27,282 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.26 vs. limit=15.0 +2024-01-15 16:26:34,128 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:26:36,205 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.829e+02 2.101e+02 2.311e+02 2.686e+02 3.880e+02, threshold=4.622e+02, percent-clipped=0.0 +2024-01-15 16:26:37,846 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=48403.333333333336, ans=0.125 +2024-01-15 16:26:42,670 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.39 vs. limit=15.0 +2024-01-15 16:26:43,702 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=48403.333333333336, ans=0.0 +2024-01-15 16:26:48,280 INFO [train.py:994] (0/2) Epoch 18, batch 200, loss[loss=0.1787, simple_loss=0.2534, pruned_loss=0.05198, over 24509.00 frames. ], tot_loss[loss=0.1776, simple_loss=0.2527, pruned_loss=0.05128, over 3057119.28 frames. ], batch size: 210, lr: 2.17e-02, grad_scale: 32.0 +2024-01-15 16:27:08,729 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=48470.0, ans=0.0 +2024-01-15 16:27:28,241 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=48536.666666666664, ans=0.125 +2024-01-15 16:27:36,628 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=48536.666666666664, ans=0.125 +2024-01-15 16:27:43,702 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=48570.0, ans=0.00031086956521739206 +2024-01-15 16:27:53,077 INFO [train.py:994] (0/2) Epoch 18, batch 250, loss[loss=0.1741, simple_loss=0.2485, pruned_loss=0.04982, over 24479.00 frames. ], tot_loss[loss=0.1776, simple_loss=0.2529, pruned_loss=0.05118, over 3448731.52 frames. ], batch size: 267, lr: 2.17e-02, grad_scale: 32.0 +2024-01-15 16:28:18,942 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=48670.0, ans=0.1 +2024-01-15 16:28:21,493 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=48670.0, ans=0.0002891304347826095 +2024-01-15 16:28:30,080 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:28:38,775 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.25 vs. 
limit=15.0 +2024-01-15 16:28:44,350 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.879e+02 2.119e+02 2.331e+02 2.735e+02 4.692e+02, threshold=4.662e+02, percent-clipped=2.0 +2024-01-15 16:28:49,585 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=48736.666666666664, ans=0.125 +2024-01-15 16:28:49,599 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=48736.666666666664, ans=0.125 +2024-01-15 16:28:51,967 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=48736.666666666664, ans=0.125 +2024-01-15 16:28:56,403 INFO [train.py:994] (0/2) Epoch 18, batch 300, loss[loss=0.1768, simple_loss=0.2521, pruned_loss=0.05081, over 24351.00 frames. ], tot_loss[loss=0.1772, simple_loss=0.2525, pruned_loss=0.0509, over 3748255.65 frames. ], batch size: 275, lr: 2.16e-02, grad_scale: 32.0 +2024-01-15 16:29:05,739 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.79 vs. limit=22.5 +2024-01-15 16:29:23,523 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=48836.666666666664, ans=0.2 +2024-01-15 16:29:24,612 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=48836.666666666664, ans=0.125 +2024-01-15 16:29:45,977 INFO [scaling.py:1022] (0/2) Whitening: name=encoder_embed.out_whiten, num_groups=1, num_channels=192, metric=7.47 vs. limit=8.0 +2024-01-15 16:29:55,151 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=48903.333333333336, ans=0.00023840579710144964 +2024-01-15 16:29:56,528 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.71 vs. limit=10.0 +2024-01-15 16:30:00,013 INFO [train.py:994] (0/2) Epoch 18, batch 350, loss[loss=0.1925, simple_loss=0.2713, pruned_loss=0.05687, over 22351.00 frames. ], tot_loss[loss=0.1766, simple_loss=0.2519, pruned_loss=0.05066, over 3978296.73 frames. ], batch size: 357, lr: 2.16e-02, grad_scale: 32.0 +2024-01-15 16:30:07,021 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=48936.666666666664, ans=0.125 +2024-01-15 16:30:18,271 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.24 vs. 
limit=15.0 +2024-01-15 16:30:28,421 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=49003.333333333336, ans=0.0 +2024-01-15 16:30:48,945 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=49036.666666666664, ans=0.0 +2024-01-15 16:30:50,934 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.733e+02 2.195e+02 2.456e+02 2.912e+02 4.150e+02, threshold=4.913e+02, percent-clipped=0.0 +2024-01-15 16:30:58,637 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=49070.0, ans=0.2 +2024-01-15 16:31:01,132 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=49070.0, ans=0.2 +2024-01-15 16:31:04,295 INFO [train.py:994] (0/2) Epoch 18, batch 400, loss[loss=0.1658, simple_loss=0.2435, pruned_loss=0.04407, over 24426.00 frames. ], tot_loss[loss=0.1769, simple_loss=0.252, pruned_loss=0.05091, over 4146932.99 frames. ], batch size: 250, lr: 2.16e-02, grad_scale: 32.0 +2024-01-15 16:31:10,717 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=49103.333333333336, ans=0.2 +2024-01-15 16:31:30,756 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.21 vs. limit=22.5 +2024-01-15 16:32:07,761 INFO [train.py:994] (0/2) Epoch 18, batch 450, loss[loss=0.184, simple_loss=0.2583, pruned_loss=0.05484, over 24438.00 frames. ], tot_loss[loss=0.1774, simple_loss=0.2524, pruned_loss=0.05116, over 4289236.48 frames. ], batch size: 250, lr: 2.16e-02, grad_scale: 32.0 +2024-01-15 16:32:19,070 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=49270.0, ans=0.035 +2024-01-15 16:32:24,107 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=49303.333333333336, ans=0.125 +2024-01-15 16:32:33,196 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.89 vs. limit=22.5 +2024-01-15 16:32:51,570 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=49370.0, ans=0.125 +2024-01-15 16:32:59,685 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.700e+02 2.073e+02 2.302e+02 2.664e+02 3.981e+02, threshold=4.605e+02, percent-clipped=0.0 +2024-01-15 16:33:07,869 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=49403.333333333336, ans=0.0 +2024-01-15 16:33:07,968 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=49403.333333333336, ans=0.2 +2024-01-15 16:33:13,183 INFO [train.py:994] (0/2) Epoch 18, batch 500, loss[loss=0.1773, simple_loss=0.2565, pruned_loss=0.0491, over 24496.00 frames. ], tot_loss[loss=0.1775, simple_loss=0.2528, pruned_loss=0.0511, over 4409702.35 frames. 
], batch size: 216, lr: 2.15e-02, grad_scale: 32.0 +2024-01-15 16:33:15,927 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:33:30,337 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=49470.0, ans=0.125 +2024-01-15 16:33:41,339 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=49503.333333333336, ans=0.125 +2024-01-15 16:33:47,327 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=49503.333333333336, ans=0.0 +2024-01-15 16:34:04,454 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=18.32 vs. limit=22.5 +2024-01-15 16:34:15,824 INFO [train.py:994] (0/2) Epoch 18, batch 550, loss[loss=0.1722, simple_loss=0.2474, pruned_loss=0.04855, over 24440.00 frames. ], tot_loss[loss=0.1773, simple_loss=0.2525, pruned_loss=0.05105, over 4497570.05 frames. ], batch size: 159, lr: 2.15e-02, grad_scale: 16.0 +2024-01-15 16:34:23,236 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=49603.333333333336, ans=0.125 +2024-01-15 16:34:59,902 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=49703.333333333336, ans=0.1 +2024-01-15 16:35:05,306 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.49 vs. limit=22.5 +2024-01-15 16:35:07,107 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=49736.666666666664, ans=5.724637681159446e-05 +2024-01-15 16:35:08,022 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.782e+02 2.215e+02 2.472e+02 3.117e+02 4.918e+02, threshold=4.944e+02, percent-clipped=1.0 +2024-01-15 16:35:10,682 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=49736.666666666664, ans=0.125 +2024-01-15 16:35:19,120 INFO [train.py:994] (0/2) Epoch 18, batch 600, loss[loss=0.186, simple_loss=0.259, pruned_loss=0.05647, over 24487.00 frames. ], tot_loss[loss=0.1777, simple_loss=0.2528, pruned_loss=0.05131, over 4565801.70 frames. ], batch size: 216, lr: 2.15e-02, grad_scale: 16.0 +2024-01-15 16:35:31,196 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=49803.333333333336, ans=0.0 +2024-01-15 16:36:09,981 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=49903.333333333336, ans=0.125 +2024-01-15 16:36:20,392 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=49903.333333333336, ans=0.2 +2024-01-15 16:36:22,532 INFO [train.py:994] (0/2) Epoch 18, batch 650, loss[loss=0.1727, simple_loss=0.2469, pruned_loss=0.04921, over 24452.00 frames. ], tot_loss[loss=0.1772, simple_loss=0.2523, pruned_loss=0.05107, over 4607837.96 frames. 
], batch size: 170, lr: 2.14e-02, grad_scale: 8.0 +2024-01-15 16:36:24,074 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=49936.666666666664, ans=0.2 +2024-01-15 16:36:44,971 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.58 vs. limit=22.5 +2024-01-15 16:36:46,299 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=49970.0, ans=0.0 +2024-01-15 16:36:48,772 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=50003.333333333336, ans=0.125 +2024-01-15 16:37:15,978 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 2.026e+02 2.388e+02 2.731e+02 3.877e+02, threshold=4.775e+02, percent-clipped=0.0 +2024-01-15 16:37:25,636 INFO [train.py:994] (0/2) Epoch 18, batch 700, loss[loss=0.1721, simple_loss=0.2437, pruned_loss=0.05025, over 24492.00 frames. ], tot_loss[loss=0.1768, simple_loss=0.252, pruned_loss=0.05077, over 4642368.68 frames. ], batch size: 267, lr: 2.14e-02, grad_scale: 8.0 +2024-01-15 16:37:48,365 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.31 vs. limit=6.0 +2024-01-15 16:38:01,333 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.19 vs. limit=15.0 +2024-01-15 16:38:22,559 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.30 vs. limit=15.0 +2024-01-15 16:38:24,248 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=50236.666666666664, ans=0.1 +2024-01-15 16:38:29,429 INFO [train.py:994] (0/2) Epoch 18, batch 750, loss[loss=0.1686, simple_loss=0.2454, pruned_loss=0.04594, over 24479.00 frames. ], tot_loss[loss=0.1764, simple_loss=0.2516, pruned_loss=0.05059, over 4680746.65 frames. ], batch size: 222, lr: 2.14e-02, grad_scale: 8.0 +2024-01-15 16:38:51,790 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.61 vs. 
limit=15.0 +2024-01-15 16:38:54,629 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=50336.666666666664, ans=0.0 +2024-01-15 16:38:55,907 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=50336.666666666664, ans=0.125 +2024-01-15 16:39:01,928 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=50336.666666666664, ans=0.0 +2024-01-15 16:39:05,349 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=50370.0, ans=0.1 +2024-01-15 16:39:20,544 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.077e+02 2.417e+02 3.053e+02 4.815e+02, threshold=4.834e+02, percent-clipped=1.0 +2024-01-15 16:39:22,977 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=50403.333333333336, ans=0.1 +2024-01-15 16:39:29,611 INFO [train.py:994] (0/2) Epoch 18, batch 800, loss[loss=0.1907, simple_loss=0.2617, pruned_loss=0.05989, over 24496.00 frames. ], tot_loss[loss=0.1761, simple_loss=0.2514, pruned_loss=0.05042, over 4715475.46 frames. ], batch size: 210, lr: 2.14e-02, grad_scale: 16.0 +2024-01-15 16:39:32,607 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=50436.666666666664, ans=0.2 +2024-01-15 16:39:35,251 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.99 vs. limit=15.0 +2024-01-15 16:40:10,183 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=50536.666666666664, ans=0.125 +2024-01-15 16:40:17,247 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=50570.0, ans=0.1 +2024-01-15 16:40:17,304 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=50570.0, ans=0.09899494936611666 +2024-01-15 16:40:19,224 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-18.pt +2024-01-15 16:40:42,950 INFO [train.py:994] (0/2) Epoch 19, batch 0, loss[loss=0.1779, simple_loss=0.2536, pruned_loss=0.05113, over 24350.00 frames. ], tot_loss[loss=0.1779, simple_loss=0.2536, pruned_loss=0.05113, over 24350.00 frames. ], batch size: 275, lr: 2.09e-02, grad_scale: 16.0 +2024-01-15 16:40:42,951 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 16:41:03,783 INFO [train.py:1026] (0/2) Epoch 19, validation: loss=0.17, simple_loss=0.2559, pruned_loss=0.0421, over 1622729.00 frames. 
+2024-01-15 16:41:03,784 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 16:41:04,096 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=50580.0, ans=0.2 +2024-01-15 16:41:14,589 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=50613.333333333336, ans=0.1 +2024-01-15 16:41:23,357 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=50613.333333333336, ans=0.125 +2024-01-15 16:41:27,341 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.18 vs. limit=15.0 +2024-01-15 16:41:44,931 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=50680.0, ans=0.1 +2024-01-15 16:41:48,407 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=50680.0, ans=0.125 +2024-01-15 16:41:55,700 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=8.54 vs. limit=15.0 +2024-01-15 16:41:59,608 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=50713.333333333336, ans=0.0 +2024-01-15 16:42:03,052 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=50713.333333333336, ans=0.1 +2024-01-15 16:42:05,124 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.800e+02 2.143e+02 2.478e+02 3.058e+02 5.421e+02, threshold=4.957e+02, percent-clipped=1.0 +2024-01-15 16:42:05,152 INFO [train.py:994] (0/2) Epoch 19, batch 50, loss[loss=0.1821, simple_loss=0.2548, pruned_loss=0.05474, over 24525.00 frames. ], tot_loss[loss=0.1714, simple_loss=0.246, pruned_loss=0.04835, over 1091647.57 frames. ], batch size: 165, lr: 2.08e-02, grad_scale: 16.0 +2024-01-15 16:42:24,532 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=50780.0, ans=0.125 +2024-01-15 16:42:25,751 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=50780.0, ans=0.0 +2024-01-15 16:42:31,770 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.95 vs. limit=12.0 +2024-01-15 16:42:39,302 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=50813.333333333336, ans=0.125 +2024-01-15 16:42:39,367 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:42:42,915 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=50846.666666666664, ans=0.0 +2024-01-15 16:43:07,603 INFO [train.py:994] (0/2) Epoch 19, batch 100, loss[loss=0.1799, simple_loss=0.2531, pruned_loss=0.05332, over 24578.00 frames. ], tot_loss[loss=0.1719, simple_loss=0.2472, pruned_loss=0.04827, over 1918490.02 frames. 
], batch size: 176, lr: 2.08e-02, grad_scale: 16.0 +2024-01-15 16:43:11,860 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.05 vs. limit=15.0 +2024-01-15 16:43:24,629 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=50946.666666666664, ans=0.0 +2024-01-15 16:43:26,403 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.59 vs. limit=15.0 +2024-01-15 16:43:31,488 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=50980.0, ans=6.0 +2024-01-15 16:43:32,261 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=50980.0, ans=0.2 +2024-01-15 16:43:48,317 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=51013.333333333336, ans=0.125 +2024-01-15 16:44:09,234 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.706e+02 2.155e+02 2.420e+02 2.780e+02 3.768e+02, threshold=4.840e+02, percent-clipped=0.0 +2024-01-15 16:44:09,267 INFO [train.py:994] (0/2) Epoch 19, batch 150, loss[loss=0.1699, simple_loss=0.2487, pruned_loss=0.04552, over 24407.00 frames. ], tot_loss[loss=0.1735, simple_loss=0.2488, pruned_loss=0.04914, over 2565233.67 frames. ], batch size: 258, lr: 2.08e-02, grad_scale: 16.0 +2024-01-15 16:44:19,303 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten.whitening_limit, batch_count=51080.0, ans=15.0 +2024-01-15 16:44:29,486 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:44:33,416 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.70 vs. limit=15.0 +2024-01-15 16:44:43,601 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=51146.666666666664, ans=0.1 +2024-01-15 16:44:49,161 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=51180.0, ans=0.05 +2024-01-15 16:44:53,255 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=51180.0, ans=0.1 +2024-01-15 16:44:54,702 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=6.81 vs. limit=10.0 +2024-01-15 16:44:55,460 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=51180.0, ans=0.0 +2024-01-15 16:45:07,310 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=51213.333333333336, ans=0.0 +2024-01-15 16:45:10,499 INFO [train.py:994] (0/2) Epoch 19, batch 200, loss[loss=0.1771, simple_loss=0.2568, pruned_loss=0.04872, over 24307.00 frames. ], tot_loss[loss=0.1736, simple_loss=0.2489, pruned_loss=0.04912, over 3056343.29 frames. 
], batch size: 285, lr: 2.08e-02, grad_scale: 16.0 +2024-01-15 16:45:16,265 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=51246.666666666664, ans=0.125 +2024-01-15 16:45:21,655 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=51246.666666666664, ans=0.0 +2024-01-15 16:45:22,898 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=51280.0, ans=0.125 +2024-01-15 16:45:53,649 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=51346.666666666664, ans=0.0 +2024-01-15 16:46:11,633 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=51380.0, ans=0.125 +2024-01-15 16:46:13,660 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.675e+02 2.155e+02 2.351e+02 2.778e+02 4.374e+02, threshold=4.701e+02, percent-clipped=0.0 +2024-01-15 16:46:13,688 INFO [train.py:994] (0/2) Epoch 19, batch 250, loss[loss=0.1943, simple_loss=0.2667, pruned_loss=0.06096, over 23919.00 frames. ], tot_loss[loss=0.1741, simple_loss=0.2495, pruned_loss=0.04939, over 3448996.52 frames. ], batch size: 328, lr: 2.07e-02, grad_scale: 16.0 +2024-01-15 16:46:18,710 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=51413.333333333336, ans=0.125 +2024-01-15 16:46:54,142 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=51513.333333333336, ans=0.0 +2024-01-15 16:46:57,776 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=51513.333333333336, ans=0.07 +2024-01-15 16:47:14,217 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=51580.0, ans=0.125 +2024-01-15 16:47:15,126 INFO [train.py:994] (0/2) Epoch 19, batch 300, loss[loss=0.1843, simple_loss=0.2626, pruned_loss=0.05304, over 24541.00 frames. ], tot_loss[loss=0.1736, simple_loss=0.2494, pruned_loss=0.04891, over 3760927.97 frames. ], batch size: 187, lr: 2.07e-02, grad_scale: 16.0 +2024-01-15 16:47:23,211 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.max_positive, batch_count=51580.0, ans=0.95 +2024-01-15 16:47:26,300 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=51580.0, ans=0.0 +2024-01-15 16:48:16,622 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=51746.666666666664, ans=0.2 +2024-01-15 16:48:17,479 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.797e+02 2.144e+02 2.394e+02 2.843e+02 3.932e+02, threshold=4.787e+02, percent-clipped=0.0 +2024-01-15 16:48:17,509 INFO [train.py:994] (0/2) Epoch 19, batch 350, loss[loss=0.1857, simple_loss=0.257, pruned_loss=0.05722, over 24569.00 frames. ], tot_loss[loss=0.1737, simple_loss=0.2495, pruned_loss=0.04898, over 3990934.22 frames. 
], batch size: 176, lr: 2.07e-02, grad_scale: 16.0 +2024-01-15 16:48:51,134 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer_ff2.min_abs, batch_count=51813.333333333336, ans=0.1 +2024-01-15 16:48:53,442 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=51846.666666666664, ans=0.125 +2024-01-15 16:49:03,186 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.19 vs. limit=15.0 +2024-01-15 16:49:12,311 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=51880.0, ans=0.035 +2024-01-15 16:49:19,265 INFO [train.py:994] (0/2) Epoch 19, batch 400, loss[loss=0.1434, simple_loss=0.2157, pruned_loss=0.03559, over 23501.00 frames. ], tot_loss[loss=0.1737, simple_loss=0.2497, pruned_loss=0.04887, over 4183343.73 frames. ], batch size: 119, lr: 2.07e-02, grad_scale: 32.0 +2024-01-15 16:49:19,613 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=51913.333333333336, ans=0.125 +2024-01-15 16:49:25,545 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=51913.333333333336, ans=0.125 +2024-01-15 16:49:33,884 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=51946.666666666664, ans=0.0 +2024-01-15 16:50:16,861 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=52046.666666666664, ans=0.125 +2024-01-15 16:50:21,894 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.790e+02 2.221e+02 2.607e+02 3.142e+02 4.730e+02, threshold=5.215e+02, percent-clipped=0.0 +2024-01-15 16:50:21,922 INFO [train.py:994] (0/2) Epoch 19, batch 450, loss[loss=0.1588, simple_loss=0.2324, pruned_loss=0.04258, over 24006.00 frames. ], tot_loss[loss=0.1734, simple_loss=0.2494, pruned_loss=0.04866, over 4329887.38 frames. ], batch size: 131, lr: 2.06e-02, grad_scale: 32.0 +2024-01-15 16:50:31,487 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=2.98 vs. limit=12.0 +2024-01-15 16:50:42,207 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.19 vs. limit=15.0 +2024-01-15 16:50:48,229 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=52146.666666666664, ans=0.125 +2024-01-15 16:50:48,272 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=52146.666666666664, ans=0.0 +2024-01-15 16:50:58,166 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=52180.0, ans=0.125 +2024-01-15 16:50:59,377 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=52180.0, ans=0.0 +2024-01-15 16:51:09,041 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.77 vs. 
limit=6.0 +2024-01-15 16:51:21,709 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=52213.333333333336, ans=0.025 +2024-01-15 16:51:23,723 INFO [train.py:994] (0/2) Epoch 19, batch 500, loss[loss=0.1739, simple_loss=0.2522, pruned_loss=0.04782, over 24458.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.2491, pruned_loss=0.04874, over 4428395.42 frames. ], batch size: 210, lr: 2.06e-02, grad_scale: 32.0 +2024-01-15 16:51:31,058 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=52246.666666666664, ans=0.035 +2024-01-15 16:51:36,175 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=14.68 vs. limit=15.0 +2024-01-15 16:51:48,485 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.18 vs. limit=22.5 +2024-01-15 16:51:53,864 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=17.19 vs. limit=22.5 +2024-01-15 16:52:06,864 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.min_positive, batch_count=52346.666666666664, ans=0.025 +2024-01-15 16:52:10,457 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=52346.666666666664, ans=0.0 +2024-01-15 16:52:19,236 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.24 vs. limit=15.0 +2024-01-15 16:52:20,050 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=52380.0, ans=0.1 +2024-01-15 16:52:25,452 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.709e+02 2.156e+02 2.463e+02 2.947e+02 4.843e+02, threshold=4.926e+02, percent-clipped=0.0 +2024-01-15 16:52:25,480 INFO [train.py:994] (0/2) Epoch 19, batch 550, loss[loss=0.1885, simple_loss=0.2612, pruned_loss=0.05791, over 24575.00 frames. ], tot_loss[loss=0.1733, simple_loss=0.2493, pruned_loss=0.04865, over 4510218.69 frames. ], batch size: 176, lr: 2.06e-02, grad_scale: 32.0 +2024-01-15 16:52:29,975 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=52413.333333333336, ans=0.125 +2024-01-15 16:52:34,369 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=52413.333333333336, ans=0.1 +2024-01-15 16:52:36,754 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=52413.333333333336, ans=0.0 +2024-01-15 16:52:44,969 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=52446.666666666664, ans=0.125 +2024-01-15 16:53:18,959 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.99 vs. limit=15.0 +2024-01-15 16:53:28,419 INFO [train.py:994] (0/2) Epoch 19, batch 600, loss[loss=0.1781, simple_loss=0.2457, pruned_loss=0.0553, over 24484.00 frames. ], tot_loss[loss=0.1728, simple_loss=0.2487, pruned_loss=0.04847, over 4569488.49 frames. 
], batch size: 165, lr: 2.05e-02, grad_scale: 32.0 +2024-01-15 16:53:28,989 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.24 vs. limit=15.0 +2024-01-15 16:53:37,105 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=52580.0, ans=0.1 +2024-01-15 16:53:40,008 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.45 vs. limit=6.0 +2024-01-15 16:53:45,038 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=52613.333333333336, ans=0.1 +2024-01-15 16:53:52,111 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 16:54:02,352 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=52646.666666666664, ans=0.025 +2024-01-15 16:54:10,033 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=52680.0, ans=0.1 +2024-01-15 16:54:30,297 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.813e+02 2.052e+02 2.320e+02 2.671e+02 5.625e+02, threshold=4.639e+02, percent-clipped=1.0 +2024-01-15 16:54:30,327 INFO [train.py:994] (0/2) Epoch 19, batch 650, loss[loss=0.1806, simple_loss=0.2593, pruned_loss=0.05091, over 23840.00 frames. ], tot_loss[loss=0.1727, simple_loss=0.2485, pruned_loss=0.04846, over 4620160.19 frames. ], batch size: 328, lr: 2.05e-02, grad_scale: 32.0 +2024-01-15 16:54:44,347 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=52780.0, ans=0.125 +2024-01-15 16:54:46,690 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.29 vs. limit=15.0 +2024-01-15 16:55:02,300 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=4.10 vs. limit=10.0 +2024-01-15 16:55:04,416 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer2.prob, batch_count=52813.333333333336, ans=0.125 +2024-01-15 16:55:04,457 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=52813.333333333336, ans=0.125 +2024-01-15 16:55:19,384 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=52880.0, ans=0.025 +2024-01-15 16:55:32,692 INFO [train.py:994] (0/2) Epoch 19, batch 700, loss[loss=0.1745, simple_loss=0.256, pruned_loss=0.0465, over 24219.00 frames. ], tot_loss[loss=0.1722, simple_loss=0.2482, pruned_loss=0.04809, over 4662383.27 frames. 
], batch size: 311, lr: 2.05e-02, grad_scale: 32.0 +2024-01-15 16:55:47,971 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=52946.666666666664, ans=0.0 +2024-01-15 16:55:50,403 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=52946.666666666664, ans=0.125 +2024-01-15 16:55:56,903 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=52980.0, ans=0.2 +2024-01-15 16:55:58,076 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer_ff2.min_abs, batch_count=52980.0, ans=0.1 +2024-01-15 16:56:35,020 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.688e+02 2.044e+02 2.314e+02 2.797e+02 4.143e+02, threshold=4.628e+02, percent-clipped=0.0 +2024-01-15 16:56:35,048 INFO [train.py:994] (0/2) Epoch 19, batch 750, loss[loss=0.176, simple_loss=0.2571, pruned_loss=0.0475, over 24482.00 frames. ], tot_loss[loss=0.1722, simple_loss=0.2482, pruned_loss=0.04809, over 4700421.76 frames. ], batch size: 187, lr: 2.05e-02, grad_scale: 32.0 +2024-01-15 16:56:43,011 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=53080.0, ans=0.125 +2024-01-15 16:57:16,005 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=53180.0, ans=0.125 +2024-01-15 16:57:35,450 INFO [train.py:994] (0/2) Epoch 19, batch 800, loss[loss=0.1879, simple_loss=0.2622, pruned_loss=0.05676, over 24565.00 frames. ], tot_loss[loss=0.1726, simple_loss=0.2486, pruned_loss=0.04823, over 4729749.83 frames. ], batch size: 176, lr: 2.04e-02, grad_scale: 32.0 +2024-01-15 16:57:37,230 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=13.09 vs. limit=15.0 +2024-01-15 16:57:50,338 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=53280.0, ans=0.125 +2024-01-15 16:57:50,661 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.26 vs. limit=15.0 +2024-01-15 16:58:04,298 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/checkpoint-16000.pt +2024-01-15 16:58:10,610 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=53313.333333333336, ans=0.125 +2024-01-15 16:58:14,086 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=53346.666666666664, ans=0.0 +2024-01-15 16:58:16,303 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=53346.666666666664, ans=0.125 +2024-01-15 16:58:27,180 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-19.pt +2024-01-15 16:58:50,926 INFO [train.py:994] (0/2) Epoch 20, batch 0, loss[loss=0.1688, simple_loss=0.2435, pruned_loss=0.04705, over 24457.00 frames. ], tot_loss[loss=0.1688, simple_loss=0.2435, pruned_loss=0.04705, over 24457.00 frames. 
], batch size: 170, lr: 2.00e-02, grad_scale: 32.0 +2024-01-15 16:58:50,927 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 16:59:11,886 INFO [train.py:1026] (0/2) Epoch 20, validation: loss=0.1698, simple_loss=0.2555, pruned_loss=0.042, over 1622729.00 frames. +2024-01-15 16:59:11,887 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 16:59:21,416 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.76 vs. limit=10.0 +2024-01-15 16:59:21,977 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.223e+02 2.554e+02 3.029e+02 4.256e+02, threshold=5.107e+02, percent-clipped=0.0 +2024-01-15 16:59:24,820 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=53423.333333333336, ans=0.125 +2024-01-15 16:59:27,076 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=53423.333333333336, ans=0.125 +2024-01-15 16:59:57,884 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=53490.0, ans=0.1 +2024-01-15 17:00:06,269 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=53523.333333333336, ans=0.125 +2024-01-15 17:00:07,834 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten.whitening_limit, batch_count=53523.333333333336, ans=15.0 +2024-01-15 17:00:14,347 INFO [train.py:994] (0/2) Epoch 20, batch 50, loss[loss=0.1715, simple_loss=0.2473, pruned_loss=0.04782, over 24376.00 frames. ], tot_loss[loss=0.1684, simple_loss=0.2439, pruned_loss=0.04648, over 1086914.58 frames. ], batch size: 159, lr: 2.00e-02, grad_scale: 32.0 +2024-01-15 17:00:22,875 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=53556.666666666664, ans=0.125 +2024-01-15 17:00:48,255 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.12 vs. limit=10.0 +2024-01-15 17:01:04,569 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=53690.0, ans=0.125 +2024-01-15 17:01:16,004 INFO [train.py:994] (0/2) Epoch 20, batch 100, loss[loss=0.1596, simple_loss=0.2409, pruned_loss=0.03919, over 24343.00 frames. ], tot_loss[loss=0.1702, simple_loss=0.2458, pruned_loss=0.04726, over 1914816.99 frames. 
], batch size: 147, lr: 1.99e-02, grad_scale: 32.0 +2024-01-15 17:01:26,642 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.665e+02 1.987e+02 2.287e+02 2.655e+02 4.191e+02, threshold=4.574e+02, percent-clipped=0.0 +2024-01-15 17:01:38,882 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=53756.666666666664, ans=0.125 +2024-01-15 17:01:46,599 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=53790.0, ans=0.0 +2024-01-15 17:02:00,024 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=53823.333333333336, ans=0.125 +2024-01-15 17:02:03,549 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=53823.333333333336, ans=0.125 +2024-01-15 17:02:05,851 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=53856.666666666664, ans=0.125 +2024-01-15 17:02:18,014 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:02:18,107 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=53890.0, ans=0.125 +2024-01-15 17:02:18,895 INFO [train.py:994] (0/2) Epoch 20, batch 150, loss[loss=0.1952, simple_loss=0.2765, pruned_loss=0.05699, over 22350.00 frames. ], tot_loss[loss=0.1709, simple_loss=0.2468, pruned_loss=0.04752, over 2563915.79 frames. ], batch size: 357, lr: 1.99e-02, grad_scale: 32.0 +2024-01-15 17:02:23,887 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=53890.0, ans=0.125 +2024-01-15 17:02:23,936 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=53890.0, ans=0.04949747468305833 +2024-01-15 17:02:25,128 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=53890.0, ans=0.1 +2024-01-15 17:02:47,783 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=53956.666666666664, ans=0.1 +2024-01-15 17:02:59,516 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=53990.0, ans=0.07 +2024-01-15 17:03:13,352 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=54023.333333333336, ans=0.0 +2024-01-15 17:03:14,824 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.34 vs. limit=15.0 +2024-01-15 17:03:21,487 INFO [train.py:994] (0/2) Epoch 20, batch 200, loss[loss=0.1841, simple_loss=0.2615, pruned_loss=0.05339, over 24493.00 frames. ], tot_loss[loss=0.171, simple_loss=0.2468, pruned_loss=0.04761, over 3053587.80 frames. 
], batch size: 210, lr: 1.99e-02, grad_scale: 32.0 +2024-01-15 17:03:31,624 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.717e+02 2.121e+02 2.376e+02 2.791e+02 4.349e+02, threshold=4.752e+02, percent-clipped=0.0 +2024-01-15 17:03:32,311 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=3.84 vs. limit=12.0 +2024-01-15 17:03:53,052 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=54123.333333333336, ans=0.125 +2024-01-15 17:03:53,519 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.98 vs. limit=22.5 +2024-01-15 17:04:05,401 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=54156.666666666664, ans=0.125 +2024-01-15 17:04:07,794 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:04:09,046 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=54156.666666666664, ans=0.125 +2024-01-15 17:04:20,515 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=54190.0, ans=0.2 +2024-01-15 17:04:23,732 INFO [train.py:994] (0/2) Epoch 20, batch 250, loss[loss=0.1778, simple_loss=0.2507, pruned_loss=0.05245, over 24603.00 frames. ], tot_loss[loss=0.171, simple_loss=0.2472, pruned_loss=0.04733, over 3454753.35 frames. ], batch size: 199, lr: 1.99e-02, grad_scale: 16.0 +2024-01-15 17:04:31,906 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=54223.333333333336, ans=0.125 +2024-01-15 17:04:42,921 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.01 vs. limit=15.0 +2024-01-15 17:04:57,672 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.18 vs. limit=10.0 +2024-01-15 17:05:00,831 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:05:05,473 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=54323.333333333336, ans=0.0 +2024-01-15 17:05:09,812 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=54323.333333333336, ans=0.125 +2024-01-15 17:05:10,975 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=54323.333333333336, ans=10.0 +2024-01-15 17:05:27,042 INFO [train.py:994] (0/2) Epoch 20, batch 300, loss[loss=0.1797, simple_loss=0.2573, pruned_loss=0.05107, over 24510.00 frames. ], tot_loss[loss=0.1703, simple_loss=0.2465, pruned_loss=0.04701, over 3747382.14 frames. 
], batch size: 193, lr: 1.98e-02, grad_scale: 16.0 +2024-01-15 17:05:30,868 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=54390.0, ans=0.2 +2024-01-15 17:05:38,418 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.684e+02 2.137e+02 2.346e+02 2.815e+02 4.642e+02, threshold=4.692e+02, percent-clipped=0.0 +2024-01-15 17:05:53,381 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=54456.666666666664, ans=0.0 +2024-01-15 17:06:29,552 INFO [train.py:994] (0/2) Epoch 20, batch 350, loss[loss=0.1862, simple_loss=0.2675, pruned_loss=0.05248, over 24239.00 frames. ], tot_loss[loss=0.1703, simple_loss=0.2468, pruned_loss=0.0469, over 3991060.97 frames. ], batch size: 311, lr: 1.98e-02, grad_scale: 8.0 +2024-01-15 17:07:05,975 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=19.23 vs. limit=15.0 +2024-01-15 17:07:13,554 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer2.prob, batch_count=54656.666666666664, ans=0.125 +2024-01-15 17:07:32,047 INFO [train.py:994] (0/2) Epoch 20, batch 400, loss[loss=0.1547, simple_loss=0.2361, pruned_loss=0.03668, over 24216.00 frames. ], tot_loss[loss=0.1705, simple_loss=0.247, pruned_loss=0.04697, over 4171053.08 frames. ], batch size: 140, lr: 1.98e-02, grad_scale: 16.0 +2024-01-15 17:07:38,097 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.08 vs. limit=10.0 +2024-01-15 17:07:45,033 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.720e+02 1.981e+02 2.188e+02 2.566e+02 3.832e+02, threshold=4.376e+02, percent-clipped=0.0 +2024-01-15 17:07:53,496 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=54756.666666666664, ans=0.0 +2024-01-15 17:07:57,486 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.42 vs. limit=15.0 +2024-01-15 17:07:59,336 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=54790.0, ans=0.95 +2024-01-15 17:08:26,959 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=4.19 vs. limit=15.0 +2024-01-15 17:08:34,671 INFO [train.py:994] (0/2) Epoch 20, batch 450, loss[loss=0.1922, simple_loss=0.2692, pruned_loss=0.05766, over 22416.00 frames. ], tot_loss[loss=0.1701, simple_loss=0.2466, pruned_loss=0.04684, over 4312028.42 frames. 
], batch size: 358, lr: 1.98e-02, grad_scale: 16.0 +2024-01-15 17:08:44,598 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=54890.0, ans=10.0 +2024-01-15 17:08:54,169 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=54923.333333333336, ans=0.125 +2024-01-15 17:08:59,468 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=54956.666666666664, ans=0.1 +2024-01-15 17:09:05,917 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=54956.666666666664, ans=0.1 +2024-01-15 17:09:14,443 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.14 vs. limit=15.0 +2024-01-15 17:09:21,840 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=54990.0, ans=0.125 +2024-01-15 17:09:36,355 INFO [train.py:994] (0/2) Epoch 20, batch 500, loss[loss=0.169, simple_loss=0.2454, pruned_loss=0.04633, over 24530.00 frames. ], tot_loss[loss=0.17, simple_loss=0.2464, pruned_loss=0.04678, over 4417465.03 frames. ], batch size: 236, lr: 1.97e-02, grad_scale: 16.0 +2024-01-15 17:09:49,082 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.745e+02 2.194e+02 2.583e+02 3.181e+02 4.072e+02, threshold=5.167e+02, percent-clipped=0.0 +2024-01-15 17:09:49,598 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=3.60 vs. limit=12.0 +2024-01-15 17:10:08,268 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.34 vs. limit=6.0 +2024-01-15 17:10:37,936 INFO [train.py:994] (0/2) Epoch 20, batch 550, loss[loss=0.1739, simple_loss=0.2455, pruned_loss=0.0512, over 24392.00 frames. ], tot_loss[loss=0.1692, simple_loss=0.2455, pruned_loss=0.04647, over 4499727.14 frames. ], batch size: 159, lr: 1.97e-02, grad_scale: 16.0 +2024-01-15 17:11:31,649 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=55356.666666666664, ans=0.125 +2024-01-15 17:11:37,177 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=55356.666666666664, ans=0.125 +2024-01-15 17:11:41,597 INFO [train.py:994] (0/2) Epoch 20, batch 600, loss[loss=0.1706, simple_loss=0.2487, pruned_loss=0.04623, over 24537.00 frames. ], tot_loss[loss=0.1692, simple_loss=0.2457, pruned_loss=0.04639, over 4569967.96 frames. ], batch size: 193, lr: 1.97e-02, grad_scale: 16.0 +2024-01-15 17:11:49,607 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=16.69 vs. 
limit=22.5 +2024-01-15 17:11:53,430 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.775e+02 2.054e+02 2.216e+02 2.564e+02 3.399e+02, threshold=4.432e+02, percent-clipped=0.0 +2024-01-15 17:12:00,293 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=55423.333333333336, ans=0.2 +2024-01-15 17:12:07,936 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=55456.666666666664, ans=0.1 +2024-01-15 17:12:35,245 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=55523.333333333336, ans=0.0 +2024-01-15 17:12:43,229 INFO [train.py:994] (0/2) Epoch 20, batch 650, loss[loss=0.1757, simple_loss=0.2506, pruned_loss=0.05042, over 24363.00 frames. ], tot_loss[loss=0.1691, simple_loss=0.2457, pruned_loss=0.04627, over 4631479.86 frames. ], batch size: 153, lr: 1.97e-02, grad_scale: 16.0 +2024-01-15 17:12:45,664 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=55556.666666666664, ans=0.0 +2024-01-15 17:12:47,585 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=55556.666666666664, ans=0.1 +2024-01-15 17:12:50,101 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=55556.666666666664, ans=0.1 +2024-01-15 17:13:32,523 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=55690.0, ans=0.0 +2024-01-15 17:13:38,327 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=55690.0, ans=0.1 +2024-01-15 17:13:43,731 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.95 vs. limit=15.0 +2024-01-15 17:13:46,242 INFO [train.py:994] (0/2) Epoch 20, batch 700, loss[loss=0.1678, simple_loss=0.2461, pruned_loss=0.0448, over 24513.00 frames. ], tot_loss[loss=0.1694, simple_loss=0.246, pruned_loss=0.04635, over 4656144.31 frames. ], batch size: 210, lr: 1.96e-02, grad_scale: 16.0 +2024-01-15 17:13:58,089 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.842e+02 2.109e+02 2.299e+02 2.928e+02 4.254e+02, threshold=4.598e+02, percent-clipped=0.0 +2024-01-15 17:14:20,623 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=55790.0, ans=0.125 +2024-01-15 17:14:31,881 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=55823.333333333336, ans=0.05 +2024-01-15 17:14:40,726 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:14:43,037 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=55856.666666666664, ans=0.0 +2024-01-15 17:14:47,520 INFO [train.py:994] (0/2) Epoch 20, batch 750, loss[loss=0.1546, simple_loss=0.2373, pruned_loss=0.03594, over 24404.00 frames. ], tot_loss[loss=0.1695, simple_loss=0.246, pruned_loss=0.04648, over 4692062.80 frames. 
], batch size: 258, lr: 1.96e-02, grad_scale: 16.0 +2024-01-15 17:14:47,770 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=55890.0, ans=0.125 +2024-01-15 17:15:10,552 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=55923.333333333336, ans=0.0 +2024-01-15 17:15:14,146 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=55956.666666666664, ans=0.0 +2024-01-15 17:15:14,233 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=55956.666666666664, ans=0.0 +2024-01-15 17:15:17,702 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=55956.666666666664, ans=0.0 +2024-01-15 17:15:24,156 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=55990.0, ans=0.1 +2024-01-15 17:15:41,855 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=56023.333333333336, ans=0.1 +2024-01-15 17:15:48,433 INFO [train.py:994] (0/2) Epoch 20, batch 800, loss[loss=0.1648, simple_loss=0.2425, pruned_loss=0.04355, over 24406.00 frames. ], tot_loss[loss=0.1691, simple_loss=0.2458, pruned_loss=0.04626, over 4724914.44 frames. ], batch size: 258, lr: 1.96e-02, grad_scale: 32.0 +2024-01-15 17:15:49,714 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=56056.666666666664, ans=0.09899494936611666 +2024-01-15 17:15:59,789 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.756e+02 2.072e+02 2.375e+02 2.936e+02 6.285e+02, threshold=4.751e+02, percent-clipped=2.0 +2024-01-15 17:16:08,593 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=56090.0, ans=0.125 +2024-01-15 17:16:10,176 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.52 vs. limit=6.0 +2024-01-15 17:16:19,421 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=56123.333333333336, ans=0.125 +2024-01-15 17:16:31,162 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.24 vs. limit=6.0 +2024-01-15 17:16:37,894 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-20.pt +2024-01-15 17:17:01,617 INFO [train.py:994] (0/2) Epoch 21, batch 0, loss[loss=0.1665, simple_loss=0.2316, pruned_loss=0.05071, over 23551.00 frames. ], tot_loss[loss=0.1665, simple_loss=0.2316, pruned_loss=0.05071, over 23551.00 frames. ], batch size: 119, lr: 1.92e-02, grad_scale: 32.0 +2024-01-15 17:17:01,618 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 17:17:22,200 INFO [train.py:1026] (0/2) Epoch 21, validation: loss=0.1697, simple_loss=0.2547, pruned_loss=0.04237, over 1622729.00 frames. 
+2024-01-15 17:17:22,201 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 17:17:35,958 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_abs, batch_count=56233.333333333336, ans=0.5 +2024-01-15 17:17:59,060 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.57 vs. limit=15.0 +2024-01-15 17:17:59,627 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=56300.0, ans=0.035 +2024-01-15 17:18:05,556 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=56300.0, ans=0.125 +2024-01-15 17:18:17,658 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=56333.333333333336, ans=0.1 +2024-01-15 17:18:24,438 INFO [train.py:994] (0/2) Epoch 21, batch 50, loss[loss=0.1755, simple_loss=0.2472, pruned_loss=0.0519, over 24511.00 frames. ], tot_loss[loss=0.1679, simple_loss=0.244, pruned_loss=0.04591, over 1091300.86 frames. ], batch size: 229, lr: 1.92e-02, grad_scale: 32.0 +2024-01-15 17:18:29,832 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.81 vs. limit=15.0 +2024-01-15 17:18:30,609 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=56366.666666666664, ans=0.125 +2024-01-15 17:18:34,795 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=56366.666666666664, ans=0.125 +2024-01-15 17:18:38,754 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.34 vs. limit=6.0 +2024-01-15 17:18:43,918 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=56400.0, ans=0.0 +2024-01-15 17:18:46,977 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 1.978e+02 2.192e+02 2.494e+02 4.583e+02, threshold=4.384e+02, percent-clipped=0.0 +2024-01-15 17:18:56,728 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=56433.333333333336, ans=0.1 +2024-01-15 17:19:13,131 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=7.04 vs. limit=10.0 +2024-01-15 17:19:27,060 INFO [train.py:994] (0/2) Epoch 21, batch 100, loss[loss=0.1664, simple_loss=0.2422, pruned_loss=0.0453, over 24216.00 frames. ], tot_loss[loss=0.1682, simple_loss=0.2449, pruned_loss=0.04581, over 1919420.69 frames. 
], batch size: 140, lr: 1.91e-02, grad_scale: 32.0 +2024-01-15 17:20:07,635 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=56633.333333333336, ans=0.125 +2024-01-15 17:20:13,548 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=56633.333333333336, ans=0.125 +2024-01-15 17:20:16,936 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=56666.666666666664, ans=0.125 +2024-01-15 17:20:24,441 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=12.46 vs. limit=15.0 +2024-01-15 17:20:29,199 INFO [train.py:994] (0/2) Epoch 21, batch 150, loss[loss=0.1738, simple_loss=0.2517, pruned_loss=0.04794, over 23825.00 frames. ], tot_loss[loss=0.1684, simple_loss=0.2456, pruned_loss=0.04554, over 2572493.54 frames. ], batch size: 328, lr: 1.91e-02, grad_scale: 32.0 +2024-01-15 17:20:29,504 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=56700.0, ans=0.1 +2024-01-15 17:20:40,479 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.36 vs. limit=6.0 +2024-01-15 17:20:50,852 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.737e+02 1.990e+02 2.168e+02 2.546e+02 3.705e+02, threshold=4.335e+02, percent-clipped=0.0 +2024-01-15 17:20:55,331 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=56766.666666666664, ans=0.1 +2024-01-15 17:20:55,375 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=56766.666666666664, ans=0.2 +2024-01-15 17:21:04,638 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=56800.0, ans=0.125 +2024-01-15 17:21:13,546 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=56800.0, ans=0.2 +2024-01-15 17:21:18,799 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=56833.333333333336, ans=0.2 +2024-01-15 17:21:21,096 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=56833.333333333336, ans=0.125 +2024-01-15 17:21:24,761 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=56833.333333333336, ans=0.2 +2024-01-15 17:21:30,253 INFO [train.py:994] (0/2) Epoch 21, batch 200, loss[loss=0.1817, simple_loss=0.2562, pruned_loss=0.05356, over 24448.00 frames. ], tot_loss[loss=0.1681, simple_loss=0.2451, pruned_loss=0.04552, over 3062983.03 frames. ], batch size: 181, lr: 1.91e-02, grad_scale: 32.0 +2024-01-15 17:21:32,331 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=56866.666666666664, ans=0.125 +2024-01-15 17:21:32,885 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.56 vs. 
limit=15.0 +2024-01-15 17:21:52,984 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer2.prob, batch_count=56900.0, ans=0.125 +2024-01-15 17:21:58,334 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=56933.333333333336, ans=0.05 +2024-01-15 17:22:05,234 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=56933.333333333336, ans=0.125 +2024-01-15 17:22:29,228 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_positive, batch_count=57000.0, ans=0.05 +2024-01-15 17:22:30,349 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=57000.0, ans=0.1 +2024-01-15 17:22:32,340 INFO [train.py:994] (0/2) Epoch 21, batch 250, loss[loss=0.1778, simple_loss=0.2543, pruned_loss=0.05068, over 24576.00 frames. ], tot_loss[loss=0.1678, simple_loss=0.2451, pruned_loss=0.04521, over 3445229.50 frames. ], batch size: 176, lr: 1.91e-02, grad_scale: 32.0 +2024-01-15 17:22:41,013 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.12 vs. limit=10.0 +2024-01-15 17:22:42,765 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=57033.333333333336, ans=0.125 +2024-01-15 17:22:49,546 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=57066.666666666664, ans=0.1 +2024-01-15 17:22:55,033 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.694e+02 2.081e+02 2.417e+02 3.180e+02 5.430e+02, threshold=4.834e+02, percent-clipped=4.0 +2024-01-15 17:22:58,819 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=57100.0, ans=0.125 +2024-01-15 17:23:34,517 INFO [train.py:994] (0/2) Epoch 21, batch 300, loss[loss=0.1795, simple_loss=0.2621, pruned_loss=0.04845, over 23970.00 frames. ], tot_loss[loss=0.1681, simple_loss=0.2453, pruned_loss=0.04541, over 3745593.81 frames. ], batch size: 328, lr: 1.90e-02, grad_scale: 32.0 +2024-01-15 17:23:58,720 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=57266.666666666664, ans=0.0 +2024-01-15 17:23:59,105 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.56 vs. limit=15.0 +2024-01-15 17:24:08,845 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=57266.666666666664, ans=0.125 +2024-01-15 17:24:13,495 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=57300.0, ans=0.0 +2024-01-15 17:24:37,383 INFO [train.py:994] (0/2) Epoch 21, batch 350, loss[loss=0.1709, simple_loss=0.2499, pruned_loss=0.04592, over 24496.00 frames. ], tot_loss[loss=0.1679, simple_loss=0.2451, pruned_loss=0.04533, over 3989883.62 frames. 
], batch size: 210, lr: 1.90e-02, grad_scale: 32.0 +2024-01-15 17:24:37,704 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=57366.666666666664, ans=0.125 +2024-01-15 17:24:47,762 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=57366.666666666664, ans=0.0 +2024-01-15 17:24:59,874 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.672e+02 2.143e+02 2.321e+02 2.694e+02 4.146e+02, threshold=4.643e+02, percent-clipped=0.0 +2024-01-15 17:25:28,933 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=57500.0, ans=0.04949747468305833 +2024-01-15 17:25:40,058 INFO [train.py:994] (0/2) Epoch 21, batch 400, loss[loss=0.1749, simple_loss=0.2546, pruned_loss=0.04762, over 24495.00 frames. ], tot_loss[loss=0.1683, simple_loss=0.2455, pruned_loss=0.04552, over 4171869.64 frames. ], batch size: 187, lr: 1.90e-02, grad_scale: 32.0 +2024-01-15 17:25:41,514 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=57533.333333333336, ans=0.2 +2024-01-15 17:25:41,728 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.05 vs. limit=12.0 +2024-01-15 17:25:46,295 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=57533.333333333336, ans=0.09899494936611666 +2024-01-15 17:25:48,680 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=57533.333333333336, ans=0.0 +2024-01-15 17:25:51,011 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=57566.666666666664, ans=0.0 +2024-01-15 17:25:56,406 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=57566.666666666664, ans=0.125 +2024-01-15 17:26:11,005 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=57600.0, ans=0.0 +2024-01-15 17:26:30,131 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=57666.666666666664, ans=0.2 +2024-01-15 17:26:32,826 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=21.87 vs. limit=22.5 +2024-01-15 17:26:37,629 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.45 vs. limit=15.0 +2024-01-15 17:26:41,751 INFO [train.py:994] (0/2) Epoch 21, batch 450, loss[loss=0.1694, simple_loss=0.2463, pruned_loss=0.04622, over 24608.00 frames. ], tot_loss[loss=0.1681, simple_loss=0.2452, pruned_loss=0.04548, over 4312903.76 frames. 
], batch size: 199, lr: 1.90e-02, grad_scale: 32.0 +2024-01-15 17:26:45,309 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=57700.0, ans=0.1 +2024-01-15 17:27:04,931 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.075e+02 2.378e+02 2.796e+02 3.944e+02, threshold=4.755e+02, percent-clipped=0.0 +2024-01-15 17:27:07,692 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=57766.666666666664, ans=0.025 +2024-01-15 17:27:40,742 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=57833.333333333336, ans=0.125 +2024-01-15 17:27:45,378 INFO [train.py:994] (0/2) Epoch 21, batch 500, loss[loss=0.1756, simple_loss=0.2501, pruned_loss=0.05051, over 24476.00 frames. ], tot_loss[loss=0.1677, simple_loss=0.2448, pruned_loss=0.04528, over 4427818.19 frames. ], batch size: 181, lr: 1.90e-02, grad_scale: 32.0 +2024-01-15 17:27:51,048 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=57866.666666666664, ans=0.125 +2024-01-15 17:27:52,570 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.57 vs. limit=10.0 +2024-01-15 17:27:57,095 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=5.15 vs. limit=12.0 +2024-01-15 17:27:58,176 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=57900.0, ans=0.125 +2024-01-15 17:28:13,373 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=57933.333333333336, ans=0.0 +2024-01-15 17:28:17,611 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=57933.333333333336, ans=0.2 +2024-01-15 17:28:18,770 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:28:34,373 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=57966.666666666664, ans=0.0 +2024-01-15 17:28:49,553 INFO [train.py:994] (0/2) Epoch 21, batch 550, loss[loss=0.1319, simple_loss=0.1906, pruned_loss=0.03663, over 19395.00 frames. ], tot_loss[loss=0.1674, simple_loss=0.2444, pruned_loss=0.04518, over 4501024.46 frames. 
], batch size: 83, lr: 1.89e-02, grad_scale: 32.0 +2024-01-15 17:29:06,375 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=58066.666666666664, ans=0.0 +2024-01-15 17:29:12,697 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.721e+02 2.030e+02 2.271e+02 2.828e+02 4.259e+02, threshold=4.542e+02, percent-clipped=0.0 +2024-01-15 17:29:14,269 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=58100.0, ans=0.125 +2024-01-15 17:29:22,167 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=58100.0, ans=0.125 +2024-01-15 17:29:48,079 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=58166.666666666664, ans=0.125 +2024-01-15 17:29:52,725 INFO [train.py:994] (0/2) Epoch 21, batch 600, loss[loss=0.1726, simple_loss=0.2504, pruned_loss=0.04739, over 24477.00 frames. ], tot_loss[loss=0.1672, simple_loss=0.2443, pruned_loss=0.0451, over 4576353.18 frames. ], batch size: 267, lr: 1.89e-02, grad_scale: 32.0 +2024-01-15 17:29:55,437 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=58200.0, ans=0.0 +2024-01-15 17:30:55,838 INFO [train.py:994] (0/2) Epoch 21, batch 650, loss[loss=0.1504, simple_loss=0.2171, pruned_loss=0.04182, over 23584.00 frames. ], tot_loss[loss=0.1666, simple_loss=0.2435, pruned_loss=0.04486, over 4608522.34 frames. ], batch size: 119, lr: 1.89e-02, grad_scale: 32.0 +2024-01-15 17:31:15,772 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=58400.0, ans=0.0 +2024-01-15 17:31:15,860 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=58400.0, ans=0.1 +2024-01-15 17:31:17,899 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 1.977e+02 2.187e+02 2.482e+02 3.523e+02, threshold=4.375e+02, percent-clipped=0.0 +2024-01-15 17:31:29,872 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=58433.333333333336, ans=0.125 +2024-01-15 17:31:31,022 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:31:51,116 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=58500.0, ans=0.125 +2024-01-15 17:31:52,447 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=58500.0, ans=0.125 +2024-01-15 17:31:53,576 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=58500.0, ans=0.0 +2024-01-15 17:31:58,765 INFO [train.py:994] (0/2) Epoch 21, batch 700, loss[loss=0.1691, simple_loss=0.2449, pruned_loss=0.04667, over 24464.00 frames. ], tot_loss[loss=0.1668, simple_loss=0.2439, pruned_loss=0.04486, over 4656840.43 frames. 
], batch size: 181, lr: 1.89e-02, grad_scale: 32.0 +2024-01-15 17:32:00,359 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=58533.333333333336, ans=0.125 +2024-01-15 17:32:07,678 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=58533.333333333336, ans=0.05 +2024-01-15 17:32:09,054 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=3.29 vs. limit=12.0 +2024-01-15 17:32:13,063 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=58566.666666666664, ans=0.2 +2024-01-15 17:32:24,349 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=58600.0, ans=0.125 +2024-01-15 17:32:26,656 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=58600.0, ans=0.125 +2024-01-15 17:33:00,267 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.55 vs. limit=22.5 +2024-01-15 17:33:00,895 INFO [train.py:994] (0/2) Epoch 21, batch 750, loss[loss=0.1712, simple_loss=0.2519, pruned_loss=0.04526, over 24480.00 frames. ], tot_loss[loss=0.1665, simple_loss=0.2436, pruned_loss=0.04473, over 4700939.45 frames. ], batch size: 267, lr: 1.88e-02, grad_scale: 32.0 +2024-01-15 17:33:04,210 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=16.52 vs. limit=22.5 +2024-01-15 17:33:11,329 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=58700.0, ans=0.1 +2024-01-15 17:33:19,454 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=3.02 vs. limit=12.0 +2024-01-15 17:33:22,778 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=58733.333333333336, ans=0.0 +2024-01-15 17:33:23,632 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.736e+02 2.081e+02 2.558e+02 3.319e+02 5.261e+02, threshold=5.116e+02, percent-clipped=3.0 +2024-01-15 17:33:26,166 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=58766.666666666664, ans=0.125 +2024-01-15 17:33:43,656 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.09 vs. limit=6.0 +2024-01-15 17:33:47,760 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=58800.0, ans=0.125 +2024-01-15 17:34:01,123 INFO [train.py:994] (0/2) Epoch 21, batch 800, loss[loss=0.1636, simple_loss=0.2418, pruned_loss=0.04277, over 24615.00 frames. ], tot_loss[loss=0.1659, simple_loss=0.243, pruned_loss=0.04439, over 4731834.98 frames. 
], batch size: 199, lr: 1.88e-02, grad_scale: 32.0 +2024-01-15 17:34:02,440 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=58866.666666666664, ans=0.09899494936611666 +2024-01-15 17:34:11,717 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.whiten, num_groups=1, num_channels=512, metric=3.73 vs. limit=12.0 +2024-01-15 17:34:20,553 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=58900.0, ans=0.0 +2024-01-15 17:34:20,643 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=58900.0, ans=0.07 +2024-01-15 17:34:49,956 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-21.pt +2024-01-15 17:35:14,660 INFO [train.py:994] (0/2) Epoch 22, batch 0, loss[loss=0.1598, simple_loss=0.2394, pruned_loss=0.04014, over 24379.00 frames. ], tot_loss[loss=0.1598, simple_loss=0.2394, pruned_loss=0.04014, over 24379.00 frames. ], batch size: 275, lr: 1.84e-02, grad_scale: 32.0 +2024-01-15 17:35:14,661 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 17:35:29,279 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.3.encoder.layers.3.self_attn_weights, attn_weights_entropy = tensor([1.9328, 1.6469, 2.7071, 2.6990, 2.4791, 2.6593, 2.5854, 2.7017], + device='cuda:0') +2024-01-15 17:35:35,900 INFO [train.py:1026] (0/2) Epoch 22, validation: loss=0.1695, simple_loss=0.2543, pruned_loss=0.04238, over 1622729.00 frames. +2024-01-15 17:35:35,901 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 17:36:01,316 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=59076.666666666664, ans=0.2 +2024-01-15 17:36:07,383 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.634e+02 2.084e+02 2.248e+02 2.659e+02 3.763e+02, threshold=4.496e+02, percent-clipped=0.0 +2024-01-15 17:36:21,757 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=59110.0, ans=0.125 +2024-01-15 17:36:29,470 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=59143.333333333336, ans=0.1 +2024-01-15 17:36:35,113 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.27 vs. limit=15.0 +2024-01-15 17:36:37,923 INFO [train.py:994] (0/2) Epoch 22, batch 50, loss[loss=0.1687, simple_loss=0.246, pruned_loss=0.04569, over 24363.00 frames. ], tot_loss[loss=0.1647, simple_loss=0.242, pruned_loss=0.04366, over 1087376.73 frames. ], batch size: 298, lr: 1.84e-02, grad_scale: 32.0 +2024-01-15 17:36:42,181 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.68 vs. 
limit=15.0 +2024-01-15 17:36:46,533 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=59176.666666666664, ans=0.04949747468305833 +2024-01-15 17:37:07,784 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=59243.333333333336, ans=0.0 +2024-01-15 17:37:20,642 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.72 vs. limit=22.5 +2024-01-15 17:37:39,660 INFO [train.py:994] (0/2) Epoch 22, batch 100, loss[loss=0.1702, simple_loss=0.2438, pruned_loss=0.04826, over 24414.00 frames. ], tot_loss[loss=0.1633, simple_loss=0.2405, pruned_loss=0.04308, over 1907410.28 frames. ], batch size: 258, lr: 1.84e-02, grad_scale: 32.0 +2024-01-15 17:37:44,359 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=11.89 vs. limit=15.0 +2024-01-15 17:37:53,069 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.37 vs. limit=15.0 +2024-01-15 17:37:53,932 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=59376.666666666664, ans=0.125 +2024-01-15 17:37:53,979 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=59376.666666666664, ans=0.0 +2024-01-15 17:37:57,473 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=59376.666666666664, ans=0.125 +2024-01-15 17:38:04,714 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer_na.min_abs, batch_count=59410.0, ans=0.02 +2024-01-15 17:38:07,070 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:38:10,825 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 1.958e+02 2.230e+02 2.471e+02 4.355e+02, threshold=4.460e+02, percent-clipped=0.0 +2024-01-15 17:38:11,174 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=59410.0, ans=10.0 +2024-01-15 17:38:31,879 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=59476.666666666664, ans=0.0 +2024-01-15 17:38:38,786 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=59476.666666666664, ans=0.125 +2024-01-15 17:38:38,839 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=59476.666666666664, ans=0.0 +2024-01-15 17:38:42,730 INFO [train.py:994] (0/2) Epoch 22, batch 150, loss[loss=0.174, simple_loss=0.2552, pruned_loss=0.04639, over 22422.00 frames. ], tot_loss[loss=0.1643, simple_loss=0.2417, pruned_loss=0.0434, over 2556044.36 frames. 
], batch size: 357, lr: 1.84e-02, grad_scale: 32.0 +2024-01-15 17:38:50,117 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=59510.0, ans=0.125 +2024-01-15 17:39:13,768 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:39:17,357 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=59576.666666666664, ans=0.125 +2024-01-15 17:39:19,947 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=13.66 vs. limit=22.5 +2024-01-15 17:39:39,802 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:39:42,195 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=59643.333333333336, ans=0.2 +2024-01-15 17:39:44,218 INFO [train.py:994] (0/2) Epoch 22, batch 200, loss[loss=0.1659, simple_loss=0.2474, pruned_loss=0.04219, over 24470.00 frames. ], tot_loss[loss=0.1645, simple_loss=0.2423, pruned_loss=0.04341, over 3061896.96 frames. ], batch size: 216, lr: 1.83e-02, grad_scale: 32.0 +2024-01-15 17:39:46,934 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=59676.666666666664, ans=0.0 +2024-01-15 17:40:11,896 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=59743.333333333336, ans=0.0 +2024-01-15 17:40:14,198 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:40:15,061 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.716e+02 2.149e+02 2.370e+02 2.795e+02 5.088e+02, threshold=4.739e+02, percent-clipped=1.0 +2024-01-15 17:40:18,325 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=59743.333333333336, ans=0.125 +2024-01-15 17:40:31,245 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.85 vs. limit=12.0 +2024-01-15 17:40:46,857 INFO [train.py:994] (0/2) Epoch 22, batch 250, loss[loss=0.1803, simple_loss=0.2512, pruned_loss=0.0547, over 24530.00 frames. ], tot_loss[loss=0.1641, simple_loss=0.2415, pruned_loss=0.04331, over 3440376.25 frames. ], batch size: 187, lr: 1.83e-02, grad_scale: 32.0 +2024-01-15 17:41:00,481 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.84 vs. limit=22.5 +2024-01-15 17:41:05,060 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=59876.666666666664, ans=0.04949747468305833 +2024-01-15 17:41:25,240 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=59943.333333333336, ans=0.0 +2024-01-15 17:41:45,585 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=59976.666666666664, ans=0.0 +2024-01-15 17:41:48,911 INFO [train.py:994] (0/2) Epoch 22, batch 300, loss[loss=0.1577, simple_loss=0.2308, pruned_loss=0.04224, over 23973.00 frames. 
], tot_loss[loss=0.1648, simple_loss=0.2424, pruned_loss=0.04354, over 3745982.99 frames. ], batch size: 131, lr: 1.83e-02, grad_scale: 32.0 +2024-01-15 17:41:53,079 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=6.61 vs. limit=6.0 +2024-01-15 17:42:03,632 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=60043.333333333336, ans=0.0 +2024-01-15 17:42:09,584 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=60043.333333333336, ans=0.125 +2024-01-15 17:42:11,849 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=60043.333333333336, ans=0.125 +2024-01-15 17:42:11,919 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=60043.333333333336, ans=0.1 +2024-01-15 17:42:19,706 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.740e+02 2.217e+02 2.648e+02 3.149e+02 4.235e+02, threshold=5.295e+02, percent-clipped=0.0 +2024-01-15 17:42:50,681 INFO [train.py:994] (0/2) Epoch 22, batch 350, loss[loss=0.1743, simple_loss=0.2546, pruned_loss=0.04698, over 24529.00 frames. ], tot_loss[loss=0.1648, simple_loss=0.2422, pruned_loss=0.04371, over 3963204.97 frames. ], batch size: 204, lr: 1.83e-02, grad_scale: 32.0 +2024-01-15 17:42:53,890 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=60176.666666666664, ans=0.1 +2024-01-15 17:43:05,127 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=60210.0, ans=0.1 +2024-01-15 17:43:18,860 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:43:23,232 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=60243.333333333336, ans=0.0 +2024-01-15 17:43:29,074 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=60276.666666666664, ans=0.0 +2024-01-15 17:43:38,349 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=60276.666666666664, ans=0.1 +2024-01-15 17:43:47,104 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:43:53,305 INFO [train.py:994] (0/2) Epoch 22, batch 400, loss[loss=0.1483, simple_loss=0.2065, pruned_loss=0.04502, over 18182.00 frames. ], tot_loss[loss=0.1645, simple_loss=0.2418, pruned_loss=0.04357, over 4148549.22 frames. ], batch size: 77, lr: 1.83e-02, grad_scale: 32.0 +2024-01-15 17:44:08,789 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.28 vs. 
limit=15.0 +2024-01-15 17:44:10,762 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=60376.666666666664, ans=0.125 +2024-01-15 17:44:24,057 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.630e+02 2.042e+02 2.258e+02 2.641e+02 4.188e+02, threshold=4.517e+02, percent-clipped=0.0 +2024-01-15 17:44:30,910 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=60443.333333333336, ans=0.125 +2024-01-15 17:44:41,051 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=60443.333333333336, ans=0.2 +2024-01-15 17:44:51,625 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=60476.666666666664, ans=0.125 +2024-01-15 17:44:54,835 INFO [train.py:994] (0/2) Epoch 22, batch 450, loss[loss=0.146, simple_loss=0.2242, pruned_loss=0.0339, over 24178.00 frames. ], tot_loss[loss=0.1637, simple_loss=0.2409, pruned_loss=0.04328, over 4281790.04 frames. ], batch size: 140, lr: 1.82e-02, grad_scale: 32.0 +2024-01-15 17:44:55,136 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=60510.0, ans=0.2 +2024-01-15 17:45:10,339 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:45:19,710 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=60576.666666666664, ans=0.5 +2024-01-15 17:45:28,585 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:45:38,878 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.76 vs. limit=15.0 +2024-01-15 17:45:52,613 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=60643.333333333336, ans=0.2 +2024-01-15 17:45:56,877 INFO [train.py:994] (0/2) Epoch 22, batch 500, loss[loss=0.1281, simple_loss=0.2052, pruned_loss=0.02553, over 23689.00 frames. ], tot_loss[loss=0.1639, simple_loss=0.2411, pruned_loss=0.04336, over 4411064.68 frames. ], batch size: 119, lr: 1.82e-02, grad_scale: 32.0 +2024-01-15 17:45:57,547 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.92 vs. limit=15.0 +2024-01-15 17:46:08,390 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=60710.0, ans=0.0 +2024-01-15 17:46:14,742 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.48 vs. limit=10.0 +2024-01-15 17:46:28,279 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.658e+02 2.049e+02 2.276e+02 2.528e+02 4.336e+02, threshold=4.552e+02, percent-clipped=0.0 +2024-01-15 17:46:36,272 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.79 vs. 
limit=12.0 +2024-01-15 17:46:41,216 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=60776.666666666664, ans=0.0 +2024-01-15 17:46:54,657 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=60810.0, ans=0.125 +2024-01-15 17:46:59,013 INFO [train.py:994] (0/2) Epoch 22, batch 550, loss[loss=0.193, simple_loss=0.2717, pruned_loss=0.05715, over 22477.00 frames. ], tot_loss[loss=0.1637, simple_loss=0.2411, pruned_loss=0.04319, over 4506192.95 frames. ], batch size: 357, lr: 1.82e-02, grad_scale: 32.0 +2024-01-15 17:47:31,410 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=60910.0, ans=0.0 +2024-01-15 17:48:01,715 INFO [train.py:994] (0/2) Epoch 22, batch 600, loss[loss=0.1597, simple_loss=0.2409, pruned_loss=0.03922, over 24209.00 frames. ], tot_loss[loss=0.1633, simple_loss=0.2408, pruned_loss=0.04289, over 4556753.46 frames. ], batch size: 140, lr: 1.82e-02, grad_scale: 16.0 +2024-01-15 17:48:06,838 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=61010.0, ans=0.0 +2024-01-15 17:48:08,098 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=61010.0, ans=0.09899494936611666 +2024-01-15 17:48:33,920 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 2.035e+02 2.386e+02 2.905e+02 4.646e+02, threshold=4.772e+02, percent-clipped=1.0 +2024-01-15 17:48:55,715 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:49:04,292 INFO [train.py:994] (0/2) Epoch 22, batch 650, loss[loss=0.1636, simple_loss=0.247, pruned_loss=0.0401, over 24365.00 frames. ], tot_loss[loss=0.1634, simple_loss=0.241, pruned_loss=0.0429, over 4610823.77 frames. 
], batch size: 275, lr: 1.81e-02, grad_scale: 16.0 +2024-01-15 17:49:06,936 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=61176.666666666664, ans=0.0 +2024-01-15 17:49:17,028 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=61210.0, ans=0.05 +2024-01-15 17:49:17,105 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=61210.0, ans=0.125 +2024-01-15 17:49:18,206 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=61210.0, ans=0.125 +2024-01-15 17:49:22,881 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=61210.0, ans=0.125 +2024-01-15 17:49:24,044 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer1.prob, batch_count=61210.0, ans=0.125 +2024-01-15 17:49:46,521 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=61276.666666666664, ans=0.07 +2024-01-15 17:49:50,196 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=61276.666666666664, ans=0.2 +2024-01-15 17:49:55,756 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.69 vs. limit=6.0 +2024-01-15 17:50:06,523 INFO [train.py:994] (0/2) Epoch 22, batch 700, loss[loss=0.1829, simple_loss=0.261, pruned_loss=0.0524, over 24507.00 frames. ], tot_loss[loss=0.1639, simple_loss=0.2415, pruned_loss=0.04317, over 4658336.58 frames. ], batch size: 210, lr: 1.81e-02, grad_scale: 16.0 +2024-01-15 17:50:16,368 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 17:50:30,689 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=61410.0, ans=0.0 +2024-01-15 17:50:37,630 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=61410.0, ans=0.125 +2024-01-15 17:50:38,474 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.674e+02 2.041e+02 2.372e+02 2.713e+02 3.962e+02, threshold=4.745e+02, percent-clipped=0.0 +2024-01-15 17:51:07,956 INFO [train.py:994] (0/2) Epoch 22, batch 750, loss[loss=0.1752, simple_loss=0.2567, pruned_loss=0.04683, over 24372.00 frames. ], tot_loss[loss=0.1634, simple_loss=0.241, pruned_loss=0.04293, over 4686374.45 frames. 
], batch size: 298, lr: 1.81e-02, grad_scale: 16.0 +2024-01-15 17:51:13,632 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=61510.0, ans=0.0 +2024-01-15 17:51:16,096 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=61510.0, ans=0.125 +2024-01-15 17:51:50,464 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer1.prob, batch_count=61610.0, ans=0.125 +2024-01-15 17:52:01,292 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=61643.333333333336, ans=0.07 +2024-01-15 17:52:07,924 INFO [train.py:994] (0/2) Epoch 22, batch 800, loss[loss=0.1787, simple_loss=0.2563, pruned_loss=0.05055, over 22277.00 frames. ], tot_loss[loss=0.1631, simple_loss=0.2405, pruned_loss=0.04289, over 4711723.09 frames. ], batch size: 357, lr: 1.81e-02, grad_scale: 32.0 +2024-01-15 17:52:13,666 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=61676.666666666664, ans=0.2 +2024-01-15 17:52:15,949 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=61676.666666666664, ans=0.0 +2024-01-15 17:52:19,336 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=61710.0, ans=0.0 +2024-01-15 17:52:27,671 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=61710.0, ans=0.1 +2024-01-15 17:52:35,849 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=61743.333333333336, ans=0.0 +2024-01-15 17:52:37,905 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.710e+02 2.029e+02 2.222e+02 2.719e+02 4.293e+02, threshold=4.444e+02, percent-clipped=0.0 +2024-01-15 17:52:38,119 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=61743.333333333336, ans=0.125 +2024-01-15 17:52:42,633 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=61776.666666666664, ans=0.2 +2024-01-15 17:52:43,642 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=61776.666666666664, ans=0.125 +2024-01-15 17:52:56,533 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-22.pt +2024-01-15 17:53:19,022 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.99 vs. limit=15.0 +2024-01-15 17:53:19,414 INFO [train.py:994] (0/2) Epoch 23, batch 0, loss[loss=0.1406, simple_loss=0.1964, pruned_loss=0.04235, over 19575.00 frames. ], tot_loss[loss=0.1406, simple_loss=0.1964, pruned_loss=0.04235, over 19575.00 frames. ], batch size: 85, lr: 1.77e-02, grad_scale: 32.0 +2024-01-15 17:53:19,415 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 17:53:39,996 INFO [train.py:1026] (0/2) Epoch 23, validation: loss=0.1681, simple_loss=0.2529, pruned_loss=0.0416, over 1622729.00 frames. 
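Note on the loss fields: each train.py:994 record above prints three values (loss, simple_loss, pruned_loss) plus a running tot_loss weighted by frame count. The log itself never states how the three relate, but the printed numbers are consistent with the total being 0.5 * simple_loss + pruned_loss. The short check below only verifies that relationship on values copied from the "Epoch 22, batch 750" tot_loss and the "Epoch 23, validation" records above; the 0.5 weight is inferred from the numbers, not quoted from any configuration.

# Consistency check on values copied from the records above
# (0.5 weight on simple_loss is inferred, not taken from the log).
records = [
    (0.1634, 0.2410, 0.04293),  # Epoch 22, batch 750, tot_loss
    (0.1681, 0.2529, 0.0416),   # Epoch 23, validation
]
for loss, simple_loss, pruned_loss in records:
    assert abs(0.5 * simple_loss + pruned_loss - loss) < 5e-4
print("printed loss matches 0.5 * simple_loss + pruned_loss (to rounding)")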
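Note on the optim.py warnings: each one prints five grad-norm quartiles (min, 25%, median, 75%, max), a clipping threshold, and the fraction of batches clipped. In every warning in this stretch of the log the threshold equals Clipping_scale (2.0) times the printed median, so the threshold evidently tracks the median of recent gradient norms. The snippet below only confirms that arithmetic on quartiles copied from two of the warnings above; it is not code from optim.py.

# Threshold-vs-median check on quartiles copied from the warnings above.
clipping_scale = 2.0
warnings = [
    ([1.619e+02, 1.958e+02, 2.230e+02, 2.471e+02, 4.355e+02], 4.460e+02),
    ([1.702e+02, 2.035e+02, 2.267e+02, 2.598e+02, 4.096e+02], 4.534e+02),
]
for quartiles, threshold in warnings:
    median = quartiles[2]
    assert abs(clipping_scale * median - threshold) <= 1.0
print("threshold == Clipping_scale * median grad-norm (to rounding)")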
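Note on the scaling.py:213 lines: each reports the current value (ans) of a named ScheduledFloat hyper-parameter at the current batch_count. The log does not show how that value is computed; the sketch below is only a generic piecewise-linear schedule keyed on batch count, with illustrative breakpoints, to show the kind of lookup such lines describe. It is not the scaling.py implementation.

# Generic piecewise-linear schedule over batch count (illustrative only).
def scheduled_value(batch_count, points):
    """points: sorted (batch_count, value) pairs; linear interpolation between
    them, clamped to the first/last value outside the covered range."""
    x0, y0 = points[0]
    if batch_count <= x0:
        return y0
    for x1, y1 in points[1:]:
        if batch_count <= x1:
            t = (batch_count - x0) / (x1 - x0)
            return y0 + t * (y1 - y0)
        x0, y0 = x1, y1
    return y0

# Example: a dropout-like value decaying from 0.3 to 0.1 over the first 20k batches.
print(scheduled_value(59376.0, [(0.0, 0.3), (20000.0, 0.1)]))  # -> 0.1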
+2024-01-15 17:53:39,997 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 17:53:41,537 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=61820.0, ans=0.0 +2024-01-15 17:54:00,634 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=61853.333333333336, ans=0.125 +2024-01-15 17:54:15,716 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=61886.666666666664, ans=0.0 +2024-01-15 17:54:23,414 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=4.83 vs. limit=15.0 +2024-01-15 17:54:42,850 INFO [train.py:994] (0/2) Epoch 23, batch 50, loss[loss=0.1712, simple_loss=0.2402, pruned_loss=0.05106, over 24348.00 frames. ], tot_loss[loss=0.1642, simple_loss=0.2416, pruned_loss=0.04341, over 1095520.52 frames. ], batch size: 153, lr: 1.77e-02, grad_scale: 32.0 +2024-01-15 17:54:47,016 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=61986.666666666664, ans=0.125 +2024-01-15 17:54:55,922 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=62020.0, ans=0.1 +2024-01-15 17:55:02,522 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=62020.0, ans=0.2 +2024-01-15 17:55:04,873 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=62020.0, ans=0.2 +2024-01-15 17:55:14,557 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=13.55 vs. limit=15.0 +2024-01-15 17:55:24,543 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.702e+02 2.035e+02 2.267e+02 2.598e+02 4.096e+02, threshold=4.534e+02, percent-clipped=0.0 +2024-01-15 17:55:34,107 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=62120.0, ans=0.2 +2024-01-15 17:55:44,913 INFO [train.py:994] (0/2) Epoch 23, batch 100, loss[loss=0.1603, simple_loss=0.2363, pruned_loss=0.04219, over 24359.00 frames. ], tot_loss[loss=0.1609, simple_loss=0.2385, pruned_loss=0.04168, over 1921202.08 frames. ], batch size: 153, lr: 1.77e-02, grad_scale: 32.0 +2024-01-15 17:55:57,677 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=62186.666666666664, ans=0.1 +2024-01-15 17:56:38,819 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.93 vs. limit=15.0 +2024-01-15 17:56:46,320 INFO [train.py:994] (0/2) Epoch 23, batch 150, loss[loss=0.1743, simple_loss=0.2501, pruned_loss=0.04919, over 24540.00 frames. ], tot_loss[loss=0.1624, simple_loss=0.2401, pruned_loss=0.04235, over 2568999.31 frames. 
], batch size: 176, lr: 1.77e-02, grad_scale: 32.0 +2024-01-15 17:56:57,217 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.min_abs, batch_count=62353.333333333336, ans=0.5 +2024-01-15 17:56:58,688 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=8.56 vs. limit=15.0 +2024-01-15 17:57:20,901 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.35 vs. limit=15.0 +2024-01-15 17:57:26,683 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.664e+02 1.927e+02 2.204e+02 2.443e+02 3.212e+02, threshold=4.407e+02, percent-clipped=0.0 +2024-01-15 17:57:30,076 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.86 vs. limit=15.0 +2024-01-15 17:57:33,351 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=62420.0, ans=0.1 +2024-01-15 17:57:33,439 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=62420.0, ans=0.1 +2024-01-15 17:57:43,376 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=9.40 vs. limit=15.0 +2024-01-15 17:57:47,200 INFO [train.py:994] (0/2) Epoch 23, batch 200, loss[loss=0.1655, simple_loss=0.2421, pruned_loss=0.04443, over 24512.00 frames. ], tot_loss[loss=0.162, simple_loss=0.2399, pruned_loss=0.04205, over 3077185.77 frames. ], batch size: 204, lr: 1.76e-02, grad_scale: 32.0 +2024-01-15 17:57:52,258 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.80 vs. limit=10.0 +2024-01-15 17:57:58,380 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=62486.666666666664, ans=0.125 +2024-01-15 17:58:00,812 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=62520.0, ans=0.0 +2024-01-15 17:58:09,199 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.69 vs. limit=15.0 +2024-01-15 17:58:25,577 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=62586.666666666664, ans=0.1 +2024-01-15 17:58:26,771 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=62586.666666666664, ans=0.0 +2024-01-15 17:58:27,981 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=62586.666666666664, ans=0.1 +2024-01-15 17:58:31,616 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=62586.666666666664, ans=0.0 +2024-01-15 17:58:48,328 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=62620.0, ans=0.0 +2024-01-15 17:58:50,412 INFO [train.py:994] (0/2) Epoch 23, batch 250, loss[loss=0.161, simple_loss=0.2443, pruned_loss=0.03889, over 24525.00 frames. 
], tot_loss[loss=0.1617, simple_loss=0.2399, pruned_loss=0.04176, over 3464531.02 frames. ], batch size: 243, lr: 1.76e-02, grad_scale: 32.0 +2024-01-15 17:58:50,722 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=62653.333333333336, ans=0.125 +2024-01-15 17:59:15,812 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=62720.0, ans=0.2 +2024-01-15 17:59:31,373 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.703e+02 1.957e+02 2.240e+02 2.593e+02 5.482e+02, threshold=4.480e+02, percent-clipped=1.0 +2024-01-15 17:59:31,694 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=62753.333333333336, ans=0.125 +2024-01-15 17:59:32,880 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=62753.333333333336, ans=0.1 +2024-01-15 17:59:42,958 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=62786.666666666664, ans=0.0 +2024-01-15 17:59:48,928 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer1.prob, batch_count=62786.666666666664, ans=0.125 +2024-01-15 17:59:52,185 INFO [train.py:994] (0/2) Epoch 23, batch 300, loss[loss=0.144, simple_loss=0.2229, pruned_loss=0.03256, over 24002.00 frames. ], tot_loss[loss=0.1625, simple_loss=0.2409, pruned_loss=0.04207, over 3773650.76 frames. ], batch size: 131, lr: 1.76e-02, grad_scale: 32.0 +2024-01-15 18:00:08,985 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=62853.333333333336, ans=0.0 +2024-01-15 18:00:16,240 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.43 vs. limit=15.0 +2024-01-15 18:00:18,961 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.40 vs. limit=12.0 +2024-01-15 18:00:38,674 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=62920.0, ans=0.125 +2024-01-15 18:00:54,350 INFO [train.py:994] (0/2) Epoch 23, batch 350, loss[loss=0.1701, simple_loss=0.2471, pruned_loss=0.04654, over 24573.00 frames. ], tot_loss[loss=0.1627, simple_loss=0.2408, pruned_loss=0.04227, over 4005219.62 frames. ], batch size: 176, lr: 1.76e-02, grad_scale: 16.0 +2024-01-15 18:01:00,182 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=62986.666666666664, ans=0.125 +2024-01-15 18:01:36,164 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.633e+02 2.040e+02 2.331e+02 2.751e+02 4.067e+02, threshold=4.662e+02, percent-clipped=0.0 +2024-01-15 18:01:43,116 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=63120.0, ans=0.2 +2024-01-15 18:01:46,090 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=63120.0, ans=0.125 +2024-01-15 18:01:49,030 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=7.70 vs. 
limit=15.0 +2024-01-15 18:01:52,044 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=63120.0, ans=0.1 +2024-01-15 18:01:56,470 INFO [train.py:994] (0/2) Epoch 23, batch 400, loss[loss=0.156, simple_loss=0.2377, pruned_loss=0.03718, over 24328.00 frames. ], tot_loss[loss=0.1623, simple_loss=0.2402, pruned_loss=0.04222, over 4184407.02 frames. ], batch size: 147, lr: 1.76e-02, grad_scale: 32.0 +2024-01-15 18:02:08,141 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=63186.666666666664, ans=0.1 +2024-01-15 18:02:13,696 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=63186.666666666664, ans=0.125 +2024-01-15 18:02:26,791 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.71 vs. limit=6.0 +2024-01-15 18:02:27,861 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=14.22 vs. limit=22.5 +2024-01-15 18:02:35,730 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.61 vs. limit=15.0 +2024-01-15 18:02:37,470 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=63253.333333333336, ans=0.125 +2024-01-15 18:02:53,616 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=63286.666666666664, ans=0.125 +2024-01-15 18:02:58,698 INFO [train.py:994] (0/2) Epoch 23, batch 450, loss[loss=0.1751, simple_loss=0.2539, pruned_loss=0.04818, over 24193.00 frames. ], tot_loss[loss=0.1624, simple_loss=0.2403, pruned_loss=0.04228, over 4321474.23 frames. ], batch size: 311, lr: 1.75e-02, grad_scale: 32.0 +2024-01-15 18:03:00,265 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=63320.0, ans=0.125 +2024-01-15 18:03:04,261 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=63320.0, ans=0.09899494936611666 +2024-01-15 18:03:06,094 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=63320.0, ans=0.125 +2024-01-15 18:03:25,879 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.36 vs. 
limit=15.0 +2024-01-15 18:03:29,232 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=63386.666666666664, ans=0.0 +2024-01-15 18:03:41,522 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.613e+02 1.977e+02 2.334e+02 2.752e+02 4.665e+02, threshold=4.668e+02, percent-clipped=1.0 +2024-01-15 18:03:48,291 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=63453.333333333336, ans=0.1 +2024-01-15 18:03:57,196 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=63453.333333333336, ans=0.125 +2024-01-15 18:04:01,701 INFO [train.py:994] (0/2) Epoch 23, batch 500, loss[loss=0.1266, simple_loss=0.1885, pruned_loss=0.03233, over 15859.00 frames. ], tot_loss[loss=0.1621, simple_loss=0.2402, pruned_loss=0.04199, over 4422987.66 frames. ], batch size: 67, lr: 1.75e-02, grad_scale: 32.0 +2024-01-15 18:04:03,270 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=63486.666666666664, ans=0.1 +2024-01-15 18:04:08,555 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.45 vs. limit=12.0 +2024-01-15 18:04:30,239 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=63553.333333333336, ans=0.07 +2024-01-15 18:04:56,336 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=63620.0, ans=0.0 +2024-01-15 18:04:56,824 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.00 vs. limit=22.5 +2024-01-15 18:05:03,128 INFO [train.py:994] (0/2) Epoch 23, batch 550, loss[loss=0.1502, simple_loss=0.2311, pruned_loss=0.03471, over 24209.00 frames. ], tot_loss[loss=0.1623, simple_loss=0.2405, pruned_loss=0.04211, over 4506932.13 frames. ], batch size: 140, lr: 1.75e-02, grad_scale: 32.0 +2024-01-15 18:05:05,177 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=63653.333333333336, ans=0.1 +2024-01-15 18:05:33,828 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=63720.0, ans=0.125 +2024-01-15 18:05:39,446 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.82 vs. 
limit=22.5 +2024-01-15 18:05:45,850 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.552e+02 2.008e+02 2.265e+02 2.948e+02 4.653e+02, threshold=4.530e+02, percent-clipped=0.0 +2024-01-15 18:05:46,115 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=63753.333333333336, ans=0.125 +2024-01-15 18:05:46,205 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=63753.333333333336, ans=0.1 +2024-01-15 18:05:49,690 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=63753.333333333336, ans=0.09899494936611666 +2024-01-15 18:05:54,380 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=63786.666666666664, ans=0.0 +2024-01-15 18:05:54,423 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=63786.666666666664, ans=0.125 +2024-01-15 18:05:56,773 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=63786.666666666664, ans=0.125 +2024-01-15 18:06:06,067 INFO [train.py:994] (0/2) Epoch 23, batch 600, loss[loss=0.1664, simple_loss=0.2454, pruned_loss=0.04372, over 24443.00 frames. ], tot_loss[loss=0.1624, simple_loss=0.2405, pruned_loss=0.04217, over 4570352.10 frames. ], batch size: 250, lr: 1.75e-02, grad_scale: 32.0 +2024-01-15 18:06:08,722 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=63820.0, ans=0.09899494936611666 +2024-01-15 18:06:15,868 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=63820.0, ans=0.95 +2024-01-15 18:06:19,887 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.conv_module1.whiten, num_groups=1, num_channels=192, metric=6.59 vs. limit=15.0 +2024-01-15 18:06:21,555 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=63853.333333333336, ans=0.2 +2024-01-15 18:06:29,940 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=63886.666666666664, ans=0.05 +2024-01-15 18:06:39,234 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=63886.666666666664, ans=0.0 +2024-01-15 18:06:51,066 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.65 vs. limit=15.0 +2024-01-15 18:06:59,731 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.94 vs. limit=15.0 +2024-01-15 18:07:05,455 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=63953.333333333336, ans=0.125 +2024-01-15 18:07:07,545 INFO [train.py:994] (0/2) Epoch 23, batch 650, loss[loss=0.1598, simple_loss=0.2392, pruned_loss=0.04023, over 24522.00 frames. ], tot_loss[loss=0.1622, simple_loss=0.2406, pruned_loss=0.04192, over 4632216.23 frames. 
], batch size: 243, lr: 1.75e-02, grad_scale: 32.0 +2024-01-15 18:07:25,731 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=64020.0, ans=0.125 +2024-01-15 18:07:38,598 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=64053.333333333336, ans=0.0 +2024-01-15 18:07:44,436 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=9.50 vs. limit=15.0 +2024-01-15 18:07:50,191 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.541e+02 1.943e+02 2.139e+02 2.607e+02 5.108e+02, threshold=4.278e+02, percent-clipped=1.0 +2024-01-15 18:08:09,942 INFO [train.py:994] (0/2) Epoch 23, batch 700, loss[loss=0.1635, simple_loss=0.2404, pruned_loss=0.04336, over 24543.00 frames. ], tot_loss[loss=0.1621, simple_loss=0.2405, pruned_loss=0.04185, over 4675790.68 frames. ], batch size: 165, lr: 1.74e-02, grad_scale: 32.0 +2024-01-15 18:08:12,538 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.87 vs. limit=15.0 +2024-01-15 18:08:13,313 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=64153.333333333336, ans=0.2 +2024-01-15 18:08:22,974 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=64186.666666666664, ans=0.125 +2024-01-15 18:08:26,405 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=64186.666666666664, ans=0.125 +2024-01-15 18:08:27,614 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=64186.666666666664, ans=0.0 +2024-01-15 18:08:59,071 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=64286.666666666664, ans=0.125 +2024-01-15 18:09:00,173 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=64286.666666666664, ans=0.1 +2024-01-15 18:09:12,586 INFO [train.py:994] (0/2) Epoch 23, batch 750, loss[loss=0.1452, simple_loss=0.223, pruned_loss=0.03366, over 24195.00 frames. ], tot_loss[loss=0.1614, simple_loss=0.2395, pruned_loss=0.04161, over 4697628.86 frames. ], batch size: 140, lr: 1.74e-02, grad_scale: 32.0 +2024-01-15 18:09:21,229 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=64320.0, ans=0.125 +2024-01-15 18:09:53,919 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.497e+02 2.042e+02 2.299e+02 2.655e+02 3.861e+02, threshold=4.598e+02, percent-clipped=0.0 +2024-01-15 18:10:12,646 INFO [train.py:994] (0/2) Epoch 23, batch 800, loss[loss=0.1674, simple_loss=0.2467, pruned_loss=0.04409, over 24493.00 frames. ], tot_loss[loss=0.1615, simple_loss=0.2395, pruned_loss=0.04171, over 4734386.25 frames. 
], batch size: 229, lr: 1.74e-02, grad_scale: 32.0 +2024-01-15 18:10:18,246 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=64486.666666666664, ans=0.1 +2024-01-15 18:10:32,640 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=64520.0, ans=0.125 +2024-01-15 18:10:32,823 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=64520.0, ans=0.125 +2024-01-15 18:10:32,827 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=64520.0, ans=0.1 +2024-01-15 18:11:02,180 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-23.pt +2024-01-15 18:11:25,230 INFO [train.py:994] (0/2) Epoch 24, batch 0, loss[loss=0.1284, simple_loss=0.189, pruned_loss=0.03393, over 18605.00 frames. ], tot_loss[loss=0.1284, simple_loss=0.189, pruned_loss=0.03393, over 18605.00 frames. ], batch size: 81, lr: 1.71e-02, grad_scale: 32.0 +2024-01-15 18:11:25,232 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 18:11:45,817 INFO [train.py:1026] (0/2) Epoch 24, validation: loss=0.1707, simple_loss=0.2556, pruned_loss=0.04287, over 1622729.00 frames. +2024-01-15 18:11:45,817 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 18:11:50,176 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=15.71 vs. limit=22.5 +2024-01-15 18:11:58,194 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 18:12:02,383 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=64663.333333333336, ans=0.0 +2024-01-15 18:12:04,866 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=64663.333333333336, ans=0.0 +2024-01-15 18:12:08,449 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=64663.333333333336, ans=0.125 +2024-01-15 18:12:31,043 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=64730.0, ans=0.0 +2024-01-15 18:12:33,394 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.skip_rate, batch_count=64730.0, ans=0.04949747468305833 +2024-01-15 18:12:37,381 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.627e+02 2.033e+02 2.201e+02 2.614e+02 3.866e+02, threshold=4.401e+02, percent-clipped=0.0 +2024-01-15 18:12:41,066 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=64763.333333333336, ans=0.1 +2024-01-15 18:12:47,966 INFO [train.py:994] (0/2) Epoch 24, batch 50, loss[loss=0.1752, simple_loss=0.2529, pruned_loss=0.04879, over 24490.00 frames. ], tot_loss[loss=0.1605, simple_loss=0.238, pruned_loss=0.04151, over 1086540.00 frames. 
], batch size: 187, lr: 1.70e-02, grad_scale: 32.0 +2024-01-15 18:12:52,535 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=64796.666666666664, ans=0.2 +2024-01-15 18:13:05,942 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=64830.0, ans=0.0 +2024-01-15 18:13:25,854 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.88 vs. limit=15.0 +2024-01-15 18:13:50,496 INFO [train.py:994] (0/2) Epoch 24, batch 100, loss[loss=0.1722, simple_loss=0.2444, pruned_loss=0.04996, over 24570.00 frames. ], tot_loss[loss=0.1589, simple_loss=0.237, pruned_loss=0.04041, over 1914179.21 frames. ], batch size: 176, lr: 1.70e-02, grad_scale: 32.0 +2024-01-15 18:14:10,358 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=64996.666666666664, ans=0.125 +2024-01-15 18:14:25,080 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=65030.0, ans=0.2 +2024-01-15 18:14:34,791 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=65063.333333333336, ans=0.125 +2024-01-15 18:14:42,973 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.411e+02 1.963e+02 2.259e+02 2.725e+02 4.452e+02, threshold=4.517e+02, percent-clipped=1.0 +2024-01-15 18:14:53,856 INFO [train.py:994] (0/2) Epoch 24, batch 150, loss[loss=0.1658, simple_loss=0.2422, pruned_loss=0.04465, over 24473.00 frames. ], tot_loss[loss=0.1594, simple_loss=0.2378, pruned_loss=0.04056, over 2563239.05 frames. ], batch size: 267, lr: 1.70e-02, grad_scale: 32.0 +2024-01-15 18:15:15,979 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.33 vs. limit=6.0 +2024-01-15 18:15:28,377 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=65196.666666666664, ans=0.0 +2024-01-15 18:15:36,302 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=65230.0, ans=0.09899494936611666 +2024-01-15 18:15:39,748 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.04 vs. limit=15.0 +2024-01-15 18:15:42,918 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=65263.333333333336, ans=0.1 +2024-01-15 18:15:50,028 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.ff2_skip_rate, batch_count=65263.333333333336, ans=0.0 +2024-01-15 18:15:51,310 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=65263.333333333336, ans=0.0 +2024-01-15 18:15:51,340 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=65263.333333333336, ans=0.125 +2024-01-15 18:15:52,986 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=9.04 vs. 
limit=15.0 +2024-01-15 18:15:53,685 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=65263.333333333336, ans=0.125 +2024-01-15 18:15:55,804 INFO [train.py:994] (0/2) Epoch 24, batch 200, loss[loss=0.1586, simple_loss=0.2309, pruned_loss=0.04318, over 24354.00 frames. ], tot_loss[loss=0.1596, simple_loss=0.2379, pruned_loss=0.04067, over 3053391.03 frames. ], batch size: 153, lr: 1.70e-02, grad_scale: 32.0 +2024-01-15 18:15:59,226 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=65296.666666666664, ans=0.0 +2024-01-15 18:16:05,399 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=65296.666666666664, ans=0.1 +2024-01-15 18:16:06,565 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=65296.666666666664, ans=0.125 +2024-01-15 18:16:06,618 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff3_skip_rate, batch_count=65296.666666666664, ans=0.0 +2024-01-15 18:16:40,003 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=65396.666666666664, ans=0.0 +2024-01-15 18:16:46,747 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.618e+02 1.863e+02 2.065e+02 2.277e+02 3.194e+02, threshold=4.131e+02, percent-clipped=0.0 +2024-01-15 18:16:58,441 INFO [train.py:994] (0/2) Epoch 24, batch 250, loss[loss=0.1431, simple_loss=0.2236, pruned_loss=0.03131, over 24210.00 frames. ], tot_loss[loss=0.1587, simple_loss=0.2368, pruned_loss=0.04027, over 3438387.95 frames. ], batch size: 140, lr: 1.70e-02, grad_scale: 32.0 +2024-01-15 18:17:04,535 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 18:17:05,657 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=65463.333333333336, ans=0.0 +2024-01-15 18:17:30,743 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=65530.0, ans=0.0 +2024-01-15 18:17:47,482 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=65596.66666666667, ans=0.05 +2024-01-15 18:18:00,115 INFO [train.py:994] (0/2) Epoch 24, batch 300, loss[loss=0.1747, simple_loss=0.2545, pruned_loss=0.04741, over 24485.00 frames. ], tot_loss[loss=0.1599, simple_loss=0.238, pruned_loss=0.04086, over 3748085.14 frames. 
], batch size: 187, lr: 1.70e-02, grad_scale: 32.0 +2024-01-15 18:18:13,946 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=65663.33333333333, ans=0.0 +2024-01-15 18:18:25,669 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=65696.66666666667, ans=0.1 +2024-01-15 18:18:31,805 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=65696.66666666667, ans=0.125 +2024-01-15 18:18:43,127 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=65730.0, ans=0.1 +2024-01-15 18:18:43,174 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 18:18:50,109 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.726e+02 1.979e+02 2.184e+02 2.464e+02 3.972e+02, threshold=4.368e+02, percent-clipped=0.0 +2024-01-15 18:18:52,802 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=65763.33333333333, ans=0.0 +2024-01-15 18:18:53,922 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=65763.33333333333, ans=0.125 +2024-01-15 18:19:02,058 INFO [train.py:994] (0/2) Epoch 24, batch 350, loss[loss=0.163, simple_loss=0.2423, pruned_loss=0.04182, over 24377.00 frames. ], tot_loss[loss=0.1596, simple_loss=0.2379, pruned_loss=0.04069, over 3977871.49 frames. ], batch size: 275, lr: 1.69e-02, grad_scale: 32.0 +2024-01-15 18:19:10,907 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=9.84 vs. limit=15.0 +2024-01-15 18:19:14,536 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.34 vs. limit=15.0 +2024-01-15 18:19:16,223 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=65830.0, ans=0.0 +2024-01-15 18:19:26,359 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=65863.33333333333, ans=0.1 +2024-01-15 18:19:29,833 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=65863.33333333333, ans=0.2 +2024-01-15 18:19:40,244 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=65896.66666666667, ans=0.125 +2024-01-15 18:20:02,432 INFO [train.py:994] (0/2) Epoch 24, batch 400, loss[loss=0.1543, simple_loss=0.2352, pruned_loss=0.03672, over 24484.00 frames. ], tot_loss[loss=0.1596, simple_loss=0.2379, pruned_loss=0.04065, over 4163397.82 frames. 
], batch size: 187, lr: 1.69e-02, grad_scale: 32.0 +2024-01-15 18:20:11,200 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=65963.33333333333, ans=0.125 +2024-01-15 18:20:13,541 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=65996.66666666667, ans=0.125 +2024-01-15 18:20:20,158 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.19 vs. limit=15.0 +2024-01-15 18:20:20,789 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=65996.66666666667, ans=0.04949747468305833 +2024-01-15 18:20:44,553 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=66063.33333333333, ans=0.0 +2024-01-15 18:20:50,613 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 18:20:53,869 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.573e+02 1.999e+02 2.258e+02 2.615e+02 4.418e+02, threshold=4.515e+02, percent-clipped=1.0 +2024-01-15 18:21:04,454 INFO [train.py:994] (0/2) Epoch 24, batch 450, loss[loss=0.153, simple_loss=0.2289, pruned_loss=0.03852, over 24535.00 frames. ], tot_loss[loss=0.159, simple_loss=0.2373, pruned_loss=0.0404, over 4295999.40 frames. ], batch size: 176, lr: 1.69e-02, grad_scale: 32.0 +2024-01-15 18:21:04,749 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=66130.0, ans=0.0 +2024-01-15 18:21:23,774 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=66163.33333333333, ans=0.035 +2024-01-15 18:22:07,833 INFO [train.py:994] (0/2) Epoch 24, batch 500, loss[loss=0.1601, simple_loss=0.2329, pruned_loss=0.0436, over 24478.00 frames. ], tot_loss[loss=0.1593, simple_loss=0.2375, pruned_loss=0.04058, over 4415930.41 frames. ], batch size: 165, lr: 1.69e-02, grad_scale: 32.0 +2024-01-15 18:22:10,454 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=66296.66666666667, ans=0.125 +2024-01-15 18:22:39,976 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.25 vs. limit=12.0 +2024-01-15 18:22:51,854 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=66396.66666666667, ans=0.125 +2024-01-15 18:22:59,403 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.575e+02 2.052e+02 2.388e+02 2.760e+02 4.154e+02, threshold=4.776e+02, percent-clipped=0.0 +2024-01-15 18:23:05,872 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=66430.0, ans=0.2 +2024-01-15 18:23:10,353 INFO [train.py:994] (0/2) Epoch 24, batch 550, loss[loss=0.1563, simple_loss=0.2371, pruned_loss=0.0377, over 24459.00 frames. ], tot_loss[loss=0.1592, simple_loss=0.2375, pruned_loss=0.04044, over 4512856.50 frames. 
], batch size: 250, lr: 1.69e-02, grad_scale: 32.0 +2024-01-15 18:23:13,011 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=66463.33333333333, ans=0.2 +2024-01-15 18:23:19,814 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=66463.33333333333, ans=0.1 +2024-01-15 18:23:27,584 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=66496.66666666667, ans=0.125 +2024-01-15 18:23:33,548 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=66496.66666666667, ans=0.125 +2024-01-15 18:23:49,476 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.69 vs. limit=15.0 +2024-01-15 18:24:03,460 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=66596.66666666667, ans=0.125 +2024-01-15 18:24:14,007 INFO [train.py:994] (0/2) Epoch 24, batch 600, loss[loss=0.1499, simple_loss=0.2325, pruned_loss=0.03367, over 24380.00 frames. ], tot_loss[loss=0.1591, simple_loss=0.2375, pruned_loss=0.04039, over 4583130.88 frames. ], batch size: 275, lr: 1.68e-02, grad_scale: 32.0 +2024-01-15 18:24:20,199 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=66630.0, ans=0.1 +2024-01-15 18:24:26,226 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/checkpoint-20000.pt +2024-01-15 18:24:30,025 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=66663.33333333333, ans=0.125 +2024-01-15 18:24:34,324 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=66663.33333333333, ans=0.1 +2024-01-15 18:24:51,513 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=66696.66666666667, ans=0.0 +2024-01-15 18:25:00,387 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=66730.0, ans=0.2 +2024-01-15 18:25:04,046 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=66730.0, ans=0.125 +2024-01-15 18:25:07,909 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.583e+02 1.946e+02 2.215e+02 2.637e+02 3.675e+02, threshold=4.429e+02, percent-clipped=0.0 +2024-01-15 18:25:18,670 INFO [train.py:994] (0/2) Epoch 24, batch 650, loss[loss=0.1536, simple_loss=0.2349, pruned_loss=0.03611, over 24474.00 frames. ], tot_loss[loss=0.1586, simple_loss=0.2369, pruned_loss=0.04017, over 4629194.38 frames. 
], batch size: 267, lr: 1.68e-02, grad_scale: 32.0 +2024-01-15 18:25:21,319 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=66796.66666666667, ans=0.0 +2024-01-15 18:25:46,450 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=66863.33333333333, ans=0.125 +2024-01-15 18:25:48,679 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=66863.33333333333, ans=0.125 +2024-01-15 18:26:05,903 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=8.26 vs. limit=15.0 +2024-01-15 18:26:21,265 INFO [train.py:994] (0/2) Epoch 24, batch 700, loss[loss=0.1605, simple_loss=0.2397, pruned_loss=0.04063, over 24604.00 frames. ], tot_loss[loss=0.1587, simple_loss=0.2369, pruned_loss=0.04023, over 4673138.93 frames. ], batch size: 199, lr: 1.68e-02, grad_scale: 32.0 +2024-01-15 18:26:30,853 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=66963.33333333333, ans=0.125 +2024-01-15 18:26:49,320 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.39 vs. limit=10.0 +2024-01-15 18:26:52,184 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=67030.0, ans=0.035 +2024-01-15 18:27:13,876 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.651e+02 1.976e+02 2.136e+02 2.638e+02 3.753e+02, threshold=4.272e+02, percent-clipped=0.0 +2024-01-15 18:27:15,613 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=6.70 vs. limit=15.0 +2024-01-15 18:27:20,107 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=67096.66666666667, ans=0.125 +2024-01-15 18:27:25,226 INFO [train.py:994] (0/2) Epoch 24, batch 750, loss[loss=0.1818, simple_loss=0.2577, pruned_loss=0.05293, over 24417.00 frames. ], tot_loss[loss=0.1589, simple_loss=0.2372, pruned_loss=0.0403, over 4697920.74 frames. ], batch size: 159, lr: 1.68e-02, grad_scale: 32.0 +2024-01-15 18:28:01,573 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=67230.0, ans=0.125 +2024-01-15 18:28:08,983 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=67230.0, ans=0.2 +2024-01-15 18:28:18,002 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=67263.33333333333, ans=0.09899494936611666 +2024-01-15 18:28:23,469 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 18:28:24,261 INFO [train.py:994] (0/2) Epoch 24, batch 800, loss[loss=0.1625, simple_loss=0.2422, pruned_loss=0.04144, over 24498.00 frames. ], tot_loss[loss=0.1591, simple_loss=0.2374, pruned_loss=0.04046, over 4715813.56 frames. 
], batch size: 222, lr: 1.68e-02, grad_scale: 32.0 +2024-01-15 18:28:36,643 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=67330.0, ans=0.0 +2024-01-15 18:28:40,057 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 18:29:04,307 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.40 vs. limit=22.5 +2024-01-15 18:29:11,604 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.577e+02 1.963e+02 2.169e+02 2.600e+02 5.090e+02, threshold=4.339e+02, percent-clipped=1.0 +2024-01-15 18:29:11,776 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=67430.0, ans=0.125 +2024-01-15 18:29:13,733 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-24.pt +2024-01-15 18:29:36,185 INFO [train.py:994] (0/2) Epoch 25, batch 0, loss[loss=0.1527, simple_loss=0.2313, pruned_loss=0.03701, over 24478.00 frames. ], tot_loss[loss=0.1527, simple_loss=0.2313, pruned_loss=0.03701, over 24478.00 frames. ], batch size: 165, lr: 1.65e-02, grad_scale: 32.0 +2024-01-15 18:29:36,186 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 18:29:55,376 INFO [train.py:1026] (0/2) Epoch 25, validation: loss=0.1673, simple_loss=0.2515, pruned_loss=0.04159, over 1622729.00 frames. +2024-01-15 18:29:55,377 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 18:29:56,094 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=19.15 vs. limit=22.5 +2024-01-15 18:29:58,473 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=10.36 vs. limit=15.0 +2024-01-15 18:30:11,035 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module1.whiten, num_groups=1, num_channels=512, metric=8.56 vs. limit=15.0 +2024-01-15 18:30:33,323 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=67540.0, ans=0.0 +2024-01-15 18:30:35,073 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.99 vs. limit=22.5 +2024-01-15 18:30:37,993 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_positive, batch_count=67540.0, ans=0.05 +2024-01-15 18:30:57,377 INFO [train.py:994] (0/2) Epoch 25, batch 50, loss[loss=0.1292, simple_loss=0.1941, pruned_loss=0.03211, over 16741.00 frames. ], tot_loss[loss=0.1576, simple_loss=0.235, pruned_loss=0.04007, over 1080543.02 frames. ], batch size: 72, lr: 1.64e-02, grad_scale: 32.0 +2024-01-15 18:31:11,756 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.99 vs. limit=6.0 +2024-01-15 18:31:28,542 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=4.55 vs. 
limit=15.0 +2024-01-15 18:31:57,091 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.667e+02 2.049e+02 2.347e+02 2.774e+02 4.343e+02, threshold=4.695e+02, percent-clipped=1.0 +2024-01-15 18:31:59,478 INFO [train.py:994] (0/2) Epoch 25, batch 100, loss[loss=0.1594, simple_loss=0.2354, pruned_loss=0.04167, over 24497.00 frames. ], tot_loss[loss=0.1582, simple_loss=0.2361, pruned_loss=0.04015, over 1908466.82 frames. ], batch size: 187, lr: 1.64e-02, grad_scale: 32.0 +2024-01-15 18:32:28,567 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=67840.0, ans=0.125 +2024-01-15 18:32:35,169 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=67840.0, ans=0.09899494936611666 +2024-01-15 18:32:51,563 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=13.97 vs. limit=15.0 +2024-01-15 18:33:02,621 INFO [train.py:994] (0/2) Epoch 25, batch 150, loss[loss=0.145, simple_loss=0.2241, pruned_loss=0.03293, over 24205.00 frames. ], tot_loss[loss=0.1577, simple_loss=0.236, pruned_loss=0.03976, over 2553213.11 frames. ], batch size: 140, lr: 1.64e-02, grad_scale: 32.0 +2024-01-15 18:33:24,672 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=67973.33333333333, ans=0.0 +2024-01-15 18:33:24,804 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=67973.33333333333, ans=0.125 +2024-01-15 18:33:29,313 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=68006.66666666667, ans=0.2 +2024-01-15 18:33:34,010 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=68006.66666666667, ans=0.035 +2024-01-15 18:33:45,283 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 18:33:51,744 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=68073.33333333333, ans=0.125 +2024-01-15 18:33:57,542 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=68073.33333333333, ans=0.125 +2024-01-15 18:34:02,707 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.662e+02 2.019e+02 2.286e+02 2.815e+02 4.282e+02, threshold=4.572e+02, percent-clipped=0.0 +2024-01-15 18:34:05,153 INFO [train.py:994] (0/2) Epoch 25, batch 200, loss[loss=0.1668, simple_loss=0.239, pruned_loss=0.0473, over 24465.00 frames. ], tot_loss[loss=0.1578, simple_loss=0.2361, pruned_loss=0.03981, over 3050923.78 frames. ], batch size: 170, lr: 1.64e-02, grad_scale: 32.0 +2024-01-15 18:34:08,364 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.70 vs. limit=10.0 +2024-01-15 18:34:21,607 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.24 vs. 
limit=22.5 +2024-01-15 18:34:32,579 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 18:34:32,699 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=68173.33333333333, ans=0.1 +2024-01-15 18:34:41,483 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_skip_rate, batch_count=68206.66666666667, ans=0.0 +2024-01-15 18:34:45,031 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=68206.66666666667, ans=0.2 +2024-01-15 18:34:46,055 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=68206.66666666667, ans=0.1 +2024-01-15 18:34:47,350 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=68206.66666666667, ans=0.0 +2024-01-15 18:34:47,425 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=68206.66666666667, ans=0.125 +2024-01-15 18:34:47,451 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=68206.66666666667, ans=0.125 +2024-01-15 18:34:48,828 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=2.83 vs. limit=10.0 +2024-01-15 18:34:57,398 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=68240.0, ans=0.035 +2024-01-15 18:35:07,284 INFO [train.py:994] (0/2) Epoch 25, batch 250, loss[loss=0.1655, simple_loss=0.2432, pruned_loss=0.04394, over 24486.00 frames. ], tot_loss[loss=0.1583, simple_loss=0.2366, pruned_loss=0.03998, over 3445038.90 frames. 
], batch size: 181, lr: 1.64e-02, grad_scale: 32.0 +2024-01-15 18:35:11,097 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=68273.33333333333, ans=0.035 +2024-01-15 18:35:21,305 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=68306.66666666667, ans=0.125 +2024-01-15 18:35:26,122 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=68306.66666666667, ans=0.0 +2024-01-15 18:35:36,144 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=68340.0, ans=0.125 +2024-01-15 18:35:50,913 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=68373.33333333333, ans=0.1 +2024-01-15 18:35:54,464 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer2.prob, batch_count=68373.33333333333, ans=0.125 +2024-01-15 18:35:58,719 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=68406.66666666667, ans=0.125 +2024-01-15 18:36:02,251 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=68406.66666666667, ans=0.125 +2024-01-15 18:36:06,620 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 1.897e+02 2.000e+02 2.240e+02 4.389e+02, threshold=4.000e+02, percent-clipped=0.0 +2024-01-15 18:36:09,514 INFO [train.py:994] (0/2) Epoch 25, batch 300, loss[loss=0.1629, simple_loss=0.2442, pruned_loss=0.04077, over 23879.00 frames. ], tot_loss[loss=0.1582, simple_loss=0.2368, pruned_loss=0.03981, over 3749969.29 frames. ], batch size: 328, lr: 1.63e-02, grad_scale: 32.0 +2024-01-15 18:36:33,405 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=68506.66666666667, ans=0.0 +2024-01-15 18:36:43,511 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.31 vs. limit=12.0 +2024-01-15 18:36:55,772 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.62 vs. limit=15.0 +2024-01-15 18:36:56,407 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=68540.0, ans=0.0 +2024-01-15 18:37:13,004 INFO [train.py:994] (0/2) Epoch 25, batch 350, loss[loss=0.1729, simple_loss=0.25, pruned_loss=0.04796, over 24438.00 frames. ], tot_loss[loss=0.1575, simple_loss=0.236, pruned_loss=0.03953, over 3979578.73 frames. ], batch size: 170, lr: 1.63e-02, grad_scale: 16.0 +2024-01-15 18:37:30,216 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.18 vs. limit=15.0 +2024-01-15 18:37:38,527 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=68673.33333333333, ans=0.0 +2024-01-15 18:37:39,964 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.46 vs. 
limit=10.0 +2024-01-15 18:37:51,201 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=68706.66666666667, ans=0.0 +2024-01-15 18:38:03,997 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=68740.0, ans=0.0 +2024-01-15 18:38:04,006 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=68740.0, ans=0.125 +2024-01-15 18:38:13,307 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.625e+02 1.840e+02 2.010e+02 2.261e+02 4.053e+02, threshold=4.019e+02, percent-clipped=1.0 +2024-01-15 18:38:13,684 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=68773.33333333333, ans=0.125 +2024-01-15 18:38:14,547 INFO [train.py:994] (0/2) Epoch 25, batch 400, loss[loss=0.1598, simple_loss=0.2366, pruned_loss=0.04146, over 24491.00 frames. ], tot_loss[loss=0.1576, simple_loss=0.2359, pruned_loss=0.0396, over 4159145.64 frames. ], batch size: 210, lr: 1.63e-02, grad_scale: 32.0 +2024-01-15 18:38:51,139 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.46 vs. limit=15.0 +2024-01-15 18:38:57,447 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=68873.33333333333, ans=0.125 +2024-01-15 18:38:57,543 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=68873.33333333333, ans=0.2 +2024-01-15 18:39:01,019 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=68873.33333333333, ans=0.0 +2024-01-15 18:39:05,729 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=68906.66666666667, ans=0.0 +2024-01-15 18:39:06,960 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=68906.66666666667, ans=0.125 +2024-01-15 18:39:08,149 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=68906.66666666667, ans=0.125 +2024-01-15 18:39:15,922 INFO [train.py:994] (0/2) Epoch 25, batch 450, loss[loss=0.1631, simple_loss=0.2385, pruned_loss=0.04383, over 24552.00 frames. ], tot_loss[loss=0.157, simple_loss=0.2355, pruned_loss=0.03928, over 4304608.92 frames. ], batch size: 176, lr: 1.63e-02, grad_scale: 16.0 +2024-01-15 18:40:02,238 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=69040.0, ans=0.0 +2024-01-15 18:40:04,378 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=69073.33333333333, ans=0.07 +2024-01-15 18:40:06,352 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=69073.33333333333, ans=0.125 +2024-01-15 18:40:18,566 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.611e+02 1.852e+02 2.038e+02 2.316e+02 3.412e+02, threshold=4.077e+02, percent-clipped=0.0 +2024-01-15 18:40:18,594 INFO [train.py:994] (0/2) Epoch 25, batch 500, loss[loss=0.161, simple_loss=0.242, pruned_loss=0.03996, over 24505.00 frames. 
], tot_loss[loss=0.1569, simple_loss=0.2356, pruned_loss=0.03915, over 4427270.53 frames. ], batch size: 187, lr: 1.63e-02, grad_scale: 16.0 +2024-01-15 18:40:23,662 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=69106.66666666667, ans=0.07 +2024-01-15 18:40:53,942 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=69206.66666666667, ans=0.0 +2024-01-15 18:41:19,570 INFO [train.py:994] (0/2) Epoch 25, batch 550, loss[loss=0.1417, simple_loss=0.1991, pruned_loss=0.0421, over 18093.00 frames. ], tot_loss[loss=0.1566, simple_loss=0.2354, pruned_loss=0.03895, over 4515605.74 frames. ], batch size: 77, lr: 1.63e-02, grad_scale: 8.0 +2024-01-15 18:41:30,469 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=69273.33333333333, ans=0.125 +2024-01-15 18:41:34,104 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 18:41:38,148 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.78 vs. limit=10.0 +2024-01-15 18:41:42,655 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.skip_rate, batch_count=69306.66666666667, ans=0.09899494936611666 +2024-01-15 18:42:05,025 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=69373.33333333333, ans=0.0 +2024-01-15 18:42:08,606 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=69406.66666666667, ans=0.125 +2024-01-15 18:42:11,034 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=69406.66666666667, ans=0.1 +2024-01-15 18:42:22,460 INFO [train.py:994] (0/2) Epoch 25, batch 600, loss[loss=0.1595, simple_loss=0.2391, pruned_loss=0.03998, over 24392.00 frames. ], tot_loss[loss=0.1568, simple_loss=0.2355, pruned_loss=0.0391, over 4577779.79 frames. ], batch size: 275, lr: 1.62e-02, grad_scale: 8.0 +2024-01-15 18:42:23,596 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.655e+02 1.874e+02 2.040e+02 2.282e+02 3.344e+02, threshold=4.081e+02, percent-clipped=0.0 +2024-01-15 18:42:50,803 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=17.27 vs. limit=15.0 +2024-01-15 18:42:59,760 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=69540.0, ans=0.125 +2024-01-15 18:43:23,168 INFO [train.py:994] (0/2) Epoch 25, batch 650, loss[loss=0.1351, simple_loss=0.2078, pruned_loss=0.03119, over 24044.00 frames. ], tot_loss[loss=0.157, simple_loss=0.2358, pruned_loss=0.03909, over 4632467.33 frames. ], batch size: 132, lr: 1.62e-02, grad_scale: 8.0 +2024-01-15 18:43:33,512 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 18:44:03,811 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=11.02 vs. 
limit=15.0 +2024-01-15 18:44:08,103 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=69706.66666666667, ans=0.2 +2024-01-15 18:44:09,636 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.67 vs. limit=22.5 +2024-01-15 18:44:13,325 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.16 vs. limit=15.0 +2024-01-15 18:44:22,688 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass_mid.scale_min, batch_count=69740.0, ans=0.2 +2024-01-15 18:44:25,309 INFO [train.py:994] (0/2) Epoch 25, batch 700, loss[loss=0.1605, simple_loss=0.2373, pruned_loss=0.04182, over 24606.00 frames. ], tot_loss[loss=0.1573, simple_loss=0.2364, pruned_loss=0.03913, over 4682899.32 frames. ], batch size: 199, lr: 1.62e-02, grad_scale: 8.0 +2024-01-15 18:44:26,459 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.644e+02 1.989e+02 2.242e+02 2.586e+02 3.902e+02, threshold=4.483e+02, percent-clipped=0.0 +2024-01-15 18:44:42,016 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=69806.66666666667, ans=0.1 +2024-01-15 18:44:56,740 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=384, metric=19.98 vs. limit=22.5 +2024-01-15 18:45:23,669 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=69906.66666666667, ans=0.125 +2024-01-15 18:45:27,040 INFO [train.py:994] (0/2) Epoch 25, batch 750, loss[loss=0.1264, simple_loss=0.1867, pruned_loss=0.03306, over 18848.00 frames. ], tot_loss[loss=0.1566, simple_loss=0.2353, pruned_loss=0.03892, over 4697276.83 frames. ], batch size: 82, lr: 1.62e-02, grad_scale: 8.0 +2024-01-15 18:45:32,981 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=69940.0, ans=0.0 +2024-01-15 18:45:58,567 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=70006.66666666667, ans=0.0 +2024-01-15 18:46:27,544 INFO [train.py:994] (0/2) Epoch 25, batch 800, loss[loss=0.1534, simple_loss=0.2332, pruned_loss=0.03686, over 24500.00 frames. ], tot_loss[loss=0.1568, simple_loss=0.2356, pruned_loss=0.03896, over 4727592.71 frames. ], batch size: 210, lr: 1.62e-02, grad_scale: 16.0 +2024-01-15 18:46:28,647 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.641e+02 1.948e+02 2.174e+02 2.457e+02 3.780e+02, threshold=4.348e+02, percent-clipped=0.0 +2024-01-15 18:46:37,747 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=70140.0, ans=0.125 +2024-01-15 18:46:44,611 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.08 vs. 
limit=15.0 +2024-01-15 18:47:05,082 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=70206.66666666667, ans=0.125 +2024-01-15 18:47:15,936 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-25.pt +2024-01-15 18:47:38,887 INFO [train.py:994] (0/2) Epoch 26, batch 0, loss[loss=0.1524, simple_loss=0.2318, pruned_loss=0.0365, over 24449.00 frames. ], tot_loss[loss=0.1524, simple_loss=0.2318, pruned_loss=0.0365, over 24449.00 frames. ], batch size: 222, lr: 1.59e-02, grad_scale: 32.0 +2024-01-15 18:47:38,888 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 18:47:59,161 INFO [train.py:1026] (0/2) Epoch 26, validation: loss=0.1671, simple_loss=0.2515, pruned_loss=0.04137, over 1622729.00 frames. +2024-01-15 18:47:59,161 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 18:48:08,312 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=70250.0, ans=0.125 +2024-01-15 18:48:11,873 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=70283.33333333333, ans=0.0 +2024-01-15 18:48:17,794 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=70283.33333333333, ans=0.0 +2024-01-15 18:48:21,442 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=70283.33333333333, ans=0.0 +2024-01-15 18:48:21,846 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.23 vs. limit=15.0 +2024-01-15 18:48:29,858 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=70316.66666666667, ans=0.125 +2024-01-15 18:48:34,965 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=11.97 vs. limit=15.0 +2024-01-15 18:48:50,330 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 18:48:51,834 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=5.22 vs. limit=10.0 +2024-01-15 18:49:01,132 INFO [train.py:994] (0/2) Epoch 26, batch 50, loss[loss=0.1489, simple_loss=0.2299, pruned_loss=0.03395, over 24486.00 frames. ], tot_loss[loss=0.1567, simple_loss=0.2343, pruned_loss=0.03952, over 1089486.71 frames. ], batch size: 216, lr: 1.59e-02, grad_scale: 32.0 +2024-01-15 18:49:01,902 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=5.48 vs. limit=15.0 +2024-01-15 18:49:04,259 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=5.39 vs. limit=15.0 +2024-01-15 18:49:05,606 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.54 vs. 
limit=15.0 +2024-01-15 18:49:10,545 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.574e+02 1.886e+02 2.079e+02 2.344e+02 3.682e+02, threshold=4.157e+02, percent-clipped=0.0 +2024-01-15 18:49:10,826 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=70416.66666666667, ans=0.125 +2024-01-15 18:49:27,819 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=3.40 vs. limit=15.0 +2024-01-15 18:49:54,954 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.37 vs. limit=15.0 +2024-01-15 18:50:02,568 INFO [train.py:994] (0/2) Epoch 26, batch 100, loss[loss=0.1629, simple_loss=0.2384, pruned_loss=0.0437, over 24494.00 frames. ], tot_loss[loss=0.1556, simple_loss=0.2338, pruned_loss=0.03865, over 1905208.87 frames. ], batch size: 210, lr: 1.58e-02, grad_scale: 32.0 +2024-01-15 18:50:04,899 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.25 vs. limit=15.0 +2024-01-15 18:50:27,807 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=70650.0, ans=0.125 +2024-01-15 18:50:36,349 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=70650.0, ans=0.2 +2024-01-15 18:50:48,267 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=70683.33333333333, ans=0.125 +2024-01-15 18:50:54,060 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=70716.66666666667, ans=0.1 +2024-01-15 18:51:05,401 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=4.71 vs. limit=10.0 +2024-01-15 18:51:05,739 INFO [train.py:994] (0/2) Epoch 26, batch 150, loss[loss=0.147, simple_loss=0.2299, pruned_loss=0.03201, over 24310.00 frames. ], tot_loss[loss=0.1552, simple_loss=0.2338, pruned_loss=0.03825, over 2560697.56 frames. ], batch size: 285, lr: 1.58e-02, grad_scale: 32.0 +2024-01-15 18:51:10,822 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=70750.0, ans=0.0 +2024-01-15 18:51:15,097 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 1.828e+02 2.027e+02 2.407e+02 4.770e+02, threshold=4.053e+02, percent-clipped=1.0 +2024-01-15 18:51:34,465 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=70816.66666666667, ans=0.0 +2024-01-15 18:51:41,249 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=70850.0, ans=0.2 +2024-01-15 18:51:56,522 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=70883.33333333333, ans=0.125 +2024-01-15 18:52:06,885 INFO [train.py:994] (0/2) Epoch 26, batch 200, loss[loss=0.1637, simple_loss=0.2397, pruned_loss=0.04379, over 24446.00 frames. ], tot_loss[loss=0.1559, simple_loss=0.2345, pruned_loss=0.03859, over 3056449.57 frames. 
], batch size: 170, lr: 1.58e-02, grad_scale: 32.0 +2024-01-15 18:52:10,818 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=70916.66666666667, ans=0.125 +2024-01-15 18:52:28,713 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=70950.0, ans=0.0 +2024-01-15 18:52:44,406 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=71016.66666666667, ans=0.125 +2024-01-15 18:52:46,510 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.78 vs. limit=15.0 +2024-01-15 18:52:50,812 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=71016.66666666667, ans=0.125 +2024-01-15 18:52:56,694 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 18:53:07,958 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=71083.33333333333, ans=0.125 +2024-01-15 18:53:08,812 INFO [train.py:994] (0/2) Epoch 26, batch 250, loss[loss=0.138, simple_loss=0.2169, pruned_loss=0.0296, over 23941.00 frames. ], tot_loss[loss=0.1553, simple_loss=0.2341, pruned_loss=0.03832, over 3447405.75 frames. ], batch size: 131, lr: 1.58e-02, grad_scale: 32.0 +2024-01-15 18:53:14,427 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.min_abs, batch_count=71083.33333333333, ans=0.5 +2024-01-15 18:53:18,920 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.661e+02 1.897e+02 2.050e+02 2.398e+02 3.623e+02, threshold=4.101e+02, percent-clipped=0.0 +2024-01-15 18:53:46,084 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=71183.33333333333, ans=0.05 +2024-01-15 18:53:47,266 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.attention_skip_rate, batch_count=71183.33333333333, ans=0.0 +2024-01-15 18:53:48,400 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=71183.33333333333, ans=0.125 +2024-01-15 18:54:09,653 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=71250.0, ans=0.125 +2024-01-15 18:54:10,536 INFO [train.py:994] (0/2) Epoch 26, batch 300, loss[loss=0.1651, simple_loss=0.2477, pruned_loss=0.04127, over 23885.00 frames. ], tot_loss[loss=0.1553, simple_loss=0.2341, pruned_loss=0.03823, over 3753357.63 frames. ], batch size: 328, lr: 1.58e-02, grad_scale: 32.0 +2024-01-15 18:54:12,241 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=16.20 vs. 
limit=22.5 +2024-01-15 18:54:57,201 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=71350.0, ans=0.125 +2024-01-15 18:54:57,270 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=71350.0, ans=0.125 +2024-01-15 18:55:00,699 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=71383.33333333333, ans=0.125 +2024-01-15 18:55:02,921 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=71383.33333333333, ans=0.125 +2024-01-15 18:55:11,956 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.prob, batch_count=71416.66666666667, ans=0.125 +2024-01-15 18:55:12,829 INFO [train.py:994] (0/2) Epoch 26, batch 350, loss[loss=0.1577, simple_loss=0.2441, pruned_loss=0.03565, over 24463.00 frames. ], tot_loss[loss=0.1556, simple_loss=0.2345, pruned_loss=0.03837, over 3991547.04 frames. ], batch size: 267, lr: 1.58e-02, grad_scale: 32.0 +2024-01-15 18:55:19,664 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=71416.66666666667, ans=0.07 +2024-01-15 18:55:22,876 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.564e+02 1.903e+02 2.138e+02 2.406e+02 3.843e+02, threshold=4.276e+02, percent-clipped=0.0 +2024-01-15 18:55:35,661 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=71450.0, ans=0.035 +2024-01-15 18:55:43,964 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=71483.33333333333, ans=0.0 +2024-01-15 18:56:08,481 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=71550.0, ans=0.1 +2024-01-15 18:56:14,683 INFO [train.py:994] (0/2) Epoch 26, batch 400, loss[loss=0.1568, simple_loss=0.2381, pruned_loss=0.03778, over 24426.00 frames. ], tot_loss[loss=0.1554, simple_loss=0.2342, pruned_loss=0.03831, over 4170957.87 frames. ], batch size: 258, lr: 1.57e-02, grad_scale: 32.0 +2024-01-15 18:56:14,936 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=71583.33333333333, ans=0.2 +2024-01-15 18:56:18,567 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=71583.33333333333, ans=0.04949747468305833 +2024-01-15 18:56:50,800 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.70 vs. limit=6.0 +2024-01-15 18:56:53,129 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=71683.33333333333, ans=0.2 +2024-01-15 18:57:15,695 INFO [train.py:994] (0/2) Epoch 26, batch 450, loss[loss=0.1718, simple_loss=0.2525, pruned_loss=0.04562, over 23894.00 frames. ], tot_loss[loss=0.1552, simple_loss=0.2341, pruned_loss=0.03819, over 4306998.87 frames. ], batch size: 328, lr: 1.57e-02, grad_scale: 32.0 +2024-01-15 18:57:16,573 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.78 vs. 
limit=15.0 +2024-01-15 18:57:25,864 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.515e+02 1.875e+02 2.085e+02 2.426e+02 4.105e+02, threshold=4.170e+02, percent-clipped=0.0 +2024-01-15 18:58:11,327 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=71883.33333333333, ans=0.125 +2024-01-15 18:58:18,237 INFO [train.py:994] (0/2) Epoch 26, batch 500, loss[loss=0.1643, simple_loss=0.2365, pruned_loss=0.04604, over 24361.00 frames. ], tot_loss[loss=0.1552, simple_loss=0.234, pruned_loss=0.0382, over 4421927.51 frames. ], batch size: 153, lr: 1.57e-02, grad_scale: 32.0 +2024-01-15 18:58:20,170 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.98 vs. limit=6.0 +2024-01-15 18:58:42,532 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=71983.33333333333, ans=0.125 +2024-01-15 18:58:55,152 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=72016.66666666667, ans=0.0 +2024-01-15 18:59:20,815 INFO [train.py:994] (0/2) Epoch 26, batch 550, loss[loss=0.1544, simple_loss=0.2286, pruned_loss=0.0401, over 24565.00 frames. ], tot_loss[loss=0.1545, simple_loss=0.2332, pruned_loss=0.03791, over 4492448.41 frames. ], batch size: 176, lr: 1.57e-02, grad_scale: 32.0 +2024-01-15 18:59:22,285 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 18:59:30,773 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.560e+02 1.885e+02 2.122e+02 2.518e+02 4.046e+02, threshold=4.244e+02, percent-clipped=0.0 +2024-01-15 18:59:33,892 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=17.25 vs. limit=22.5 +2024-01-15 18:59:47,166 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=72150.0, ans=0.5 +2024-01-15 18:59:58,285 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=72183.33333333333, ans=0.2 +2024-01-15 19:00:23,056 INFO [train.py:994] (0/2) Epoch 26, batch 600, loss[loss=0.1443, simple_loss=0.2253, pruned_loss=0.03168, over 24307.00 frames. ], tot_loss[loss=0.155, simple_loss=0.2338, pruned_loss=0.03815, over 4571533.36 frames. ], batch size: 285, lr: 1.57e-02, grad_scale: 32.0 +2024-01-15 19:00:24,606 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=72250.0, ans=0.1 +2024-01-15 19:00:42,653 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.68 vs. limit=10.0 +2024-01-15 19:01:11,753 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=72383.33333333333, ans=0.125 +2024-01-15 19:01:24,241 INFO [train.py:994] (0/2) Epoch 26, batch 650, loss[loss=0.1599, simple_loss=0.243, pruned_loss=0.03846, over 24389.00 frames. ], tot_loss[loss=0.1546, simple_loss=0.2335, pruned_loss=0.03789, over 4618818.48 frames. 
], batch size: 275, lr: 1.57e-02, grad_scale: 32.0 +2024-01-15 19:01:34,941 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.586e+02 1.987e+02 2.293e+02 2.786e+02 4.279e+02, threshold=4.586e+02, percent-clipped=3.0 +2024-01-15 19:01:35,230 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=72416.66666666667, ans=0.125 +2024-01-15 19:01:45,827 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=72450.0, ans=0.125 +2024-01-15 19:02:26,166 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=72583.33333333333, ans=0.1 +2024-01-15 19:02:27,049 INFO [train.py:994] (0/2) Epoch 26, batch 700, loss[loss=0.1439, simple_loss=0.2028, pruned_loss=0.04251, over 18656.00 frames. ], tot_loss[loss=0.1551, simple_loss=0.2339, pruned_loss=0.03811, over 4649156.25 frames. ], batch size: 80, lr: 1.56e-02, grad_scale: 32.0 +2024-01-15 19:02:31,904 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=72583.33333333333, ans=0.015 +2024-01-15 19:02:34,415 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=72583.33333333333, ans=0.0 +2024-01-15 19:02:38,539 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.01 vs. limit=15.0 +2024-01-15 19:02:51,250 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=72650.0, ans=0.1 +2024-01-15 19:03:11,312 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.17 vs. limit=15.0 +2024-01-15 19:03:29,158 INFO [train.py:994] (0/2) Epoch 26, batch 750, loss[loss=0.1461, simple_loss=0.2288, pruned_loss=0.03172, over 24494.00 frames. ], tot_loss[loss=0.1545, simple_loss=0.2335, pruned_loss=0.03774, over 4691414.77 frames. 
], batch size: 187, lr: 1.56e-02, grad_scale: 32.0 +2024-01-15 19:03:29,484 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=72750.0, ans=0.09899494936611666 +2024-01-15 19:03:35,996 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=72750.0, ans=0.0 +2024-01-15 19:03:39,721 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.458e+02 1.874e+02 2.013e+02 2.241e+02 3.260e+02, threshold=4.026e+02, percent-clipped=0.0 +2024-01-15 19:03:43,653 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=72783.33333333333, ans=0.125 +2024-01-15 19:03:44,621 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer2.prob, batch_count=72783.33333333333, ans=0.125 +2024-01-15 19:03:53,006 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=72816.66666666667, ans=0.125 +2024-01-15 19:03:57,667 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=72816.66666666667, ans=0.125 +2024-01-15 19:04:01,812 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=72816.66666666667, ans=0.025 +2024-01-15 19:04:02,874 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=72816.66666666667, ans=0.0 +2024-01-15 19:04:29,126 INFO [train.py:994] (0/2) Epoch 26, batch 800, loss[loss=0.1533, simple_loss=0.2336, pruned_loss=0.03645, over 24504.00 frames. ], tot_loss[loss=0.1546, simple_loss=0.2336, pruned_loss=0.03783, over 4724267.17 frames. ], batch size: 187, lr: 1.56e-02, grad_scale: 32.0 +2024-01-15 19:04:37,109 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=72916.66666666667, ans=0.125 +2024-01-15 19:04:43,315 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 19:05:05,137 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=73016.66666666667, ans=0.125 +2024-01-15 19:05:18,070 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-26.pt +2024-01-15 19:05:42,607 INFO [train.py:994] (0/2) Epoch 27, batch 0, loss[loss=0.1629, simple_loss=0.2386, pruned_loss=0.0436, over 24389.00 frames. ], tot_loss[loss=0.1629, simple_loss=0.2386, pruned_loss=0.0436, over 24389.00 frames. ], batch size: 159, lr: 1.53e-02, grad_scale: 32.0 +2024-01-15 19:05:42,608 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 19:05:56,740 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.0.layers.1.self_attn_weights, attn_weights_entropy = tensor([5.4425, 5.2070, 5.1376, 5.1091], device='cuda:0') +2024-01-15 19:06:03,129 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.1.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([4.7109, 4.0274, 4.4398, 3.8789], device='cuda:0') +2024-01-15 19:06:03,773 INFO [train.py:1026] (0/2) Epoch 27, validation: loss=0.1674, simple_loss=0.2515, pruned_loss=0.04165, over 1622729.00 frames. 
+2024-01-15 19:06:03,774 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 19:06:05,341 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=73060.0, ans=0.125 +2024-01-15 19:06:13,477 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.41 vs. limit=15.0 +2024-01-15 19:06:19,566 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.48 vs. limit=10.0 +2024-01-15 19:06:22,301 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.442e+02 1.897e+02 2.083e+02 2.532e+02 4.087e+02, threshold=4.166e+02, percent-clipped=1.0 +2024-01-15 19:06:25,185 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.45 vs. limit=15.0 +2024-01-15 19:06:37,047 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=73126.66666666667, ans=0.125 +2024-01-15 19:06:38,534 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.79 vs. limit=15.0 +2024-01-15 19:06:48,428 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=22.24 vs. limit=22.5 +2024-01-15 19:07:06,926 INFO [train.py:994] (0/2) Epoch 27, batch 50, loss[loss=0.1576, simple_loss=0.2412, pruned_loss=0.03706, over 22351.00 frames. ], tot_loss[loss=0.1533, simple_loss=0.2315, pruned_loss=0.03755, over 1082425.96 frames. ], batch size: 357, lr: 1.53e-02, grad_scale: 32.0 +2024-01-15 19:07:11,162 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=3.43 vs. limit=12.0 +2024-01-15 19:07:13,304 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=73226.66666666667, ans=0.125 +2024-01-15 19:07:26,425 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=73260.0, ans=0.0 +2024-01-15 19:07:27,630 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=73260.0, ans=0.125 +2024-01-15 19:07:35,441 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=19.74 vs. limit=22.5 +2024-01-15 19:07:47,936 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=73326.66666666667, ans=0.035 +2024-01-15 19:08:10,273 INFO [train.py:994] (0/2) Epoch 27, batch 100, loss[loss=0.1584, simple_loss=0.2397, pruned_loss=0.03851, over 24496.00 frames. ], tot_loss[loss=0.1527, simple_loss=0.2311, pruned_loss=0.03719, over 1912462.14 frames. 
], batch size: 181, lr: 1.53e-02, grad_scale: 32.0 +2024-01-15 19:08:19,405 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=73393.33333333333, ans=0.1 +2024-01-15 19:08:28,683 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.568e+02 1.827e+02 2.038e+02 2.394e+02 3.395e+02, threshold=4.076e+02, percent-clipped=0.0 +2024-01-15 19:08:33,718 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=73460.0, ans=0.05 +2024-01-15 19:08:34,910 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=73460.0, ans=0.0 +2024-01-15 19:08:47,055 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=73493.33333333333, ans=0.0 +2024-01-15 19:08:49,556 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=73493.33333333333, ans=0.1 +2024-01-15 19:08:53,658 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn2.whiten, num_groups=1, num_channels=192, metric=13.37 vs. limit=22.5 +2024-01-15 19:09:11,689 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=73560.0, ans=0.0 +2024-01-15 19:09:12,656 INFO [train.py:994] (0/2) Epoch 27, batch 150, loss[loss=0.1612, simple_loss=0.2431, pruned_loss=0.03962, over 24554.00 frames. ], tot_loss[loss=0.1529, simple_loss=0.2316, pruned_loss=0.03711, over 2552997.52 frames. ], batch size: 176, lr: 1.53e-02, grad_scale: 32.0 +2024-01-15 19:09:14,132 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=73560.0, ans=0.09899494936611666 +2024-01-15 19:09:24,892 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=2.88 vs. limit=10.0 +2024-01-15 19:09:45,433 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=73626.66666666667, ans=0.09899494936611666 +2024-01-15 19:10:04,550 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=73693.33333333333, ans=10.0 +2024-01-15 19:10:13,764 INFO [train.py:994] (0/2) Epoch 27, batch 200, loss[loss=0.1379, simple_loss=0.2193, pruned_loss=0.02829, over 24306.00 frames. ], tot_loss[loss=0.1521, simple_loss=0.2309, pruned_loss=0.03665, over 3053092.23 frames. ], batch size: 147, lr: 1.53e-02, grad_scale: 32.0 +2024-01-15 19:10:32,947 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.480e+02 1.764e+02 1.943e+02 2.218e+02 3.609e+02, threshold=3.886e+02, percent-clipped=0.0 +2024-01-15 19:10:37,027 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.45 vs. limit=10.0 +2024-01-15 19:11:00,452 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=73826.66666666667, ans=10.0 +2024-01-15 19:11:16,884 INFO [train.py:994] (0/2) Epoch 27, batch 250, loss[loss=0.1347, simple_loss=0.2084, pruned_loss=0.03052, over 23594.00 frames. 
], tot_loss[loss=0.1526, simple_loss=0.2316, pruned_loss=0.03674, over 3445570.94 frames. ], batch size: 119, lr: 1.53e-02, grad_scale: 32.0 +2024-01-15 19:12:09,839 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=74026.66666666667, ans=0.125 +2024-01-15 19:12:13,372 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=74026.66666666667, ans=0.95 +2024-01-15 19:12:19,137 INFO [train.py:994] (0/2) Epoch 27, batch 300, loss[loss=0.1542, simple_loss=0.2316, pruned_loss=0.03843, over 24299.00 frames. ], tot_loss[loss=0.1526, simple_loss=0.2317, pruned_loss=0.0368, over 3750895.68 frames. ], batch size: 147, lr: 1.52e-02, grad_scale: 32.0 +2024-01-15 19:12:21,825 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=74060.0, ans=0.125 +2024-01-15 19:12:38,384 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.487e+02 1.848e+02 2.123e+02 2.511e+02 4.010e+02, threshold=4.247e+02, percent-clipped=2.0 +2024-01-15 19:12:54,226 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=74126.66666666667, ans=0.1 +2024-01-15 19:12:59,603 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=74160.0, ans=0.125 +2024-01-15 19:13:14,397 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=74193.33333333333, ans=0.125 +2024-01-15 19:13:21,949 INFO [train.py:994] (0/2) Epoch 27, batch 350, loss[loss=0.1543, simple_loss=0.2358, pruned_loss=0.03645, over 24529.00 frames. ], tot_loss[loss=0.1528, simple_loss=0.232, pruned_loss=0.03676, over 3984880.03 frames. ], batch size: 193, lr: 1.52e-02, grad_scale: 32.0 +2024-01-15 19:13:27,914 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=74226.66666666667, ans=0.07 +2024-01-15 19:13:28,434 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=7.04 vs. limit=15.0 +2024-01-15 19:13:52,814 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=74293.33333333333, ans=0.0 +2024-01-15 19:14:05,743 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1.whitening_limit, batch_count=74326.66666666667, ans=10.0 +2024-01-15 19:14:22,135 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=74393.33333333333, ans=0.1 +2024-01-15 19:14:23,094 INFO [train.py:994] (0/2) Epoch 27, batch 400, loss[loss=0.1322, simple_loss=0.2082, pruned_loss=0.02813, over 23969.00 frames. ], tot_loss[loss=0.1524, simple_loss=0.2317, pruned_loss=0.03658, over 4175016.87 frames. 
], batch size: 131, lr: 1.52e-02, grad_scale: 32.0 +2024-01-15 19:14:30,049 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=74393.33333333333, ans=10.0 +2024-01-15 19:14:43,691 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.495e+02 1.874e+02 2.027e+02 2.380e+02 3.576e+02, threshold=4.054e+02, percent-clipped=0.0 +2024-01-15 19:15:03,270 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=74493.33333333333, ans=0.0 +2024-01-15 19:15:04,378 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=74493.33333333333, ans=0.125 +2024-01-15 19:15:26,517 INFO [train.py:994] (0/2) Epoch 27, batch 450, loss[loss=0.1411, simple_loss=0.2215, pruned_loss=0.0304, over 24445.00 frames. ], tot_loss[loss=0.1521, simple_loss=0.2312, pruned_loss=0.03646, over 4311283.67 frames. ], batch size: 250, lr: 1.52e-02, grad_scale: 16.0 +2024-01-15 19:15:29,142 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.attention_skip_rate, batch_count=74560.0, ans=0.0 +2024-01-15 19:15:36,257 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=74560.0, ans=0.125 +2024-01-15 19:16:13,870 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.66 vs. limit=6.0 +2024-01-15 19:16:24,419 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=6.27 vs. limit=15.0 +2024-01-15 19:16:28,373 INFO [train.py:994] (0/2) Epoch 27, batch 500, loss[loss=0.163, simple_loss=0.2484, pruned_loss=0.03879, over 22339.00 frames. ], tot_loss[loss=0.1522, simple_loss=0.2314, pruned_loss=0.03651, over 4422062.54 frames. ], batch size: 357, lr: 1.52e-02, grad_scale: 16.0 +2024-01-15 19:16:38,506 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=74726.66666666667, ans=0.0 +2024-01-15 19:16:49,345 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.619e+02 1.903e+02 2.137e+02 2.532e+02 4.096e+02, threshold=4.275e+02, percent-clipped=1.0 +2024-01-15 19:17:00,916 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=74793.33333333333, ans=0.1 +2024-01-15 19:17:00,951 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=74793.33333333333, ans=0.125 +2024-01-15 19:17:00,953 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=74793.33333333333, ans=0.2 +2024-01-15 19:17:14,806 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=12.79 vs. 
limit=15.0 +2024-01-15 19:17:22,529 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=74860.0, ans=0.125 +2024-01-15 19:17:24,730 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=74860.0, ans=0.125 +2024-01-15 19:17:28,226 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.63 vs. limit=6.0 +2024-01-15 19:17:29,622 INFO [train.py:994] (0/2) Epoch 27, batch 550, loss[loss=0.1391, simple_loss=0.2144, pruned_loss=0.03189, over 24183.00 frames. ], tot_loss[loss=0.1521, simple_loss=0.2313, pruned_loss=0.0365, over 4498290.22 frames. ], batch size: 140, lr: 1.52e-02, grad_scale: 16.0 +2024-01-15 19:17:45,182 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=5.30 vs. limit=15.0 +2024-01-15 19:18:01,133 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=74960.0, ans=0.0 +2024-01-15 19:18:05,871 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 19:18:23,457 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=75026.66666666667, ans=0.125 +2024-01-15 19:18:31,959 INFO [train.py:994] (0/2) Epoch 27, batch 600, loss[loss=0.1638, simple_loss=0.2477, pruned_loss=0.03996, over 23826.00 frames. ], tot_loss[loss=0.1523, simple_loss=0.2315, pruned_loss=0.03654, over 4561025.48 frames. ], batch size: 328, lr: 1.52e-02, grad_scale: 16.0 +2024-01-15 19:18:42,796 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=75093.33333333333, ans=0.0 +2024-01-15 19:18:53,252 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 1.823e+02 2.003e+02 2.203e+02 3.481e+02, threshold=4.005e+02, percent-clipped=0.0 +2024-01-15 19:19:03,248 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=75126.66666666667, ans=0.2 +2024-01-15 19:19:13,213 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=16.70 vs. limit=22.5 +2024-01-15 19:19:25,021 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.70 vs. limit=6.0 +2024-01-15 19:19:33,523 INFO [train.py:994] (0/2) Epoch 27, batch 650, loss[loss=0.1434, simple_loss=0.2226, pruned_loss=0.0321, over 24425.00 frames. ], tot_loss[loss=0.1519, simple_loss=0.231, pruned_loss=0.03644, over 4609764.19 frames. ], batch size: 258, lr: 1.51e-02, grad_scale: 8.0 +2024-01-15 19:19:45,547 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=75260.0, ans=0.0 +2024-01-15 19:19:52,982 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=7.67 vs. 
limit=15.0 +2024-01-15 19:20:09,317 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=75293.33333333333, ans=0.125 +2024-01-15 19:20:10,869 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=6.18 vs. limit=15.0 +2024-01-15 19:20:15,346 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.prob, batch_count=75326.66666666667, ans=0.125 +2024-01-15 19:20:15,353 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=75326.66666666667, ans=0.125 +2024-01-15 19:20:18,849 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=75326.66666666667, ans=0.125 +2024-01-15 19:20:27,223 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=75360.0, ans=0.125 +2024-01-15 19:20:36,208 INFO [train.py:994] (0/2) Epoch 27, batch 700, loss[loss=0.1199, simple_loss=0.1994, pruned_loss=0.02021, over 23922.00 frames. ], tot_loss[loss=0.1513, simple_loss=0.2303, pruned_loss=0.03618, over 4653844.84 frames. ], batch size: 131, lr: 1.51e-02, grad_scale: 8.0 +2024-01-15 19:20:58,459 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.538e+02 1.855e+02 2.007e+02 2.333e+02 4.096e+02, threshold=4.014e+02, percent-clipped=1.0 +2024-01-15 19:21:16,102 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=10.09 vs. limit=15.0 +2024-01-15 19:21:32,118 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=75526.66666666667, ans=0.1 +2024-01-15 19:21:36,639 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=75560.0, ans=0.1 +2024-01-15 19:21:37,759 INFO [train.py:994] (0/2) Epoch 27, batch 750, loss[loss=0.1378, simple_loss=0.1976, pruned_loss=0.03895, over 17245.00 frames. ], tot_loss[loss=0.1516, simple_loss=0.2304, pruned_loss=0.03637, over 4687466.42 frames. ], batch size: 74, lr: 1.51e-02, grad_scale: 4.0 +2024-01-15 19:21:47,222 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=14.00 vs. limit=22.5 +2024-01-15 19:22:12,745 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=75626.66666666667, ans=0.0 +2024-01-15 19:22:14,056 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.71 vs. limit=15.0 +2024-01-15 19:22:18,290 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=75660.0, ans=0.05 +2024-01-15 19:22:37,644 INFO [train.py:994] (0/2) Epoch 27, batch 800, loss[loss=0.1576, simple_loss=0.238, pruned_loss=0.03855, over 24534.00 frames. ], tot_loss[loss=0.1515, simple_loss=0.2303, pruned_loss=0.03637, over 4706517.12 frames. 
], batch size: 243, lr: 1.51e-02, grad_scale: 8.0 +2024-01-15 19:22:58,369 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 19:22:59,180 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.516e+02 1.822e+02 1.929e+02 2.256e+02 3.473e+02, threshold=3.858e+02, percent-clipped=0.0 +2024-01-15 19:23:12,140 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.prob, batch_count=75826.66666666667, ans=0.125 +2024-01-15 19:23:20,765 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.max_abs, batch_count=75826.66666666667, ans=10.0 +2024-01-15 19:23:26,049 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-27.pt +2024-01-15 19:23:48,347 INFO [train.py:994] (0/2) Epoch 28, batch 0, loss[loss=0.1572, simple_loss=0.2364, pruned_loss=0.03897, over 24555.00 frames. ], tot_loss[loss=0.1572, simple_loss=0.2364, pruned_loss=0.03897, over 24555.00 frames. ], batch size: 176, lr: 1.48e-02, grad_scale: 16.0 +2024-01-15 19:23:48,348 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 19:24:08,992 INFO [train.py:1026] (0/2) Epoch 28, validation: loss=0.1682, simple_loss=0.2519, pruned_loss=0.04225, over 1622729.00 frames. +2024-01-15 19:24:08,993 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 19:24:31,520 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.67 vs. limit=15.0 +2024-01-15 19:24:37,997 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=75936.66666666667, ans=0.2 +2024-01-15 19:24:38,015 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=75936.66666666667, ans=0.125 +2024-01-15 19:24:46,261 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=75970.0, ans=0.125 +2024-01-15 19:24:59,291 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=76003.33333333333, ans=0.125 +2024-01-15 19:25:11,357 INFO [train.py:994] (0/2) Epoch 28, batch 50, loss[loss=0.1678, simple_loss=0.2501, pruned_loss=0.04272, over 24535.00 frames. ], tot_loss[loss=0.1531, simple_loss=0.232, pruned_loss=0.0371, over 1095530.17 frames. ], batch size: 193, lr: 1.48e-02, grad_scale: 16.0 +2024-01-15 19:25:19,370 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=76036.66666666667, ans=0.0 +2024-01-15 19:25:41,365 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.81 vs. limit=15.0 +2024-01-15 19:25:43,831 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.582e+02 1.909e+02 2.094e+02 2.541e+02 4.142e+02, threshold=4.189e+02, percent-clipped=2.0 +2024-01-15 19:26:07,462 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.38 vs. 
limit=15.0 +2024-01-15 19:26:07,841 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.conv_module2.whiten, num_groups=1, num_channels=192, metric=6.95 vs. limit=15.0 +2024-01-15 19:26:12,314 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=76203.33333333333, ans=0.0 +2024-01-15 19:26:12,394 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=76203.33333333333, ans=0.2 +2024-01-15 19:26:13,246 INFO [train.py:994] (0/2) Epoch 28, batch 100, loss[loss=0.1289, simple_loss=0.2008, pruned_loss=0.02846, over 23587.00 frames. ], tot_loss[loss=0.1518, simple_loss=0.2309, pruned_loss=0.03639, over 1916870.17 frames. ], batch size: 119, lr: 1.48e-02, grad_scale: 16.0 +2024-01-15 19:26:34,695 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.77 vs. limit=15.0 +2024-01-15 19:26:41,956 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=76270.0, ans=0.1 +2024-01-15 19:26:52,176 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=76303.33333333333, ans=0.0 +2024-01-15 19:26:59,628 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.min_positive, batch_count=76303.33333333333, ans=0.05 +2024-01-15 19:27:15,448 INFO [train.py:994] (0/2) Epoch 28, batch 150, loss[loss=0.1541, simple_loss=0.2358, pruned_loss=0.03622, over 22385.00 frames. ], tot_loss[loss=0.1515, simple_loss=0.2308, pruned_loss=0.0361, over 2566365.29 frames. ], batch size: 357, lr: 1.48e-02, grad_scale: 16.0 +2024-01-15 19:27:19,424 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=76370.0, ans=0.0 +2024-01-15 19:27:32,525 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.63 vs. limit=22.5 +2024-01-15 19:27:40,835 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=76436.66666666667, ans=0.0 +2024-01-15 19:27:48,902 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 1.798e+02 1.936e+02 2.219e+02 3.188e+02, threshold=3.872e+02, percent-clipped=0.0 +2024-01-15 19:27:56,889 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=76470.0, ans=0.0 +2024-01-15 19:28:04,723 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 19:28:15,854 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer1.prob, batch_count=76503.33333333333, ans=0.125 +2024-01-15 19:28:17,851 INFO [train.py:994] (0/2) Epoch 28, batch 200, loss[loss=0.1389, simple_loss=0.2073, pruned_loss=0.03528, over 23487.00 frames. ], tot_loss[loss=0.1517, simple_loss=0.2311, pruned_loss=0.03612, over 3072608.11 frames. 
], batch size: 119, lr: 1.48e-02, grad_scale: 8.0 +2024-01-15 19:28:18,152 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 19:28:26,391 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=76536.66666666667, ans=0.1 +2024-01-15 19:28:30,502 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=76570.0, ans=0.125 +2024-01-15 19:28:33,148 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.16 vs. limit=10.0 +2024-01-15 19:28:37,492 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=76570.0, ans=0.1 +2024-01-15 19:29:19,034 INFO [train.py:994] (0/2) Epoch 28, batch 250, loss[loss=0.1266, simple_loss=0.2027, pruned_loss=0.0253, over 23577.00 frames. ], tot_loss[loss=0.152, simple_loss=0.2313, pruned_loss=0.03635, over 3457849.90 frames. ], batch size: 119, lr: 1.48e-02, grad_scale: 8.0 +2024-01-15 19:29:24,861 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=76703.33333333333, ans=0.125 +2024-01-15 19:29:25,852 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.2.prob, batch_count=76703.33333333333, ans=0.125 +2024-01-15 19:29:31,265 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass.scale_min, batch_count=76736.66666666667, ans=0.2 +2024-01-15 19:29:51,517 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.min_positive, batch_count=76770.0, ans=0.05 +2024-01-15 19:29:52,418 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 1.863e+02 2.047e+02 2.419e+02 3.522e+02, threshold=4.093e+02, percent-clipped=0.0 +2024-01-15 19:29:53,856 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=76770.0, ans=0.125 +2024-01-15 19:29:55,839 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.83 vs. limit=10.0 +2024-01-15 19:30:00,775 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.41 vs. limit=15.0 +2024-01-15 19:30:07,461 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=76836.66666666667, ans=0.0 +2024-01-15 19:30:20,746 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=76870.0, ans=0.1 +2024-01-15 19:30:21,174 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.79 vs. limit=15.0 +2024-01-15 19:30:21,647 INFO [train.py:994] (0/2) Epoch 28, batch 300, loss[loss=0.1541, simple_loss=0.234, pruned_loss=0.03705, over 24523.00 frames. ], tot_loss[loss=0.1513, simple_loss=0.2304, pruned_loss=0.03607, over 3751831.89 frames. 
], batch size: 193, lr: 1.47e-02, grad_scale: 8.0 +2024-01-15 19:30:30,061 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer1.prob, batch_count=76870.0, ans=0.125 +2024-01-15 19:30:37,078 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=76903.33333333333, ans=0.1 +2024-01-15 19:31:16,500 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=77003.33333333333, ans=0.125 +2024-01-15 19:31:23,234 INFO [train.py:994] (0/2) Epoch 28, batch 350, loss[loss=0.153, simple_loss=0.2344, pruned_loss=0.03582, over 24593.00 frames. ], tot_loss[loss=0.151, simple_loss=0.2302, pruned_loss=0.0359, over 3976869.44 frames. ], batch size: 199, lr: 1.47e-02, grad_scale: 8.0 +2024-01-15 19:31:26,335 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.70 vs. limit=15.0 +2024-01-15 19:31:48,492 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=77103.33333333333, ans=0.2 +2024-01-15 19:31:52,063 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=77103.33333333333, ans=0.125 +2024-01-15 19:31:56,320 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.491e+02 1.863e+02 2.087e+02 2.688e+02 4.509e+02, threshold=4.173e+02, percent-clipped=2.0 +2024-01-15 19:32:05,432 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=14.38 vs. limit=15.0 +2024-01-15 19:32:13,281 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=77170.0, ans=0.125 +2024-01-15 19:32:24,690 INFO [train.py:994] (0/2) Epoch 28, batch 400, loss[loss=0.1469, simple_loss=0.2295, pruned_loss=0.03217, over 23870.00 frames. ], tot_loss[loss=0.1506, simple_loss=0.2297, pruned_loss=0.03578, over 4139846.06 frames. 
], batch size: 328, lr: 1.47e-02, grad_scale: 16.0 +2024-01-15 19:32:29,305 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.min_abs, batch_count=77203.33333333333, ans=0.5 +2024-01-15 19:32:38,042 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=77236.66666666667, ans=0.1 +2024-01-15 19:32:40,393 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=77236.66666666667, ans=0.125 +2024-01-15 19:32:49,922 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=77270.0, ans=0.2 +2024-01-15 19:32:54,688 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=77270.0, ans=0.125 +2024-01-15 19:33:05,421 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.prob, batch_count=77303.33333333333, ans=0.125 +2024-01-15 19:33:06,508 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=77303.33333333333, ans=0.1 +2024-01-15 19:33:11,065 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=77303.33333333333, ans=0.2 +2024-01-15 19:33:17,255 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.73 vs. limit=22.5 +2024-01-15 19:33:27,954 INFO [train.py:994] (0/2) Epoch 28, batch 450, loss[loss=0.1437, simple_loss=0.2281, pruned_loss=0.02967, over 24437.00 frames. ], tot_loss[loss=0.1505, simple_loss=0.2299, pruned_loss=0.03554, over 4282950.42 frames. ], batch size: 250, lr: 1.47e-02, grad_scale: 16.0 +2024-01-15 19:33:32,865 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=77370.0, ans=0.125 +2024-01-15 19:33:39,215 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.53 vs. limit=15.0 +2024-01-15 19:33:55,664 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=77436.66666666667, ans=0.0 +2024-01-15 19:34:01,319 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.397e+02 1.800e+02 2.039e+02 2.585e+02 3.637e+02, threshold=4.077e+02, percent-clipped=0.0 +2024-01-15 19:34:05,163 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff2_skip_rate, batch_count=77470.0, ans=0.0 +2024-01-15 19:34:06,374 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=77470.0, ans=0.125 +2024-01-15 19:34:10,641 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=77470.0, ans=0.125 +2024-01-15 19:34:30,168 INFO [train.py:994] (0/2) Epoch 28, batch 500, loss[loss=0.1485, simple_loss=0.2263, pruned_loss=0.03535, over 24412.00 frames. ], tot_loss[loss=0.1503, simple_loss=0.2296, pruned_loss=0.03548, over 4396069.69 frames. 
], batch size: 159, lr: 1.47e-02, grad_scale: 16.0 +2024-01-15 19:34:36,853 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=77536.66666666667, ans=0.125 +2024-01-15 19:34:44,491 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=6.99 vs. limit=15.0 +2024-01-15 19:34:53,462 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=77603.33333333333, ans=0.0 +2024-01-15 19:35:06,553 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=77636.66666666667, ans=0.125 +2024-01-15 19:35:32,513 INFO [train.py:994] (0/2) Epoch 28, batch 550, loss[loss=0.1584, simple_loss=0.2389, pruned_loss=0.03894, over 24504.00 frames. ], tot_loss[loss=0.1501, simple_loss=0.2296, pruned_loss=0.03525, over 4497108.77 frames. ], batch size: 229, lr: 1.47e-02, grad_scale: 16.0 +2024-01-15 19:36:05,812 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.512e+02 1.862e+02 2.099e+02 2.436e+02 4.478e+02, threshold=4.198e+02, percent-clipped=3.0 +2024-01-15 19:36:09,606 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=77803.33333333333, ans=0.125 +2024-01-15 19:36:26,216 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=77836.66666666667, ans=0.125 +2024-01-15 19:36:34,266 INFO [train.py:994] (0/2) Epoch 28, batch 600, loss[loss=0.1534, simple_loss=0.2362, pruned_loss=0.03533, over 24524.00 frames. ], tot_loss[loss=0.1503, simple_loss=0.2298, pruned_loss=0.03543, over 4566392.14 frames. ], batch size: 193, lr: 1.47e-02, grad_scale: 16.0 +2024-01-15 19:36:34,619 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=77870.0, ans=0.125 +2024-01-15 19:36:44,096 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2.whitening_limit, batch_count=77870.0, ans=15.0 +2024-01-15 19:36:44,714 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=77870.0, ans=0.0 +2024-01-15 19:36:58,478 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=77936.66666666667, ans=0.0 +2024-01-15 19:37:16,401 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=77970.0, ans=0.125 +2024-01-15 19:37:25,336 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.98 vs. limit=22.5 +2024-01-15 19:37:25,417 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=4.04 vs. 
limit=12.0 +2024-01-15 19:37:32,885 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=78003.33333333333, ans=0.0 +2024-01-15 19:37:34,085 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=78003.33333333333, ans=0.125 +2024-01-15 19:37:36,712 INFO [train.py:994] (0/2) Epoch 28, batch 650, loss[loss=0.16, simple_loss=0.2422, pruned_loss=0.03884, over 24260.00 frames. ], tot_loss[loss=0.15, simple_loss=0.2294, pruned_loss=0.03527, over 4612748.77 frames. ], batch size: 311, lr: 1.46e-02, grad_scale: 16.0 +2024-01-15 19:37:39,199 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=8.71 vs. limit=15.0 +2024-01-15 19:37:43,715 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=78036.66666666667, ans=0.0 +2024-01-15 19:37:48,394 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=78070.0, ans=0.125 +2024-01-15 19:38:10,584 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 2.024e+02 2.393e+02 2.788e+02 5.477e+02, threshold=4.785e+02, percent-clipped=2.0 +2024-01-15 19:38:17,980 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=78136.66666666667, ans=0.2 +2024-01-15 19:38:30,954 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=78170.0, ans=0.125 +2024-01-15 19:38:32,137 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=78170.0, ans=0.125 +2024-01-15 19:38:39,081 INFO [train.py:994] (0/2) Epoch 28, batch 700, loss[loss=0.146, simple_loss=0.2274, pruned_loss=0.03229, over 24445.00 frames. ], tot_loss[loss=0.1501, simple_loss=0.2296, pruned_loss=0.0353, over 4657870.87 frames. ], batch size: 267, lr: 1.46e-02, grad_scale: 16.0 +2024-01-15 19:39:03,416 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=2.78 vs. limit=12.0 +2024-01-15 19:39:38,840 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=78336.66666666667, ans=0.2 +2024-01-15 19:39:40,867 INFO [train.py:994] (0/2) Epoch 28, batch 750, loss[loss=0.1586, simple_loss=0.2402, pruned_loss=0.03854, over 24365.00 frames. ], tot_loss[loss=0.1501, simple_loss=0.2296, pruned_loss=0.03533, over 4688397.35 frames. 
], batch size: 298, lr: 1.46e-02, grad_scale: 16.0 +2024-01-15 19:39:43,611 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=78370.0, ans=0.0 +2024-01-15 19:39:47,718 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=78370.0, ans=0.5 +2024-01-15 19:40:07,241 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=78436.66666666667, ans=0.1 +2024-01-15 19:40:14,776 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.479e+02 1.835e+02 2.101e+02 2.568e+02 3.560e+02, threshold=4.202e+02, percent-clipped=0.0 +2024-01-15 19:40:15,114 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=78436.66666666667, ans=0.0 +2024-01-15 19:40:37,019 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 19:40:41,195 INFO [train.py:994] (0/2) Epoch 28, batch 800, loss[loss=0.1549, simple_loss=0.2307, pruned_loss=0.03951, over 24455.00 frames. ], tot_loss[loss=0.15, simple_loss=0.2295, pruned_loss=0.03529, over 4720638.18 frames. ], batch size: 210, lr: 1.46e-02, grad_scale: 32.0 +2024-01-15 19:40:43,614 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=78536.66666666667, ans=0.0 +2024-01-15 19:41:15,013 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=78636.66666666667, ans=0.0 +2024-01-15 19:41:16,200 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=78636.66666666667, ans=0.125 +2024-01-15 19:41:25,505 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=78636.66666666667, ans=0.125 +2024-01-15 19:41:30,759 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-28.pt +2024-01-15 19:41:52,745 INFO [train.py:994] (0/2) Epoch 29, batch 0, loss[loss=0.1428, simple_loss=0.2218, pruned_loss=0.03193, over 24347.00 frames. ], tot_loss[loss=0.1428, simple_loss=0.2218, pruned_loss=0.03193, over 24347.00 frames. ], batch size: 298, lr: 1.44e-02, grad_scale: 32.0 +2024-01-15 19:41:52,746 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 19:42:03,236 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.5.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([2.4767, 2.4149, 3.7085, 2.3031], device='cuda:0') +2024-01-15 19:42:04,749 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.3.encoder.layers.3.self_attn_weights, attn_weights_entropy = tensor([2.4703, 2.4689, 3.1993, 3.0785, 2.9903, 3.1475, 3.0508, 2.9937], + device='cuda:0') +2024-01-15 19:42:12,482 INFO [train.py:1026] (0/2) Epoch 29, validation: loss=0.1669, simple_loss=0.2498, pruned_loss=0.042, over 1622729.00 frames. 
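A side note on the loss fields reported in the entries above: for the validation and batch-0 lines in this segment, the logged loss appears to equal 0.5 * simple_loss + pruned_loss to the displayed precision. The 0.5 weighting is inferred from the logged numbers themselves, not read out of train.py, so it should be treated as an assumption rather than the training code's definition. A minimal, self-contained Python check against values copied from nearby lines:

# Values (loss, simple_loss, pruned_loss) copied from validation / batch-0 entries in this log segment.
# The 0.5 weight is an assumption inferred from the printed numbers, not taken from train.py.
entries = [
    (0.1669, 0.2498, 0.0420),   # Epoch 29, validation
    (0.1682, 0.2519, 0.04225),  # Epoch 28, validation
    (0.1572, 0.2364, 0.03897),  # Epoch 28, batch 0
    (0.1487, 0.2301, 0.03363),  # Epoch 30, batch 0
]
for loss, simple_loss, pruned_loss in entries:
    # Reconstruct the total from the two components and compare with the logged value.
    print(f"logged={loss:.4f}  0.5*simple+pruned={0.5 * simple_loss + pruned_loss:.4f}")

Running this reproduces the logged totals (0.1669, 0.1682, 0.1572, 0.1487), which is consistent with the reconstruction above for these particular entries.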
+2024-01-15 19:42:12,482 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 19:42:12,671 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=78680.0, ans=0.125 +2024-01-15 19:42:14,493 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=11.31 vs. limit=15.0 +2024-01-15 19:42:54,939 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 1.800e+02 2.035e+02 2.389e+02 3.115e+02, threshold=4.070e+02, percent-clipped=0.0 +2024-01-15 19:43:14,031 INFO [train.py:994] (0/2) Epoch 29, batch 50, loss[loss=0.155, simple_loss=0.2304, pruned_loss=0.03982, over 24502.00 frames. ], tot_loss[loss=0.1501, simple_loss=0.2289, pruned_loss=0.03567, over 1097384.26 frames. ], batch size: 210, lr: 1.43e-02, grad_scale: 32.0 +2024-01-15 19:43:17,928 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=78846.66666666667, ans=0.0 +2024-01-15 19:43:39,422 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=10.32 vs. limit=15.0 +2024-01-15 19:43:45,831 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=78913.33333333333, ans=0.1 +2024-01-15 19:44:00,691 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=78946.66666666667, ans=0.125 +2024-01-15 19:44:08,429 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=78980.0, ans=0.125 +2024-01-15 19:44:16,256 INFO [train.py:994] (0/2) Epoch 29, batch 100, loss[loss=0.1274, simple_loss=0.1955, pruned_loss=0.0296, over 23613.00 frames. ], tot_loss[loss=0.1486, simple_loss=0.2275, pruned_loss=0.03487, over 1910635.68 frames. ], batch size: 119, lr: 1.43e-02, grad_scale: 32.0 +2024-01-15 19:44:40,137 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.min_positive, batch_count=79080.0, ans=0.025 +2024-01-15 19:44:50,867 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=79080.0, ans=0.0 +2024-01-15 19:44:58,780 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 1.850e+02 1.997e+02 2.293e+02 3.475e+02, threshold=3.995e+02, percent-clipped=0.0 +2024-01-15 19:44:59,094 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=79113.33333333333, ans=0.125 +2024-01-15 19:45:12,672 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.00 vs. limit=10.0 +2024-01-15 19:45:15,548 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=79146.66666666667, ans=0.125 +2024-01-15 19:45:17,654 INFO [train.py:994] (0/2) Epoch 29, batch 150, loss[loss=0.1532, simple_loss=0.2354, pruned_loss=0.03545, over 23923.00 frames. ], tot_loss[loss=0.1491, simple_loss=0.2278, pruned_loss=0.0352, over 2554441.35 frames. 
], batch size: 328, lr: 1.43e-02, grad_scale: 32.0 +2024-01-15 19:45:41,480 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=79246.66666666667, ans=0.125 +2024-01-15 19:45:53,893 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=5.88 vs. limit=10.0 +2024-01-15 19:46:02,105 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=15.14 vs. limit=15.0 +2024-01-15 19:46:10,356 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=79313.33333333333, ans=0.125 +2024-01-15 19:46:20,281 INFO [train.py:994] (0/2) Epoch 29, batch 200, loss[loss=0.1477, simple_loss=0.2318, pruned_loss=0.03183, over 24310.00 frames. ], tot_loss[loss=0.1491, simple_loss=0.2285, pruned_loss=0.03485, over 3060964.77 frames. ], batch size: 285, lr: 1.43e-02, grad_scale: 32.0 +2024-01-15 19:46:38,054 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.45 vs. limit=15.0 +2024-01-15 19:46:55,560 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.39 vs. limit=6.0 +2024-01-15 19:46:56,073 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=79446.66666666667, ans=0.1 +2024-01-15 19:47:02,592 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.436e+02 1.852e+02 2.095e+02 2.443e+02 3.716e+02, threshold=4.190e+02, percent-clipped=0.0 +2024-01-15 19:47:22,028 INFO [train.py:994] (0/2) Epoch 29, batch 250, loss[loss=0.1309, simple_loss=0.2077, pruned_loss=0.02709, over 24233.00 frames. ], tot_loss[loss=0.1491, simple_loss=0.2288, pruned_loss=0.03469, over 3445992.85 frames. ], batch size: 140, lr: 1.43e-02, grad_scale: 32.0 +2024-01-15 19:47:27,482 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass.scale_min, batch_count=79513.33333333333, ans=0.2 +2024-01-15 19:47:53,628 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_skip_rate, batch_count=79580.0, ans=0.0 +2024-01-15 19:48:09,378 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=79613.33333333333, ans=0.1 +2024-01-15 19:48:24,481 INFO [train.py:994] (0/2) Epoch 29, batch 300, loss[loss=0.1568, simple_loss=0.237, pruned_loss=0.03828, over 24365.00 frames. ], tot_loss[loss=0.1494, simple_loss=0.2294, pruned_loss=0.03471, over 3759803.42 frames. ], batch size: 275, lr: 1.43e-02, grad_scale: 32.0 +2024-01-15 19:48:28,278 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=79680.0, ans=0.1 +2024-01-15 19:48:40,579 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.34 vs. 
limit=10.0 +2024-01-15 19:48:44,575 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=79713.33333333333, ans=0.125 +2024-01-15 19:48:44,595 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=79713.33333333333, ans=0.1 +2024-01-15 19:48:45,767 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer1.prob, batch_count=79713.33333333333, ans=0.125 +2024-01-15 19:49:06,084 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.569e+02 1.816e+02 2.120e+02 2.493e+02 4.503e+02, threshold=4.241e+02, percent-clipped=1.0 +2024-01-15 19:49:26,232 INFO [train.py:994] (0/2) Epoch 29, batch 350, loss[loss=0.168, simple_loss=0.2341, pruned_loss=0.05095, over 24556.00 frames. ], tot_loss[loss=0.1491, simple_loss=0.2288, pruned_loss=0.03469, over 3990219.40 frames. ], batch size: 176, lr: 1.43e-02, grad_scale: 32.0 +2024-01-15 19:49:41,916 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=79880.0, ans=0.125 +2024-01-15 19:49:41,959 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=79880.0, ans=0.125 +2024-01-15 19:49:50,723 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=7.01 vs. limit=15.0 +2024-01-15 19:49:52,600 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=79913.33333333333, ans=0.125 +2024-01-15 19:50:16,730 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=79980.0, ans=0.2 +2024-01-15 19:50:21,523 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/checkpoint-24000.pt +2024-01-15 19:50:28,867 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=80013.33333333333, ans=0.0 +2024-01-15 19:50:29,659 INFO [train.py:994] (0/2) Epoch 29, batch 400, loss[loss=0.131, simple_loss=0.2124, pruned_loss=0.02478, over 24144.00 frames. ], tot_loss[loss=0.1488, simple_loss=0.2283, pruned_loss=0.03466, over 4164784.49 frames. 
], batch size: 140, lr: 1.43e-02, grad_scale: 32.0 +2024-01-15 19:50:35,299 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=80013.33333333333, ans=0.125 +2024-01-15 19:50:43,693 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=80046.66666666667, ans=0.0 +2024-01-15 19:50:47,146 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=80046.66666666667, ans=0.09899494936611666 +2024-01-15 19:51:11,278 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 1.814e+02 2.020e+02 2.310e+02 3.488e+02, threshold=4.040e+02, percent-clipped=0.0 +2024-01-15 19:51:12,751 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff3_skip_rate, batch_count=80113.33333333333, ans=0.0 +2024-01-15 19:51:12,791 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=80113.33333333333, ans=0.1 +2024-01-15 19:51:30,814 INFO [train.py:994] (0/2) Epoch 29, batch 450, loss[loss=0.161, simple_loss=0.2414, pruned_loss=0.0403, over 24540.00 frames. ], tot_loss[loss=0.1486, simple_loss=0.2278, pruned_loss=0.03466, over 4280559.68 frames. ], batch size: 204, lr: 1.42e-02, grad_scale: 32.0 +2024-01-15 19:51:52,059 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=80213.33333333333, ans=0.1 +2024-01-15 19:51:59,138 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=80246.66666666667, ans=0.1 +2024-01-15 19:52:17,185 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=14.07 vs. limit=22.5 +2024-01-15 19:52:22,681 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=80313.33333333333, ans=0.125 +2024-01-15 19:52:31,793 INFO [train.py:994] (0/2) Epoch 29, batch 500, loss[loss=0.1452, simple_loss=0.2263, pruned_loss=0.03206, over 24508.00 frames. ], tot_loss[loss=0.1489, simple_loss=0.2282, pruned_loss=0.03478, over 4398336.87 frames. ], batch size: 187, lr: 1.42e-02, grad_scale: 32.0 +2024-01-15 19:52:46,056 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=80380.0, ans=0.0 +2024-01-15 19:52:47,305 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 19:52:48,513 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=80380.0, ans=0.2 +2024-01-15 19:53:06,627 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.scale_min, batch_count=80413.33333333333, ans=0.2 +2024-01-15 19:53:14,689 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 1.815e+02 2.023e+02 2.405e+02 3.729e+02, threshold=4.047e+02, percent-clipped=0.0 +2024-01-15 19:53:34,789 INFO [train.py:994] (0/2) Epoch 29, batch 550, loss[loss=0.1457, simple_loss=0.2256, pruned_loss=0.03295, over 24506.00 frames. ], tot_loss[loss=0.1486, simple_loss=0.228, pruned_loss=0.03463, over 4480687.74 frames. 
], batch size: 210, lr: 1.42e-02, grad_scale: 32.0 +2024-01-15 19:53:46,753 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=80546.66666666667, ans=0.125 +2024-01-15 19:53:53,825 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=80546.66666666667, ans=0.1 +2024-01-15 19:53:54,191 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.26 vs. limit=10.0 +2024-01-15 19:53:58,014 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=80580.0, ans=0.0 +2024-01-15 19:54:00,924 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=80580.0, ans=0.2 +2024-01-15 19:54:19,507 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=80613.33333333333, ans=0.125 +2024-01-15 19:54:36,485 INFO [train.py:994] (0/2) Epoch 29, batch 600, loss[loss=0.1307, simple_loss=0.208, pruned_loss=0.02675, over 24008.00 frames. ], tot_loss[loss=0.1488, simple_loss=0.2284, pruned_loss=0.03462, over 4557720.18 frames. ], batch size: 131, lr: 1.42e-02, grad_scale: 32.0 +2024-01-15 19:54:40,565 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=12.67 vs. limit=22.5 +2024-01-15 19:55:06,315 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=80746.66666666667, ans=0.125 +2024-01-15 19:55:18,325 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=80780.0, ans=0.2 +2024-01-15 19:55:20,316 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.854e+02 2.059e+02 2.378e+02 3.411e+02, threshold=4.118e+02, percent-clipped=0.0 +2024-01-15 19:55:38,458 INFO [train.py:994] (0/2) Epoch 29, batch 650, loss[loss=0.1524, simple_loss=0.2408, pruned_loss=0.03202, over 23846.00 frames. ], tot_loss[loss=0.1486, simple_loss=0.2283, pruned_loss=0.03444, over 4628450.12 frames. ], batch size: 328, lr: 1.42e-02, grad_scale: 16.0 +2024-01-15 19:56:14,694 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=80946.66666666667, ans=0.125 +2024-01-15 19:56:20,639 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=80946.66666666667, ans=0.125 +2024-01-15 19:56:40,298 INFO [train.py:994] (0/2) Epoch 29, batch 700, loss[loss=0.1433, simple_loss=0.227, pruned_loss=0.02984, over 24385.00 frames. ], tot_loss[loss=0.1486, simple_loss=0.2282, pruned_loss=0.03447, over 4657568.18 frames. ], batch size: 258, lr: 1.42e-02, grad_scale: 16.0 +2024-01-15 19:56:40,880 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.conv_module2.whiten, num_groups=1, num_channels=512, metric=8.57 vs. 
limit=15.0 +2024-01-15 19:56:44,195 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=81013.33333333333, ans=0.1 +2024-01-15 19:56:45,632 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.91 vs. limit=15.0 +2024-01-15 19:57:10,185 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=81080.0, ans=0.05 +2024-01-15 19:57:23,205 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 1.881e+02 2.019e+02 2.378e+02 3.656e+02, threshold=4.037e+02, percent-clipped=0.0 +2024-01-15 19:57:41,599 INFO [train.py:994] (0/2) Epoch 29, batch 750, loss[loss=0.156, simple_loss=0.2399, pruned_loss=0.03604, over 24526.00 frames. ], tot_loss[loss=0.1487, simple_loss=0.2283, pruned_loss=0.03451, over 4688258.10 frames. ], batch size: 193, lr: 1.42e-02, grad_scale: 16.0 +2024-01-15 19:57:52,805 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=81180.0, ans=0.125 +2024-01-15 19:58:13,453 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=81246.66666666667, ans=0.1 +2024-01-15 19:58:25,087 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=81280.0, ans=0.2 +2024-01-15 19:58:32,872 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=81313.33333333333, ans=0.0 +2024-01-15 19:58:37,499 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=81313.33333333333, ans=0.0 +2024-01-15 19:58:41,830 INFO [train.py:994] (0/2) Epoch 29, batch 800, loss[loss=0.1507, simple_loss=0.2356, pruned_loss=0.03287, over 24464.00 frames. ], tot_loss[loss=0.1489, simple_loss=0.2286, pruned_loss=0.03456, over 4720417.96 frames. ], batch size: 267, lr: 1.41e-02, grad_scale: 32.0 +2024-01-15 19:58:43,643 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.29 vs. limit=12.0 +2024-01-15 19:58:46,453 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=81346.66666666667, ans=0.2 +2024-01-15 19:58:49,479 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=13.57 vs. limit=15.0 +2024-01-15 19:58:58,215 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=81380.0, ans=0.2 +2024-01-15 19:59:08,070 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=9.49 vs. 
limit=15.0 +2024-01-15 19:59:18,979 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=81446.66666666667, ans=0.2 +2024-01-15 19:59:21,921 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.562e+02 1.819e+02 1.963e+02 2.267e+02 3.581e+02, threshold=3.926e+02, percent-clipped=0.0 +2024-01-15 19:59:30,522 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-29.pt +2024-01-15 19:59:53,286 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=81490.0, ans=0.09899494936611666 +2024-01-15 19:59:54,086 INFO [train.py:994] (0/2) Epoch 30, batch 0, loss[loss=0.1487, simple_loss=0.2301, pruned_loss=0.03363, over 24553.00 frames. ], tot_loss[loss=0.1487, simple_loss=0.2301, pruned_loss=0.03363, over 24553.00 frames. ], batch size: 193, lr: 1.39e-02, grad_scale: 32.0 +2024-01-15 19:59:54,087 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 20:00:14,344 INFO [train.py:1026] (0/2) Epoch 30, validation: loss=0.1667, simple_loss=0.2496, pruned_loss=0.04196, over 1622729.00 frames. +2024-01-15 20:00:14,344 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 20:00:20,070 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=81490.0, ans=0.125 +2024-01-15 20:00:38,464 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=81556.66666666667, ans=0.125 +2024-01-15 20:00:55,685 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=81590.0, ans=0.125 +2024-01-15 20:00:56,324 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.78 vs. limit=12.0 +2024-01-15 20:01:14,397 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=81623.33333333333, ans=0.1 +2024-01-15 20:01:16,552 INFO [train.py:994] (0/2) Epoch 30, batch 50, loss[loss=0.1424, simple_loss=0.2221, pruned_loss=0.03135, over 24498.00 frames. ], tot_loss[loss=0.1501, simple_loss=0.2303, pruned_loss=0.03498, over 1096007.27 frames. ], batch size: 243, lr: 1.39e-02, grad_scale: 32.0 +2024-01-15 20:01:17,250 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=5.77 vs. limit=10.0 +2024-01-15 20:01:28,447 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=18.79 vs. 
limit=22.5 +2024-01-15 20:01:40,395 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.prob, batch_count=81723.33333333333, ans=0.125 +2024-01-15 20:01:51,563 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=81723.33333333333, ans=0.125 +2024-01-15 20:01:56,758 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=81756.66666666667, ans=0.07 +2024-01-15 20:02:09,649 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.554e+02 1.802e+02 2.012e+02 2.445e+02 3.493e+02, threshold=4.024e+02, percent-clipped=0.0 +2024-01-15 20:02:18,815 INFO [train.py:994] (0/2) Epoch 30, batch 100, loss[loss=0.1481, simple_loss=0.2266, pruned_loss=0.03481, over 24507.00 frames. ], tot_loss[loss=0.1495, simple_loss=0.2292, pruned_loss=0.03494, over 1923067.43 frames. ], batch size: 165, lr: 1.39e-02, grad_scale: 32.0 +2024-01-15 20:02:48,382 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 20:03:03,967 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer2.prob, batch_count=81923.33333333333, ans=0.125 +2024-01-15 20:03:21,388 INFO [train.py:994] (0/2) Epoch 30, batch 150, loss[loss=0.1596, simple_loss=0.2359, pruned_loss=0.04164, over 24579.00 frames. ], tot_loss[loss=0.1493, simple_loss=0.2293, pruned_loss=0.03462, over 2560294.49 frames. ], batch size: 176, lr: 1.39e-02, grad_scale: 32.0 +2024-01-15 20:03:26,767 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=81990.0, ans=0.125 +2024-01-15 20:03:27,929 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=81990.0, ans=0.125 +2024-01-15 20:03:35,725 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 20:03:46,722 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=82056.66666666667, ans=0.125 +2024-01-15 20:03:53,750 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=82056.66666666667, ans=0.2 +2024-01-15 20:04:00,180 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=82090.0, ans=0.125 +2024-01-15 20:04:01,936 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=6.80 vs. limit=12.0 +2024-01-15 20:04:07,182 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.28 vs. limit=10.0 +2024-01-15 20:04:14,831 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.551e+02 1.884e+02 2.201e+02 2.660e+02 3.501e+02, threshold=4.402e+02, percent-clipped=0.0 +2024-01-15 20:04:23,585 INFO [train.py:994] (0/2) Epoch 30, batch 200, loss[loss=0.1634, simple_loss=0.2442, pruned_loss=0.04127, over 22556.00 frames. ], tot_loss[loss=0.1489, simple_loss=0.2289, pruned_loss=0.03442, over 3051411.62 frames. 
], batch size: 357, lr: 1.39e-02, grad_scale: 32.0 +2024-01-15 20:04:52,596 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.55 vs. limit=15.0 +2024-01-15 20:04:53,862 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=21.70 vs. limit=22.5 +2024-01-15 20:05:20,664 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=82290.0, ans=0.125 +2024-01-15 20:05:23,541 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=82290.0, ans=0.125 +2024-01-15 20:05:25,596 INFO [train.py:994] (0/2) Epoch 30, batch 250, loss[loss=0.1547, simple_loss=0.2374, pruned_loss=0.03601, over 24477.00 frames. ], tot_loss[loss=0.1484, simple_loss=0.2287, pruned_loss=0.034, over 3439907.05 frames. ], batch size: 222, lr: 1.38e-02, grad_scale: 32.0 +2024-01-15 20:06:12,264 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.self_attn2.whiten, num_groups=1, num_channels=192, metric=14.90 vs. limit=22.5 +2024-01-15 20:06:19,049 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.830e+02 2.048e+02 2.345e+02 3.197e+02, threshold=4.097e+02, percent-clipped=0.0 +2024-01-15 20:06:27,320 INFO [train.py:994] (0/2) Epoch 30, batch 300, loss[loss=0.1496, simple_loss=0.2342, pruned_loss=0.03251, over 24528.00 frames. ], tot_loss[loss=0.148, simple_loss=0.2282, pruned_loss=0.03392, over 3743905.58 frames. ], batch size: 236, lr: 1.38e-02, grad_scale: 32.0 +2024-01-15 20:06:32,108 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.whiten, num_groups=1, num_channels=512, metric=4.10 vs. limit=12.0 +2024-01-15 20:06:35,265 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=82490.0, ans=0.0 +2024-01-15 20:06:35,352 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.balancer2.prob, batch_count=82490.0, ans=0.125 +2024-01-15 20:07:15,040 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=82590.0, ans=0.0 +2024-01-15 20:07:22,831 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=82623.33333333333, ans=0.1 +2024-01-15 20:07:30,095 INFO [train.py:994] (0/2) Epoch 30, batch 350, loss[loss=0.1425, simple_loss=0.2291, pruned_loss=0.02801, over 24215.00 frames. ], tot_loss[loss=0.1477, simple_loss=0.2279, pruned_loss=0.03377, over 3979995.21 frames. ], batch size: 311, lr: 1.38e-02, grad_scale: 32.0 +2024-01-15 20:07:39,002 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=82656.66666666667, ans=0.0 +2024-01-15 20:07:50,241 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=82690.0, ans=0.09899494936611666 +2024-01-15 20:08:00,791 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=3.32 vs. 
limit=15.0 +2024-01-15 20:08:01,785 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=82723.33333333333, ans=0.0 +2024-01-15 20:08:12,296 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=82756.66666666667, ans=0.125 +2024-01-15 20:08:17,769 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=82756.66666666667, ans=0.0 +2024-01-15 20:08:23,256 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.505e+02 1.788e+02 1.997e+02 2.329e+02 3.830e+02, threshold=3.994e+02, percent-clipped=0.0 +2024-01-15 20:08:32,281 INFO [train.py:994] (0/2) Epoch 30, batch 400, loss[loss=0.1476, simple_loss=0.2312, pruned_loss=0.03202, over 24493.00 frames. ], tot_loss[loss=0.1474, simple_loss=0.2274, pruned_loss=0.03375, over 4162924.43 frames. ], batch size: 267, lr: 1.38e-02, grad_scale: 32.0 +2024-01-15 20:09:07,144 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=82890.0, ans=0.125 +2024-01-15 20:09:12,924 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=82923.33333333333, ans=0.0 +2024-01-15 20:09:33,911 INFO [train.py:994] (0/2) Epoch 30, batch 450, loss[loss=0.1424, simple_loss=0.2225, pruned_loss=0.03118, over 24502.00 frames. ], tot_loss[loss=0.147, simple_loss=0.2268, pruned_loss=0.03359, over 4293646.67 frames. ], batch size: 267, lr: 1.38e-02, grad_scale: 32.0 +2024-01-15 20:09:45,453 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=83023.33333333333, ans=0.2 +2024-01-15 20:09:45,488 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=83023.33333333333, ans=0.125 +2024-01-15 20:09:52,557 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=83023.33333333333, ans=0.0 +2024-01-15 20:10:22,307 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=9.60 vs. limit=15.0 +2024-01-15 20:10:27,449 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 1.855e+02 2.114e+02 2.682e+02 3.873e+02, threshold=4.227e+02, percent-clipped=0.0 +2024-01-15 20:10:35,666 INFO [train.py:994] (0/2) Epoch 30, batch 500, loss[loss=0.146, simple_loss=0.2279, pruned_loss=0.03202, over 24421.00 frames. ], tot_loss[loss=0.1468, simple_loss=0.2263, pruned_loss=0.03368, over 4389548.32 frames. ], batch size: 250, lr: 1.38e-02, grad_scale: 32.0 +2024-01-15 20:10:37,785 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=83156.66666666667, ans=0.125 +2024-01-15 20:10:40,231 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=83156.66666666667, ans=0.125 +2024-01-15 20:10:54,209 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=13.45 vs. 
limit=22.5 +2024-01-15 20:10:55,893 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=83190.0, ans=0.0 +2024-01-15 20:11:03,646 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=83223.33333333333, ans=0.0 +2024-01-15 20:11:37,928 INFO [train.py:994] (0/2) Epoch 30, batch 550, loss[loss=0.1525, simple_loss=0.2361, pruned_loss=0.03445, over 24466.00 frames. ], tot_loss[loss=0.1474, simple_loss=0.2271, pruned_loss=0.03387, over 4495237.87 frames. ], batch size: 222, lr: 1.38e-02, grad_scale: 32.0 +2024-01-15 20:11:50,523 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=83356.66666666667, ans=0.125 +2024-01-15 20:12:00,517 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=83356.66666666667, ans=0.2 +2024-01-15 20:12:28,815 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer2.prob, batch_count=83456.66666666667, ans=0.125 +2024-01-15 20:12:31,412 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.527e+02 1.924e+02 2.195e+02 2.587e+02 4.884e+02, threshold=4.390e+02, percent-clipped=1.0 +2024-01-15 20:12:34,192 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=83456.66666666667, ans=0.1 +2024-01-15 20:12:39,647 INFO [train.py:994] (0/2) Epoch 30, batch 600, loss[loss=0.158, simple_loss=0.2414, pruned_loss=0.03726, over 24267.00 frames. ], tot_loss[loss=0.1469, simple_loss=0.2268, pruned_loss=0.03347, over 4554570.06 frames. ], batch size: 311, lr: 1.38e-02, grad_scale: 32.0 +2024-01-15 20:12:42,655 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys.whitening_limit, batch_count=83490.0, ans=6.0 +2024-01-15 20:12:51,361 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.prob, batch_count=83523.33333333333, ans=0.125 +2024-01-15 20:12:51,620 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=15.57 vs. limit=22.5 +2024-01-15 20:13:20,259 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=6.45 vs. limit=10.0 +2024-01-15 20:13:27,202 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=83590.0, ans=0.125 +2024-01-15 20:13:41,418 INFO [train.py:994] (0/2) Epoch 30, batch 650, loss[loss=0.143, simple_loss=0.2219, pruned_loss=0.03206, over 24551.00 frames. ], tot_loss[loss=0.1468, simple_loss=0.2265, pruned_loss=0.03358, over 4613011.94 frames. 
], batch size: 176, lr: 1.37e-02, grad_scale: 32.0 +2024-01-15 20:13:53,103 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=83690.0, ans=0.0 +2024-01-15 20:14:00,008 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=83690.0, ans=0.125 +2024-01-15 20:14:15,476 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=83723.33333333333, ans=0.125 +2024-01-15 20:14:19,268 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.39 vs. limit=10.0 +2024-01-15 20:14:20,552 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.26 vs. limit=22.5 +2024-01-15 20:14:24,236 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1.whitening_limit, batch_count=83756.66666666667, ans=10.0 +2024-01-15 20:14:35,315 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.791e+02 1.959e+02 2.214e+02 2.829e+02, threshold=3.919e+02, percent-clipped=0.0 +2024-01-15 20:14:39,192 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=83790.0, ans=0.0 +2024-01-15 20:14:43,495 INFO [train.py:994] (0/2) Epoch 30, batch 700, loss[loss=0.1406, simple_loss=0.2279, pruned_loss=0.02663, over 24205.00 frames. ], tot_loss[loss=0.147, simple_loss=0.2267, pruned_loss=0.03362, over 4658430.35 frames. ], batch size: 311, lr: 1.37e-02, grad_scale: 32.0 +2024-01-15 20:14:55,980 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=13.75 vs. limit=15.0 +2024-01-15 20:15:01,656 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=83856.66666666667, ans=0.0 +2024-01-15 20:15:04,069 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=83856.66666666667, ans=0.0 +2024-01-15 20:15:05,183 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=83856.66666666667, ans=0.125 +2024-01-15 20:15:10,989 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.max_abs, batch_count=83890.0, ans=10.0 +2024-01-15 20:15:12,284 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=83890.0, ans=0.2 +2024-01-15 20:15:19,400 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=83923.33333333333, ans=0.07 +2024-01-15 20:15:36,633 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=83956.66666666667, ans=0.125 +2024-01-15 20:15:42,444 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=83956.66666666667, ans=0.1 +2024-01-15 20:15:44,502 INFO [train.py:994] (0/2) Epoch 30, batch 750, loss[loss=0.1488, simple_loss=0.2302, pruned_loss=0.03365, over 24475.00 frames. 
], tot_loss[loss=0.147, simple_loss=0.2269, pruned_loss=0.03352, over 4690164.82 frames. ], batch size: 222, lr: 1.37e-02, grad_scale: 32.0 +2024-01-15 20:15:49,511 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=83990.0, ans=0.1 +2024-01-15 20:15:50,701 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=83990.0, ans=0.0 +2024-01-15 20:15:54,244 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.max_abs, batch_count=83990.0, ans=10.0 +2024-01-15 20:16:36,765 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.548e+02 1.859e+02 2.058e+02 2.418e+02 3.544e+02, threshold=4.115e+02, percent-clipped=0.0 +2024-01-15 20:16:44,705 INFO [train.py:994] (0/2) Epoch 30, batch 800, loss[loss=0.1619, simple_loss=0.2479, pruned_loss=0.03799, over 23893.00 frames. ], tot_loss[loss=0.1469, simple_loss=0.2268, pruned_loss=0.03348, over 4723842.88 frames. ], batch size: 328, lr: 1.37e-02, grad_scale: 32.0 +2024-01-15 20:17:18,579 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=4.49 vs. limit=6.0 +2024-01-15 20:17:33,020 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-30.pt +2024-01-15 20:17:56,016 INFO [train.py:994] (0/2) Epoch 31, batch 0, loss[loss=0.1596, simple_loss=0.2378, pruned_loss=0.04067, over 24537.00 frames. ], tot_loss[loss=0.1596, simple_loss=0.2378, pruned_loss=0.04067, over 24537.00 frames. ], batch size: 193, lr: 1.35e-02, grad_scale: 32.0 +2024-01-15 20:17:56,017 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 20:18:05,942 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.3.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([2.7985, 2.4488, 3.1809, 3.1416, 3.1391, 2.9484, 2.9251, 2.8792], + device='cuda:0') +2024-01-15 20:18:06,648 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.1.encoder.layers.1.self_attn_weights, attn_weights_entropy = tensor([4.2253, 3.4467, 3.8608, 3.4189], device='cuda:0') +2024-01-15 20:18:16,806 INFO [train.py:1026] (0/2) Epoch 31, validation: loss=0.1652, simple_loss=0.2487, pruned_loss=0.04091, over 1622729.00 frames. +2024-01-15 20:18:16,807 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 20:18:52,751 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=84400.0, ans=0.125 +2024-01-15 20:19:12,613 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=84433.33333333333, ans=0.125 +2024-01-15 20:19:18,312 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.496e+02 1.819e+02 1.963e+02 2.349e+02 3.862e+02, threshold=3.926e+02, percent-clipped=0.0 +2024-01-15 20:19:18,341 INFO [train.py:994] (0/2) Epoch 31, batch 50, loss[loss=0.1552, simple_loss=0.2359, pruned_loss=0.03722, over 24454.00 frames. ], tot_loss[loss=0.1454, simple_loss=0.2249, pruned_loss=0.03298, over 1095833.41 frames. 
], batch size: 170, lr: 1.35e-02, grad_scale: 32.0 +2024-01-15 20:19:48,154 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=84533.33333333333, ans=0.0 +2024-01-15 20:19:50,633 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=84533.33333333333, ans=0.125 +2024-01-15 20:19:58,444 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.conv_module2.whiten, num_groups=1, num_channels=192, metric=3.11 vs. limit=15.0 +2024-01-15 20:20:08,497 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=84600.0, ans=0.0 +2024-01-15 20:20:21,167 INFO [train.py:994] (0/2) Epoch 31, batch 100, loss[loss=0.1586, simple_loss=0.2358, pruned_loss=0.04071, over 24444.00 frames. ], tot_loss[loss=0.146, simple_loss=0.2257, pruned_loss=0.03312, over 1925711.27 frames. ], batch size: 250, lr: 1.35e-02, grad_scale: 32.0 +2024-01-15 20:20:29,856 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.62 vs. limit=6.0 +2024-01-15 20:20:31,033 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=14.58 vs. limit=22.5 +2024-01-15 20:20:39,378 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=84666.66666666667, ans=0.125 +2024-01-15 20:21:18,614 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=84766.66666666667, ans=0.125 +2024-01-15 20:21:23,157 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.391e+02 1.828e+02 2.015e+02 2.325e+02 4.082e+02, threshold=4.031e+02, percent-clipped=1.0 +2024-01-15 20:21:23,185 INFO [train.py:994] (0/2) Epoch 31, batch 150, loss[loss=0.1441, simple_loss=0.2259, pruned_loss=0.03111, over 24562.00 frames. ], tot_loss[loss=0.1461, simple_loss=0.226, pruned_loss=0.03309, over 2570510.96 frames. ], batch size: 176, lr: 1.35e-02, grad_scale: 32.0 +2024-01-15 20:21:34,647 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 20:22:00,583 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer1.prob, batch_count=84900.0, ans=0.125 +2024-01-15 20:22:01,220 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.83 vs. limit=15.0 +2024-01-15 20:22:17,819 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=84933.33333333333, ans=0.0 +2024-01-15 20:22:22,441 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=84933.33333333333, ans=0.0 +2024-01-15 20:22:25,069 INFO [train.py:994] (0/2) Epoch 31, batch 200, loss[loss=0.1419, simple_loss=0.2212, pruned_loss=0.03129, over 24204.00 frames. ], tot_loss[loss=0.1463, simple_loss=0.2263, pruned_loss=0.03312, over 3068469.95 frames. 
], batch size: 140, lr: 1.34e-02, grad_scale: 32.0 +2024-01-15 20:22:27,662 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=84966.66666666667, ans=0.125 +2024-01-15 20:22:45,931 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=85000.0, ans=0.125 +2024-01-15 20:22:56,210 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=85033.33333333333, ans=0.0 +2024-01-15 20:23:27,461 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.517e+02 1.737e+02 1.884e+02 2.109e+02 3.532e+02, threshold=3.768e+02, percent-clipped=0.0 +2024-01-15 20:23:27,493 INFO [train.py:994] (0/2) Epoch 31, batch 250, loss[loss=0.1571, simple_loss=0.2353, pruned_loss=0.03942, over 24516.00 frames. ], tot_loss[loss=0.1465, simple_loss=0.2264, pruned_loss=0.03329, over 3446371.81 frames. ], batch size: 165, lr: 1.34e-02, grad_scale: 32.0 +2024-01-15 20:23:27,810 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=85133.33333333333, ans=0.1 +2024-01-15 20:23:30,108 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=85133.33333333333, ans=0.0 +2024-01-15 20:23:38,214 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=85166.66666666667, ans=0.0 +2024-01-15 20:23:55,871 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=85200.0, ans=0.0 +2024-01-15 20:24:12,547 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn1.whiten.whitening_limit, batch_count=85233.33333333333, ans=22.5 +2024-01-15 20:24:18,326 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=4.20 vs. limit=15.0 +2024-01-15 20:24:22,843 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=85266.66666666667, ans=0.1 +2024-01-15 20:24:24,044 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=85266.66666666667, ans=0.125 +2024-01-15 20:24:28,425 INFO [train.py:994] (0/2) Epoch 31, batch 300, loss[loss=0.1207, simple_loss=0.1938, pruned_loss=0.0238, over 23562.00 frames. ], tot_loss[loss=0.1464, simple_loss=0.2266, pruned_loss=0.03312, over 3749614.69 frames. ], batch size: 119, lr: 1.34e-02, grad_scale: 32.0 +2024-01-15 20:24:36,948 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=85300.0, ans=0.125 +2024-01-15 20:24:43,540 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=3.48 vs. 
limit=15.0 +2024-01-15 20:24:58,995 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer1.min_positive, batch_count=85366.66666666667, ans=0.025 +2024-01-15 20:25:00,189 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=85366.66666666667, ans=0.125 +2024-01-15 20:25:08,841 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=85400.0, ans=0.125 +2024-01-15 20:25:11,241 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.attention_skip_rate, batch_count=85400.0, ans=0.0 +2024-01-15 20:25:25,839 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass_mid.scale_min, batch_count=85433.33333333333, ans=0.2 +2024-01-15 20:25:30,946 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.391e+02 1.870e+02 2.116e+02 2.844e+02 4.869e+02, threshold=4.233e+02, percent-clipped=5.0 +2024-01-15 20:25:30,975 INFO [train.py:994] (0/2) Epoch 31, batch 350, loss[loss=0.1548, simple_loss=0.2269, pruned_loss=0.04135, over 24519.00 frames. ], tot_loss[loss=0.1467, simple_loss=0.2268, pruned_loss=0.03328, over 3985101.85 frames. ], batch size: 165, lr: 1.34e-02, grad_scale: 32.0 +2024-01-15 20:25:31,292 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=85466.66666666667, ans=0.125 +2024-01-15 20:25:33,670 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=85466.66666666667, ans=0.0 +2024-01-15 20:25:34,806 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.prob, batch_count=85466.66666666667, ans=0.125 +2024-01-15 20:25:53,606 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=384, metric=17.87 vs. limit=22.5 +2024-01-15 20:25:54,633 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.whiten, num_groups=1, num_channels=512, metric=3.16 vs. limit=12.0 +2024-01-15 20:26:03,095 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.88 vs. limit=12.0 +2024-01-15 20:26:08,494 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=85566.66666666667, ans=0.0 +2024-01-15 20:26:15,367 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=85566.66666666667, ans=0.04949747468305833 +2024-01-15 20:26:31,186 INFO [train.py:994] (0/2) Epoch 31, batch 400, loss[loss=0.1509, simple_loss=0.2277, pruned_loss=0.03711, over 24470.00 frames. ], tot_loss[loss=0.1468, simple_loss=0.2269, pruned_loss=0.03334, over 4166409.17 frames. 
], batch size: 165, lr: 1.34e-02, grad_scale: 32.0 +2024-01-15 20:27:09,333 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=85733.33333333333, ans=0.0 +2024-01-15 20:27:18,827 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=85733.33333333333, ans=0.04949747468305833 +2024-01-15 20:27:29,978 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=192, metric=9.56 vs. limit=15.0 +2024-01-15 20:27:33,136 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.542e+02 1.759e+02 1.988e+02 2.277e+02 3.260e+02, threshold=3.975e+02, percent-clipped=0.0 +2024-01-15 20:27:33,164 INFO [train.py:994] (0/2) Epoch 31, batch 450, loss[loss=0.1165, simple_loss=0.1812, pruned_loss=0.02588, over 18973.00 frames. ], tot_loss[loss=0.1468, simple_loss=0.2267, pruned_loss=0.0334, over 4310297.52 frames. ], batch size: 81, lr: 1.34e-02, grad_scale: 32.0 +2024-01-15 20:27:55,180 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=85833.33333333333, ans=0.0 +2024-01-15 20:28:06,792 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=85866.66666666667, ans=0.125 +2024-01-15 20:28:16,180 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=85900.0, ans=0.125 +2024-01-15 20:28:16,210 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=85900.0, ans=0.1 +2024-01-15 20:28:17,383 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.max_abs, batch_count=85900.0, ans=10.0 +2024-01-15 20:28:21,527 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=85933.33333333333, ans=0.0 +2024-01-15 20:28:25,097 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=85933.33333333333, ans=0.0 +2024-01-15 20:28:25,135 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=85933.33333333333, ans=0.125 +2024-01-15 20:28:31,635 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=85933.33333333333, ans=0.125 +2024-01-15 20:28:34,935 INFO [train.py:994] (0/2) Epoch 31, batch 500, loss[loss=0.1465, simple_loss=0.2276, pruned_loss=0.03269, over 24488.00 frames. ], tot_loss[loss=0.1467, simple_loss=0.2266, pruned_loss=0.03341, over 4422854.67 frames. ], batch size: 210, lr: 1.34e-02, grad_scale: 32.0 +2024-01-15 20:28:42,290 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.skip_rate, batch_count=85966.66666666667, ans=0.035 +2024-01-15 20:28:46,176 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=86000.0, ans=0.0 +2024-01-15 20:29:15,602 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.71 vs. 
limit=15.0 +2024-01-15 20:29:16,303 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer2.prob, batch_count=86066.66666666667, ans=0.125 +2024-01-15 20:29:17,509 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=86066.66666666667, ans=0.125 +2024-01-15 20:29:21,825 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=86066.66666666667, ans=0.0 +2024-01-15 20:29:37,059 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.543e+02 1.823e+02 1.990e+02 2.284e+02 3.467e+02, threshold=3.981e+02, percent-clipped=0.0 +2024-01-15 20:29:37,088 INFO [train.py:994] (0/2) Epoch 31, batch 550, loss[loss=0.1396, simple_loss=0.2203, pruned_loss=0.02944, over 24479.00 frames. ], tot_loss[loss=0.1463, simple_loss=0.2261, pruned_loss=0.03321, over 4508028.47 frames. ], batch size: 267, lr: 1.34e-02, grad_scale: 32.0 +2024-01-15 20:29:55,097 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=86166.66666666667, ans=0.125 +2024-01-15 20:30:02,364 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=86200.0, ans=0.125 +2024-01-15 20:30:02,579 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.52 vs. limit=15.0 +2024-01-15 20:30:19,129 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 20:30:27,528 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=86266.66666666667, ans=0.125 +2024-01-15 20:30:35,176 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=86266.66666666667, ans=0.125 +2024-01-15 20:30:35,396 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 20:30:40,443 INFO [train.py:994] (0/2) Epoch 31, batch 600, loss[loss=0.1473, simple_loss=0.2243, pruned_loss=0.03515, over 24380.00 frames. ], tot_loss[loss=0.1461, simple_loss=0.226, pruned_loss=0.03307, over 4571528.70 frames. 
], batch size: 153, lr: 1.33e-02, grad_scale: 32.0 +2024-01-15 20:30:44,327 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=86300.0, ans=0.0 +2024-01-15 20:31:10,606 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=86366.66666666667, ans=0.125 +2024-01-15 20:31:12,946 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=86366.66666666667, ans=0.0 +2024-01-15 20:31:19,968 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=86400.0, ans=0.125 +2024-01-15 20:31:22,970 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=86400.0, ans=0.0 +2024-01-15 20:31:35,410 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.skip_rate, batch_count=86433.33333333333, ans=0.07 +2024-01-15 20:31:42,294 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.540e+02 1.828e+02 1.966e+02 2.239e+02 3.295e+02, threshold=3.932e+02, percent-clipped=0.0 +2024-01-15 20:31:42,322 INFO [train.py:994] (0/2) Epoch 31, batch 650, loss[loss=0.1194, simple_loss=0.1832, pruned_loss=0.02776, over 16703.00 frames. ], tot_loss[loss=0.1454, simple_loss=0.2255, pruned_loss=0.03269, over 4619587.83 frames. ], batch size: 71, lr: 1.33e-02, grad_scale: 32.0 +2024-01-15 20:31:45,354 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.69 vs. limit=15.0 +2024-01-15 20:31:46,174 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=86466.66666666667, ans=0.0 +2024-01-15 20:31:49,261 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer1.prob, batch_count=86466.66666666667, ans=0.125 +2024-01-15 20:32:04,000 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=86500.0, ans=0.0 +2024-01-15 20:32:12,368 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass_mid.scale_min, batch_count=86533.33333333333, ans=0.2 +2024-01-15 20:32:39,319 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=86600.0, ans=0.2 +2024-01-15 20:32:41,673 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.hidden_balancer.prob, batch_count=86600.0, ans=0.125 +2024-01-15 20:32:44,959 INFO [train.py:994] (0/2) Epoch 31, batch 700, loss[loss=0.1646, simple_loss=0.2481, pruned_loss=0.04055, over 22465.00 frames. ], tot_loss[loss=0.1452, simple_loss=0.2251, pruned_loss=0.03263, over 4655808.79 frames. 
], batch size: 357, lr: 1.33e-02, grad_scale: 32.0 +2024-01-15 20:33:00,265 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=86666.66666666667, ans=0.2 +2024-01-15 20:33:03,859 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=86666.66666666667, ans=0.1 +2024-01-15 20:33:07,970 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=86666.66666666667, ans=0.2 +2024-01-15 20:33:17,097 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=86700.0, ans=0.125 +2024-01-15 20:33:45,488 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=86766.66666666667, ans=0.125 +2024-01-15 20:33:47,589 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.533e+02 1.770e+02 1.940e+02 2.237e+02 3.282e+02, threshold=3.880e+02, percent-clipped=0.0 +2024-01-15 20:33:47,618 INFO [train.py:994] (0/2) Epoch 31, batch 750, loss[loss=0.1402, simple_loss=0.2245, pruned_loss=0.028, over 24407.00 frames. ], tot_loss[loss=0.145, simple_loss=0.2252, pruned_loss=0.03243, over 4700636.37 frames. ], batch size: 275, lr: 1.33e-02, grad_scale: 32.0 +2024-01-15 20:33:47,884 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=86800.0, ans=0.125 +2024-01-15 20:33:55,598 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=86800.0, ans=0.1 +2024-01-15 20:34:01,461 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer1.prob, batch_count=86833.33333333333, ans=0.125 +2024-01-15 20:34:15,190 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=86866.66666666667, ans=0.1 +2024-01-15 20:34:20,860 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=86866.66666666667, ans=0.2 +2024-01-15 20:34:22,439 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=86866.66666666667, ans=0.2 +2024-01-15 20:34:23,585 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=86900.0, ans=0.0 +2024-01-15 20:34:26,927 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=86900.0, ans=0.1 +2024-01-15 20:34:47,294 INFO [train.py:994] (0/2) Epoch 31, batch 800, loss[loss=0.1512, simple_loss=0.2288, pruned_loss=0.03676, over 24529.00 frames. ], tot_loss[loss=0.1451, simple_loss=0.2252, pruned_loss=0.03253, over 4723551.34 frames. 
], batch size: 193, lr: 1.33e-02, grad_scale: 32.0 +2024-01-15 20:34:47,576 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=86966.66666666667, ans=0.2 +2024-01-15 20:34:58,839 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=87000.0, ans=0.125 +2024-01-15 20:35:03,816 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=5.93 vs. limit=10.0 +2024-01-15 20:35:08,296 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=87000.0, ans=0.0 +2024-01-15 20:35:24,284 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=87066.66666666667, ans=0.125 +2024-01-15 20:35:25,343 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=87066.66666666667, ans=0.1 +2024-01-15 20:35:36,072 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-31.pt +2024-01-15 20:36:01,632 INFO [train.py:994] (0/2) Epoch 32, batch 0, loss[loss=0.1535, simple_loss=0.2397, pruned_loss=0.03365, over 22638.00 frames. ], tot_loss[loss=0.1535, simple_loss=0.2397, pruned_loss=0.03365, over 22638.00 frames. ], batch size: 357, lr: 1.31e-02, grad_scale: 32.0 +2024-01-15 20:36:01,634 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 20:36:22,987 INFO [train.py:1026] (0/2) Epoch 32, validation: loss=0.1657, simple_loss=0.2488, pruned_loss=0.04124, over 1622729.00 frames. +2024-01-15 20:36:22,989 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 20:36:29,264 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=87110.0, ans=0.1 +2024-01-15 20:36:31,439 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.407e+02 1.789e+02 2.040e+02 2.421e+02 3.531e+02, threshold=4.081e+02, percent-clipped=0.0 +2024-01-15 20:36:38,732 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=22.13 vs. limit=22.5 +2024-01-15 20:36:54,173 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=87176.66666666667, ans=0.1 +2024-01-15 20:36:54,226 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=87176.66666666667, ans=0.125 +2024-01-15 20:37:16,285 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=87243.33333333333, ans=0.1 +2024-01-15 20:37:24,718 INFO [train.py:994] (0/2) Epoch 32, batch 50, loss[loss=0.1435, simple_loss=0.2219, pruned_loss=0.03253, over 24327.00 frames. ], tot_loss[loss=0.1431, simple_loss=0.223, pruned_loss=0.03158, over 1089216.74 frames. 
], batch size: 285, lr: 1.31e-02, grad_scale: 16.0 +2024-01-15 20:37:33,243 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.max_abs, batch_count=87276.66666666667, ans=10.0 +2024-01-15 20:37:57,071 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=87343.33333333333, ans=0.1 +2024-01-15 20:37:58,349 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=87343.33333333333, ans=0.125 +2024-01-15 20:38:02,014 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=87376.66666666667, ans=0.125 +2024-01-15 20:38:16,326 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 20:38:26,678 INFO [train.py:994] (0/2) Epoch 32, batch 100, loss[loss=0.1549, simple_loss=0.2339, pruned_loss=0.03801, over 24475.00 frames. ], tot_loss[loss=0.1447, simple_loss=0.2244, pruned_loss=0.03254, over 1911714.22 frames. ], batch size: 170, lr: 1.31e-02, grad_scale: 16.0 +2024-01-15 20:38:32,335 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 20:38:36,503 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 1.827e+02 2.037e+02 2.266e+02 3.836e+02, threshold=4.073e+02, percent-clipped=0.0 +2024-01-15 20:38:46,896 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=87476.66666666667, ans=0.125 +2024-01-15 20:38:50,390 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=87510.0, ans=0.0 +2024-01-15 20:38:51,806 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.96 vs. limit=15.0 +2024-01-15 20:38:55,680 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=87510.0, ans=0.2 +2024-01-15 20:39:00,717 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.37 vs. limit=15.0 +2024-01-15 20:39:02,780 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer1.prob, batch_count=87543.33333333333, ans=0.125 +2024-01-15 20:39:10,367 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 20:39:28,369 INFO [train.py:994] (0/2) Epoch 32, batch 150, loss[loss=0.1321, simple_loss=0.2063, pruned_loss=0.02892, over 23455.00 frames. ], tot_loss[loss=0.1444, simple_loss=0.224, pruned_loss=0.0324, over 2543860.68 frames. ], batch size: 119, lr: 1.31e-02, grad_scale: 16.0 +2024-01-15 20:39:31,603 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=192, metric=8.16 vs. 
limit=15.0 +2024-01-15 20:39:44,694 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=87643.33333333333, ans=0.2 +2024-01-15 20:39:59,451 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=87676.66666666667, ans=0.2 +2024-01-15 20:40:08,650 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=9.85 vs. limit=15.0 +2024-01-15 20:40:22,888 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=87743.33333333333, ans=0.0 +2024-01-15 20:40:30,368 INFO [train.py:994] (0/2) Epoch 32, batch 200, loss[loss=0.1328, simple_loss=0.2147, pruned_loss=0.0254, over 24416.00 frames. ], tot_loss[loss=0.1434, simple_loss=0.2231, pruned_loss=0.03185, over 3046156.92 frames. ], batch size: 258, lr: 1.30e-02, grad_scale: 16.0 +2024-01-15 20:40:40,438 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 1.788e+02 2.022e+02 2.395e+02 4.005e+02, threshold=4.044e+02, percent-clipped=0.0 +2024-01-15 20:40:46,792 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=87810.0, ans=0.0 +2024-01-15 20:40:47,941 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=87810.0, ans=0.0 +2024-01-15 20:40:50,416 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=87810.0, ans=0.125 +2024-01-15 20:40:51,581 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module2.balancer2.prob, batch_count=87810.0, ans=0.125 +2024-01-15 20:41:12,336 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer2.min_abs, batch_count=87876.66666666667, ans=0.5 +2024-01-15 20:41:13,533 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=87876.66666666667, ans=0.0 +2024-01-15 20:41:16,976 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=87876.66666666667, ans=0.2 +2024-01-15 20:41:26,613 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=7.39 vs. limit=15.0 +2024-01-15 20:41:27,590 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.79 vs. limit=15.0 +2024-01-15 20:41:30,504 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.50 vs. limit=22.5 +2024-01-15 20:41:31,432 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=12.62 vs. limit=15.0 +2024-01-15 20:41:31,598 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.28 vs. limit=15.0 +2024-01-15 20:41:32,073 INFO [train.py:994] (0/2) Epoch 32, batch 250, loss[loss=0.144, simple_loss=0.2264, pruned_loss=0.03075, over 24471.00 frames. ], tot_loss[loss=0.1441, simple_loss=0.2241, pruned_loss=0.03205, over 3430478.29 frames. 
], batch size: 170, lr: 1.30e-02, grad_scale: 16.0 +2024-01-15 20:42:26,508 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=88076.66666666667, ans=0.1 +2024-01-15 20:42:32,591 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.min_positive, batch_count=88110.0, ans=0.05 +2024-01-15 20:42:33,437 INFO [train.py:994] (0/2) Epoch 32, batch 300, loss[loss=0.1428, simple_loss=0.2285, pruned_loss=0.02856, over 24348.00 frames. ], tot_loss[loss=0.1441, simple_loss=0.2242, pruned_loss=0.03201, over 3739566.50 frames. ], batch size: 298, lr: 1.30e-02, grad_scale: 16.0 +2024-01-15 20:42:44,253 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.481e+02 1.751e+02 1.891e+02 2.061e+02 3.059e+02, threshold=3.782e+02, percent-clipped=0.0 +2024-01-15 20:43:15,665 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=88210.0, ans=0.0 +2024-01-15 20:43:36,473 INFO [train.py:994] (0/2) Epoch 32, batch 350, loss[loss=0.1532, simple_loss=0.233, pruned_loss=0.03675, over 24507.00 frames. ], tot_loss[loss=0.1444, simple_loss=0.2247, pruned_loss=0.03208, over 3984718.93 frames. ], batch size: 210, lr: 1.30e-02, grad_scale: 16.0 +2024-01-15 20:44:06,737 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=88343.33333333333, ans=0.125 +2024-01-15 20:44:28,757 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=88410.0, ans=0.1 +2024-01-15 20:44:31,884 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=88410.0, ans=10.0 +2024-01-15 20:44:38,792 INFO [train.py:994] (0/2) Epoch 32, batch 400, loss[loss=0.1556, simple_loss=0.2452, pruned_loss=0.033, over 22471.00 frames. ], tot_loss[loss=0.1442, simple_loss=0.2246, pruned_loss=0.0319, over 4175676.53 frames. ], batch size: 358, lr: 1.30e-02, grad_scale: 32.0 +2024-01-15 20:44:41,437 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer1.prob, batch_count=88443.33333333333, ans=0.125 +2024-01-15 20:44:49,976 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.376e+02 1.793e+02 1.951e+02 2.365e+02 3.576e+02, threshold=3.902e+02, percent-clipped=0.0 +2024-01-15 20:44:57,904 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=88476.66666666667, ans=0.1 +2024-01-15 20:45:05,349 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.20 vs. 
limit=15.0 +2024-01-15 20:45:12,459 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=88510.0, ans=0.125 +2024-01-15 20:45:14,968 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=88543.33333333333, ans=0.0 +2024-01-15 20:45:22,371 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=88543.33333333333, ans=0.1 +2024-01-15 20:45:25,911 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=88543.33333333333, ans=0.125 +2024-01-15 20:45:33,371 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=9.03 vs. limit=15.0 +2024-01-15 20:45:34,733 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=384, metric=21.76 vs. limit=22.5 +2024-01-15 20:45:40,458 INFO [train.py:994] (0/2) Epoch 32, batch 450, loss[loss=0.1613, simple_loss=0.2431, pruned_loss=0.03977, over 24501.00 frames. ], tot_loss[loss=0.144, simple_loss=0.2244, pruned_loss=0.03182, over 4316290.30 frames. ], batch size: 187, lr: 1.30e-02, grad_scale: 16.0 +2024-01-15 20:45:45,336 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=88610.0, ans=0.125 +2024-01-15 20:45:47,291 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=2.86 vs. limit=15.0 +2024-01-15 20:46:13,578 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.84 vs. limit=15.0 +2024-01-15 20:46:18,198 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=88710.0, ans=0.2 +2024-01-15 20:46:42,864 INFO [train.py:994] (0/2) Epoch 32, batch 500, loss[loss=0.1467, simple_loss=0.2245, pruned_loss=0.03444, over 24527.00 frames. ], tot_loss[loss=0.1447, simple_loss=0.225, pruned_loss=0.03224, over 4427337.43 frames. ], batch size: 204, lr: 1.30e-02, grad_scale: 16.0 +2024-01-15 20:46:53,657 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.476e+02 1.760e+02 2.034e+02 2.346e+02 3.813e+02, threshold=4.069e+02, percent-clipped=0.0 +2024-01-15 20:47:21,861 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=88876.66666666667, ans=0.125 +2024-01-15 20:47:26,023 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=88876.66666666667, ans=0.125 +2024-01-15 20:47:30,552 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff3_skip_rate, batch_count=88876.66666666667, ans=0.0 +2024-01-15 20:47:44,586 INFO [train.py:994] (0/2) Epoch 32, batch 550, loss[loss=0.143, simple_loss=0.2215, pruned_loss=0.03225, over 24389.00 frames. ], tot_loss[loss=0.1445, simple_loss=0.225, pruned_loss=0.03203, over 4519057.89 frames. 
], batch size: 159, lr: 1.30e-02, grad_scale: 16.0 +2024-01-15 20:47:50,781 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.scale_min, batch_count=88943.33333333333, ans=0.2 +2024-01-15 20:48:21,137 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=89043.33333333333, ans=0.2 +2024-01-15 20:48:39,774 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=89076.66666666667, ans=0.0 +2024-01-15 20:48:43,336 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=89076.66666666667, ans=0.1 +2024-01-15 20:48:46,602 INFO [train.py:994] (0/2) Epoch 32, batch 600, loss[loss=0.1548, simple_loss=0.2336, pruned_loss=0.03794, over 24536.00 frames. ], tot_loss[loss=0.1445, simple_loss=0.2247, pruned_loss=0.03208, over 4579757.07 frames. ], batch size: 187, lr: 1.30e-02, grad_scale: 16.0 +2024-01-15 20:48:48,058 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=89110.0, ans=0.125 +2024-01-15 20:48:51,528 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=89110.0, ans=0.0 +2024-01-15 20:48:52,772 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=89110.0, ans=0.1 +2024-01-15 20:48:57,412 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.489e+02 1.784e+02 2.002e+02 2.290e+02 4.373e+02, threshold=4.003e+02, percent-clipped=1.0 +2024-01-15 20:49:22,889 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=10.60 vs. limit=15.0 +2024-01-15 20:49:48,091 INFO [train.py:994] (0/2) Epoch 32, batch 650, loss[loss=0.1515, simple_loss=0.2332, pruned_loss=0.03486, over 24501.00 frames. ], tot_loss[loss=0.1441, simple_loss=0.2242, pruned_loss=0.03203, over 4620723.19 frames. ], batch size: 181, lr: 1.29e-02, grad_scale: 16.0 +2024-01-15 20:49:48,221 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.dropout.p, batch_count=89276.66666666667, ans=0.1 +2024-01-15 20:50:15,968 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=89343.33333333333, ans=0.0 +2024-01-15 20:50:37,190 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=89410.0, ans=0.2 +2024-01-15 20:50:42,007 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=89410.0, ans=0.125 +2024-01-15 20:50:51,336 INFO [train.py:994] (0/2) Epoch 32, batch 700, loss[loss=0.1445, simple_loss=0.2314, pruned_loss=0.02878, over 24240.00 frames. ], tot_loss[loss=0.1447, simple_loss=0.2248, pruned_loss=0.0323, over 4653681.41 frames. 
], batch size: 311, lr: 1.29e-02, grad_scale: 16.0 +2024-01-15 20:51:01,837 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.524e+02 1.726e+02 1.881e+02 2.158e+02 2.890e+02, threshold=3.763e+02, percent-clipped=0.0 +2024-01-15 20:51:07,053 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=89476.66666666667, ans=0.125 +2024-01-15 20:51:22,450 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=89510.0, ans=0.0 +2024-01-15 20:51:28,100 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=89543.33333333333, ans=0.1 +2024-01-15 20:51:32,751 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 20:51:41,694 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=89576.66666666667, ans=0.0 +2024-01-15 20:51:49,246 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.prob, batch_count=89576.66666666667, ans=0.125 +2024-01-15 20:51:52,582 INFO [train.py:994] (0/2) Epoch 32, batch 750, loss[loss=0.1514, simple_loss=0.2323, pruned_loss=0.03527, over 24477.00 frames. ], tot_loss[loss=0.1445, simple_loss=0.2247, pruned_loss=0.03217, over 4694048.11 frames. ], batch size: 222, lr: 1.29e-02, grad_scale: 16.0 +2024-01-15 20:52:20,351 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.62 vs. limit=6.0 +2024-01-15 20:52:32,058 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=89710.0, ans=0.1 +2024-01-15 20:52:41,034 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=89743.33333333333, ans=0.125 +2024-01-15 20:52:50,390 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=89743.33333333333, ans=0.125 +2024-01-15 20:52:52,477 INFO [train.py:994] (0/2) Epoch 32, batch 800, loss[loss=0.1397, simple_loss=0.2239, pruned_loss=0.02772, over 24475.00 frames. ], tot_loss[loss=0.1442, simple_loss=0.2243, pruned_loss=0.0321, over 4712733.44 frames. 
], batch size: 222, lr: 1.29e-02, grad_scale: 32.0 +2024-01-15 20:52:53,796 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=89776.66666666667, ans=0.1 +2024-01-15 20:52:57,164 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=89776.66666666667, ans=0.125 +2024-01-15 20:53:02,382 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.514e+02 1.755e+02 1.980e+02 2.439e+02 4.753e+02, threshold=3.960e+02, percent-clipped=3.0 +2024-01-15 20:53:06,089 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=89810.0, ans=0.2 +2024-01-15 20:53:29,389 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=89876.66666666667, ans=0.0 +2024-01-15 20:53:37,013 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=89910.0, ans=0.125 +2024-01-15 20:53:41,173 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-32.pt +2024-01-15 20:54:02,065 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=89920.0, ans=0.1 +2024-01-15 20:54:02,967 INFO [train.py:994] (0/2) Epoch 33, batch 0, loss[loss=0.1413, simple_loss=0.2252, pruned_loss=0.0287, over 24206.00 frames. ], tot_loss[loss=0.1413, simple_loss=0.2252, pruned_loss=0.0287, over 24206.00 frames. ], batch size: 311, lr: 1.27e-02, grad_scale: 32.0 +2024-01-15 20:54:02,968 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 20:54:23,932 INFO [train.py:1026] (0/2) Epoch 33, validation: loss=0.1663, simple_loss=0.2485, pruned_loss=0.04203, over 1622729.00 frames. +2024-01-15 20:54:23,936 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 20:54:43,089 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.52 vs. limit=6.0 +2024-01-15 20:54:44,892 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass_mid.scale_min, batch_count=89953.33333333333, ans=0.2 +2024-01-15 20:55:00,860 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=90020.0, ans=0.0 +2024-01-15 20:55:26,533 INFO [train.py:994] (0/2) Epoch 33, batch 50, loss[loss=0.1415, simple_loss=0.221, pruned_loss=0.03103, over 24488.00 frames. ], tot_loss[loss=0.1434, simple_loss=0.2236, pruned_loss=0.0316, over 1089484.45 frames. 
], batch size: 170, lr: 1.27e-02, grad_scale: 32.0 +2024-01-15 20:55:26,888 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=90086.66666666667, ans=0.2 +2024-01-15 20:55:32,711 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=90086.66666666667, ans=0.0 +2024-01-15 20:55:35,041 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=90086.66666666667, ans=0.125 +2024-01-15 20:55:46,545 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.417e+02 1.799e+02 2.020e+02 2.508e+02 4.258e+02, threshold=4.039e+02, percent-clipped=1.0 +2024-01-15 20:55:48,093 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=90120.0, ans=0.2 +2024-01-15 20:55:49,176 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=90120.0, ans=0.125 +2024-01-15 20:55:52,749 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=90153.33333333333, ans=0.125 +2024-01-15 20:55:57,539 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.scale_min, batch_count=90153.33333333333, ans=0.2 +2024-01-15 20:56:27,952 INFO [train.py:994] (0/2) Epoch 33, batch 100, loss[loss=0.1484, simple_loss=0.2327, pruned_loss=0.032, over 24423.00 frames. ], tot_loss[loss=0.1443, simple_loss=0.2248, pruned_loss=0.03188, over 1926088.79 frames. ], batch size: 159, lr: 1.27e-02, grad_scale: 32.0 +2024-01-15 20:56:32,268 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 20:56:37,716 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=90253.33333333333, ans=0.1 +2024-01-15 20:56:38,895 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=90253.33333333333, ans=0.0 +2024-01-15 20:56:40,023 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=90286.66666666667, ans=0.125 +2024-01-15 20:56:43,582 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.prob, batch_count=90286.66666666667, ans=0.125 +2024-01-15 20:57:18,864 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=90386.66666666667, ans=0.0 +2024-01-15 20:57:30,420 INFO [train.py:994] (0/2) Epoch 33, batch 150, loss[loss=0.1418, simple_loss=0.2233, pruned_loss=0.03018, over 24502.00 frames. ], tot_loss[loss=0.1446, simple_loss=0.2253, pruned_loss=0.0319, over 2577034.27 frames. 
], batch size: 236, lr: 1.27e-02, grad_scale: 32.0 +2024-01-15 20:57:36,602 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=90420.0, ans=0.0 +2024-01-15 20:57:45,962 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90453.33333333333, ans=0.1 +2024-01-15 20:57:48,938 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 20:57:49,750 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.457e+02 1.876e+02 2.056e+02 2.342e+02 3.508e+02, threshold=4.112e+02, percent-clipped=0.0 +2024-01-15 20:58:19,308 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=90553.33333333333, ans=0.125 +2024-01-15 20:58:31,946 INFO [train.py:994] (0/2) Epoch 33, batch 200, loss[loss=0.153, simple_loss=0.2297, pruned_loss=0.03812, over 24452.00 frames. ], tot_loss[loss=0.1437, simple_loss=0.2242, pruned_loss=0.03156, over 3071453.23 frames. ], batch size: 170, lr: 1.27e-02, grad_scale: 32.0 +2024-01-15 20:58:34,753 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=512, metric=3.90 vs. limit=15.0 +2024-01-15 20:58:37,414 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=90586.66666666667, ans=0.07 +2024-01-15 20:58:41,663 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=90586.66666666667, ans=0.1 +2024-01-15 20:59:09,208 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=90686.66666666667, ans=0.0 +2024-01-15 20:59:12,795 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=90686.66666666667, ans=0.125 +2024-01-15 20:59:13,956 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=90686.66666666667, ans=0.1 +2024-01-15 20:59:22,148 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=90720.0, ans=0.0 +2024-01-15 20:59:34,011 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=90753.33333333333, ans=0.2 +2024-01-15 20:59:34,979 INFO [train.py:994] (0/2) Epoch 33, batch 250, loss[loss=0.1281, simple_loss=0.2099, pruned_loss=0.02316, over 23916.00 frames. ], tot_loss[loss=0.1439, simple_loss=0.2243, pruned_loss=0.03179, over 3443297.52 frames. 
], batch size: 131, lr: 1.27e-02, grad_scale: 32.0 +2024-01-15 20:59:49,495 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.skip_rate, batch_count=90786.66666666667, ans=0.04949747468305833 +2024-01-15 20:59:50,475 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.nonlin_attention.balancer.max_positive, batch_count=90786.66666666667, ans=0.95 +2024-01-15 20:59:54,428 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 1.800e+02 1.980e+02 2.341e+02 4.603e+02, threshold=3.961e+02, percent-clipped=1.0 +2024-01-15 20:59:54,804 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer1.max_abs, batch_count=90786.66666666667, ans=10.0 +2024-01-15 20:59:58,580 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.whiten2.whitening_limit, batch_count=90820.0, ans=15.0 +2024-01-15 21:00:00,145 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.max_abs, batch_count=90820.0, ans=10.0 +2024-01-15 21:00:12,123 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.86 vs. limit=6.0 +2024-01-15 21:00:15,550 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=4.14 vs. limit=15.0 +2024-01-15 21:00:23,103 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=90886.66666666667, ans=0.125 +2024-01-15 21:00:34,268 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=90886.66666666667, ans=0.125 +2024-01-15 21:00:36,344 INFO [train.py:994] (0/2) Epoch 33, batch 300, loss[loss=0.1616, simple_loss=0.2398, pruned_loss=0.04165, over 24499.00 frames. ], tot_loss[loss=0.1438, simple_loss=0.2235, pruned_loss=0.03206, over 3726701.94 frames. ], batch size: 187, lr: 1.27e-02, grad_scale: 32.0 +2024-01-15 21:00:42,658 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=90920.0, ans=0.0 +2024-01-15 21:00:59,170 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer2.prob, batch_count=90953.33333333333, ans=0.125 +2024-01-15 21:01:06,273 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=90986.66666666667, ans=0.025 +2024-01-15 21:01:37,854 INFO [train.py:994] (0/2) Epoch 33, batch 350, loss[loss=0.1453, simple_loss=0.2281, pruned_loss=0.03127, over 24480.00 frames. ], tot_loss[loss=0.144, simple_loss=0.2241, pruned_loss=0.03196, over 3972676.89 frames. ], batch size: 181, lr: 1.26e-02, grad_scale: 32.0 +2024-01-15 21:01:40,926 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=10.33 vs. 
limit=15.0 +2024-01-15 21:01:46,884 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=91086.66666666667, ans=0.015 +2024-01-15 21:01:57,158 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.376e+02 1.815e+02 2.000e+02 2.389e+02 3.512e+02, threshold=3.999e+02, percent-clipped=0.0 +2024-01-15 21:02:25,049 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn1.whiten, num_groups=1, num_channels=512, metric=18.89 vs. limit=22.5 +2024-01-15 21:02:25,908 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=91220.0, ans=0.2 +2024-01-15 21:02:32,983 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.34 vs. limit=15.0 +2024-01-15 21:02:39,661 INFO [train.py:994] (0/2) Epoch 33, batch 400, loss[loss=0.1479, simple_loss=0.2303, pruned_loss=0.03278, over 24416.00 frames. ], tot_loss[loss=0.1438, simple_loss=0.224, pruned_loss=0.03186, over 4159830.27 frames. ], batch size: 258, lr: 1.26e-02, grad_scale: 32.0 +2024-01-15 21:02:42,307 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.hidden_balancer.prob, batch_count=91253.33333333333, ans=0.125 +2024-01-15 21:02:50,484 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=91286.66666666667, ans=0.0 +2024-01-15 21:02:54,548 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=91286.66666666667, ans=0.125 +2024-01-15 21:03:02,244 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=91286.66666666667, ans=0.125 +2024-01-15 21:03:03,402 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=91320.0, ans=0.2 +2024-01-15 21:03:06,927 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=91320.0, ans=0.125 +2024-01-15 21:03:21,669 INFO [scaling.py:1022] (0/2) Whitening: name=encoder_embed.convnext.out_whiten, num_groups=1, num_channels=128, metric=4.33 vs. limit=5.0 +2024-01-15 21:03:24,505 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=91353.33333333333, ans=0.0 +2024-01-15 21:03:36,983 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=91386.66666666667, ans=0.1 +2024-01-15 21:03:39,547 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.66 vs. limit=6.0 +2024-01-15 21:03:41,392 INFO [train.py:994] (0/2) Epoch 33, batch 450, loss[loss=0.131, simple_loss=0.2062, pruned_loss=0.02784, over 23399.00 frames. ], tot_loss[loss=0.1438, simple_loss=0.2239, pruned_loss=0.03182, over 4313053.94 frames. 
], batch size: 119, lr: 1.26e-02, grad_scale: 32.0 +2024-01-15 21:03:49,235 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.balancer1.prob, batch_count=91420.0, ans=0.125 +2024-01-15 21:04:02,607 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.480e+02 1.828e+02 2.096e+02 2.286e+02 3.771e+02, threshold=4.192e+02, percent-clipped=0.0 +2024-01-15 21:04:18,950 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.hidden_balancer.prob, batch_count=91520.0, ans=0.125 +2024-01-15 21:04:25,475 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_skip_rate, batch_count=91520.0, ans=0.0 +2024-01-15 21:04:25,536 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.skip_rate, batch_count=91520.0, ans=0.07 +2024-01-15 21:04:30,158 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=91553.33333333333, ans=0.125 +2024-01-15 21:04:31,241 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=91553.33333333333, ans=0.2 +2024-01-15 21:04:43,196 INFO [train.py:994] (0/2) Epoch 33, batch 500, loss[loss=0.1537, simple_loss=0.2327, pruned_loss=0.03736, over 24537.00 frames. ], tot_loss[loss=0.1438, simple_loss=0.2238, pruned_loss=0.03188, over 4421036.29 frames. ], batch size: 236, lr: 1.26e-02, grad_scale: 32.0 +2024-01-15 21:04:49,396 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.04 vs. limit=6.0 +2024-01-15 21:05:06,038 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=91620.0, ans=0.2 +2024-01-15 21:05:40,794 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.nonlin_attention.balancer.prob, batch_count=91720.0, ans=0.125 +2024-01-15 21:05:45,239 INFO [train.py:994] (0/2) Epoch 33, batch 550, loss[loss=0.1409, simple_loss=0.2241, pruned_loss=0.02882, over 24494.00 frames. ], tot_loss[loss=0.1435, simple_loss=0.2235, pruned_loss=0.03173, over 4503667.54 frames. ], batch size: 216, lr: 1.26e-02, grad_scale: 32.0 +2024-01-15 21:06:04,640 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=91786.66666666667, ans=0.125 +2024-01-15 21:06:06,608 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.471e+02 1.687e+02 1.895e+02 2.179e+02 3.230e+02, threshold=3.789e+02, percent-clipped=0.0 +2024-01-15 21:06:22,752 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.skip_rate, batch_count=91853.33333333333, ans=0.09899494936611666 +2024-01-15 21:06:47,476 INFO [train.py:994] (0/2) Epoch 33, batch 600, loss[loss=0.1507, simple_loss=0.2271, pruned_loss=0.03711, over 24458.00 frames. ], tot_loss[loss=0.1434, simple_loss=0.2237, pruned_loss=0.03156, over 4578597.37 frames. 
], batch size: 250, lr: 1.26e-02, grad_scale: 32.0 +2024-01-15 21:07:11,337 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 21:07:48,517 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 21:07:49,420 INFO [train.py:994] (0/2) Epoch 33, batch 650, loss[loss=0.1475, simple_loss=0.2309, pruned_loss=0.03203, over 24527.00 frames. ], tot_loss[loss=0.1437, simple_loss=0.2241, pruned_loss=0.03163, over 4636997.96 frames. ], batch size: 210, lr: 1.26e-02, grad_scale: 32.0 +2024-01-15 21:08:01,109 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=92120.0, ans=0.0 +2024-01-15 21:08:04,466 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 21:08:10,670 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.572e+02 1.688e+02 1.875e+02 2.082e+02 2.904e+02, threshold=3.750e+02, percent-clipped=0.0 +2024-01-15 21:08:13,277 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=92153.33333333333, ans=0.125 +2024-01-15 21:08:40,340 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=92220.0, ans=0.1 +2024-01-15 21:08:51,411 INFO [train.py:994] (0/2) Epoch 33, batch 700, loss[loss=0.139, simple_loss=0.2209, pruned_loss=0.02858, over 24474.00 frames. ], tot_loss[loss=0.1442, simple_loss=0.2246, pruned_loss=0.03195, over 4681406.80 frames. ], batch size: 250, lr: 1.26e-02, grad_scale: 32.0 +2024-01-15 21:08:52,759 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=92253.33333333333, ans=0.125 +2024-01-15 21:08:53,986 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.balancer2.prob, batch_count=92253.33333333333, ans=0.125 +2024-01-15 21:09:35,230 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=92353.33333333333, ans=0.125 +2024-01-15 21:09:53,365 INFO [train.py:994] (0/2) Epoch 33, batch 750, loss[loss=0.1488, simple_loss=0.2319, pruned_loss=0.03283, over 24365.00 frames. ], tot_loss[loss=0.1438, simple_loss=0.2241, pruned_loss=0.0318, over 4719771.94 frames. ], batch size: 298, lr: 1.26e-02, grad_scale: 32.0 +2024-01-15 21:09:58,795 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=92420.0, ans=0.125 +2024-01-15 21:10:08,202 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=92453.33333333333, ans=0.2 +2024-01-15 21:10:08,856 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.97 vs. limit=22.5 +2024-01-15 21:10:13,741 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.781e+02 1.903e+02 2.114e+02 3.493e+02, threshold=3.806e+02, percent-clipped=0.0 +2024-01-15 21:10:52,262 INFO [train.py:994] (0/2) Epoch 33, batch 800, loss[loss=0.146, simple_loss=0.2258, pruned_loss=0.03306, over 24465.00 frames. ], tot_loss[loss=0.1435, simple_loss=0.2236, pruned_loss=0.03169, over 4732479.77 frames. 
], batch size: 170, lr: 1.25e-02, grad_scale: 32.0 +2024-01-15 21:10:54,945 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.44 vs. limit=15.0 +2024-01-15 21:11:04,588 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.48 vs. limit=15.0 +2024-01-15 21:11:26,151 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=92686.66666666667, ans=0.2 +2024-01-15 21:11:31,902 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass.scale_min, batch_count=92686.66666666667, ans=0.2 +2024-01-15 21:11:41,739 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-33.pt +2024-01-15 21:12:03,076 INFO [train.py:994] (0/2) Epoch 34, batch 0, loss[loss=0.144, simple_loss=0.2217, pruned_loss=0.03317, over 24488.00 frames. ], tot_loss[loss=0.144, simple_loss=0.2217, pruned_loss=0.03317, over 24488.00 frames. ], batch size: 187, lr: 1.24e-02, grad_scale: 32.0 +2024-01-15 21:12:03,077 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 21:12:23,919 INFO [train.py:1026] (0/2) Epoch 34, validation: loss=0.166, simple_loss=0.2489, pruned_loss=0.04151, over 1622729.00 frames. +2024-01-15 21:12:23,919 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 21:12:37,141 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 21:12:45,900 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=15.33 vs. limit=15.0 +2024-01-15 21:12:47,350 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=92796.66666666667, ans=0.125 +2024-01-15 21:12:52,441 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.77 vs. limit=6.0 +2024-01-15 21:12:52,823 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.566e+02 1.787e+02 1.980e+02 2.213e+02 3.780e+02, threshold=3.960e+02, percent-clipped=0.0 +2024-01-15 21:12:53,805 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.self_attn1.whiten, num_groups=1, num_channels=192, metric=12.32 vs. limit=22.5 +2024-01-15 21:13:08,210 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=92830.0, ans=0.125 +2024-01-15 21:13:09,987 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.34 vs. limit=15.0 +2024-01-15 21:13:24,347 INFO [train.py:994] (0/2) Epoch 34, batch 50, loss[loss=0.1549, simple_loss=0.2334, pruned_loss=0.0382, over 24470.00 frames. ], tot_loss[loss=0.1436, simple_loss=0.2239, pruned_loss=0.03164, over 1092111.00 frames. 
], batch size: 250, lr: 1.24e-02, grad_scale: 32.0 +2024-01-15 21:13:31,639 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=92896.66666666667, ans=0.125 +2024-01-15 21:13:41,055 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=92930.0, ans=0.1 +2024-01-15 21:13:48,082 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=92963.33333333333, ans=0.2 +2024-01-15 21:13:56,273 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer2.prob, batch_count=92963.33333333333, ans=0.125 +2024-01-15 21:14:04,550 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer2.prob, batch_count=92996.66666666667, ans=0.125 +2024-01-15 21:14:20,453 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.08 vs. limit=15.0 +2024-01-15 21:14:25,649 INFO [train.py:994] (0/2) Epoch 34, batch 100, loss[loss=0.1532, simple_loss=0.2324, pruned_loss=0.03699, over 24457.00 frames. ], tot_loss[loss=0.1418, simple_loss=0.2227, pruned_loss=0.03047, over 1929350.88 frames. ], batch size: 170, lr: 1.23e-02, grad_scale: 32.0 +2024-01-15 21:14:25,980 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.2.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 21:14:26,519 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=8.78 vs. limit=15.0 +2024-01-15 21:14:33,100 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=93063.33333333333, ans=0.0 +2024-01-15 21:14:40,360 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=93096.66666666667, ans=0.1 +2024-01-15 21:14:56,458 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 1.672e+02 1.813e+02 2.051e+02 3.232e+02, threshold=3.625e+02, percent-clipped=0.0 +2024-01-15 21:15:04,066 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass_mid.scale_min, batch_count=93163.33333333333, ans=0.2 +2024-01-15 21:15:17,588 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=93196.66666666667, ans=0.0 +2024-01-15 21:15:18,680 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.prob, batch_count=93196.66666666667, ans=0.125 +2024-01-15 21:15:28,398 INFO [train.py:994] (0/2) Epoch 34, batch 150, loss[loss=0.156, simple_loss=0.2368, pruned_loss=0.03766, over 24522.00 frames. ], tot_loss[loss=0.1425, simple_loss=0.2232, pruned_loss=0.03087, over 2575626.20 frames. ], batch size: 204, lr: 1.23e-02, grad_scale: 16.0 +2024-01-15 21:15:40,673 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.10 vs. 
limit=15.0 +2024-01-15 21:16:05,194 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/checkpoint-28000.pt +2024-01-15 21:16:32,298 INFO [train.py:994] (0/2) Epoch 34, batch 200, loss[loss=0.1468, simple_loss=0.2227, pruned_loss=0.03545, over 24391.00 frames. ], tot_loss[loss=0.1424, simple_loss=0.2229, pruned_loss=0.03089, over 3055087.52 frames. ], batch size: 159, lr: 1.23e-02, grad_scale: 16.0 +2024-01-15 21:16:43,269 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass_mid.scale_min, batch_count=93396.66666666667, ans=0.2 +2024-01-15 21:16:49,164 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=93430.0, ans=0.1 +2024-01-15 21:16:58,437 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=93463.33333333333, ans=0.2 +2024-01-15 21:17:03,584 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.424e+02 1.716e+02 2.028e+02 2.257e+02 3.544e+02, threshold=4.056e+02, percent-clipped=0.0 +2024-01-15 21:17:10,490 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=93496.66666666667, ans=0.0 +2024-01-15 21:17:26,078 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=93530.0, ans=0.1 +2024-01-15 21:17:26,167 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.out_combiner.scale_min, batch_count=93530.0, ans=0.2 +2024-01-15 21:17:35,378 INFO [train.py:994] (0/2) Epoch 34, batch 250, loss[loss=0.1491, simple_loss=0.2324, pruned_loss=0.03286, over 24508.00 frames. ], tot_loss[loss=0.1419, simple_loss=0.2223, pruned_loss=0.03071, over 3431435.46 frames. ], batch size: 204, lr: 1.23e-02, grad_scale: 16.0 +2024-01-15 21:17:35,692 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward3.hidden_balancer.prob, batch_count=93563.33333333333, ans=0.125 +2024-01-15 21:17:35,733 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=93563.33333333333, ans=0.0 +2024-01-15 21:17:46,285 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=93596.66666666667, ans=0.125 +2024-01-15 21:17:54,158 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.71 vs. limit=15.0 +2024-01-15 21:18:13,158 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer2.prob, batch_count=93663.33333333333, ans=0.125 +2024-01-15 21:18:23,137 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=93696.66666666667, ans=0.125 +2024-01-15 21:18:31,995 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=93696.66666666667, ans=0.125 +2024-01-15 21:18:36,377 INFO [train.py:994] (0/2) Epoch 34, batch 300, loss[loss=0.1438, simple_loss=0.2211, pruned_loss=0.03322, over 24433.00 frames. ], tot_loss[loss=0.1418, simple_loss=0.2222, pruned_loss=0.03072, over 3729269.14 frames. 
], batch size: 159, lr: 1.23e-02, grad_scale: 16.0 +2024-01-15 21:19:05,947 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=93796.66666666667, ans=0.125 +2024-01-15 21:19:07,348 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.570e+02 1.906e+02 2.356e+02 3.034e+02 5.293e+02, threshold=4.711e+02, percent-clipped=5.0 +2024-01-15 21:19:37,988 INFO [train.py:994] (0/2) Epoch 34, batch 350, loss[loss=0.16, simple_loss=0.2382, pruned_loss=0.0409, over 24487.00 frames. ], tot_loss[loss=0.1416, simple_loss=0.2219, pruned_loss=0.03069, over 3973943.97 frames. ], batch size: 181, lr: 1.23e-02, grad_scale: 16.0 +2024-01-15 21:19:41,835 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=93896.66666666667, ans=0.1 +2024-01-15 21:19:59,486 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=93930.0, ans=0.125 +2024-01-15 21:20:00,985 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.54 vs. limit=10.0 +2024-01-15 21:20:09,019 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=93963.33333333333, ans=0.1 +2024-01-15 21:20:40,063 INFO [train.py:994] (0/2) Epoch 34, batch 400, loss[loss=0.1266, simple_loss=0.2123, pruned_loss=0.02046, over 24277.00 frames. ], tot_loss[loss=0.1411, simple_loss=0.2212, pruned_loss=0.03046, over 4157765.21 frames. ], batch size: 147, lr: 1.23e-02, grad_scale: 32.0 +2024-01-15 21:20:49,260 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=94063.33333333333, ans=0.1 +2024-01-15 21:20:50,491 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=94063.33333333333, ans=0.1 +2024-01-15 21:20:57,733 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass_mid.scale_min, batch_count=94096.66666666667, ans=0.2 +2024-01-15 21:21:07,903 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=94130.0, ans=0.125 +2024-01-15 21:21:11,710 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.483e+02 1.754e+02 1.948e+02 2.280e+02 3.320e+02, threshold=3.895e+02, percent-clipped=0.0 +2024-01-15 21:21:12,006 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=94130.0, ans=0.2 +2024-01-15 21:21:13,306 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.bypass.scale_min, batch_count=94130.0, ans=0.2 +2024-01-15 21:21:18,393 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.06 vs. limit=10.0 +2024-01-15 21:21:21,826 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.69 vs. 
limit=15.0 +2024-01-15 21:21:26,097 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=94163.33333333333, ans=0.1 +2024-01-15 21:21:29,068 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=94196.66666666667, ans=0.125 +2024-01-15 21:21:40,459 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=94196.66666666667, ans=0.0 +2024-01-15 21:21:41,613 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=94230.0, ans=0.2 +2024-01-15 21:21:42,438 INFO [train.py:994] (0/2) Epoch 34, batch 450, loss[loss=0.1385, simple_loss=0.2245, pruned_loss=0.02623, over 24482.00 frames. ], tot_loss[loss=0.141, simple_loss=0.2211, pruned_loss=0.03041, over 4302308.58 frames. ], batch size: 216, lr: 1.23e-02, grad_scale: 32.0 +2024-01-15 21:21:51,167 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=256, metric=3.69 vs. limit=15.0 +2024-01-15 21:22:18,213 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=94330.0, ans=0.2 +2024-01-15 21:22:23,723 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.49 vs. limit=15.0 +2024-01-15 21:22:38,153 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=94363.33333333333, ans=0.0 +2024-01-15 21:22:41,803 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=94363.33333333333, ans=0.125 +2024-01-15 21:22:43,987 INFO [train.py:994] (0/2) Epoch 34, batch 500, loss[loss=0.139, simple_loss=0.2245, pruned_loss=0.02677, over 24522.00 frames. ], tot_loss[loss=0.1412, simple_loss=0.2212, pruned_loss=0.03057, over 4408794.63 frames. ], batch size: 229, lr: 1.23e-02, grad_scale: 32.0 +2024-01-15 21:22:55,295 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=94430.0, ans=0.125 +2024-01-15 21:22:57,289 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module2.balancer1.prob, batch_count=94430.0, ans=0.125 +2024-01-15 21:23:08,421 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=16.17 vs. 
limit=22.5 +2024-01-15 21:23:09,015 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=94463.33333333333, ans=0.125 +2024-01-15 21:23:11,360 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.min_positive, batch_count=94463.33333333333, ans=0.05 +2024-01-15 21:23:15,325 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.444e+02 1.724e+02 1.952e+02 2.350e+02 3.869e+02, threshold=3.904e+02, percent-clipped=0.0 +2024-01-15 21:23:23,415 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=94496.66666666667, ans=0.125 +2024-01-15 21:23:41,510 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=94530.0, ans=0.125 +2024-01-15 21:23:45,960 INFO [train.py:994] (0/2) Epoch 34, batch 550, loss[loss=0.1401, simple_loss=0.2214, pruned_loss=0.02937, over 24357.00 frames. ], tot_loss[loss=0.141, simple_loss=0.2213, pruned_loss=0.03037, over 4502619.00 frames. ], batch size: 275, lr: 1.23e-02, grad_scale: 32.0 +2024-01-15 21:24:15,067 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=94630.0, ans=0.125 +2024-01-15 21:24:16,565 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.37 vs. limit=15.0 +2024-01-15 21:24:31,090 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.ff2_skip_rate, batch_count=94663.33333333333, ans=0.0 +2024-01-15 21:24:47,658 INFO [train.py:994] (0/2) Epoch 34, batch 600, loss[loss=0.1417, simple_loss=0.2243, pruned_loss=0.02952, over 24461.00 frames. ], tot_loss[loss=0.1416, simple_loss=0.2221, pruned_loss=0.03053, over 4571758.68 frames. ], batch size: 222, lr: 1.22e-02, grad_scale: 16.0 +2024-01-15 21:25:01,602 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=94763.33333333333, ans=0.125 +2024-01-15 21:25:07,012 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=94763.33333333333, ans=0.2 +2024-01-15 21:25:19,623 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.368e+02 1.774e+02 2.106e+02 2.731e+02 3.699e+02, threshold=4.212e+02, percent-clipped=0.0 +2024-01-15 21:25:29,361 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.bypass.skip_rate, batch_count=94830.0, ans=0.07 +2024-01-15 21:25:32,497 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=9.36 vs. limit=10.0 +2024-01-15 21:25:35,275 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=94830.0, ans=0.125 +2024-01-15 21:25:49,121 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.21 vs. limit=12.0 +2024-01-15 21:25:49,521 INFO [train.py:994] (0/2) Epoch 34, batch 650, loss[loss=0.1422, simple_loss=0.2284, pruned_loss=0.02807, over 24502.00 frames. ], tot_loss[loss=0.1413, simple_loss=0.2219, pruned_loss=0.03039, over 4620836.89 frames. 
], batch size: 181, lr: 1.22e-02, grad_scale: 16.0 +2024-01-15 21:26:00,013 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=94896.66666666667, ans=0.1 +2024-01-15 21:26:09,533 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 21:26:24,742 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=94963.33333333333, ans=0.125 +2024-01-15 21:26:40,596 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=95030.0, ans=0.95 +2024-01-15 21:26:40,679 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=95030.0, ans=0.125 +2024-01-15 21:26:51,501 INFO [train.py:994] (0/2) Epoch 34, batch 700, loss[loss=0.1489, simple_loss=0.2289, pruned_loss=0.03438, over 24572.00 frames. ], tot_loss[loss=0.1415, simple_loss=0.2219, pruned_loss=0.03053, over 4659817.39 frames. ], batch size: 176, lr: 1.22e-02, grad_scale: 16.0 +2024-01-15 21:27:14,292 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer1.prob, batch_count=95096.66666666667, ans=0.125 +2024-01-15 21:27:14,902 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=9.92 vs. limit=15.0 +2024-01-15 21:27:23,407 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.377e+02 1.691e+02 1.887e+02 2.144e+02 3.212e+02, threshold=3.773e+02, percent-clipped=0.0 +2024-01-15 21:27:24,904 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=95130.0, ans=0.0 +2024-01-15 21:27:33,770 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=95163.33333333333, ans=0.125 +2024-01-15 21:27:33,986 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.68 vs. limit=6.0 +2024-01-15 21:27:35,185 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.25 vs. limit=15.0 +2024-01-15 21:27:40,213 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=95196.66666666667, ans=0.04949747468305833 +2024-01-15 21:27:48,374 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=95196.66666666667, ans=0.1 +2024-01-15 21:27:52,602 INFO [train.py:994] (0/2) Epoch 34, batch 750, loss[loss=0.149, simple_loss=0.2258, pruned_loss=0.03611, over 24568.00 frames. ], tot_loss[loss=0.1417, simple_loss=0.222, pruned_loss=0.03065, over 4692653.65 frames. ], batch size: 176, lr: 1.22e-02, grad_scale: 16.0 +2024-01-15 21:27:56,970 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.02 vs. limit=22.5 +2024-01-15 21:28:12,280 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=8, num_channels=256, metric=5.27 vs. 
limit=6.0 +2024-01-15 21:28:14,297 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=95263.33333333333, ans=0.0 +2024-01-15 21:28:16,622 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 21:28:43,367 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=95363.33333333333, ans=0.125 +2024-01-15 21:28:47,779 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=95363.33333333333, ans=0.125 +2024-01-15 21:28:50,440 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=512, metric=4.20 vs. limit=15.0 +2024-01-15 21:28:53,181 INFO [train.py:994] (0/2) Epoch 34, batch 800, loss[loss=0.1399, simple_loss=0.2244, pruned_loss=0.0277, over 24443.00 frames. ], tot_loss[loss=0.1414, simple_loss=0.2218, pruned_loss=0.03051, over 4715500.89 frames. ], batch size: 250, lr: 1.22e-02, grad_scale: 32.0 +2024-01-15 21:29:00,067 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=95396.66666666667, ans=0.125 +2024-01-15 21:29:15,565 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=95463.33333333333, ans=0.1 +2024-01-15 21:29:23,107 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.457e+02 1.735e+02 1.860e+02 2.155e+02 3.923e+02, threshold=3.720e+02, percent-clipped=1.0 +2024-01-15 21:29:25,591 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=95463.33333333333, ans=0.0 +2024-01-15 21:29:32,158 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=95496.66666666667, ans=0.0 +2024-01-15 21:29:37,146 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.46 vs. limit=12.0 +2024-01-15 21:29:41,656 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-34.pt +2024-01-15 21:30:03,686 INFO [train.py:994] (0/2) Epoch 35, batch 0, loss[loss=0.1537, simple_loss=0.2356, pruned_loss=0.03588, over 24611.00 frames. ], tot_loss[loss=0.1537, simple_loss=0.2356, pruned_loss=0.03588, over 24611.00 frames. ], batch size: 199, lr: 1.20e-02, grad_scale: 32.0 +2024-01-15 21:30:03,687 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 21:30:24,125 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.4.encoder.layers.2.self_attn_weights, attn_weights_entropy = tensor([2.1069, 2.7723, 2.9187, 2.8873], device='cuda:0') +2024-01-15 21:30:24,582 INFO [train.py:1026] (0/2) Epoch 35, validation: loss=0.1658, simple_loss=0.2479, pruned_loss=0.04189, over 1622729.00 frames. 
+2024-01-15 21:30:24,582 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 21:30:38,093 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=95573.33333333333, ans=0.2 +2024-01-15 21:30:54,901 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=95606.66666666667, ans=0.0 +2024-01-15 21:31:26,715 INFO [train.py:994] (0/2) Epoch 35, batch 50, loss[loss=0.1481, simple_loss=0.231, pruned_loss=0.03256, over 24477.00 frames. ], tot_loss[loss=0.1411, simple_loss=0.2211, pruned_loss=0.03056, over 1094562.42 frames. ], batch size: 222, lr: 1.20e-02, grad_scale: 32.0 +2024-01-15 21:31:48,236 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_skip_rate, batch_count=95740.0, ans=0.0 +2024-01-15 21:31:49,470 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=95740.0, ans=0.1 +2024-01-15 21:32:04,349 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=95806.66666666667, ans=0.0 +2024-01-15 21:32:07,580 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.453e+02 1.768e+02 1.965e+02 2.373e+02 3.841e+02, threshold=3.929e+02, percent-clipped=1.0 +2024-01-15 21:32:15,651 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=95840.0, ans=0.0 +2024-01-15 21:32:23,965 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=95840.0, ans=0.0 +2024-01-15 21:32:29,081 INFO [train.py:994] (0/2) Epoch 35, batch 100, loss[loss=0.1378, simple_loss=0.228, pruned_loss=0.02381, over 23974.00 frames. ], tot_loss[loss=0.1409, simple_loss=0.2209, pruned_loss=0.03044, over 1920694.24 frames. ], batch size: 328, lr: 1.20e-02, grad_scale: 32.0 +2024-01-15 21:32:29,933 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=7.81 vs. limit=15.0 +2024-01-15 21:32:30,560 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_skip_rate, batch_count=95873.33333333333, ans=0.0 +2024-01-15 21:32:40,730 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=95906.66666666667, ans=0.1 +2024-01-15 21:33:04,583 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer2.prob, batch_count=95940.0, ans=0.125 +2024-01-15 21:33:09,226 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=95973.33333333333, ans=0.1 +2024-01-15 21:33:16,664 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=8.69 vs. limit=15.0 +2024-01-15 21:33:31,787 INFO [train.py:994] (0/2) Epoch 35, batch 150, loss[loss=0.1466, simple_loss=0.2364, pruned_loss=0.02841, over 22467.00 frames. ], tot_loss[loss=0.1417, simple_loss=0.2222, pruned_loss=0.03054, over 2569188.93 frames. ], batch size: 358, lr: 1.20e-02, grad_scale: 32.0 +2024-01-15 21:33:39,718 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.57 vs. 
limit=12.0 +2024-01-15 21:33:51,963 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=96073.33333333333, ans=0.1 +2024-01-15 21:34:03,100 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=96106.66666666667, ans=0.0 +2024-01-15 21:34:03,143 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=96106.66666666667, ans=0.125 +2024-01-15 21:34:05,564 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=96106.66666666667, ans=0.125 +2024-01-15 21:34:12,746 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.495e+02 1.731e+02 1.860e+02 2.113e+02 3.783e+02, threshold=3.719e+02, percent-clipped=0.0 +2024-01-15 21:34:14,304 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=96140.0, ans=0.1 +2024-01-15 21:34:14,342 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=96140.0, ans=0.1 +2024-01-15 21:34:33,578 INFO [train.py:994] (0/2) Epoch 35, batch 200, loss[loss=0.1401, simple_loss=0.2207, pruned_loss=0.02971, over 24526.00 frames. ], tot_loss[loss=0.1412, simple_loss=0.2218, pruned_loss=0.03029, over 3049916.61 frames. ], batch size: 236, lr: 1.20e-02, grad_scale: 32.0 +2024-01-15 21:34:34,276 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.93 vs. limit=15.0 +2024-01-15 21:34:39,311 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=96206.66666666667, ans=0.125 +2024-01-15 21:34:42,825 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 21:34:52,763 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=96240.0, ans=0.0 +2024-01-15 21:34:53,887 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=96240.0, ans=0.125 +2024-01-15 21:34:57,567 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff2_skip_rate, batch_count=96273.33333333333, ans=0.0 +2024-01-15 21:35:04,269 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass_mid.scale_min, batch_count=96273.33333333333, ans=0.2 +2024-01-15 21:35:34,933 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=96340.0, ans=0.125 +2024-01-15 21:35:36,916 INFO [train.py:994] (0/2) Epoch 35, batch 250, loss[loss=0.1371, simple_loss=0.2202, pruned_loss=0.02702, over 24481.00 frames. ], tot_loss[loss=0.1406, simple_loss=0.221, pruned_loss=0.03008, over 3427116.71 frames. 
], batch size: 222, lr: 1.20e-02, grad_scale: 32.0 +2024-01-15 21:35:37,161 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=96373.33333333333, ans=0.125 +2024-01-15 21:35:51,975 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=96406.66666666667, ans=0.1 +2024-01-15 21:36:11,106 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=96440.0, ans=0.1 +2024-01-15 21:36:16,348 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten1.whitening_limit, batch_count=96473.33333333333, ans=10.0 +2024-01-15 21:36:17,964 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.455e+02 1.809e+02 1.950e+02 2.177e+02 3.641e+02, threshold=3.900e+02, percent-clipped=0.0 +2024-01-15 21:36:18,237 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=96473.33333333333, ans=0.125 +2024-01-15 21:36:39,511 INFO [train.py:994] (0/2) Epoch 35, batch 300, loss[loss=0.1417, simple_loss=0.2215, pruned_loss=0.03101, over 24529.00 frames. ], tot_loss[loss=0.1414, simple_loss=0.2221, pruned_loss=0.03039, over 3745173.94 frames. ], batch size: 165, lr: 1.20e-02, grad_scale: 32.0 +2024-01-15 21:36:54,085 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=192, metric=11.68 vs. limit=15.0 +2024-01-15 21:37:17,832 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=8.23 vs. limit=15.0 +2024-01-15 21:37:34,931 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=96673.33333333333, ans=0.1 +2024-01-15 21:37:37,202 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=96673.33333333333, ans=0.1 +2024-01-15 21:37:42,390 INFO [train.py:994] (0/2) Epoch 35, batch 350, loss[loss=0.1315, simple_loss=0.2151, pruned_loss=0.02391, over 24369.00 frames. ], tot_loss[loss=0.141, simple_loss=0.2215, pruned_loss=0.0302, over 3970160.61 frames. ], batch size: 275, lr: 1.20e-02, grad_scale: 32.0 +2024-01-15 21:37:51,701 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=96706.66666666667, ans=0.1 +2024-01-15 21:37:55,261 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=96740.0, ans=0.2 +2024-01-15 21:38:16,590 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 21:38:23,217 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.755e+02 1.951e+02 2.334e+02 4.164e+02, threshold=3.901e+02, percent-clipped=2.0 +2024-01-15 21:38:31,251 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.prob, batch_count=96840.0, ans=0.125 +2024-01-15 21:38:44,480 INFO [train.py:994] (0/2) Epoch 35, batch 400, loss[loss=0.1309, simple_loss=0.2197, pruned_loss=0.02104, over 24484.00 frames. ], tot_loss[loss=0.1411, simple_loss=0.2219, pruned_loss=0.03014, over 4169882.28 frames. 
], batch size: 267, lr: 1.19e-02, grad_scale: 32.0 +2024-01-15 21:39:00,635 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=96906.66666666667, ans=0.1 +2024-01-15 21:39:11,022 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.91 vs. limit=6.0 +2024-01-15 21:39:11,939 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=96940.0, ans=0.09899494936611666 +2024-01-15 21:39:46,492 INFO [train.py:994] (0/2) Epoch 35, batch 450, loss[loss=0.1423, simple_loss=0.223, pruned_loss=0.03084, over 24410.00 frames. ], tot_loss[loss=0.141, simple_loss=0.2216, pruned_loss=0.03018, over 4301833.60 frames. ], batch size: 258, lr: 1.19e-02, grad_scale: 32.0 +2024-01-15 21:40:07,466 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.prob, batch_count=97073.33333333333, ans=0.125 +2024-01-15 21:40:27,088 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.433e+02 1.789e+02 2.051e+02 2.389e+02 3.427e+02, threshold=4.102e+02, percent-clipped=0.0 +2024-01-15 21:40:29,160 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=8.18 vs. limit=15.0 +2024-01-15 21:40:48,592 INFO [train.py:994] (0/2) Epoch 35, batch 500, loss[loss=0.1482, simple_loss=0.2316, pruned_loss=0.03238, over 24485.00 frames. ], tot_loss[loss=0.1413, simple_loss=0.222, pruned_loss=0.03029, over 4415167.48 frames. ], batch size: 210, lr: 1.19e-02, grad_scale: 32.0 +2024-01-15 21:40:49,100 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.whiten, num_groups=1, num_channels=256, metric=4.17 vs. limit=12.0 +2024-01-15 21:40:54,897 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.max_abs, batch_count=97206.66666666667, ans=10.0 +2024-01-15 21:41:15,994 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=97273.33333333333, ans=0.1 +2024-01-15 21:41:22,914 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.attention_skip_rate, batch_count=97273.33333333333, ans=0.0 +2024-01-15 21:41:37,171 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=16.01 vs. limit=15.0 +2024-01-15 21:41:49,351 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=97373.33333333333, ans=0.125 +2024-01-15 21:41:50,084 INFO [train.py:994] (0/2) Epoch 35, batch 550, loss[loss=0.1552, simple_loss=0.2357, pruned_loss=0.03738, over 24547.00 frames. ], tot_loss[loss=0.1404, simple_loss=0.2208, pruned_loss=0.03002, over 4483726.40 frames. 
], batch size: 193, lr: 1.19e-02, grad_scale: 32.0 +2024-01-15 21:41:50,316 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=97373.33333333333, ans=0.0 +2024-01-15 21:42:08,323 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.skip_rate, batch_count=97406.66666666667, ans=0.07 +2024-01-15 21:42:30,845 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.463e+02 1.707e+02 1.808e+02 2.124e+02 2.901e+02, threshold=3.617e+02, percent-clipped=0.0 +2024-01-15 21:42:37,794 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.skip_rate, batch_count=97473.33333333333, ans=0.09899494936611666 +2024-01-15 21:42:52,613 INFO [train.py:994] (0/2) Epoch 35, batch 600, loss[loss=0.1464, simple_loss=0.2276, pruned_loss=0.0326, over 24521.00 frames. ], tot_loss[loss=0.1406, simple_loss=0.2212, pruned_loss=0.03003, over 4567171.08 frames. ], batch size: 204, lr: 1.19e-02, grad_scale: 32.0 +2024-01-15 21:42:54,042 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=97540.0, ans=0.1 +2024-01-15 21:42:55,129 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=97540.0, ans=0.125 +2024-01-15 21:42:59,349 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=97540.0, ans=0.0 +2024-01-15 21:43:00,949 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module2.whiten, num_groups=1, num_channels=384, metric=2.87 vs. limit=15.0 +2024-01-15 21:43:05,208 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module2.balancer2.prob, batch_count=97573.33333333333, ans=0.125 +2024-01-15 21:43:07,509 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=97573.33333333333, ans=0.0 +2024-01-15 21:43:28,717 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=97640.0, ans=0.125 +2024-01-15 21:43:29,149 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=12.24 vs. limit=15.0 +2024-01-15 21:43:54,524 INFO [train.py:994] (0/2) Epoch 35, batch 650, loss[loss=0.1389, simple_loss=0.2208, pruned_loss=0.02848, over 24315.00 frames. ], tot_loss[loss=0.1404, simple_loss=0.221, pruned_loss=0.02984, over 4616837.82 frames. 
], batch size: 285, lr: 1.19e-02, grad_scale: 32.0 +2024-01-15 21:44:00,153 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer2.prob, batch_count=97706.66666666667, ans=0.125 +2024-01-15 21:44:07,215 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer2.prob, batch_count=97740.0, ans=0.125 +2024-01-15 21:44:16,239 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=97740.0, ans=0.125 +2024-01-15 21:44:35,331 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 1.709e+02 1.864e+02 2.143e+02 3.994e+02, threshold=3.728e+02, percent-clipped=1.0 +2024-01-15 21:44:37,979 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=97806.66666666667, ans=0.0 +2024-01-15 21:44:40,975 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.bypass.skip_rate, batch_count=97806.66666666667, ans=0.07 +2024-01-15 21:44:50,899 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=97840.0, ans=0.125 +2024-01-15 21:44:56,758 INFO [train.py:994] (0/2) Epoch 35, batch 700, loss[loss=0.09857, simple_loss=0.1661, pruned_loss=0.01551, over 17177.00 frames. ], tot_loss[loss=0.1405, simple_loss=0.2211, pruned_loss=0.02991, over 4662319.47 frames. ], batch size: 74, lr: 1.19e-02, grad_scale: 32.0 +2024-01-15 21:45:00,576 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=97873.33333333333, ans=0.125 +2024-01-15 21:45:04,156 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=97873.33333333333, ans=0.125 +2024-01-15 21:45:14,313 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=97906.66666666667, ans=0.0 +2024-01-15 21:45:17,268 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=97906.66666666667, ans=0.0 +2024-01-15 21:45:20,674 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.bypass_mid.scale_min, batch_count=97940.0, ans=0.2 +2024-01-15 21:45:43,777 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.59 vs. limit=10.0 +2024-01-15 21:45:58,383 INFO [train.py:994] (0/2) Epoch 35, batch 750, loss[loss=0.1251, simple_loss=0.2032, pruned_loss=0.0235, over 24005.00 frames. ], tot_loss[loss=0.1402, simple_loss=0.2209, pruned_loss=0.02979, over 4697583.95 frames. 
], batch size: 131, lr: 1.19e-02, grad_scale: 16.0 +2024-01-15 21:46:05,943 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=98040.0, ans=0.2 +2024-01-15 21:46:16,511 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass_mid.scale_min, batch_count=98073.33333333333, ans=0.2 +2024-01-15 21:46:17,742 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer1.prob, batch_count=98073.33333333333, ans=0.125 +2024-01-15 21:46:38,474 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=98140.0, ans=0.07 +2024-01-15 21:46:40,434 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.465e+02 1.691e+02 1.844e+02 2.221e+02 4.100e+02, threshold=3.688e+02, percent-clipped=1.0 +2024-01-15 21:46:43,966 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 21:46:56,011 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=98173.33333333333, ans=0.1 +2024-01-15 21:46:58,136 INFO [train.py:994] (0/2) Epoch 35, batch 800, loss[loss=0.1359, simple_loss=0.2198, pruned_loss=0.02602, over 24517.00 frames. ], tot_loss[loss=0.1403, simple_loss=0.2211, pruned_loss=0.02977, over 4733725.59 frames. ], batch size: 243, lr: 1.19e-02, grad_scale: 32.0 +2024-01-15 21:46:59,483 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=98206.66666666667, ans=0.0 +2024-01-15 21:47:46,412 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-35.pt +2024-01-15 21:48:08,385 INFO [train.py:994] (0/2) Epoch 36, batch 0, loss[loss=0.1535, simple_loss=0.234, pruned_loss=0.03654, over 24535.00 frames. ], tot_loss[loss=0.1535, simple_loss=0.234, pruned_loss=0.03654, over 24535.00 frames. ], batch size: 193, lr: 1.17e-02, grad_scale: 32.0 +2024-01-15 21:48:08,386 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 21:48:25,827 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.1.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([5.0601, 4.4780, 4.6724, 4.6805], device='cuda:0') +2024-01-15 21:48:29,547 INFO [train.py:1026] (0/2) Epoch 36, validation: loss=0.1669, simple_loss=0.2489, pruned_loss=0.04246, over 1622729.00 frames. +2024-01-15 21:48:29,548 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 21:48:30,144 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=7.13 vs. 
limit=15.0 +2024-01-15 21:48:34,450 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=98350.0, ans=0.0 +2024-01-15 21:48:41,680 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=98383.33333333333, ans=0.0 +2024-01-15 21:48:45,299 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.attention_skip_rate, batch_count=98383.33333333333, ans=0.0 +2024-01-15 21:48:47,189 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=98383.33333333333, ans=0.125 +2024-01-15 21:49:20,785 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.404e+02 1.762e+02 1.998e+02 2.319e+02 4.869e+02, threshold=3.996e+02, percent-clipped=3.0 +2024-01-15 21:49:31,327 INFO [train.py:994] (0/2) Epoch 36, batch 50, loss[loss=0.1469, simple_loss=0.2237, pruned_loss=0.03503, over 24480.00 frames. ], tot_loss[loss=0.1369, simple_loss=0.2165, pruned_loss=0.02865, over 1063795.34 frames. ], batch size: 222, lr: 1.17e-02, grad_scale: 32.0 +2024-01-15 21:49:37,510 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=98516.66666666667, ans=0.125 +2024-01-15 21:49:44,623 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=7.53 vs. limit=15.0 +2024-01-15 21:49:49,357 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=98550.0, ans=0.125 +2024-01-15 21:49:52,204 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=384, metric=6.36 vs. limit=15.0 +2024-01-15 21:49:56,378 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=98583.33333333333, ans=0.125 +2024-01-15 21:49:56,441 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.min_positive, batch_count=98583.33333333333, ans=0.05 +2024-01-15 21:49:59,296 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=384, metric=5.26 vs. limit=15.0 +2024-01-15 21:50:00,027 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.attention_skip_rate, batch_count=98583.33333333333, ans=0.0 +2024-01-15 21:50:22,176 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=98650.0, ans=0.125 +2024-01-15 21:50:23,336 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=98650.0, ans=0.125 +2024-01-15 21:50:26,178 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=98650.0, ans=0.125 +2024-01-15 21:50:32,952 INFO [train.py:994] (0/2) Epoch 36, batch 100, loss[loss=0.1474, simple_loss=0.2251, pruned_loss=0.03483, over 24455.00 frames. ], tot_loss[loss=0.139, simple_loss=0.2188, pruned_loss=0.0296, over 1893245.30 frames. 
], batch size: 165, lr: 1.17e-02, grad_scale: 32.0 +2024-01-15 21:50:53,624 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer2.prob, batch_count=98716.66666666667, ans=0.125 +2024-01-15 21:50:54,768 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=98716.66666666667, ans=0.125 +2024-01-15 21:51:06,503 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=98750.0, ans=0.1 +2024-01-15 21:51:09,029 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=98783.33333333333, ans=0.125 +2024-01-15 21:51:14,915 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 21:51:23,131 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.431e+02 1.714e+02 1.831e+02 2.083e+02 3.031e+02, threshold=3.661e+02, percent-clipped=0.0 +2024-01-15 21:51:25,161 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=98816.66666666667, ans=0.125 +2024-01-15 21:51:34,210 INFO [train.py:994] (0/2) Epoch 36, batch 150, loss[loss=0.1296, simple_loss=0.211, pruned_loss=0.02411, over 24453.00 frames. ], tot_loss[loss=0.1386, simple_loss=0.2189, pruned_loss=0.02916, over 2546118.07 frames. ], batch size: 267, lr: 1.17e-02, grad_scale: 32.0 +2024-01-15 21:51:43,835 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=98850.0, ans=0.125 +2024-01-15 21:51:45,103 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer2.prob, batch_count=98883.33333333333, ans=0.125 +2024-01-15 21:51:47,983 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=98883.33333333333, ans=0.0 +2024-01-15 21:52:02,538 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff3_skip_rate, batch_count=98916.66666666667, ans=0.0 +2024-01-15 21:52:02,589 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=98916.66666666667, ans=0.0 +2024-01-15 21:52:34,930 INFO [train.py:994] (0/2) Epoch 36, batch 200, loss[loss=0.1314, simple_loss=0.2152, pruned_loss=0.02374, over 24506.00 frames. ], tot_loss[loss=0.1383, simple_loss=0.2186, pruned_loss=0.02899, over 3029338.64 frames. ], batch size: 181, lr: 1.17e-02, grad_scale: 32.0 +2024-01-15 21:52:45,875 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=99016.66666666667, ans=0.1 +2024-01-15 21:52:45,982 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer2.prob, batch_count=99016.66666666667, ans=0.125 +2024-01-15 21:52:56,746 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.whiten.whitening_limit, batch_count=99050.0, ans=15.0 +2024-01-15 21:53:08,441 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=256, metric=10.01 vs. 
limit=15.0 +2024-01-15 21:53:10,921 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=15.51 vs. limit=22.5 +2024-01-15 21:53:25,430 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.492e+02 1.822e+02 2.260e+02 2.664e+02 3.639e+02, threshold=4.520e+02, percent-clipped=0.0 +2024-01-15 21:53:26,948 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=99150.0, ans=0.125 +2024-01-15 21:53:37,076 INFO [train.py:994] (0/2) Epoch 36, batch 250, loss[loss=0.1429, simple_loss=0.2223, pruned_loss=0.03168, over 24468.00 frames. ], tot_loss[loss=0.1388, simple_loss=0.2193, pruned_loss=0.02913, over 3421922.49 frames. ], batch size: 216, lr: 1.17e-02, grad_scale: 32.0 +2024-01-15 21:53:43,427 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=99183.33333333333, ans=0.125 +2024-01-15 21:54:03,137 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=99250.0, ans=0.125 +2024-01-15 21:54:04,182 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=99250.0, ans=0.125 +2024-01-15 21:54:27,190 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=12.22 vs. limit=15.0 +2024-01-15 21:54:38,282 INFO [train.py:994] (0/2) Epoch 36, batch 300, loss[loss=0.135, simple_loss=0.2167, pruned_loss=0.02671, over 24483.00 frames. ], tot_loss[loss=0.1396, simple_loss=0.2202, pruned_loss=0.02951, over 3734400.84 frames. ], batch size: 222, lr: 1.17e-02, grad_scale: 32.0 +2024-01-15 21:54:47,540 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.balancer1.min_positive, batch_count=99350.0, ans=0.025 +2024-01-15 21:54:49,493 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.attention_skip_rate, batch_count=99350.0, ans=0.0 +2024-01-15 21:55:03,624 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=99416.66666666667, ans=0.125 +2024-01-15 21:55:28,564 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_skip_rate, batch_count=99483.33333333333, ans=0.0 +2024-01-15 21:55:29,378 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.468e+02 1.712e+02 1.848e+02 2.107e+02 3.475e+02, threshold=3.696e+02, percent-clipped=0.0 +2024-01-15 21:55:40,650 INFO [train.py:994] (0/2) Epoch 36, batch 350, loss[loss=0.123, simple_loss=0.1992, pruned_loss=0.02343, over 23951.00 frames. ], tot_loss[loss=0.1397, simple_loss=0.2202, pruned_loss=0.02956, over 3969870.56 frames. ], batch size: 131, lr: 1.16e-02, grad_scale: 32.0 +2024-01-15 21:55:44,353 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.48 vs. 
limit=6.0 +2024-01-15 21:55:48,423 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=99516.66666666667, ans=0.1 +2024-01-15 21:55:56,697 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=99550.0, ans=0.0 +2024-01-15 21:55:58,689 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.06 vs. limit=15.0 +2024-01-15 21:56:02,353 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=9.59 vs. limit=15.0 +2024-01-15 21:56:33,519 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.balancer1.prob, batch_count=99650.0, ans=0.125 +2024-01-15 21:56:40,123 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=99650.0, ans=0.0 +2024-01-15 21:56:42,308 INFO [train.py:994] (0/2) Epoch 36, batch 400, loss[loss=0.1215, simple_loss=0.2048, pruned_loss=0.01912, over 24205.00 frames. ], tot_loss[loss=0.1395, simple_loss=0.2199, pruned_loss=0.02951, over 4159812.93 frames. ], batch size: 140, lr: 1.16e-02, grad_scale: 32.0 +2024-01-15 21:56:52,762 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass.scale_min, batch_count=99683.33333333333, ans=0.2 +2024-01-15 21:57:08,629 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=99750.0, ans=0.125 +2024-01-15 21:57:09,811 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=99750.0, ans=0.0 +2024-01-15 21:57:20,375 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.37 vs. limit=22.5 +2024-01-15 21:57:33,420 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.474e+02 1.721e+02 1.850e+02 2.143e+02 3.369e+02, threshold=3.701e+02, percent-clipped=0.0 +2024-01-15 21:57:44,586 INFO [train.py:994] (0/2) Epoch 36, batch 450, loss[loss=0.151, simple_loss=0.2327, pruned_loss=0.03464, over 24513.00 frames. ], tot_loss[loss=0.1398, simple_loss=0.2205, pruned_loss=0.02957, over 4316055.03 frames. ], batch size: 204, lr: 1.16e-02, grad_scale: 32.0 +2024-01-15 21:57:53,751 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.attention_skip_rate, batch_count=99850.0, ans=0.0 +2024-01-15 21:58:09,882 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.whiten, num_groups=1, num_channels=256, metric=6.06 vs. 
limit=12.0 +2024-01-15 21:58:26,798 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.min_abs, batch_count=99950.0, ans=0.5 +2024-01-15 21:58:30,984 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=99950.0, ans=0.125 +2024-01-15 21:58:35,725 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.nonlin_attention.balancer.max_positive, batch_count=99983.33333333333, ans=0.95 +2024-01-15 21:58:39,608 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.ff2_skip_rate, batch_count=99983.33333333333, ans=0.0 +2024-01-15 21:58:46,929 INFO [train.py:994] (0/2) Epoch 36, batch 500, loss[loss=0.1423, simple_loss=0.2191, pruned_loss=0.03274, over 24502.00 frames. ], tot_loss[loss=0.1399, simple_loss=0.2206, pruned_loss=0.02962, over 4417764.37 frames. ], batch size: 165, lr: 1.16e-02, grad_scale: 32.0 +2024-01-15 21:58:52,189 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=512, metric=10.75 vs. limit=15.0 +2024-01-15 21:58:57,481 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn2.whiten, num_groups=1, num_channels=512, metric=13.95 vs. limit=22.5 +2024-01-15 21:59:04,356 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=256, metric=12.41 vs. limit=15.0 +2024-01-15 21:59:04,588 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.17 vs. limit=12.0 +2024-01-15 21:59:17,442 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff2_skip_rate, batch_count=100083.33333333333, ans=0.0 +2024-01-15 21:59:31,064 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=100116.66666666667, ans=0.1 +2024-01-15 21:59:37,393 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.404e+02 1.694e+02 1.886e+02 2.106e+02 3.014e+02, threshold=3.772e+02, percent-clipped=0.0 +2024-01-15 21:59:42,314 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=100150.0, ans=0.0 +2024-01-15 21:59:48,487 INFO [train.py:994] (0/2) Epoch 36, batch 550, loss[loss=0.1476, simple_loss=0.2231, pruned_loss=0.03609, over 24584.00 frames. ], tot_loss[loss=0.1395, simple_loss=0.2203, pruned_loss=0.02942, over 4510113.23 frames. ], batch size: 176, lr: 1.16e-02, grad_scale: 32.0 +2024-01-15 22:00:23,865 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=100283.33333333333, ans=0.125 +2024-01-15 22:00:25,273 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=100283.33333333333, ans=0.0 +2024-01-15 22:00:37,151 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=100316.66666666667, ans=0.125 +2024-01-15 22:00:45,808 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.90 vs. 
limit=6.0 +2024-01-15 22:00:49,609 INFO [train.py:994] (0/2) Epoch 36, batch 600, loss[loss=0.1469, simple_loss=0.2233, pruned_loss=0.0352, over 24478.00 frames. ], tot_loss[loss=0.1398, simple_loss=0.2206, pruned_loss=0.02952, over 4585553.73 frames. ], batch size: 170, lr: 1.16e-02, grad_scale: 32.0 +2024-01-15 22:00:50,923 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.out_balancer.prob, batch_count=100350.0, ans=0.125 +2024-01-15 22:01:00,440 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=100350.0, ans=0.125 +2024-01-15 22:01:05,316 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=100383.33333333333, ans=0.125 +2024-01-15 22:01:11,046 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_skip_rate, batch_count=100383.33333333333, ans=0.0 +2024-01-15 22:01:18,085 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=100416.66666666667, ans=0.05 +2024-01-15 22:01:22,136 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=100416.66666666667, ans=0.125 +2024-01-15 22:01:40,247 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.461e+02 1.713e+02 1.963e+02 2.375e+02 3.594e+02, threshold=3.927e+02, percent-clipped=0.0 +2024-01-15 22:01:52,221 INFO [train.py:994] (0/2) Epoch 36, batch 650, loss[loss=0.1496, simple_loss=0.2234, pruned_loss=0.03793, over 24508.00 frames. ], tot_loss[loss=0.1399, simple_loss=0.2209, pruned_loss=0.02951, over 4646243.16 frames. ], batch size: 165, lr: 1.16e-02, grad_scale: 32.0 +2024-01-15 22:01:53,059 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=256, metric=18.29 vs. limit=22.5 +2024-01-15 22:01:58,399 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=100516.66666666667, ans=0.1 +2024-01-15 22:02:08,211 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=512, metric=15.73 vs. limit=22.5 +2024-01-15 22:02:15,608 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=100583.33333333333, ans=0.125 +2024-01-15 22:02:29,410 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.whiten, num_groups=1, num_channels=384, metric=3.55 vs. limit=12.0 +2024-01-15 22:02:53,866 INFO [train.py:994] (0/2) Epoch 36, batch 700, loss[loss=0.1303, simple_loss=0.2119, pruned_loss=0.02436, over 24475.00 frames. ], tot_loss[loss=0.1399, simple_loss=0.2206, pruned_loss=0.02959, over 4674066.26 frames. ], batch size: 267, lr: 1.16e-02, grad_scale: 32.0 +2024-01-15 22:02:56,585 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.min_positive, batch_count=100683.33333333333, ans=0.025 +2024-01-15 22:02:56,980 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=9.35 vs. 
limit=15.0 +2024-01-15 22:02:57,812 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=100683.33333333333, ans=0.0 +2024-01-15 22:03:00,222 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=100683.33333333333, ans=0.125 +2024-01-15 22:03:44,977 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.506e+02 1.713e+02 1.930e+02 2.201e+02 3.344e+02, threshold=3.860e+02, percent-clipped=0.0 +2024-01-15 22:03:56,277 INFO [train.py:994] (0/2) Epoch 36, batch 750, loss[loss=0.138, simple_loss=0.2209, pruned_loss=0.02756, over 24521.00 frames. ], tot_loss[loss=0.1398, simple_loss=0.2204, pruned_loss=0.02959, over 4688596.13 frames. ], batch size: 243, lr: 1.16e-02, grad_scale: 32.0 +2024-01-15 22:04:07,926 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module2.balancer2.prob, batch_count=100883.33333333333, ans=0.125 +2024-01-15 22:04:29,992 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.93 vs. limit=6.0 +2024-01-15 22:04:35,428 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=100950.0, ans=0.0 +2024-01-15 22:04:39,977 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=100950.0, ans=0.1 +2024-01-15 22:04:48,206 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn2.whiten.whitening_limit, batch_count=100983.33333333333, ans=22.5 +2024-01-15 22:04:49,880 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=100983.33333333333, ans=0.015 +2024-01-15 22:04:51,601 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.13 vs. limit=15.0 +2024-01-15 22:04:55,446 INFO [train.py:994] (0/2) Epoch 36, batch 800, loss[loss=0.1374, simple_loss=0.2144, pruned_loss=0.03026, over 24507.00 frames. ], tot_loss[loss=0.1395, simple_loss=0.2201, pruned_loss=0.02948, over 4706917.11 frames. 
], batch size: 165, lr: 1.16e-02, grad_scale: 32.0 +2024-01-15 22:05:13,416 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101050.0, ans=0.1 +2024-01-15 22:05:16,697 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=101050.0, ans=0.125 +2024-01-15 22:05:23,545 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=101083.33333333333, ans=0.0 +2024-01-15 22:05:35,605 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101116.66666666667, ans=0.1 +2024-01-15 22:05:42,005 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.400e+02 1.756e+02 1.942e+02 2.533e+02 4.367e+02, threshold=3.885e+02, percent-clipped=2.0 +2024-01-15 22:05:44,229 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-36.pt +2024-01-15 22:06:06,398 INFO [train.py:994] (0/2) Epoch 37, batch 0, loss[loss=0.1359, simple_loss=0.2117, pruned_loss=0.03005, over 24539.00 frames. ], tot_loss[loss=0.1359, simple_loss=0.2117, pruned_loss=0.03005, over 24539.00 frames. ], batch size: 236, lr: 1.14e-02, grad_scale: 32.0 +2024-01-15 22:06:06,399 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 22:06:27,578 INFO [train.py:1026] (0/2) Epoch 37, validation: loss=0.1661, simple_loss=0.2481, pruned_loss=0.042, over 1622729.00 frames. +2024-01-15 22:06:27,578 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 22:06:40,530 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=101193.33333333333, ans=0.125 +2024-01-15 22:06:44,042 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=101193.33333333333, ans=0.04949747468305833 +2024-01-15 22:06:46,414 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=101193.33333333333, ans=0.2 +2024-01-15 22:06:56,488 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=101226.66666666667, ans=0.2 +2024-01-15 22:06:59,506 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.23 vs. limit=6.0 +2024-01-15 22:07:13,967 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.attention_skip_rate, batch_count=101260.0, ans=0.0 +2024-01-15 22:07:23,996 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.prob, batch_count=101293.33333333333, ans=0.125 +2024-01-15 22:07:25,167 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101293.33333333333, ans=0.1 +2024-01-15 22:07:28,358 INFO [train.py:994] (0/2) Epoch 37, batch 50, loss[loss=0.1196, simple_loss=0.1905, pruned_loss=0.02434, over 23542.00 frames. ], tot_loss[loss=0.1377, simple_loss=0.217, pruned_loss=0.02918, over 1091490.35 frames. 
], batch size: 119, lr: 1.14e-02, grad_scale: 32.0 +2024-01-15 22:07:44,629 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=101360.0, ans=0.1 +2024-01-15 22:07:48,170 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer1.prob, batch_count=101360.0, ans=0.125 +2024-01-15 22:08:05,414 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.whiten.whitening_limit, batch_count=101426.66666666667, ans=12.0 +2024-01-15 22:08:18,441 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.whiten.whitening_limit, batch_count=101460.0, ans=12.0 +2024-01-15 22:08:26,507 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=101460.0, ans=0.125 +2024-01-15 22:08:28,551 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.396e+02 1.658e+02 1.797e+02 2.024e+02 3.025e+02, threshold=3.594e+02, percent-clipped=0.0 +2024-01-15 22:08:31,613 INFO [train.py:994] (0/2) Epoch 37, batch 100, loss[loss=0.1447, simple_loss=0.2193, pruned_loss=0.03501, over 24373.00 frames. ], tot_loss[loss=0.1381, simple_loss=0.2184, pruned_loss=0.02889, over 1923616.17 frames. ], batch size: 153, lr: 1.14e-02, grad_scale: 32.0 +2024-01-15 22:08:37,230 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.attention_skip_rate, batch_count=101493.33333333333, ans=0.0 +2024-01-15 22:08:44,604 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=14.97 vs. limit=15.0 +2024-01-15 22:08:49,223 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=17.20 vs. limit=22.5 +2024-01-15 22:09:34,749 INFO [train.py:994] (0/2) Epoch 37, batch 150, loss[loss=0.1479, simple_loss=0.2233, pruned_loss=0.03622, over 24485.00 frames. ], tot_loss[loss=0.1389, simple_loss=0.2195, pruned_loss=0.02916, over 2570751.55 frames. ], batch size: 204, lr: 1.14e-02, grad_scale: 32.0 +2024-01-15 22:09:35,053 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.bypass.scale_min, batch_count=101660.0, ans=0.2 +2024-01-15 22:10:02,961 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten.whitening_limit, batch_count=101726.66666666667, ans=15.0 +2024-01-15 22:10:17,928 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=101760.0, ans=0.1 +2024-01-15 22:10:28,399 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=256, metric=9.14 vs. limit=15.0 +2024-01-15 22:10:35,957 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.486e+02 1.745e+02 1.959e+02 2.425e+02 3.649e+02, threshold=3.918e+02, percent-clipped=1.0 +2024-01-15 22:10:37,164 INFO [train.py:994] (0/2) Epoch 37, batch 200, loss[loss=0.147, simple_loss=0.2284, pruned_loss=0.03285, over 24421.00 frames. ], tot_loss[loss=0.1385, simple_loss=0.2194, pruned_loss=0.02879, over 3072275.46 frames. 
], batch size: 250, lr: 1.14e-02, grad_scale: 16.0 +2024-01-15 22:10:58,927 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=101860.0, ans=0.125 +2024-01-15 22:11:22,880 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer2.min_abs, batch_count=101926.66666666667, ans=0.5 +2024-01-15 22:11:33,604 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=512, metric=15.99 vs. limit=22.5 +2024-01-15 22:11:40,447 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.self_attn1.whiten, num_groups=1, num_channels=256, metric=15.16 vs. limit=22.5 +2024-01-15 22:11:40,813 INFO [train.py:994] (0/2) Epoch 37, batch 250, loss[loss=0.1396, simple_loss=0.2175, pruned_loss=0.03084, over 24392.00 frames. ], tot_loss[loss=0.1389, simple_loss=0.2198, pruned_loss=0.02898, over 3452923.33 frames. ], batch size: 159, lr: 1.14e-02, grad_scale: 16.0 +2024-01-15 22:11:52,161 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer2.prob, batch_count=102026.66666666667, ans=0.125 +2024-01-15 22:12:01,227 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.ff3_skip_rate, batch_count=102026.66666666667, ans=0.0 +2024-01-15 22:12:01,265 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.min_positive, batch_count=102026.66666666667, ans=0.05 +2024-01-15 22:12:38,108 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff3_skip_rate, batch_count=102126.66666666667, ans=0.0 +2024-01-15 22:12:39,860 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=13.48 vs. limit=15.0 +2024-01-15 22:12:41,715 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.ff2_skip_rate, batch_count=102126.66666666667, ans=0.0 +2024-01-15 22:12:42,615 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.381e+02 1.698e+02 1.912e+02 2.193e+02 3.408e+02, threshold=3.824e+02, percent-clipped=0.0 +2024-01-15 22:12:43,830 INFO [train.py:994] (0/2) Epoch 37, batch 300, loss[loss=0.1423, simple_loss=0.2246, pruned_loss=0.03001, over 24473.00 frames. ], tot_loss[loss=0.1385, simple_loss=0.2192, pruned_loss=0.02892, over 3741130.37 frames. ], batch size: 222, lr: 1.14e-02, grad_scale: 16.0 +2024-01-15 22:13:27,212 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=102260.0, ans=0.125 +2024-01-15 22:13:29,631 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass.skip_rate, batch_count=102260.0, ans=0.035 +2024-01-15 22:13:34,985 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=5.79 vs. limit=15.0 +2024-01-15 22:13:45,217 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=5.62 vs. limit=12.0 +2024-01-15 22:13:46,842 INFO [train.py:994] (0/2) Epoch 37, batch 350, loss[loss=0.142, simple_loss=0.2254, pruned_loss=0.02926, over 24432.00 frames. ], tot_loss[loss=0.1386, simple_loss=0.2195, pruned_loss=0.02886, over 3972443.62 frames. 
], batch size: 250, lr: 1.13e-02, grad_scale: 16.0 +2024-01-15 22:13:59,806 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module1.balancer1.prob, batch_count=102360.0, ans=0.125 +2024-01-15 22:14:10,304 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.balancer2.prob, batch_count=102360.0, ans=0.125 +2024-01-15 22:14:18,797 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=102393.33333333333, ans=0.125 +2024-01-15 22:14:19,950 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer1.prob, batch_count=102393.33333333333, ans=0.125 +2024-01-15 22:14:33,903 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=102426.66666666667, ans=0.125 +2024-01-15 22:14:49,287 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.454e+02 1.670e+02 1.836e+02 2.122e+02 2.808e+02, threshold=3.672e+02, percent-clipped=0.0 +2024-01-15 22:14:50,532 INFO [train.py:994] (0/2) Epoch 37, batch 400, loss[loss=0.1408, simple_loss=0.2191, pruned_loss=0.03127, over 24589.00 frames. ], tot_loss[loss=0.1388, simple_loss=0.2195, pruned_loss=0.02907, over 4150325.75 frames. ], batch size: 199, lr: 1.13e-02, grad_scale: 32.0 +2024-01-15 22:15:10,403 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=102526.66666666667, ans=0.1 +2024-01-15 22:15:15,750 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=102560.0, ans=0.0 +2024-01-15 22:15:18,436 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.37 vs. limit=15.0 +2024-01-15 22:15:21,549 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=102560.0, ans=0.0 +2024-01-15 22:15:26,963 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward3.hidden_balancer.prob, batch_count=102593.33333333333, ans=0.125 +2024-01-15 22:15:53,513 INFO [train.py:994] (0/2) Epoch 37, batch 450, loss[loss=0.1397, simple_loss=0.2248, pruned_loss=0.02734, over 24524.00 frames. ], tot_loss[loss=0.1389, simple_loss=0.2197, pruned_loss=0.02908, over 4293669.36 frames. ], batch size: 187, lr: 1.13e-02, grad_scale: 32.0 +2024-01-15 22:16:13,873 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward1.out_proj.dropout_p, batch_count=102693.33333333333, ans=0.1 +2024-01-15 22:16:46,058 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.ff3_skip_rate, batch_count=102793.33333333333, ans=0.0 +2024-01-15 22:16:53,284 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.ff3_skip_rate, batch_count=102793.33333333333, ans=0.0 +2024-01-15 22:16:55,304 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.448e+02 1.753e+02 1.977e+02 2.299e+02 3.213e+02, threshold=3.954e+02, percent-clipped=0.0 +2024-01-15 22:16:56,531 INFO [train.py:994] (0/2) Epoch 37, batch 500, loss[loss=0.1468, simple_loss=0.2211, pruned_loss=0.03631, over 24373.00 frames. ], tot_loss[loss=0.1389, simple_loss=0.2194, pruned_loss=0.02919, over 4398800.61 frames. 
], batch size: 153, lr: 1.13e-02, grad_scale: 32.0 +2024-01-15 22:17:12,512 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.attention_skip_rate, batch_count=102860.0, ans=0.0 +2024-01-15 22:17:15,979 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=102860.0, ans=0.125 +2024-01-15 22:17:17,757 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=192, metric=6.27 vs. limit=15.0 +2024-01-15 22:17:47,218 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward2.hidden_balancer.prob, batch_count=102960.0, ans=0.125 +2024-01-15 22:17:59,396 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module1.balancer2.prob, batch_count=102993.33333333333, ans=0.125 +2024-01-15 22:18:00,225 INFO [train.py:994] (0/2) Epoch 37, batch 550, loss[loss=0.1223, simple_loss=0.1984, pruned_loss=0.02308, over 24057.00 frames. ], tot_loss[loss=0.1389, simple_loss=0.2194, pruned_loss=0.0292, over 4484631.59 frames. ], batch size: 131, lr: 1.13e-02, grad_scale: 32.0 +2024-01-15 22:18:18,255 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=103026.66666666667, ans=0.1 +2024-01-15 22:18:29,288 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.skip_rate, batch_count=103060.0, ans=0.04949747468305833 +2024-01-15 22:18:31,790 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module2.balancer1.min_positive, batch_count=103060.0, ans=0.025 +2024-01-15 22:18:40,216 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff2_skip_rate, batch_count=103093.33333333333, ans=0.0 +2024-01-15 22:18:40,273 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=103093.33333333333, ans=0.1 +2024-01-15 22:19:01,393 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.375e+02 1.771e+02 1.926e+02 2.307e+02 3.355e+02, threshold=3.851e+02, percent-clipped=0.0 +2024-01-15 22:19:02,683 INFO [train.py:994] (0/2) Epoch 37, batch 600, loss[loss=0.1486, simple_loss=0.2258, pruned_loss=0.03564, over 24393.00 frames. ], tot_loss[loss=0.1389, simple_loss=0.2195, pruned_loss=0.02914, over 4554053.13 frames. ], batch size: 153, lr: 1.13e-02, grad_scale: 32.0 +2024-01-15 22:19:28,767 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.ff2_skip_rate, batch_count=103226.66666666667, ans=0.0 +2024-01-15 22:20:06,194 INFO [train.py:994] (0/2) Epoch 37, batch 650, loss[loss=0.1434, simple_loss=0.2268, pruned_loss=0.03004, over 24531.00 frames. ], tot_loss[loss=0.1387, simple_loss=0.2193, pruned_loss=0.02904, over 4611661.42 frames. 
], batch size: 236, lr: 1.13e-02, grad_scale: 32.0 +2024-01-15 22:20:08,060 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=103326.66666666667, ans=0.0 +2024-01-15 22:20:20,057 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=103360.0, ans=0.125 +2024-01-15 22:20:26,011 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module2.balancer2.prob, batch_count=103360.0, ans=0.125 +2024-01-15 22:20:43,209 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.48 vs. limit=10.0 +2024-01-15 22:20:51,210 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.ff2_skip_rate, batch_count=103426.66666666667, ans=0.0 +2024-01-15 22:21:07,942 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.441e+02 1.744e+02 1.856e+02 2.130e+02 3.630e+02, threshold=3.712e+02, percent-clipped=0.0 +2024-01-15 22:21:09,209 INFO [train.py:994] (0/2) Epoch 37, batch 700, loss[loss=0.1093, simple_loss=0.1698, pruned_loss=0.0244, over 19207.00 frames. ], tot_loss[loss=0.1387, simple_loss=0.2192, pruned_loss=0.02908, over 4648442.79 frames. ], batch size: 82, lr: 1.13e-02, grad_scale: 32.0 +2024-01-15 22:21:19,323 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=2.41 vs. limit=15.0 +2024-01-15 22:21:55,689 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer1.prob, batch_count=103593.33333333333, ans=0.125 +2024-01-15 22:21:58,018 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=103593.33333333333, ans=0.0 +2024-01-15 22:21:58,128 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.2.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 22:22:02,816 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=103626.66666666667, ans=0.0 +2024-01-15 22:22:05,171 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module2.balancer1.prob, batch_count=103626.66666666667, ans=0.125 +2024-01-15 22:22:07,531 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=103626.66666666667, ans=0.0 +2024-01-15 22:22:10,000 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=103626.66666666667, ans=0.0 +2024-01-15 22:22:12,134 INFO [train.py:994] (0/2) Epoch 37, batch 750, loss[loss=0.1436, simple_loss=0.2184, pruned_loss=0.03436, over 24364.00 frames. ], tot_loss[loss=0.1387, simple_loss=0.2192, pruned_loss=0.02904, over 4686074.84 frames. 
], batch size: 153, lr: 1.13e-02, grad_scale: 32.0 +2024-01-15 22:22:19,070 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=103660.0, ans=0.125 +2024-01-15 22:22:30,567 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=103693.33333333333, ans=0.09899494936611666 +2024-01-15 22:22:30,589 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=103693.33333333333, ans=0.1 +2024-01-15 22:22:32,974 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.out_combiner.scale_min, batch_count=103693.33333333333, ans=0.2 +2024-01-15 22:22:40,733 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer2.prob, batch_count=103726.66666666667, ans=0.125 +2024-01-15 22:22:40,950 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.whiten_keys.whitening_limit, batch_count=103726.66666666667, ans=6.0 +2024-01-15 22:22:53,689 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer1.prob, batch_count=103760.0, ans=0.125 +2024-01-15 22:23:02,527 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.skip_rate, batch_count=103793.33333333333, ans=0.04949747468305833 +2024-01-15 22:23:11,432 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.420e+02 1.720e+02 2.004e+02 2.343e+02 4.090e+02, threshold=4.009e+02, percent-clipped=1.0 +2024-01-15 22:23:12,633 INFO [train.py:994] (0/2) Epoch 37, batch 800, loss[loss=0.1138, simple_loss=0.1984, pruned_loss=0.01461, over 24317.00 frames. ], tot_loss[loss=0.1383, simple_loss=0.219, pruned_loss=0.02882, over 4709603.29 frames. ], batch size: 147, lr: 1.13e-02, grad_scale: 32.0 +2024-01-15 22:23:19,046 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass_mid.scale_min, batch_count=103826.66666666667, ans=0.2 +2024-01-15 22:23:27,065 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.balancer1.prob, batch_count=103860.0, ans=0.125 +2024-01-15 22:24:02,420 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-37.pt +2024-01-15 22:24:23,941 INFO [train.py:994] (0/2) Epoch 38, batch 0, loss[loss=0.1385, simple_loss=0.2148, pruned_loss=0.03115, over 24472.00 frames. ], tot_loss[loss=0.1385, simple_loss=0.2148, pruned_loss=0.03115, over 24472.00 frames. ], batch size: 170, lr: 1.11e-02, grad_scale: 32.0 +2024-01-15 22:24:23,942 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 22:24:44,664 INFO [train.py:1026] (0/2) Epoch 38, validation: loss=0.1668, simple_loss=0.2482, pruned_loss=0.04272, over 1622729.00 frames. 
+2024-01-15 22:24:44,665 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 22:24:49,552 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=103970.0, ans=0.015 +2024-01-15 22:24:50,763 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.attention_skip_rate, batch_count=103970.0, ans=0.0 +2024-01-15 22:24:54,792 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=103970.0, ans=0.0 +2024-01-15 22:24:55,953 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.prob, batch_count=104003.33333333333, ans=0.125 +2024-01-15 22:25:07,365 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.49 vs. limit=15.0 +2024-01-15 22:25:13,687 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=104036.66666666667, ans=0.1 +2024-01-15 22:25:17,112 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=104036.66666666667, ans=0.0 +2024-01-15 22:25:34,256 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.88 vs. limit=10.0 +2024-01-15 22:25:35,225 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=15.06 vs. limit=15.0 +2024-01-15 22:25:46,232 INFO [train.py:994] (0/2) Epoch 38, batch 50, loss[loss=0.1437, simple_loss=0.2218, pruned_loss=0.03277, over 24507.00 frames. ], tot_loss[loss=0.1357, simple_loss=0.2157, pruned_loss=0.02791, over 1076723.11 frames. ], batch size: 243, lr: 1.11e-02, grad_scale: 32.0 +2024-01-15 22:25:53,869 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.467e+02 1.647e+02 1.866e+02 2.059e+02 3.431e+02, threshold=3.732e+02, percent-clipped=0.0 +2024-01-15 22:25:58,801 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=104170.0, ans=0.125 +2024-01-15 22:26:02,891 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff2_skip_rate, batch_count=104170.0, ans=0.0 +2024-01-15 22:26:26,354 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=104236.66666666667, ans=0.0 +2024-01-15 22:26:43,227 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=104270.0, ans=0.2 +2024-01-15 22:26:47,753 INFO [train.py:994] (0/2) Epoch 38, batch 100, loss[loss=0.1459, simple_loss=0.2259, pruned_loss=0.03294, over 24490.00 frames. ], tot_loss[loss=0.1369, simple_loss=0.2173, pruned_loss=0.02823, over 1916400.75 frames. ], batch size: 165, lr: 1.11e-02, grad_scale: 32.0 +2024-01-15 22:27:00,518 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.ff3_skip_rate, batch_count=104336.66666666667, ans=0.0 +2024-01-15 22:27:41,443 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=6.15 vs. 
limit=12.0 +2024-01-15 22:27:49,352 INFO [train.py:994] (0/2) Epoch 38, batch 150, loss[loss=0.1415, simple_loss=0.2196, pruned_loss=0.03166, over 24456.00 frames. ], tot_loss[loss=0.1374, simple_loss=0.2178, pruned_loss=0.02852, over 2571898.44 frames. ], batch size: 222, lr: 1.11e-02, grad_scale: 32.0 +2024-01-15 22:27:57,184 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 1.711e+02 1.943e+02 2.161e+02 3.646e+02, threshold=3.887e+02, percent-clipped=0.0 +2024-01-15 22:27:57,657 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.out_whiten.whitening_limit, batch_count=104470.0, ans=15.0 +2024-01-15 22:28:30,493 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.balancer1.prob, batch_count=104570.0, ans=0.125 +2024-01-15 22:28:51,173 INFO [train.py:994] (0/2) Epoch 38, batch 200, loss[loss=0.1481, simple_loss=0.2302, pruned_loss=0.03299, over 24465.00 frames. ], tot_loss[loss=0.1376, simple_loss=0.2178, pruned_loss=0.02865, over 3063466.20 frames. ], batch size: 216, lr: 1.11e-02, grad_scale: 32.0 +2024-01-15 22:29:14,079 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=104670.0, ans=0.125 +2024-01-15 22:29:38,112 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.feed_forward2.hidden_balancer.prob, batch_count=104736.66666666667, ans=0.125 +2024-01-15 22:29:39,595 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.self_attn1.whiten, num_groups=1, num_channels=384, metric=13.52 vs. limit=22.5 +2024-01-15 22:29:49,811 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=104770.0, ans=0.1 +2024-01-15 22:29:53,052 INFO [train.py:994] (0/2) Epoch 38, batch 250, loss[loss=0.1332, simple_loss=0.2143, pruned_loss=0.02604, over 24427.00 frames. ], tot_loss[loss=0.1377, simple_loss=0.2181, pruned_loss=0.0286, over 3437928.45 frames. ], batch size: 250, lr: 1.11e-02, grad_scale: 16.0 +2024-01-15 22:30:00,275 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.whiten, num_groups=1, num_channels=384, metric=6.55 vs. limit=12.0 +2024-01-15 22:30:01,875 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.373e+02 1.688e+02 1.935e+02 2.329e+02 4.264e+02, threshold=3.870e+02, percent-clipped=1.0 +2024-01-15 22:30:21,854 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.whiten, num_groups=1, num_channels=192, metric=3.67 vs. limit=12.0 +2024-01-15 22:30:34,739 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff2_skip_rate, batch_count=104903.33333333333, ans=0.0 +2024-01-15 22:30:37,007 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_skip_rate, batch_count=104903.33333333333, ans=0.0 +2024-01-15 22:30:45,656 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=104936.66666666667, ans=0.2 +2024-01-15 22:30:55,917 INFO [train.py:994] (0/2) Epoch 38, batch 300, loss[loss=0.1323, simple_loss=0.2141, pruned_loss=0.0252, over 24322.00 frames. ], tot_loss[loss=0.1376, simple_loss=0.218, pruned_loss=0.02862, over 3720007.66 frames. 
], batch size: 285, lr: 1.11e-02, grad_scale: 16.0 +2024-01-15 22:31:08,063 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=105003.33333333333, ans=0.1 +2024-01-15 22:31:24,360 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.max_abs, batch_count=105036.66666666667, ans=10.0 +2024-01-15 22:31:26,763 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer2.prob, batch_count=105036.66666666667, ans=0.125 +2024-01-15 22:31:57,309 INFO [train.py:994] (0/2) Epoch 38, batch 350, loss[loss=0.1404, simple_loss=0.2253, pruned_loss=0.02772, over 22381.00 frames. ], tot_loss[loss=0.1379, simple_loss=0.2184, pruned_loss=0.02866, over 3968775.34 frames. ], batch size: 357, lr: 1.11e-02, grad_scale: 16.0 +2024-01-15 22:32:05,517 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.36 vs. limit=15.0 +2024-01-15 22:32:06,171 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.427e+02 1.724e+02 1.882e+02 2.215e+02 3.681e+02, threshold=3.763e+02, percent-clipped=0.0 +2024-01-15 22:32:12,771 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.8.prob, batch_count=105170.0, ans=0.125 +2024-01-15 22:32:32,894 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=12.83 vs. limit=15.0 +2024-01-15 22:32:59,802 INFO [train.py:994] (0/2) Epoch 38, batch 400, loss[loss=0.113, simple_loss=0.1939, pruned_loss=0.01607, over 24183.00 frames. ], tot_loss[loss=0.1374, simple_loss=0.2181, pruned_loss=0.02839, over 4144643.87 frames. ], batch size: 140, lr: 1.10e-02, grad_scale: 32.0 +2024-01-15 22:33:12,867 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=105336.66666666667, ans=0.125 +2024-01-15 22:33:17,407 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_module1.balancer2.prob, batch_count=105336.66666666667, ans=0.125 +2024-01-15 22:33:23,920 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward3.hidden_balancer.prob, batch_count=105370.0, ans=0.125 +2024-01-15 22:33:27,416 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=105370.0, ans=0.1 +2024-01-15 22:33:31,551 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=105370.0, ans=0.125 +2024-01-15 22:33:39,787 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=105403.33333333333, ans=0.1 +2024-01-15 22:33:56,580 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.ff3_skip_rate, batch_count=105436.66666666667, ans=0.0 +2024-01-15 22:34:02,000 INFO [train.py:994] (0/2) Epoch 38, batch 450, loss[loss=0.1368, simple_loss=0.2201, pruned_loss=0.02676, over 24473.00 frames. ], tot_loss[loss=0.1376, simple_loss=0.2185, pruned_loss=0.02834, over 4303595.25 frames. 
], batch size: 222, lr: 1.10e-02, grad_scale: 32.0 +2024-01-15 22:34:10,363 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.421e+02 1.663e+02 1.877e+02 2.191e+02 2.912e+02, threshold=3.754e+02, percent-clipped=0.0 +2024-01-15 22:34:54,873 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_skip_rate, batch_count=105603.33333333333, ans=0.0 +2024-01-15 22:34:57,197 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=105603.33333333333, ans=0.0 +2024-01-15 22:35:03,884 INFO [train.py:994] (0/2) Epoch 38, batch 500, loss[loss=0.1301, simple_loss=0.2133, pruned_loss=0.02346, over 24181.00 frames. ], tot_loss[loss=0.1375, simple_loss=0.2182, pruned_loss=0.02843, over 4407709.52 frames. ], batch size: 311, lr: 1.10e-02, grad_scale: 16.0 +2024-01-15 22:35:35,136 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=4.59 vs. limit=12.0 +2024-01-15 22:35:41,103 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module2.whiten.whitening_limit, batch_count=105736.66666666667, ans=15.0 +2024-01-15 22:35:41,776 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=105736.66666666667, ans=0.125 +2024-01-15 22:35:46,625 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.max_abs, batch_count=105736.66666666667, ans=10.0 +2024-01-15 22:35:49,307 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=384, metric=2.99 vs. limit=15.0 +2024-01-15 22:36:02,119 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=105770.0, ans=0.125 +2024-01-15 22:36:06,065 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=512, metric=13.35 vs. limit=22.5 +2024-01-15 22:36:06,543 INFO [train.py:994] (0/2) Epoch 38, batch 550, loss[loss=0.1198, simple_loss=0.2001, pruned_loss=0.01973, over 24322.00 frames. ], tot_loss[loss=0.1378, simple_loss=0.2187, pruned_loss=0.02848, over 4504373.53 frames. 
], batch size: 147, lr: 1.10e-02, grad_scale: 16.0 +2024-01-15 22:36:15,913 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.386e+02 1.718e+02 2.000e+02 2.477e+02 3.890e+02, threshold=4.000e+02, percent-clipped=2.0 +2024-01-15 22:36:19,929 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=105836.66666666667, ans=0.2 +2024-01-15 22:36:35,421 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer1.max_abs, batch_count=105870.0, ans=10.0 +2024-01-15 22:36:38,920 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=105870.0, ans=0.125 +2024-01-15 22:36:41,269 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward3.hidden_balancer.prob, batch_count=105870.0, ans=0.125 +2024-01-15 22:37:04,048 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module1.balancer2.prob, batch_count=105936.66666666667, ans=0.125 +2024-01-15 22:37:08,519 INFO [train.py:994] (0/2) Epoch 38, batch 600, loss[loss=0.1393, simple_loss=0.2253, pruned_loss=0.02664, over 24486.00 frames. ], tot_loss[loss=0.1378, simple_loss=0.2188, pruned_loss=0.02844, over 4563768.69 frames. ], batch size: 222, lr: 1.10e-02, grad_scale: 16.0 +2024-01-15 22:37:09,232 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.70 vs. limit=6.0 +2024-01-15 22:37:30,304 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=106003.33333333333, ans=0.1 +2024-01-15 22:37:38,332 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.hidden_balancer.prob, batch_count=106036.66666666667, ans=0.125 +2024-01-15 22:37:52,491 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=106070.0, ans=0.125 +2024-01-15 22:37:55,967 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=106070.0, ans=0.125 +2024-01-15 22:38:04,284 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer1.prob, batch_count=106103.33333333333, ans=0.125 +2024-01-15 22:38:08,357 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=106103.33333333333, ans=0.1 +2024-01-15 22:38:08,362 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=106103.33333333333, ans=0.1 +2024-01-15 22:38:11,078 INFO [train.py:994] (0/2) Epoch 38, batch 650, loss[loss=0.1341, simple_loss=0.2177, pruned_loss=0.02522, over 24516.00 frames. ], tot_loss[loss=0.138, simple_loss=0.2191, pruned_loss=0.02843, over 4620192.69 frames. 
], batch size: 210, lr: 1.10e-02, grad_scale: 16.0 +2024-01-15 22:38:17,266 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=106136.66666666667, ans=0.1 +2024-01-15 22:38:20,424 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.326e+02 1.653e+02 1.885e+02 2.174e+02 2.953e+02, threshold=3.771e+02, percent-clipped=0.0 +2024-01-15 22:38:24,470 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=256, metric=15.08 vs. limit=22.5 +2024-01-15 22:38:31,359 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff2_skip_rate, batch_count=106170.0, ans=0.0 +2024-01-15 22:38:52,463 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=106236.66666666667, ans=0.0 +2024-01-15 22:38:56,158 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=106236.66666666667, ans=0.1 +2024-01-15 22:39:01,992 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.conv_module1.whiten, num_groups=1, num_channels=384, metric=3.59 vs. limit=15.0 +2024-01-15 22:39:11,936 INFO [train.py:994] (0/2) Epoch 38, batch 700, loss[loss=0.139, simple_loss=0.2239, pruned_loss=0.02706, over 24453.00 frames. ], tot_loss[loss=0.1376, simple_loss=0.2187, pruned_loss=0.02822, over 4668851.62 frames. ], batch size: 250, lr: 1.10e-02, grad_scale: 16.0 +2024-01-15 22:39:27,684 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=106336.66666666667, ans=0.125 +2024-01-15 22:39:33,393 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=106336.66666666667, ans=0.125 +2024-01-15 22:39:34,649 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=106336.66666666667, ans=0.125 +2024-01-15 22:39:42,013 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=4.78 vs. limit=12.0 +2024-01-15 22:39:47,422 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.feed_forward2.out_whiten, num_groups=1, num_channels=384, metric=14.31 vs. limit=15.0 +2024-01-15 22:39:49,335 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=106403.33333333333, ans=0.1 +2024-01-15 22:40:13,463 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=8.46 vs. limit=10.0 +2024-01-15 22:40:13,872 INFO [train.py:994] (0/2) Epoch 38, batch 750, loss[loss=0.1387, simple_loss=0.2225, pruned_loss=0.02742, over 24324.00 frames. ], tot_loss[loss=0.1378, simple_loss=0.219, pruned_loss=0.02826, over 4700646.44 frames. 
], batch size: 275, lr: 1.10e-02, grad_scale: 16.0 +2024-01-15 22:40:23,816 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.394e+02 1.724e+02 1.869e+02 2.193e+02 3.130e+02, threshold=3.738e+02, percent-clipped=0.0 +2024-01-15 22:40:31,061 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=106503.33333333333, ans=0.125 +2024-01-15 22:40:44,827 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_skip_rate, batch_count=106536.66666666667, ans=0.0 +2024-01-15 22:40:51,575 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=106570.0, ans=0.125 +2024-01-15 22:40:57,553 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=5.19 vs. limit=10.0 +2024-01-15 22:41:07,031 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward2.hidden_balancer.prob, batch_count=106603.33333333333, ans=0.125 +2024-01-15 22:41:08,377 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=2.79 vs. limit=15.0 +2024-01-15 22:41:12,535 INFO [train.py:994] (0/2) Epoch 38, batch 800, loss[loss=0.1134, simple_loss=0.1786, pruned_loss=0.0241, over 17009.00 frames. ], tot_loss[loss=0.1374, simple_loss=0.2184, pruned_loss=0.02818, over 4714907.40 frames. ], batch size: 71, lr: 1.10e-02, grad_scale: 32.0 +2024-01-15 22:41:15,364 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.self_attn2.whiten, num_groups=1, num_channels=512, metric=14.33 vs. limit=22.5 +2024-01-15 22:41:22,404 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/checkpoint-32000.pt +2024-01-15 22:41:25,206 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.42 vs. limit=15.0 +2024-01-15 22:41:25,233 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.35 vs. limit=6.0 +2024-01-15 22:41:25,864 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=106670.0, ans=0.1 +2024-01-15 22:41:30,317 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_skip_rate, batch_count=106670.0, ans=0.0 +2024-01-15 22:41:39,138 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=106703.33333333333, ans=0.0 +2024-01-15 22:41:49,115 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=106736.66666666667, ans=0.125 +2024-01-15 22:42:03,170 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-38.pt +2024-01-15 22:42:24,289 INFO [train.py:994] (0/2) Epoch 39, batch 0, loss[loss=0.1462, simple_loss=0.2243, pruned_loss=0.03406, over 24556.00 frames. ], tot_loss[loss=0.1462, simple_loss=0.2243, pruned_loss=0.03406, over 24556.00 frames. 
], batch size: 176, lr: 1.08e-02, grad_scale: 32.0 +2024-01-15 22:42:24,289 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 22:42:44,176 INFO [zipformer.py:1858] (0/2) name=encoder.encoders.4.encoder.layers.0.self_attn_weights, attn_weights_entropy = tensor([1.6063, 2.6247, 2.7199, 2.2390], device='cuda:0') +2024-01-15 22:42:45,266 INFO [train.py:1026] (0/2) Epoch 39, validation: loss=0.1671, simple_loss=0.2478, pruned_loss=0.04323, over 1622729.00 frames. +2024-01-15 22:42:45,266 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 22:42:51,763 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=2.77 vs. limit=10.0 +2024-01-15 22:42:58,567 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.self_attn_weights.pos_emb_skip_rate, batch_count=106813.33333333333, ans=0.0 +2024-01-15 22:42:58,579 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=106813.33333333333, ans=0.125 +2024-01-15 22:43:03,612 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.446e+02 1.753e+02 1.942e+02 2.163e+02 3.117e+02, threshold=3.885e+02, percent-clipped=0.0 +2024-01-15 22:43:21,085 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=106880.0, ans=0.2 +2024-01-15 22:43:22,418 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=106880.0, ans=0.2 +2024-01-15 22:43:27,685 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module1.balancer1.prob, batch_count=106880.0, ans=0.125 +2024-01-15 22:43:35,127 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.bypass.scale_min, batch_count=106913.33333333333, ans=0.2 +2024-01-15 22:43:46,640 INFO [train.py:994] (0/2) Epoch 39, batch 50, loss[loss=0.1356, simple_loss=0.2151, pruned_loss=0.02806, over 24446.00 frames. ], tot_loss[loss=0.1353, simple_loss=0.2157, pruned_loss=0.02747, over 1076893.47 frames. ], batch size: 250, lr: 1.08e-02, grad_scale: 32.0 +2024-01-15 22:44:17,361 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107013.33333333333, ans=0.1 +2024-01-15 22:44:49,779 INFO [train.py:994] (0/2) Epoch 39, batch 100, loss[loss=0.1289, simple_loss=0.2107, pruned_loss=0.02351, over 24405.00 frames. ], tot_loss[loss=0.1351, simple_loss=0.2155, pruned_loss=0.02738, over 1911767.81 frames. 
], batch size: 258, lr: 1.08e-02, grad_scale: 32.0 +2024-01-15 22:44:58,179 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107113.33333333333, ans=0.1 +2024-01-15 22:45:03,957 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=107146.66666666667, ans=0.025 +2024-01-15 22:45:08,974 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.490e+02 1.665e+02 1.865e+02 2.261e+02 2.773e+02, threshold=3.730e+02, percent-clipped=0.0 +2024-01-15 22:45:09,335 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=107146.66666666667, ans=0.0 +2024-01-15 22:45:51,052 INFO [train.py:994] (0/2) Epoch 39, batch 150, loss[loss=0.132, simple_loss=0.2151, pruned_loss=0.02446, over 24606.00 frames. ], tot_loss[loss=0.1354, simple_loss=0.2161, pruned_loss=0.02734, over 2548593.68 frames. ], batch size: 199, lr: 1.08e-02, grad_scale: 16.0 +2024-01-15 22:46:36,352 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=512, metric=8.79 vs. limit=15.0 +2024-01-15 22:46:37,063 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=107380.0, ans=0.95 +2024-01-15 22:46:54,033 INFO [train.py:994] (0/2) Epoch 39, batch 200, loss[loss=0.144, simple_loss=0.2238, pruned_loss=0.03215, over 24309.00 frames. ], tot_loss[loss=0.136, simple_loss=0.2165, pruned_loss=0.02776, over 3052773.70 frames. ], batch size: 298, lr: 1.08e-02, grad_scale: 16.0 +2024-01-15 22:46:56,130 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.out_combiner.scale_min, batch_count=107446.66666666667, ans=0.2 +2024-01-15 22:47:13,492 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.380e+02 1.667e+02 1.845e+02 2.071e+02 3.015e+02, threshold=3.690e+02, percent-clipped=0.0 +2024-01-15 22:47:56,383 INFO [train.py:994] (0/2) Epoch 39, batch 250, loss[loss=0.1386, simple_loss=0.222, pruned_loss=0.0276, over 24428.00 frames. ], tot_loss[loss=0.1355, simple_loss=0.2163, pruned_loss=0.02735, over 3448843.87 frames. ], batch size: 250, lr: 1.08e-02, grad_scale: 16.0 +2024-01-15 22:48:12,063 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=14.33 vs. 
limit=15.0 +2024-01-15 22:48:13,900 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107646.66666666667, ans=0.1 +2024-01-15 22:48:21,475 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module2.balancer1.prob, batch_count=107680.0, ans=0.125 +2024-01-15 22:48:29,581 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.balancer1.prob, batch_count=107680.0, ans=0.125 +2024-01-15 22:48:29,634 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=107680.0, ans=0.0 +2024-01-15 22:48:33,646 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.hidden_balancer.prob, batch_count=107713.33333333333, ans=0.125 +2024-01-15 22:48:36,100 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.max_positive, batch_count=107713.33333333333, ans=0.95 +2024-01-15 22:48:40,946 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.balancer_ff2.min_abs, batch_count=107713.33333333333, ans=0.1 +2024-01-15 22:48:48,633 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.1.encoder.layers.1.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 22:48:58,310 INFO [train.py:994] (0/2) Epoch 39, batch 300, loss[loss=0.1405, simple_loss=0.2198, pruned_loss=0.03062, over 24508.00 frames. ], tot_loss[loss=0.136, simple_loss=0.2168, pruned_loss=0.02761, over 3754591.55 frames. ], batch size: 165, lr: 1.08e-02, grad_scale: 16.0 +2024-01-15 22:49:01,027 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=107780.0, ans=0.1 +2024-01-15 22:49:17,962 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.701e+02 1.871e+02 2.220e+02 3.630e+02, threshold=3.743e+02, percent-clipped=0.0 +2024-01-15 22:49:41,711 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=107880.0, ans=0.125 +2024-01-15 22:49:42,834 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.attention_skip_rate, batch_count=107880.0, ans=0.0 +2024-01-15 22:49:52,401 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=5.95 vs. limit=10.0 +2024-01-15 22:50:00,448 INFO [train.py:994] (0/2) Epoch 39, batch 350, loss[loss=0.1384, simple_loss=0.2208, pruned_loss=0.02796, over 24364.00 frames. ], tot_loss[loss=0.1361, simple_loss=0.2171, pruned_loss=0.02752, over 3988298.18 frames. ], batch size: 298, lr: 1.08e-02, grad_scale: 16.0 +2024-01-15 22:50:05,231 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.convnext.layerdrop_rate, batch_count=107946.66666666667, ans=0.015 +2024-01-15 22:50:05,334 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer2.prob, batch_count=107946.66666666667, ans=0.125 +2024-01-15 22:50:06,503 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=107946.66666666667, ans=0.1 +2024-01-15 22:50:30,942 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.93 vs. 
limit=15.0 +2024-01-15 22:50:32,068 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.1.conv_module2.balancer2.prob, batch_count=108013.33333333333, ans=0.125 +2024-01-15 22:50:33,151 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff2_skip_rate, batch_count=108013.33333333333, ans=0.0 +2024-01-15 22:50:54,626 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.bypass_mid.scale_min, batch_count=108080.0, ans=0.2 +2024-01-15 22:51:02,423 INFO [train.py:994] (0/2) Epoch 39, batch 400, loss[loss=0.1343, simple_loss=0.2196, pruned_loss=0.02451, over 23846.00 frames. ], tot_loss[loss=0.1358, simple_loss=0.2168, pruned_loss=0.02739, over 4171079.87 frames. ], batch size: 328, lr: 1.08e-02, grad_scale: 32.0 +2024-01-15 22:51:15,224 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108146.66666666667, ans=0.1 +2024-01-15 22:51:22,583 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.422e+02 1.708e+02 1.948e+02 2.231e+02 3.344e+02, threshold=3.895e+02, percent-clipped=0.0 +2024-01-15 22:51:37,801 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward3.out_whiten, num_groups=1, num_channels=384, metric=7.82 vs. limit=15.0 +2024-01-15 22:51:56,035 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=108246.66666666667, ans=0.125 +2024-01-15 22:52:04,629 INFO [train.py:994] (0/2) Epoch 39, batch 450, loss[loss=0.125, simple_loss=0.2097, pruned_loss=0.02015, over 24213.00 frames. ], tot_loss[loss=0.1361, simple_loss=0.2169, pruned_loss=0.02759, over 4309474.23 frames. ], batch size: 311, lr: 1.08e-02, grad_scale: 32.0 +2024-01-15 22:52:07,150 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_skip_rate, batch_count=108280.0, ans=0.0 +2024-01-15 22:52:13,187 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=256, metric=6.64 vs. limit=15.0 +2024-01-15 22:52:25,125 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.ff3_skip_rate, batch_count=108313.33333333333, ans=0.0 +2024-01-15 22:52:46,259 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=2.88 vs. limit=15.0 +2024-01-15 22:52:47,955 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward2.hidden_balancer.prob, batch_count=108380.0, ans=0.125 +2024-01-15 22:52:50,228 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff3_skip_rate, batch_count=108380.0, ans=0.0 +2024-01-15 22:53:03,145 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=108413.33333333333, ans=0.1 +2024-01-15 22:53:03,208 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.bypass.scale_min, batch_count=108413.33333333333, ans=0.2 +2024-01-15 22:53:07,082 INFO [train.py:994] (0/2) Epoch 39, batch 500, loss[loss=0.1387, simple_loss=0.2203, pruned_loss=0.02855, over 24493.00 frames. ], tot_loss[loss=0.1358, simple_loss=0.2168, pruned_loss=0.02741, over 4432460.65 frames. 
], batch size: 229, lr: 1.08e-02, grad_scale: 32.0 +2024-01-15 22:53:26,418 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.459e+02 1.682e+02 1.844e+02 2.205e+02 3.574e+02, threshold=3.688e+02, percent-clipped=0.0 +2024-01-15 22:54:03,097 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff3_skip_rate, batch_count=108580.0, ans=0.0 +2024-01-15 22:54:04,908 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=4.31 vs. limit=6.0 +2024-01-15 22:54:06,752 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.ff2_skip_rate, batch_count=108580.0, ans=0.0 +2024-01-15 22:54:08,752 INFO [train.py:994] (0/2) Epoch 39, batch 550, loss[loss=0.1445, simple_loss=0.2246, pruned_loss=0.03226, over 24523.00 frames. ], tot_loss[loss=0.1365, simple_loss=0.2176, pruned_loss=0.02773, over 4518658.24 frames. ], batch size: 165, lr: 1.08e-02, grad_scale: 32.0 +2024-01-15 22:54:17,319 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.bypass_mid.scale_min, batch_count=108613.33333333333, ans=0.2 +2024-01-15 22:54:23,464 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.0.whiten, num_groups=1, num_channels=192, metric=4.14 vs. limit=12.0 +2024-01-15 22:54:28,336 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=108646.66666666667, ans=0.0 +2024-01-15 22:54:51,934 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=108713.33333333333, ans=0.125 +2024-01-15 22:55:00,285 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.conv_skip_rate, batch_count=108746.66666666667, ans=0.0 +2024-01-15 22:55:06,195 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.23 vs. limit=15.0 +2024-01-15 22:55:10,599 INFO [train.py:994] (0/2) Epoch 39, batch 600, loss[loss=0.1501, simple_loss=0.2309, pruned_loss=0.03469, over 24413.00 frames. ], tot_loss[loss=0.1366, simple_loss=0.2177, pruned_loss=0.02768, over 4593170.63 frames. ], batch size: 159, lr: 1.07e-02, grad_scale: 32.0 +2024-01-15 22:55:24,164 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.2.nonlin_attention.whiten2, num_groups=1, num_channels=512, metric=5.49 vs. limit=15.0 +2024-01-15 22:55:29,441 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=108813.33333333333, ans=0.2 +2024-01-15 22:55:30,394 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.451e+02 1.623e+02 1.740e+02 1.924e+02 3.185e+02, threshold=3.479e+02, percent-clipped=0.0 +2024-01-15 22:55:31,829 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.0.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 22:55:36,649 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=108846.66666666667, ans=0.0 +2024-01-15 22:55:52,209 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=108880.0, ans=0.1 +2024-01-15 22:56:13,024 INFO [train.py:994] (0/2) Epoch 39, batch 650, loss[loss=0.1211, simple_loss=0.2023, pruned_loss=0.01996, over 24247.00 frames. 
], tot_loss[loss=0.1366, simple_loss=0.2179, pruned_loss=0.02772, over 4636555.43 frames. ], batch size: 140, lr: 1.07e-02, grad_scale: 32.0 +2024-01-15 22:56:18,428 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.feed_forward3.out_whiten.whitening_limit, batch_count=108946.66666666667, ans=15.0 +2024-01-15 22:56:35,920 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.bypass_mid.scale_min, batch_count=108980.0, ans=0.2 +2024-01-15 22:56:45,850 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=12.51 vs. limit=15.0 +2024-01-15 22:56:48,981 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.balancer2.prob, batch_count=109046.66666666667, ans=0.125 +2024-01-15 22:57:14,414 INFO [train.py:994] (0/2) Epoch 39, batch 700, loss[loss=0.1404, simple_loss=0.2216, pruned_loss=0.02958, over 24455.00 frames. ], tot_loss[loss=0.1366, simple_loss=0.2177, pruned_loss=0.02772, over 4671015.68 frames. ], batch size: 222, lr: 1.07e-02, grad_scale: 32.0 +2024-01-15 22:57:34,443 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.385e+02 1.721e+02 2.012e+02 2.362e+02 3.911e+02, threshold=4.024e+02, percent-clipped=3.0 +2024-01-15 22:57:40,698 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_module1.balancer2.prob, batch_count=109180.0, ans=0.125 +2024-01-15 22:58:10,330 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.scale_min, batch_count=109246.66666666667, ans=0.2 +2024-01-15 22:58:13,437 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.feed_forward3.hidden_balancer.prob, batch_count=109246.66666666667, ans=0.125 +2024-01-15 22:58:16,423 INFO [train.py:994] (0/2) Epoch 39, batch 750, loss[loss=0.1261, simple_loss=0.2075, pruned_loss=0.02231, over 24230.00 frames. ], tot_loss[loss=0.1362, simple_loss=0.2172, pruned_loss=0.02758, over 4702008.35 frames. ], batch size: 140, lr: 1.07e-02, grad_scale: 32.0 +2024-01-15 22:58:17,937 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=109280.0, ans=0.1 +2024-01-15 22:59:10,502 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass_mid.scale_min, batch_count=109413.33333333333, ans=0.2 +2024-01-15 22:59:15,872 INFO [train.py:994] (0/2) Epoch 39, batch 800, loss[loss=0.1436, simple_loss=0.2222, pruned_loss=0.03249, over 24482.00 frames. ], tot_loss[loss=0.1364, simple_loss=0.2176, pruned_loss=0.0276, over 4738676.55 frames. ], batch size: 222, lr: 1.07e-02, grad_scale: 32.0 +2024-01-15 22:59:34,622 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.413e+02 1.634e+02 1.767e+02 1.970e+02 3.701e+02, threshold=3.534e+02, percent-clipped=0.0 +2024-01-15 22:59:42,316 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=3.45 vs. limit=6.0 +2024-01-15 23:00:01,171 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=2.67 vs. 
limit=6.0 +2024-01-15 23:00:02,986 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.nonlin_attention.balancer.prob, batch_count=109580.0, ans=0.125 +2024-01-15 23:00:05,069 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-39.pt +2024-01-15 23:00:27,704 INFO [train.py:994] (0/2) Epoch 40, batch 0, loss[loss=0.133, simple_loss=0.2161, pruned_loss=0.02496, over 24454.00 frames. ], tot_loss[loss=0.133, simple_loss=0.2161, pruned_loss=0.02496, over 24454.00 frames. ], batch size: 267, lr: 1.06e-02, grad_scale: 32.0 +2024-01-15 23:00:27,705 INFO [train.py:1017] (0/2) Computing validation loss +2024-01-15 23:00:47,942 INFO [train.py:1026] (0/2) Epoch 40, validation: loss=0.1662, simple_loss=0.2482, pruned_loss=0.04211, over 1622729.00 frames. +2024-01-15 23:00:47,943 INFO [train.py:1027] (0/2) Maximum memory allocated so far is 15997MB +2024-01-15 23:00:50,513 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder_embed.conv.5.prob, batch_count=109590.0, ans=0.125 +2024-01-15 23:01:25,719 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer1.prob, batch_count=109690.0, ans=0.125 +2024-01-15 23:01:49,410 INFO [train.py:994] (0/2) Epoch 40, batch 50, loss[loss=0.1282, simple_loss=0.2079, pruned_loss=0.02427, over 24331.00 frames. ], tot_loss[loss=0.1345, simple_loss=0.2155, pruned_loss=0.02673, over 1085651.98 frames. ], batch size: 147, lr: 1.06e-02, grad_scale: 32.0 +2024-01-15 23:01:55,622 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.1.self_attn_weights.whiten_keys, num_groups=4, num_channels=128, metric=5.14 vs. limit=6.0 +2024-01-15 23:02:00,429 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=13.50 vs. limit=15.0 +2024-01-15 23:02:18,488 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.360e+02 1.725e+02 1.948e+02 2.244e+02 4.405e+02, threshold=3.895e+02, percent-clipped=1.0 +2024-01-15 23:02:23,467 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.out_proj.dropout_p, batch_count=109823.33333333333, ans=0.1 +2024-01-15 23:02:51,550 INFO [train.py:994] (0/2) Epoch 40, batch 100, loss[loss=0.1456, simple_loss=0.2261, pruned_loss=0.03253, over 24498.00 frames. ], tot_loss[loss=0.135, simple_loss=0.2159, pruned_loss=0.02699, over 1913613.33 frames. ], batch size: 267, lr: 1.06e-02, grad_scale: 32.0 +2024-01-15 23:03:05,901 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.conv_module2.balancer1.max_abs, batch_count=109956.66666666667, ans=10.0 +2024-01-15 23:03:16,414 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=109990.0, ans=0.0 +2024-01-15 23:03:20,252 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.bypass.scale_min, batch_count=109990.0, ans=0.2 +2024-01-15 23:03:21,612 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.feed_forward2.out_whiten, num_groups=1, num_channels=256, metric=9.06 vs. limit=15.0 +2024-01-15 23:03:36,296 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.0.conv_module2.whiten, num_groups=1, num_channels=256, metric=11.30 vs. 
limit=15.0 +2024-01-15 23:03:40,377 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer1.prob, batch_count=110056.66666666667, ans=0.125 +2024-01-15 23:03:43,228 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.3.self_attn1.whiten, num_groups=1, num_channels=512, metric=12.97 vs. limit=22.5 +2024-01-15 23:03:47,626 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module1.balancer1.min_positive, batch_count=110056.66666666667, ans=0.025 +2024-01-15 23:03:50,095 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_skip_rate, batch_count=110056.66666666667, ans=0.0 +2024-01-15 23:03:53,391 INFO [train.py:994] (0/2) Epoch 40, batch 150, loss[loss=0.1319, simple_loss=0.2171, pruned_loss=0.02338, over 24387.00 frames. ], tot_loss[loss=0.1347, simple_loss=0.2156, pruned_loss=0.0269, over 2545329.97 frames. ], batch size: 258, lr: 1.06e-02, grad_scale: 32.0 +2024-01-15 23:04:04,412 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward2.hidden_balancer.prob, batch_count=110090.0, ans=0.125 +2024-01-15 23:04:04,414 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.conv_module2.balancer1.min_positive, batch_count=110090.0, ans=0.025 +2024-01-15 23:04:04,481 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.5.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 23:04:21,718 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.371e+02 1.612e+02 1.772e+02 1.915e+02 2.986e+02, threshold=3.544e+02, percent-clipped=0.0 +2024-01-15 23:04:29,547 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.conv_skip_rate, batch_count=110156.66666666667, ans=0.0 +2024-01-15 23:04:56,640 INFO [train.py:994] (0/2) Epoch 40, batch 200, loss[loss=0.1308, simple_loss=0.2116, pruned_loss=0.02503, over 24533.00 frames. ], tot_loss[loss=0.1353, simple_loss=0.216, pruned_loss=0.02736, over 3041631.80 frames. ], batch size: 236, lr: 1.05e-02, grad_scale: 32.0 +2024-01-15 23:05:05,188 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.out_proj.dropout_p, batch_count=110256.66666666667, ans=0.1 +2024-01-15 23:05:07,678 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.attention_skip_rate, batch_count=110290.0, ans=0.0 +2024-01-15 23:05:15,393 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=110290.0, ans=0.05 +2024-01-15 23:05:18,989 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.attention_skip_rate, batch_count=110290.0, ans=0.0 +2024-01-15 23:05:25,449 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.conv_module1.balancer2.prob, batch_count=110323.33333333333, ans=0.125 +2024-01-15 23:05:30,050 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module1.balancer1.prob, batch_count=110323.33333333333, ans=0.125 +2024-01-15 23:05:41,667 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.0.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=7.84 vs. 
limit=15.0 +2024-01-15 23:05:52,701 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_module1.balancer2.prob, batch_count=110390.0, ans=0.125 +2024-01-15 23:05:55,046 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.balancer2.prob, batch_count=110390.0, ans=0.125 +2024-01-15 23:05:58,331 INFO [train.py:994] (0/2) Epoch 40, batch 250, loss[loss=0.1182, simple_loss=0.2048, pruned_loss=0.01584, over 23933.00 frames. ], tot_loss[loss=0.1351, simple_loss=0.2159, pruned_loss=0.02711, over 3435869.40 frames. ], batch size: 131, lr: 1.05e-02, grad_scale: 32.0 +2024-01-15 23:06:03,895 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.0.ff3_skip_rate, batch_count=110423.33333333333, ans=0.0 +2024-01-15 23:06:03,915 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.feed_forward1.hidden_balancer.prob, batch_count=110423.33333333333, ans=0.125 +2024-01-15 23:06:18,625 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=110456.66666666667, ans=0.1 +2024-01-15 23:06:19,967 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.3.encoder.layers.3.self_attn_weights, loss-sum=0.000e+00 +2024-01-15 23:06:26,699 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.275e+02 1.651e+02 1.792e+02 2.080e+02 3.773e+02, threshold=3.583e+02, percent-clipped=1.0 +2024-01-15 23:06:41,170 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.bypass.skip_rate, batch_count=110523.33333333333, ans=0.09899494936611666 +2024-01-15 23:06:58,327 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.balancer1.prob, batch_count=110556.66666666667, ans=0.125 +2024-01-15 23:07:00,562 INFO [train.py:994] (0/2) Epoch 40, batch 300, loss[loss=0.1418, simple_loss=0.224, pruned_loss=0.0298, over 24540.00 frames. ], tot_loss[loss=0.1351, simple_loss=0.216, pruned_loss=0.0271, over 3739204.12 frames. ], batch size: 236, lr: 1.05e-02, grad_scale: 32.0 +2024-01-15 23:07:13,375 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=192, metric=2.88 vs. limit=10.0 +2024-01-15 23:07:14,344 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.nonlin_attention.balancer.prob, batch_count=110623.33333333333, ans=0.125 +2024-01-15 23:07:20,895 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.feed_forward3.hidden_balancer.prob, batch_count=110623.33333333333, ans=0.125 +2024-01-15 23:07:26,842 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.feed_forward1.out_proj.dropout_p, batch_count=110656.66666666667, ans=0.1 +2024-01-15 23:07:49,032 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.conv_module1.whiten, num_groups=1, num_channels=512, metric=2.49 vs. limit=15.0 +2024-01-15 23:08:03,658 INFO [train.py:994] (0/2) Epoch 40, batch 350, loss[loss=0.1405, simple_loss=0.22, pruned_loss=0.03051, over 24452.00 frames. ], tot_loss[loss=0.1347, simple_loss=0.2161, pruned_loss=0.02666, over 3978880.59 frames. 
], batch size: 170, lr: 1.05e-02, grad_scale: 32.0
+2024-01-15 23:08:18,801 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.balancer2.prob, batch_count=110790.0, ans=0.125
+2024-01-15 23:08:32,142 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.357e+02 1.654e+02 1.827e+02 2.159e+02 3.996e+02, threshold=3.655e+02, percent-clipped=2.0
+2024-01-15 23:08:53,920 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.self_attn_weights.pos_emb_skip_rate, batch_count=110890.0, ans=0.0
+2024-01-15 23:09:03,221 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.2.balancer2.prob, batch_count=110890.0, ans=0.125
+2024-01-15 23:09:04,449 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.feed_forward1.out_proj.dropout_p, batch_count=110923.33333333333, ans=0.1
+2024-01-15 23:09:05,331 INFO [train.py:994] (0/2) Epoch 40, batch 400, loss[loss=0.1385, simple_loss=0.2245, pruned_loss=0.02623, over 24505.00 frames. ], tot_loss[loss=0.135, simple_loss=0.2163, pruned_loss=0.02684, over 4168075.23 frames. ], batch size: 267, lr: 1.05e-02, grad_scale: 32.0
+2024-01-15 23:09:13,702 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=512, metric=11.84 vs. limit=15.0
+2024-01-15 23:09:21,154 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=110956.66666666667, ans=0.2
+2024-01-15 23:09:21,793 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.0.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=144, metric=5.98 vs. limit=10.0
+2024-01-15 23:09:51,783 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.ff3_skip_rate, batch_count=111023.33333333333, ans=0.0
+2024-01-15 23:10:07,368 INFO [train.py:994] (0/2) Epoch 40, batch 450, loss[loss=0.1424, simple_loss=0.228, pruned_loss=0.02845, over 24473.00 frames. ], tot_loss[loss=0.1352, simple_loss=0.2163, pruned_loss=0.0271, over 4305114.05 frames. ], batch size: 222, lr: 1.05e-02, grad_scale: 32.0
+2024-01-15 23:10:20,536 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.conv_module2.whiten, num_groups=1, num_channels=512, metric=2.85 vs. limit=15.0
+2024-01-15 23:10:36,319 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.450e+02 1.698e+02 1.858e+02 2.185e+02 3.048e+02, threshold=3.715e+02, percent-clipped=0.0
+2024-01-15 23:10:40,349 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.ff2_skip_rate, batch_count=111156.66666666667, ans=0.0
+2024-01-15 23:10:41,510 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer2.prob, batch_count=111156.66666666667, ans=0.125
+2024-01-15 23:10:47,870 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.out_combiner.scale_min, batch_count=111190.0, ans=0.2
+2024-01-15 23:11:02,777 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.bypass.skip_rate, batch_count=111223.33333333333, ans=0.07
+2024-01-15 23:11:09,520 INFO [train.py:994] (0/2) Epoch 40, batch 500, loss[loss=0.1144, simple_loss=0.1861, pruned_loss=0.02129, over 23548.00 frames. ], tot_loss[loss=0.1351, simple_loss=0.2161, pruned_loss=0.02708, over 4421140.47 frames. ], batch size: 119, lr: 1.05e-02, grad_scale: 32.0
+2024-01-15 23:11:09,760 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.conv_module2.balancer1.prob, batch_count=111256.66666666667, ans=0.125
+2024-01-15 23:11:17,363 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=111256.66666666667, ans=0.125
+2024-01-15 23:11:20,781 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.bypass.scale_min, batch_count=111290.0, ans=0.2
+2024-01-15 23:11:30,786 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.out_combiner.scale_min, batch_count=111290.0, ans=0.2
+2024-01-15 23:11:40,026 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.nonlin_attention.balancer.prob, batch_count=111323.33333333333, ans=0.125
+2024-01-15 23:11:43,532 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.conv_skip_rate, batch_count=111323.33333333333, ans=0.0
+2024-01-15 23:11:49,973 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.1.encoder.layers.1.self_attn2.whiten, num_groups=1, num_channels=256, metric=16.16 vs. limit=22.5
+2024-01-15 23:11:54,737 INFO [scaling.py:1118] (0/2) WithLoss: name=encoder.encoders.4.encoder.layers.0.self_attn_weights, loss-sum=0.000e+00
+2024-01-15 23:12:11,769 INFO [train.py:994] (0/2) Epoch 40, batch 550, loss[loss=0.1149, simple_loss=0.1958, pruned_loss=0.01702, over 23981.00 frames. ], tot_loss[loss=0.1347, simple_loss=0.2157, pruned_loss=0.02684, over 4512457.36 frames. ], batch size: 131, lr: 1.05e-02, grad_scale: 32.0
+2024-01-15 23:12:13,162 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.1.conv_module1.balancer2.prob, batch_count=111423.33333333333, ans=0.125
+2024-01-15 23:12:25,696 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=111456.66666666667, ans=0.0
+2024-01-15 23:12:33,595 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.0.conv_module1.balancer1.prob, batch_count=111456.66666666667, ans=0.125
+2024-01-15 23:12:39,615 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.balancer_ff2.min_abs, batch_count=111490.0, ans=0.1
+2024-01-15 23:12:40,492 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.335e+02 1.659e+02 1.800e+02 2.093e+02 3.891e+02, threshold=3.601e+02, percent-clipped=1.0
+2024-01-15 23:12:49,841 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.feed_forward2.hidden_balancer.prob, batch_count=111523.33333333333, ans=0.125
+2024-01-15 23:12:55,176 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.1.conv_module2.balancer2.prob, batch_count=111523.33333333333, ans=0.125
+2024-01-15 23:12:57,673 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.feed_forward1.hidden_balancer.prob, batch_count=111523.33333333333, ans=0.125
+2024-01-15 23:13:06,031 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.attention_skip_rate, batch_count=111556.66666666667, ans=0.0
+2024-01-15 23:13:14,804 INFO [train.py:994] (0/2) Epoch 40, batch 600, loss[loss=0.1254, simple_loss=0.2092, pruned_loss=0.02083, over 24203.00 frames. ], tot_loss[loss=0.1352, simple_loss=0.2165, pruned_loss=0.02697, over 4584024.95 frames. ], batch size: 140, lr: 1.05e-02, grad_scale: 32.0
+2024-01-15 23:13:21,663 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.self_attn_weights.pos_emb_skip_rate, batch_count=111590.0, ans=0.0
+2024-01-15 23:13:34,448 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.bypass.scale_min, batch_count=111623.33333333333, ans=0.2
+2024-01-15 23:13:39,019 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.feed_forward1.out_whiten, num_groups=1, num_channels=384, metric=6.35 vs. limit=15.0
+2024-01-15 23:13:47,957 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.self_attn2.whiten, num_groups=1, num_channels=384, metric=28.84 vs. limit=22.5
+2024-01-15 23:14:08,065 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.1.ff3_skip_rate, batch_count=111723.33333333333, ans=0.0
+2024-01-15 23:14:16,601 INFO [train.py:994] (0/2) Epoch 40, batch 650, loss[loss=0.1122, simple_loss=0.1875, pruned_loss=0.01843, over 24103.00 frames. ], tot_loss[loss=0.1352, simple_loss=0.2164, pruned_loss=0.02702, over 4642478.82 frames. ], batch size: 132, lr: 1.05e-02, grad_scale: 32.0
+2024-01-15 23:14:19,247 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111756.66666666667, ans=0.1
+2024-01-15 23:14:19,352 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.feed_forward1.out_proj.dropout_p, batch_count=111756.66666666667, ans=0.1
+2024-01-15 23:14:28,100 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.0.conv_module2.balancer2.min_positive, batch_count=111790.0, ans=0.05
+2024-01-15 23:14:30,911 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.2.encoder.layers.0.nonlin_attention.whiten2, num_groups=1, num_channels=384, metric=10.16 vs. limit=15.0
+2024-01-15 23:14:35,543 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.0.whiten, num_groups=1, num_channels=512, metric=4.87 vs. limit=12.0
+2024-01-15 23:14:44,874 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.423e+02 1.625e+02 1.766e+02 2.027e+02 4.095e+02, threshold=3.531e+02, percent-clipped=1.0
+2024-01-15 23:14:45,239 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.3.encoder.layers.3.bypass_mid.scale_min, batch_count=111823.33333333333, ans=0.2
+2024-01-15 23:15:18,246 INFO [train.py:994] (0/2) Epoch 40, batch 700, loss[loss=0.1456, simple_loss=0.2219, pruned_loss=0.03461, over 24381.00 frames. ], tot_loss[loss=0.1352, simple_loss=0.2163, pruned_loss=0.02699, over 4687513.21 frames. ], batch size: 153, lr: 1.05e-02, grad_scale: 32.0
+2024-01-15 23:15:57,659 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.0.layers.1.conv_module2.balancer2.prob, batch_count=112023.33333333333, ans=0.125
+2024-01-15 23:16:20,892 INFO [train.py:994] (0/2) Epoch 40, batch 750, loss[loss=0.1207, simple_loss=0.2065, pruned_loss=0.01744, over 24297.00 frames. ], tot_loss[loss=0.1351, simple_loss=0.2162, pruned_loss=0.02703, over 4713184.85 frames. ], batch size: 147, lr: 1.05e-02, grad_scale: 32.0
+2024-01-15 23:16:22,652 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=288, metric=3.60 vs. limit=10.0
+2024-01-15 23:16:28,537 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.4.encoder.layers.1.whiten, num_groups=1, num_channels=384, metric=2.72 vs. limit=12.0
+2024-01-15 23:16:39,686 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.2.balancer1.prob, batch_count=112123.33333333333, ans=0.125
+2024-01-15 23:16:40,888 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.4.encoder.layers.1.conv_module1.balancer1.prob, batch_count=112123.33333333333, ans=0.125
+2024-01-15 23:16:44,889 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.0.conv_module1.balancer1.min_positive, batch_count=112156.66666666667, ans=0.025
+2024-01-15 23:16:49,277 WARNING [optim.py:484] (0/2) Clipping_scale=2.0, grad-norm quartiles 1.344e+02 1.684e+02 1.887e+02 2.105e+02 4.185e+02, threshold=3.775e+02, percent-clipped=1.0
+2024-01-15 23:16:54,322 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.1.encoder.layers.0.balancer2.prob, batch_count=112156.66666666667, ans=0.125
+2024-01-15 23:16:54,420 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.5.encoder.layers.0.conv_module1.balancer1.prob, batch_count=112156.66666666667, ans=0.125
+2024-01-15 23:17:12,193 INFO [scaling.py:213] (0/2) ScheduledFloat: name=encoder.encoders.2.encoder.layers.2.self_attn_weights.pos_emb_skip_rate, batch_count=112223.33333333333, ans=0.0
+2024-01-15 23:17:20,981 INFO [train.py:994] (0/2) Epoch 40, batch 800, loss[loss=0.1462, simple_loss=0.2302, pruned_loss=0.03107, over 24521.00 frames. ], tot_loss[loss=0.1356, simple_loss=0.2167, pruned_loss=0.02722, over 4741863.07 frames. ], batch size: 204, lr: 1.05e-02, grad_scale: 32.0
+2024-01-15 23:17:57,520 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.5.encoder.layers.1.conv_module1.whiten, num_groups=1, num_channels=256, metric=3.26 vs. limit=15.0
+2024-01-15 23:18:01,037 INFO [scaling.py:1022] (0/2) Whitening: name=encoder.encoders.3.encoder.layers.1.nonlin_attention.whiten1, num_groups=1, num_channels=384, metric=4.92 vs. limit=10.0
+2024-01-15 23:18:08,990 INFO [checkpoint.py:75] (0/2) Saving checkpoint to zipformer_bbpe/exp-context-size-2-lr-epochs-10-spec-aug-20-disable-musan/epoch-40.pt
+2024-01-15 23:18:13,150 INFO [train.py:1256] (0/2) Done!