pkufool committed on
Commit 9f390a4
1 Parent(s): 8abfeaa

add tal-csasr byte level bpe model

Files changed (44)
  1. data/lang_bbpe_500/bbpe.model +3 -0
  2. data/lang_bbpe_500/words.txt +0 -0
  3. exp/cpu_jit.pt +3 -0
  4. exp/export.sh +25 -0
  5. exp/log/log-train-2023-03-27-13-26-44-0 +0 -0
  6. exp/log/log-train-2023-03-27-13-26-44-1 +0 -0
  7. exp/log/log-train-2023-03-27-13-26-44-2 +0 -0
  8. exp/log/log-train-2023-03-27-13-26-44-3 +0 -0
  9. exp/log/log-train-2023-03-27-13-27-32-0 +0 -0
  10. exp/log/log-train-2023-03-27-13-27-32-1 +0 -0
  11. exp/log/log-train-2023-03-27-13-27-32-2 +0 -0
  12. exp/log/log-train-2023-03-27-13-27-32-3 +0 -0
  13. exp/log/log-train-2023-03-27-13-56-27-0 +0 -0
  14. exp/log/log-train-2023-03-27-13-56-27-1 +0 -0
  15. exp/log/log-train-2023-03-27-13-56-27-2 +0 -0
  16. exp/log/log-train-2023-03-27-13-56-27-3 +0 -0
  17. exp/log/log-train-2023-03-27-14-21-46-0 +20 -0
  18. exp/log/log-train-2023-03-27-14-21-46-1 +20 -0
  19. exp/log/log-train-2023-03-27-14-21-46-2 +20 -0
  20. exp/log/log-train-2023-03-27-14-21-46-3 +20 -0
  21. exp/log/log-train-2023-03-27-14-40-06-0 +23 -0
  22. exp/log/log-train-2023-03-27-14-40-06-1 +23 -0
  23. exp/log/log-train-2023-03-27-14-40-06-2 +23 -0
  24. exp/log/log-train-2023-03-27-14-40-06-3 +23 -0
  25. exp/log/log-train-2023-03-27-14-47-20-0 +0 -0
  26. exp/log/log-train-2023-03-27-14-47-20-1 +0 -0
  27. exp/log/log-train-2023-03-27-14-47-20-2 +0 -0
  28. exp/log/log-train-2023-03-27-14-47-20-3 +0 -0
  29. exp/log/log-train-2023-03-30-10-00-09-0 +30 -0
  30. exp/log/log-train-2023-03-30-10-00-09-1 +29 -0
  31. exp/log/log-train-2023-03-30-10-00-09-2 +29 -0
  32. exp/log/log-train-2023-03-30-10-00-09-3 +29 -0
  33. exp/pretrained.pt +3 -0
  34. exp/tensorboard/events.out.tfevents.1679894804.de-74279-k2-train-7-1218101249-5d97868c7c-v8ngc.234117.0 +3 -0
  35. exp/tensorboard/events.out.tfevents.1679894852.de-74279-k2-train-7-1218101249-5d97868c7c-v8ngc.235039.0 +3 -0
  36. exp/tensorboard/events.out.tfevents.1679896587.de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq.3200367.0 +3 -0
  37. exp/tensorboard/events.out.tfevents.1679898106.de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq.3202112.0 +3 -0
  38. exp/tensorboard/events.out.tfevents.1679899206.de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq.3202877.0 +3 -0
  39. exp/tensorboard/events.out.tfevents.1679899640.de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq.3203328.0 +3 -0
  40. exp/tensorboard/events.out.tfevents.1680141609.de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq.1268684.0 +3 -0
  41. test_waves/210_36476_210_8341_1_1533271973_7057520_132.wav +0 -0
  42. test_waves/210_36476_210_8341_1_1533271973_7057520_138.wav +0 -0
  43. test_waves/210_36476_210_8341_1_1533271973_7057520_145.wav +0 -0
  44. test_waves/210_36476_210_8341_1_1533271973_7057520_148.wav +0 -0
data/lang_bbpe_500/bbpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:425bde0ab72466999079e5981318e85ab4d2fc51d858aa910c0e42068068534f
+ size 245583
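
A minimal usage sketch (not part of this commit): bbpe.model is a SentencePiece model trained on byte-encoded text, so raw text must be byte-encoded before tokenization. The byte_encode/smart_byte_decode helpers are assumed to come from icefall's byte_utils module; the model path is relative to this repo.

import sentencepiece as spm
from icefall.byte_utils import byte_encode, smart_byte_decode  # assumed location

sp = spm.SentencePieceProcessor()
sp.load("data/lang_bbpe_500/bbpe.model")

text = "你好 HELLO WORLD"
# Byte-level BPE operates on a printable re-encoding of the UTF-8 bytes,
# not on the raw characters, so byte-encode the text first.
pieces = sp.encode(byte_encode(text), out_type=str)
ids = sp.encode(byte_encode(text), out_type=int)
print(pieces)
# smart_byte_decode maps the byte-encoded output back to readable text.
print(smart_byte_decode(sp.decode(ids)))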
data/lang_bbpe_500/words.txt ADDED
The diff for this file is too large to render. See raw diff
 
exp/cpu_jit.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39a441707d1bc4d6587cdfe75fe7dea80977a6eb5d87d88771b4e1ca4ffc7931
+ size 358526334
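
A minimal sketch for loading the TorchScript export above. The model was exported on CPU (CUDA_VISIBLE_DEVICES="" in exp/export.sh below), so map_location="cpu" is the safe choice; the encoder/decoder/joiner layout is the usual icefall transducer export, stated here as an assumption rather than read from this commit.

import torch

model = torch.jit.load("exp/cpu_jit.pt", map_location="cpu")
model.eval()
# The scripted module bundles the transducer submodules (model.encoder,
# model.decoder, model.joiner); inference front ends such as sherpa
# call them directly, with no Python model definition required.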
exp/export.sh ADDED
@@ -0,0 +1,25 @@
+ #!/usr/bin/env bash
+
+ set -x
+
+ K2_ROOT=/ceph-hw/kangwei/code/k2_release/k2
+ ICEFALL=/ceph-hw/kangwei/code2/icefall_bbpe
+
+ export PYTHONPATH=$K2_ROOT/k2/python:$PYTHONPATH
+ export PYTHONPATH=$K2_ROOT/build/lib:$PYTHONPATH
+ export PYTHONPATH=$ICEFALL:$PYTHONPATH
+
+ export CUDA_VISIBLE_DEVICES=""
+
+ ./pruned_transducer_stateless7_bbpe/export.py \
+   --epoch 35 \
+   --avg 26 \
+   --bpe-model data/lang_bbpe_500/bbpe.model \
+   --exp-dir ./pruned_transducer_stateless7_bbpe/exp
+
+ ./pruned_transducer_stateless7_bbpe/export.py \
+   --epoch 35 \
+   --avg 26 \
+   --bpe-model data/lang_bbpe_500/bbpe.model \
+   --exp-dir ./pruned_transducer_stateless7_bbpe/exp \
+   --jit 1
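
The script runs export.py twice with the same checkpoint selection (--epoch 35 --avg 26, i.e. averaging the checkpoints of the last 26 epochs ending at epoch 35): the first invocation writes exp/pretrained.pt, and the second adds --jit 1 to also produce the TorchScript exp/cpu_jit.pt; both files are part of this commit. A hedged sketch of restoring the averaged weights, assuming export.py's conventional {"model": state_dict} layout:

import torch

ckpt = torch.load("exp/pretrained.pt", map_location="cpu")
state_dict = ckpt["model"]  # assumed key, per icefall's export convention
# model.load_state_dict(state_dict) once the recipe's model is constructed.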
exp/log/log-train-2023-03-27-13-26-44-0 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-13-26-44-1 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-13-26-44-2 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-13-26-44-3 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-13-27-32-0 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-13-27-32-1 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-13-27-32-2 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-13-27-32-3 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-13-56-27-0 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-13-56-27-1 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-13-56-27-2 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-13-56-27-3 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-14-21-46-0 ADDED
@@ -0,0 +1,20 @@
+ 2023-03-27 14:21:46,453 INFO [train.py:962] (0/4) Training started
+ 2023-03-27 14:21:46,457 INFO [train.py:972] (0/4) Device: cuda:0
+ 2023-03-27 14:21:46,461 INFO [train.py:981] (0/4) {'frame_shift_ms': 10.0, 'allowed_excess_duration_ratio': 0.1, 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '9426c9f730820d291f5dcb06be337662595fa7b4', 'k2-git-date': 'Sun Feb 5 17:35:01 2023', 'lhotse-version': '1.13.0.dev+git.4cbd1bde.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'bbpe', 'icefall-git-sha1': 'e03c10a-dirty', 'icefall-git-date': 'Mon Mar 27 00:05:03 2023', 'icefall-path': '/ceph-kw/kangwei/code/icefall_bbpe', 'k2-path': '/ceph-hw/kangwei/code/k2_release/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-hw/kangwei/dev_tools/anaconda3/envs/rnnt2/lib/python3.8/site-packages/lhotse-1.13.0.dev0+git.4cbd1bde.clean-py3.8.egg/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq', 'IP address': '10.177.13.150'}, 'world_size': 4, 'master_port': 12535, 'tensorboard': True, 'num_epochs': 50, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_bbpe/exp'), 'bbpe_model': 'data/lang_bbpe_500/bbpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,4,3,2,4', 'feedforward_dims': '1024,1024,2048,2048,1024', 'nhead': '8,8,8,8,8', 'encoder_dims': '384,384,384,384,384', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '256,256,256,256,256', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 300, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+ 2023-03-27 14:21:46,461 INFO [train.py:983] (0/4) About to create model
+ 2023-03-27 14:21:47,388 INFO [zipformer.py:178] (0/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+ 2023-03-27 14:21:47,412 INFO [train.py:987] (0/4) Number of model parameters: 70369391
+ 2023-03-27 14:21:53,940 INFO [train.py:1002] (0/4) Using DDP
+ 2023-03-27 14:21:54,279 INFO [asr_datamodule.py:407] (0/4) About to get train cuts
+ 2023-03-27 14:21:54,282 INFO [asr_datamodule.py:224] (0/4) About to get Musan cuts
+ 2023-03-27 14:21:57,491 INFO [asr_datamodule.py:229] (0/4) Enable MUSAN
+ 2023-03-27 14:21:57,491 INFO [asr_datamodule.py:252] (0/4) Enable SpecAugment
+ 2023-03-27 14:21:57,491 INFO [asr_datamodule.py:253] (0/4) Time warp factor: 80
+ 2023-03-27 14:21:57,491 INFO [asr_datamodule.py:263] (0/4) Num frame mask: 10
+ 2023-03-27 14:21:57,492 INFO [asr_datamodule.py:276] (0/4) About to create train dataset
+ 2023-03-27 14:21:57,492 INFO [asr_datamodule.py:303] (0/4) Using DynamicBucketingSampler.
+ 2023-03-27 14:22:08,200 INFO [asr_datamodule.py:320] (0/4) About to create train dataloader
+ 2023-03-27 14:22:08,201 INFO [asr_datamodule.py:414] (0/4) About to get dev cuts
+ 2023-03-27 14:22:08,203 INFO [asr_datamodule.py:351] (0/4) About to create dev dataset
+ 2023-03-27 14:22:08,710 INFO [asr_datamodule.py:370] (0/4) About to create dev dataloader
+ 2023-03-27 14:22:08,711 INFO [train.py:1203] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
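
Each log-train-<timestamp>-<rank> file here is the log of one DDP rank: the (0/4) through (3/4) prefixes are rank/world_size for the 4-GPU run, so the four files sharing a timestamp describe the same training launch from four processes.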
exp/log/log-train-2023-03-27-14-21-46-1 ADDED
@@ -0,0 +1,20 @@
+ 2023-03-27 14:21:46,475 INFO [train.py:962] (1/4) Training started
+ 2023-03-27 14:21:46,476 INFO [train.py:972] (1/4) Device: cuda:1
+ 2023-03-27 14:21:46,479 INFO [train.py:981] (1/4) {'frame_shift_ms': 10.0, 'allowed_excess_duration_ratio': 0.1, 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '9426c9f730820d291f5dcb06be337662595fa7b4', 'k2-git-date': 'Sun Feb 5 17:35:01 2023', 'lhotse-version': '1.13.0.dev+git.4cbd1bde.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'bbpe', 'icefall-git-sha1': 'e03c10a-dirty', 'icefall-git-date': 'Mon Mar 27 00:05:03 2023', 'icefall-path': '/ceph-kw/kangwei/code/icefall_bbpe', 'k2-path': '/ceph-hw/kangwei/code/k2_release/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-hw/kangwei/dev_tools/anaconda3/envs/rnnt2/lib/python3.8/site-packages/lhotse-1.13.0.dev0+git.4cbd1bde.clean-py3.8.egg/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq', 'IP address': '10.177.13.150'}, 'world_size': 4, 'master_port': 12535, 'tensorboard': True, 'num_epochs': 50, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_bbpe/exp'), 'bbpe_model': 'data/lang_bbpe_500/bbpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,4,3,2,4', 'feedforward_dims': '1024,1024,2048,2048,1024', 'nhead': '8,8,8,8,8', 'encoder_dims': '384,384,384,384,384', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '256,256,256,256,256', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 300, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+ 2023-03-27 14:21:46,479 INFO [train.py:983] (1/4) About to create model
+ 2023-03-27 14:21:47,468 INFO [zipformer.py:178] (1/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+ 2023-03-27 14:21:47,492 INFO [train.py:987] (1/4) Number of model parameters: 70369391
+ 2023-03-27 14:21:53,959 INFO [train.py:1002] (1/4) Using DDP
+ 2023-03-27 14:21:54,278 INFO [asr_datamodule.py:407] (1/4) About to get train cuts
+ 2023-03-27 14:21:54,282 INFO [asr_datamodule.py:224] (1/4) About to get Musan cuts
+ 2023-03-27 14:21:57,610 INFO [asr_datamodule.py:229] (1/4) Enable MUSAN
+ 2023-03-27 14:21:57,610 INFO [asr_datamodule.py:252] (1/4) Enable SpecAugment
+ 2023-03-27 14:21:57,610 INFO [asr_datamodule.py:253] (1/4) Time warp factor: 80
+ 2023-03-27 14:21:57,610 INFO [asr_datamodule.py:263] (1/4) Num frame mask: 10
+ 2023-03-27 14:21:57,610 INFO [asr_datamodule.py:276] (1/4) About to create train dataset
+ 2023-03-27 14:21:57,611 INFO [asr_datamodule.py:303] (1/4) Using DynamicBucketingSampler.
+ 2023-03-27 14:22:07,928 INFO [asr_datamodule.py:320] (1/4) About to create train dataloader
+ 2023-03-27 14:22:07,929 INFO [asr_datamodule.py:414] (1/4) About to get dev cuts
+ 2023-03-27 14:22:07,931 INFO [asr_datamodule.py:351] (1/4) About to create dev dataset
+ 2023-03-27 14:22:08,423 INFO [asr_datamodule.py:370] (1/4) About to create dev dataloader
+ 2023-03-27 14:22:08,424 INFO [train.py:1203] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
exp/log/log-train-2023-03-27-14-21-46-2 ADDED
@@ -0,0 +1,20 @@
+ 2023-03-27 14:21:46,476 INFO [train.py:962] (2/4) Training started
+ 2023-03-27 14:21:46,476 INFO [train.py:972] (2/4) Device: cuda:2
+ 2023-03-27 14:21:46,480 INFO [train.py:981] (2/4) {'frame_shift_ms': 10.0, 'allowed_excess_duration_ratio': 0.1, 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '9426c9f730820d291f5dcb06be337662595fa7b4', 'k2-git-date': 'Sun Feb 5 17:35:01 2023', 'lhotse-version': '1.13.0.dev+git.4cbd1bde.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'bbpe', 'icefall-git-sha1': 'e03c10a-dirty', 'icefall-git-date': 'Mon Mar 27 00:05:03 2023', 'icefall-path': '/ceph-kw/kangwei/code/icefall_bbpe', 'k2-path': '/ceph-hw/kangwei/code/k2_release/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-hw/kangwei/dev_tools/anaconda3/envs/rnnt2/lib/python3.8/site-packages/lhotse-1.13.0.dev0+git.4cbd1bde.clean-py3.8.egg/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq', 'IP address': '10.177.13.150'}, 'world_size': 4, 'master_port': 12535, 'tensorboard': True, 'num_epochs': 50, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_bbpe/exp'), 'bbpe_model': 'data/lang_bbpe_500/bbpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,4,3,2,4', 'feedforward_dims': '1024,1024,2048,2048,1024', 'nhead': '8,8,8,8,8', 'encoder_dims': '384,384,384,384,384', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '256,256,256,256,256', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 300, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+ 2023-03-27 14:21:46,480 INFO [train.py:983] (2/4) About to create model
+ 2023-03-27 14:21:47,417 INFO [zipformer.py:178] (2/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+ 2023-03-27 14:21:47,441 INFO [train.py:987] (2/4) Number of model parameters: 70369391
+ 2023-03-27 14:21:53,897 INFO [train.py:1002] (2/4) Using DDP
+ 2023-03-27 14:21:54,279 INFO [asr_datamodule.py:407] (2/4) About to get train cuts
+ 2023-03-27 14:21:54,282 INFO [asr_datamodule.py:224] (2/4) About to get Musan cuts
+ 2023-03-27 14:21:57,583 INFO [asr_datamodule.py:229] (2/4) Enable MUSAN
+ 2023-03-27 14:21:57,584 INFO [asr_datamodule.py:252] (2/4) Enable SpecAugment
+ 2023-03-27 14:21:57,584 INFO [asr_datamodule.py:253] (2/4) Time warp factor: 80
+ 2023-03-27 14:21:57,584 INFO [asr_datamodule.py:263] (2/4) Num frame mask: 10
+ 2023-03-27 14:21:57,584 INFO [asr_datamodule.py:276] (2/4) About to create train dataset
+ 2023-03-27 14:21:57,585 INFO [asr_datamodule.py:303] (2/4) Using DynamicBucketingSampler.
+ 2023-03-27 14:22:07,801 INFO [asr_datamodule.py:320] (2/4) About to create train dataloader
+ 2023-03-27 14:22:07,802 INFO [asr_datamodule.py:414] (2/4) About to get dev cuts
+ 2023-03-27 14:22:07,804 INFO [asr_datamodule.py:351] (2/4) About to create dev dataset
+ 2023-03-27 14:22:08,308 INFO [asr_datamodule.py:370] (2/4) About to create dev dataloader
+ 2023-03-27 14:22:08,308 INFO [train.py:1203] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
exp/log/log-train-2023-03-27-14-21-46-3 ADDED
@@ -0,0 +1,20 @@
+ 2023-03-27 14:21:46,467 INFO [train.py:962] (3/4) Training started
+ 2023-03-27 14:21:46,468 INFO [train.py:972] (3/4) Device: cuda:3
+ 2023-03-27 14:21:46,471 INFO [train.py:981] (3/4) {'frame_shift_ms': 10.0, 'allowed_excess_duration_ratio': 0.1, 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '9426c9f730820d291f5dcb06be337662595fa7b4', 'k2-git-date': 'Sun Feb 5 17:35:01 2023', 'lhotse-version': '1.13.0.dev+git.4cbd1bde.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'bbpe', 'icefall-git-sha1': 'e03c10a-dirty', 'icefall-git-date': 'Mon Mar 27 00:05:03 2023', 'icefall-path': '/ceph-kw/kangwei/code/icefall_bbpe', 'k2-path': '/ceph-hw/kangwei/code/k2_release/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-hw/kangwei/dev_tools/anaconda3/envs/rnnt2/lib/python3.8/site-packages/lhotse-1.13.0.dev0+git.4cbd1bde.clean-py3.8.egg/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq', 'IP address': '10.177.13.150'}, 'world_size': 4, 'master_port': 12535, 'tensorboard': True, 'num_epochs': 50, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_bbpe/exp'), 'bbpe_model': 'data/lang_bbpe_500/bbpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,4,3,2,4', 'feedforward_dims': '1024,1024,2048,2048,1024', 'nhead': '8,8,8,8,8', 'encoder_dims': '384,384,384,384,384', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '256,256,256,256,256', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 300, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+ 2023-03-27 14:21:46,472 INFO [train.py:983] (3/4) About to create model
+ 2023-03-27 14:21:47,379 INFO [zipformer.py:178] (3/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+ 2023-03-27 14:21:47,402 INFO [train.py:987] (3/4) Number of model parameters: 70369391
+ 2023-03-27 14:21:53,844 INFO [train.py:1002] (3/4) Using DDP
+ 2023-03-27 14:21:54,279 INFO [asr_datamodule.py:407] (3/4) About to get train cuts
+ 2023-03-27 14:21:54,282 INFO [asr_datamodule.py:224] (3/4) About to get Musan cuts
+ 2023-03-27 14:21:57,545 INFO [asr_datamodule.py:229] (3/4) Enable MUSAN
+ 2023-03-27 14:21:57,545 INFO [asr_datamodule.py:252] (3/4) Enable SpecAugment
+ 2023-03-27 14:21:57,545 INFO [asr_datamodule.py:253] (3/4) Time warp factor: 80
+ 2023-03-27 14:21:57,545 INFO [asr_datamodule.py:263] (3/4) Num frame mask: 10
+ 2023-03-27 14:21:57,546 INFO [asr_datamodule.py:276] (3/4) About to create train dataset
+ 2023-03-27 14:21:57,546 INFO [asr_datamodule.py:303] (3/4) Using DynamicBucketingSampler.
+ 2023-03-27 14:22:07,765 INFO [asr_datamodule.py:320] (3/4) About to create train dataloader
+ 2023-03-27 14:22:07,766 INFO [asr_datamodule.py:414] (3/4) About to get dev cuts
+ 2023-03-27 14:22:07,769 INFO [asr_datamodule.py:351] (3/4) About to create dev dataset
+ 2023-03-27 14:22:08,272 INFO [asr_datamodule.py:370] (3/4) About to create dev dataloader
+ 2023-03-27 14:22:08,272 INFO [train.py:1203] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
exp/log/log-train-2023-03-27-14-40-06-0 ADDED
@@ -0,0 +1,23 @@
+ 2023-03-27 14:40:06,228 INFO [train.py:962] (0/4) Training started
+ 2023-03-27 14:40:06,231 INFO [train.py:972] (0/4) Device: cuda:0
+ 2023-03-27 14:40:06,235 INFO [train.py:981] (0/4) {'frame_shift_ms': 10.0, 'allowed_excess_duration_ratio': 0.1, 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '9426c9f730820d291f5dcb06be337662595fa7b4', 'k2-git-date': 'Sun Feb 5 17:35:01 2023', 'lhotse-version': '1.13.0.dev+git.4cbd1bde.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'bbpe', 'icefall-git-sha1': 'e03c10a-dirty', 'icefall-git-date': 'Mon Mar 27 00:05:03 2023', 'icefall-path': '/ceph-kw/kangwei/code/icefall_bbpe', 'k2-path': '/ceph-hw/kangwei/code/k2_release/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-hw/kangwei/dev_tools/anaconda3/envs/rnnt2/lib/python3.8/site-packages/lhotse-1.13.0.dev0+git.4cbd1bde.clean-py3.8.egg/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq', 'IP address': '10.177.13.150'}, 'world_size': 4, 'master_port': 12535, 'tensorboard': True, 'num_epochs': 50, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_bbpe/exp'), 'bbpe_model': 'data/lang_bbpe_500/bbpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,4,3,2,4', 'feedforward_dims': '1024,1024,2048,2048,1024', 'nhead': '8,8,8,8,8', 'encoder_dims': '384,384,384,384,384', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '256,256,256,256,256', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 300, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+ 2023-03-27 14:40:06,235 INFO [train.py:983] (0/4) About to create model
+ 2023-03-27 14:40:07,179 INFO [zipformer.py:178] (0/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+ 2023-03-27 14:40:07,203 INFO [train.py:987] (0/4) Number of model parameters: 70369391
+ 2023-03-27 14:40:13,784 INFO [train.py:1002] (0/4) Using DDP
+ 2023-03-27 14:40:14,128 INFO [asr_datamodule.py:407] (0/4) About to get train cuts
+ 2023-03-27 14:40:14,130 INFO [train.py:1083] (0/4) Filtering short and long utterances.
+ 2023-03-27 14:40:14,130 INFO [train.py:1086] (0/4) Tokenizing and encoding texts in train cuts.
+ 2023-03-27 14:40:14,130 INFO [asr_datamodule.py:224] (0/4) About to get Musan cuts
+ 2023-03-27 14:40:17,371 INFO [asr_datamodule.py:229] (0/4) Enable MUSAN
+ 2023-03-27 14:40:17,371 INFO [asr_datamodule.py:252] (0/4) Enable SpecAugment
+ 2023-03-27 14:40:17,371 INFO [asr_datamodule.py:253] (0/4) Time warp factor: 80
+ 2023-03-27 14:40:17,371 INFO [asr_datamodule.py:263] (0/4) Num frame mask: 10
+ 2023-03-27 14:40:17,372 INFO [asr_datamodule.py:276] (0/4) About to create train dataset
+ 2023-03-27 14:40:17,372 INFO [asr_datamodule.py:303] (0/4) Using DynamicBucketingSampler.
+ 2023-03-27 14:40:28,100 INFO [asr_datamodule.py:320] (0/4) About to create train dataloader
+ 2023-03-27 14:40:28,101 INFO [asr_datamodule.py:414] (0/4) About to get dev cuts
+ 2023-03-27 14:40:28,103 INFO [train.py:1102] (0/4) Tokenizing and encoding texts in valid cuts.
+ 2023-03-27 14:40:28,103 INFO [asr_datamodule.py:351] (0/4) About to create dev dataset
+ 2023-03-27 14:40:28,934 INFO [asr_datamodule.py:370] (0/4) About to create dev dataloader
+ 2023-03-27 14:40:28,934 INFO [train.py:1209] (0/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
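
Compared with the 14-21-46 launch above, this restart additionally logs "Filtering short and long utterances." and "Tokenizing and encoding texts in train cuts.", reflecting data-pipeline steps added to train.py between the two launches.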
exp/log/log-train-2023-03-27-14-40-06-1 ADDED
@@ -0,0 +1,23 @@
+ 2023-03-27 14:40:06,228 INFO [train.py:962] (1/4) Training started
+ 2023-03-27 14:40:06,228 INFO [train.py:972] (1/4) Device: cuda:1
+ 2023-03-27 14:40:06,235 INFO [train.py:981] (1/4) {'frame_shift_ms': 10.0, 'allowed_excess_duration_ratio': 0.1, 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '9426c9f730820d291f5dcb06be337662595fa7b4', 'k2-git-date': 'Sun Feb 5 17:35:01 2023', 'lhotse-version': '1.13.0.dev+git.4cbd1bde.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'bbpe', 'icefall-git-sha1': 'e03c10a-dirty', 'icefall-git-date': 'Mon Mar 27 00:05:03 2023', 'icefall-path': '/ceph-kw/kangwei/code/icefall_bbpe', 'k2-path': '/ceph-hw/kangwei/code/k2_release/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-hw/kangwei/dev_tools/anaconda3/envs/rnnt2/lib/python3.8/site-packages/lhotse-1.13.0.dev0+git.4cbd1bde.clean-py3.8.egg/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq', 'IP address': '10.177.13.150'}, 'world_size': 4, 'master_port': 12535, 'tensorboard': True, 'num_epochs': 50, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_bbpe/exp'), 'bbpe_model': 'data/lang_bbpe_500/bbpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,4,3,2,4', 'feedforward_dims': '1024,1024,2048,2048,1024', 'nhead': '8,8,8,8,8', 'encoder_dims': '384,384,384,384,384', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '256,256,256,256,256', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 300, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+ 2023-03-27 14:40:06,236 INFO [train.py:983] (1/4) About to create model
+ 2023-03-27 14:40:07,148 INFO [zipformer.py:178] (1/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+ 2023-03-27 14:40:07,172 INFO [train.py:987] (1/4) Number of model parameters: 70369391
+ 2023-03-27 14:40:13,726 INFO [train.py:1002] (1/4) Using DDP
+ 2023-03-27 14:40:14,127 INFO [asr_datamodule.py:407] (1/4) About to get train cuts
+ 2023-03-27 14:40:14,130 INFO [train.py:1083] (1/4) Filtering short and long utterances.
+ 2023-03-27 14:40:14,130 INFO [train.py:1086] (1/4) Tokenizing and encoding texts in train cuts.
+ 2023-03-27 14:40:14,130 INFO [asr_datamodule.py:224] (1/4) About to get Musan cuts
+ 2023-03-27 14:40:17,429 INFO [asr_datamodule.py:229] (1/4) Enable MUSAN
+ 2023-03-27 14:40:17,429 INFO [asr_datamodule.py:252] (1/4) Enable SpecAugment
+ 2023-03-27 14:40:17,429 INFO [asr_datamodule.py:253] (1/4) Time warp factor: 80
+ 2023-03-27 14:40:17,430 INFO [asr_datamodule.py:263] (1/4) Num frame mask: 10
+ 2023-03-27 14:40:17,430 INFO [asr_datamodule.py:276] (1/4) About to create train dataset
+ 2023-03-27 14:40:17,430 INFO [asr_datamodule.py:303] (1/4) Using DynamicBucketingSampler.
+ 2023-03-27 14:40:27,645 INFO [asr_datamodule.py:320] (1/4) About to create train dataloader
+ 2023-03-27 14:40:27,646 INFO [asr_datamodule.py:414] (1/4) About to get dev cuts
+ 2023-03-27 14:40:27,654 INFO [train.py:1102] (1/4) Tokenizing and encoding texts in valid cuts.
+ 2023-03-27 14:40:27,655 INFO [asr_datamodule.py:351] (1/4) About to create dev dataset
+ 2023-03-27 14:40:28,469 INFO [asr_datamodule.py:370] (1/4) About to create dev dataloader
+ 2023-03-27 14:40:28,470 INFO [train.py:1209] (1/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
exp/log/log-train-2023-03-27-14-40-06-2 ADDED
@@ -0,0 +1,23 @@
+ 2023-03-27 14:40:06,228 INFO [train.py:962] (2/4) Training started
+ 2023-03-27 14:40:06,228 INFO [train.py:972] (2/4) Device: cuda:2
+ 2023-03-27 14:40:06,235 INFO [train.py:981] (2/4) {'frame_shift_ms': 10.0, 'allowed_excess_duration_ratio': 0.1, 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '9426c9f730820d291f5dcb06be337662595fa7b4', 'k2-git-date': 'Sun Feb 5 17:35:01 2023', 'lhotse-version': '1.13.0.dev+git.4cbd1bde.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'bbpe', 'icefall-git-sha1': 'e03c10a-dirty', 'icefall-git-date': 'Mon Mar 27 00:05:03 2023', 'icefall-path': '/ceph-kw/kangwei/code/icefall_bbpe', 'k2-path': '/ceph-hw/kangwei/code/k2_release/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-hw/kangwei/dev_tools/anaconda3/envs/rnnt2/lib/python3.8/site-packages/lhotse-1.13.0.dev0+git.4cbd1bde.clean-py3.8.egg/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq', 'IP address': '10.177.13.150'}, 'world_size': 4, 'master_port': 12535, 'tensorboard': True, 'num_epochs': 50, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_bbpe/exp'), 'bbpe_model': 'data/lang_bbpe_500/bbpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,4,3,2,4', 'feedforward_dims': '1024,1024,2048,2048,1024', 'nhead': '8,8,8,8,8', 'encoder_dims': '384,384,384,384,384', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '256,256,256,256,256', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 300, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+ 2023-03-27 14:40:06,235 INFO [train.py:983] (2/4) About to create model
+ 2023-03-27 14:40:07,155 INFO [zipformer.py:178] (2/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+ 2023-03-27 14:40:07,179 INFO [train.py:987] (2/4) Number of model parameters: 70369391
+ 2023-03-27 14:40:13,683 INFO [train.py:1002] (2/4) Using DDP
+ 2023-03-27 14:40:14,127 INFO [asr_datamodule.py:407] (2/4) About to get train cuts
+ 2023-03-27 14:40:14,130 INFO [train.py:1083] (2/4) Filtering short and long utterances.
+ 2023-03-27 14:40:14,130 INFO [train.py:1086] (2/4) Tokenizing and encoding texts in train cuts.
+ 2023-03-27 14:40:14,130 INFO [asr_datamodule.py:224] (2/4) About to get Musan cuts
+ 2023-03-27 14:40:17,426 INFO [asr_datamodule.py:229] (2/4) Enable MUSAN
+ 2023-03-27 14:40:17,427 INFO [asr_datamodule.py:252] (2/4) Enable SpecAugment
+ 2023-03-27 14:40:17,427 INFO [asr_datamodule.py:253] (2/4) Time warp factor: 80
+ 2023-03-27 14:40:17,427 INFO [asr_datamodule.py:263] (2/4) Num frame mask: 10
+ 2023-03-27 14:40:17,427 INFO [asr_datamodule.py:276] (2/4) About to create train dataset
+ 2023-03-27 14:40:17,428 INFO [asr_datamodule.py:303] (2/4) Using DynamicBucketingSampler.
+ 2023-03-27 14:40:27,722 INFO [asr_datamodule.py:320] (2/4) About to create train dataloader
+ 2023-03-27 14:40:27,722 INFO [asr_datamodule.py:414] (2/4) About to get dev cuts
+ 2023-03-27 14:40:27,724 INFO [train.py:1102] (2/4) Tokenizing and encoding texts in valid cuts.
+ 2023-03-27 14:40:27,724 INFO [asr_datamodule.py:351] (2/4) About to create dev dataset
+ 2023-03-27 14:40:28,563 INFO [asr_datamodule.py:370] (2/4) About to create dev dataloader
+ 2023-03-27 14:40:28,563 INFO [train.py:1209] (2/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
exp/log/log-train-2023-03-27-14-40-06-3 ADDED
@@ -0,0 +1,23 @@
+ 2023-03-27 14:40:06,230 INFO [train.py:962] (3/4) Training started
+ 2023-03-27 14:40:06,230 INFO [train.py:972] (3/4) Device: cuda:3
+ 2023-03-27 14:40:06,235 INFO [train.py:981] (3/4) {'frame_shift_ms': 10.0, 'allowed_excess_duration_ratio': 0.1, 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '9426c9f730820d291f5dcb06be337662595fa7b4', 'k2-git-date': 'Sun Feb 5 17:35:01 2023', 'lhotse-version': '1.13.0.dev+git.4cbd1bde.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'bbpe', 'icefall-git-sha1': 'e03c10a-dirty', 'icefall-git-date': 'Mon Mar 27 00:05:03 2023', 'icefall-path': '/ceph-kw/kangwei/code/icefall_bbpe', 'k2-path': '/ceph-hw/kangwei/code/k2_release/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-hw/kangwei/dev_tools/anaconda3/envs/rnnt2/lib/python3.8/site-packages/lhotse-1.13.0.dev0+git.4cbd1bde.clean-py3.8.egg/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq', 'IP address': '10.177.13.150'}, 'world_size': 4, 'master_port': 12535, 'tensorboard': True, 'num_epochs': 50, 'start_epoch': 1, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_bbpe/exp'), 'bbpe_model': 'data/lang_bbpe_500/bbpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,4,3,2,4', 'feedforward_dims': '1024,1024,2048,2048,1024', 'nhead': '8,8,8,8,8', 'encoder_dims': '384,384,384,384,384', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '256,256,256,256,256', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 300, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+ 2023-03-27 14:40:06,235 INFO [train.py:983] (3/4) About to create model
+ 2023-03-27 14:40:07,176 INFO [zipformer.py:178] (3/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+ 2023-03-27 14:40:07,199 INFO [train.py:987] (3/4) Number of model parameters: 70369391
+ 2023-03-27 14:40:13,754 INFO [train.py:1002] (3/4) Using DDP
+ 2023-03-27 14:40:14,127 INFO [asr_datamodule.py:407] (3/4) About to get train cuts
+ 2023-03-27 14:40:14,130 INFO [train.py:1083] (3/4) Filtering short and long utterances.
+ 2023-03-27 14:40:14,130 INFO [train.py:1086] (3/4) Tokenizing and encoding texts in train cuts.
+ 2023-03-27 14:40:14,130 INFO [asr_datamodule.py:224] (3/4) About to get Musan cuts
+ 2023-03-27 14:40:17,446 INFO [asr_datamodule.py:229] (3/4) Enable MUSAN
+ 2023-03-27 14:40:17,446 INFO [asr_datamodule.py:252] (3/4) Enable SpecAugment
+ 2023-03-27 14:40:17,446 INFO [asr_datamodule.py:253] (3/4) Time warp factor: 80
+ 2023-03-27 14:40:17,447 INFO [asr_datamodule.py:263] (3/4) Num frame mask: 10
+ 2023-03-27 14:40:17,447 INFO [asr_datamodule.py:276] (3/4) About to create train dataset
+ 2023-03-27 14:40:17,447 INFO [asr_datamodule.py:303] (3/4) Using DynamicBucketingSampler.
+ 2023-03-27 14:40:27,714 INFO [asr_datamodule.py:320] (3/4) About to create train dataloader
+ 2023-03-27 14:40:27,716 INFO [asr_datamodule.py:414] (3/4) About to get dev cuts
+ 2023-03-27 14:40:27,719 INFO [train.py:1102] (3/4) Tokenizing and encoding texts in valid cuts.
+ 2023-03-27 14:40:27,719 INFO [asr_datamodule.py:351] (3/4) About to create dev dataset
+ 2023-03-27 14:40:28,551 INFO [asr_datamodule.py:370] (3/4) About to create dev dataloader
+ 2023-03-27 14:40:28,552 INFO [train.py:1209] (3/4) Sanity check -- see if any of the batches in epoch 1 would cause OOM.
exp/log/log-train-2023-03-27-14-47-20-0 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-14-47-20-1 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-14-47-20-2 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-27-14-47-20-3 ADDED
The diff for this file is too large to render. See raw diff
 
exp/log/log-train-2023-03-30-10-00-09-0 ADDED
@@ -0,0 +1,30 @@
+ 2023-03-30 10:00:09,160 INFO [train.py:962] (0/4) Training started
+ 2023-03-30 10:00:09,163 INFO [train.py:972] (0/4) Device: cuda:0
+ 2023-03-30 10:00:09,171 INFO [train.py:981] (0/4) {'frame_shift_ms': 10.0, 'allowed_excess_duration_ratio': 0.1, 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '9426c9f730820d291f5dcb06be337662595fa7b4', 'k2-git-date': 'Sun Feb 5 17:35:01 2023', 'lhotse-version': '1.13.0.dev+git.4cbd1bde.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'bbpe', 'icefall-git-sha1': 'a7e0d24-dirty', 'icefall-git-date': 'Tue Mar 28 18:53:54 2023', 'icefall-path': '/ceph-kw/kangwei/code/icefall_bbpe', 'k2-path': '/ceph-hw/kangwei/code/k2_release/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-hw/kangwei/dev_tools/anaconda3/envs/rnnt2/lib/python3.8/site-packages/lhotse-1.13.0.dev0+git.4cbd1bde.clean-py3.8.egg/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq', 'IP address': '10.177.13.150'}, 'world_size': 4, 'master_port': 12535, 'tensorboard': True, 'num_epochs': 50, 'start_epoch': 48, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_bbpe/exp'), 'bbpe_model': 'data/lang_bbpe_500/bbpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,4,3,2,4', 'feedforward_dims': '1024,1024,2048,2048,1024', 'nhead': '8,8,8,8,8', 'encoder_dims': '384,384,384,384,384', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '256,256,256,256,256', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 300, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+ 2023-03-30 10:00:09,171 INFO [train.py:983] (0/4) About to create model
+ 2023-03-30 10:00:10,388 INFO [zipformer.py:178] (0/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+ 2023-03-30 10:00:10,412 INFO [train.py:987] (0/4) Number of model parameters: 70369391
+ 2023-03-30 10:00:11,412 INFO [checkpoint.py:112] (0/4) Loading checkpoint from pruned_transducer_stateless7_bbpe/exp/epoch-47.pt
+ 2023-03-30 10:00:18,352 INFO [checkpoint.py:131] (0/4) Loading averaged model
+ 2023-03-30 10:00:28,182 INFO [train.py:1002] (0/4) Using DDP
+ 2023-03-30 10:00:29,369 INFO [train.py:1019] (0/4) Loading optimizer state dict
+ 2023-03-30 10:00:30,470 INFO [train.py:1027] (0/4) Loading scheduler state dict
+ 2023-03-30 10:00:30,470 INFO [asr_datamodule.py:407] (0/4) About to get train cuts
+ 2023-03-30 10:00:30,475 INFO [train.py:1083] (0/4) Filtering short and long utterances.
+ 2023-03-30 10:00:30,475 INFO [train.py:1086] (0/4) Tokenizing and encoding texts in train cuts.
+ 2023-03-30 10:00:30,475 INFO [asr_datamodule.py:224] (0/4) About to get Musan cuts
+ 2023-03-30 10:00:34,273 INFO [asr_datamodule.py:229] (0/4) Enable MUSAN
+ 2023-03-30 10:00:34,274 INFO [asr_datamodule.py:252] (0/4) Enable SpecAugment
+ 2023-03-30 10:00:34,274 INFO [asr_datamodule.py:253] (0/4) Time warp factor: 80
+ 2023-03-30 10:00:34,274 INFO [asr_datamodule.py:263] (0/4) Num frame mask: 10
+ 2023-03-30 10:00:34,274 INFO [asr_datamodule.py:276] (0/4) About to create train dataset
+ 2023-03-30 10:00:34,275 INFO [asr_datamodule.py:303] (0/4) Using DynamicBucketingSampler.
+ 2023-03-30 10:00:46,518 INFO [asr_datamodule.py:320] (0/4) About to create train dataloader
+ 2023-03-30 10:00:46,519 INFO [asr_datamodule.py:414] (0/4) About to get dev cuts
+ 2023-03-30 10:00:46,523 INFO [train.py:1102] (0/4) Tokenizing and encoding texts in valid cuts.
+ 2023-03-30 10:00:46,523 INFO [asr_datamodule.py:351] (0/4) About to create dev dataset
+ 2023-03-30 10:00:47,475 INFO [asr_datamodule.py:370] (0/4) About to create dev dataloader
+ 2023-03-30 10:00:47,476 INFO [train.py:1119] (0/4) Loading grad scaler state dict
+ 2023-03-30 10:01:31,680 INFO [train.py:1188] (0/4) Saving batch to pruned_transducer_stateless7_bbpe/exp/batch-b406dd29-9b57-6d64-c490-5c0914c25b99.pt
+ 2023-03-30 10:01:31,809 INFO [train.py:1194] (0/4) features shape: torch.Size([85, 939, 80])
+ 2023-03-30 10:01:31,815 INFO [train.py:1198] (0/4) num tokens: 3836
exp/log/log-train-2023-03-30-10-00-09-1 ADDED
@@ -0,0 +1,29 @@
+ 2023-03-30 10:00:09,177 INFO [train.py:962] (1/4) Training started
+ 2023-03-30 10:00:09,177 INFO [train.py:972] (1/4) Device: cuda:1
+ 2023-03-30 10:00:09,181 INFO [train.py:981] (1/4) {'frame_shift_ms': 10.0, 'allowed_excess_duration_ratio': 0.1, 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '9426c9f730820d291f5dcb06be337662595fa7b4', 'k2-git-date': 'Sun Feb 5 17:35:01 2023', 'lhotse-version': '1.13.0.dev+git.4cbd1bde.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'bbpe', 'icefall-git-sha1': 'a7e0d24-dirty', 'icefall-git-date': 'Tue Mar 28 18:53:54 2023', 'icefall-path': '/ceph-kw/kangwei/code/icefall_bbpe', 'k2-path': '/ceph-hw/kangwei/code/k2_release/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-hw/kangwei/dev_tools/anaconda3/envs/rnnt2/lib/python3.8/site-packages/lhotse-1.13.0.dev0+git.4cbd1bde.clean-py3.8.egg/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq', 'IP address': '10.177.13.150'}, 'world_size': 4, 'master_port': 12535, 'tensorboard': True, 'num_epochs': 50, 'start_epoch': 48, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_bbpe/exp'), 'bbpe_model': 'data/lang_bbpe_500/bbpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,4,3,2,4', 'feedforward_dims': '1024,1024,2048,2048,1024', 'nhead': '8,8,8,8,8', 'encoder_dims': '384,384,384,384,384', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '256,256,256,256,256', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 300, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+ 2023-03-30 10:00:09,181 INFO [train.py:983] (1/4) About to create model
+ 2023-03-30 10:00:10,219 INFO [zipformer.py:178] (1/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+ 2023-03-30 10:00:10,245 INFO [train.py:987] (1/4) Number of model parameters: 70369391
+ 2023-03-30 10:00:10,245 INFO [checkpoint.py:112] (1/4) Loading checkpoint from pruned_transducer_stateless7_bbpe/exp/epoch-47.pt
+ 2023-03-30 10:00:28,814 INFO [train.py:1002] (1/4) Using DDP
+ 2023-03-30 10:00:29,385 INFO [train.py:1019] (1/4) Loading optimizer state dict
+ 2023-03-30 10:00:30,575 INFO [train.py:1027] (1/4) Loading scheduler state dict
+ 2023-03-30 10:00:30,575 INFO [asr_datamodule.py:407] (1/4) About to get train cuts
+ 2023-03-30 10:00:30,579 INFO [train.py:1083] (1/4) Filtering short and long utterances.
+ 2023-03-30 10:00:30,579 INFO [train.py:1086] (1/4) Tokenizing and encoding texts in train cuts.
+ 2023-03-30 10:00:30,579 INFO [asr_datamodule.py:224] (1/4) About to get Musan cuts
+ 2023-03-30 10:00:34,318 INFO [asr_datamodule.py:229] (1/4) Enable MUSAN
+ 2023-03-30 10:00:34,318 INFO [asr_datamodule.py:252] (1/4) Enable SpecAugment
+ 2023-03-30 10:00:34,318 INFO [asr_datamodule.py:253] (1/4) Time warp factor: 80
+ 2023-03-30 10:00:34,318 INFO [asr_datamodule.py:263] (1/4) Num frame mask: 10
+ 2023-03-30 10:00:34,319 INFO [asr_datamodule.py:276] (1/4) About to create train dataset
+ 2023-03-30 10:00:34,319 INFO [asr_datamodule.py:303] (1/4) Using DynamicBucketingSampler.
+ 2023-03-30 10:00:46,491 INFO [asr_datamodule.py:320] (1/4) About to create train dataloader
+ 2023-03-30 10:00:46,492 INFO [asr_datamodule.py:414] (1/4) About to get dev cuts
+ 2023-03-30 10:00:46,494 INFO [train.py:1102] (1/4) Tokenizing and encoding texts in valid cuts.
+ 2023-03-30 10:00:46,494 INFO [asr_datamodule.py:351] (1/4) About to create dev dataset
+ 2023-03-30 10:00:47,369 INFO [asr_datamodule.py:370] (1/4) About to create dev dataloader
+ 2023-03-30 10:00:47,370 INFO [train.py:1119] (1/4) Loading grad scaler state dict
+ 2023-03-30 10:01:32,484 INFO [train.py:1188] (1/4) Saving batch to pruned_transducer_stateless7_bbpe/exp/batch-b406dd29-9b57-6d64-c490-5c0914c25b99.pt
+ 2023-03-30 10:01:33,057 INFO [train.py:1194] (1/4) features shape: torch.Size([259, 308, 80])
+ 2023-03-30 10:01:33,065 INFO [train.py:1198] (1/4) num tokens: 4037
exp/log/log-train-2023-03-30-10-00-09-2 ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 2023-03-30 10:00:09,174 INFO [train.py:962] (2/4) Training started
2
+ 2023-03-30 10:00:09,174 INFO [train.py:972] (2/4) Device: cuda:2
3
+ 2023-03-30 10:00:09,177 INFO [train.py:981] (2/4) {'frame_shift_ms': 10.0, 'allowed_excess_duration_ratio': 0.1, 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '9426c9f730820d291f5dcb06be337662595fa7b4', 'k2-git-date': 'Sun Feb 5 17:35:01 2023', 'lhotse-version': '1.13.0.dev+git.4cbd1bde.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'bbpe', 'icefall-git-sha1': 'a7e0d24-dirty', 'icefall-git-date': 'Tue Mar 28 18:53:54 2023', 'icefall-path': '/ceph-kw/kangwei/code/icefall_bbpe', 'k2-path': '/ceph-hw/kangwei/code/k2_release/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-hw/kangwei/dev_tools/anaconda3/envs/rnnt2/lib/python3.8/site-packages/lhotse-1.13.0.dev0+git.4cbd1bde.clean-py3.8.egg/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq', 'IP address': '10.177.13.150'}, 'world_size': 4, 'master_port': 12535, 'tensorboard': True, 'num_epochs': 50, 'start_epoch': 48, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_bbpe/exp'), 'bbpe_model': 'data/lang_bbpe_500/bbpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,4,3,2,4', 'feedforward_dims': '1024,1024,2048,2048,1024', 'nhead': '8,8,8,8,8', 'encoder_dims': '384,384,384,384,384', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '256,256,256,256,256', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 300, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+ 2023-03-30 10:00:09,178 INFO [train.py:983] (2/4) About to create model
+ 2023-03-30 10:00:10,291 INFO [zipformer.py:178] (2/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+ 2023-03-30 10:00:10,315 INFO [train.py:987] (2/4) Number of model parameters: 70369391
+ 2023-03-30 10:00:10,316 INFO [checkpoint.py:112] (2/4) Loading checkpoint from pruned_transducer_stateless7_bbpe/exp/epoch-47.pt
+ 2023-03-30 10:00:28,798 INFO [train.py:1002] (2/4) Using DDP
+ 2023-03-30 10:00:29,385 INFO [train.py:1019] (2/4) Loading optimizer state dict
+ 2023-03-30 10:00:30,659 INFO [train.py:1027] (2/4) Loading scheduler state dict
+ 2023-03-30 10:00:30,659 INFO [asr_datamodule.py:407] (2/4) About to get train cuts
+ 2023-03-30 10:00:30,663 INFO [train.py:1083] (2/4) Filtering short and long utterances.
+ 2023-03-30 10:00:30,663 INFO [train.py:1086] (2/4) Tokenizing and encoding texts in train cuts.
+ 2023-03-30 10:00:30,664 INFO [asr_datamodule.py:224] (2/4) About to get Musan cuts
+ 2023-03-30 10:00:34,402 INFO [asr_datamodule.py:229] (2/4) Enable MUSAN
+ 2023-03-30 10:00:34,402 INFO [asr_datamodule.py:252] (2/4) Enable SpecAugment
+ 2023-03-30 10:00:34,402 INFO [asr_datamodule.py:253] (2/4) Time warp factor: 80
+ 2023-03-30 10:00:34,403 INFO [asr_datamodule.py:263] (2/4) Num frame mask: 10
+ 2023-03-30 10:00:34,403 INFO [asr_datamodule.py:276] (2/4) About to create train dataset
+ 2023-03-30 10:00:34,403 INFO [asr_datamodule.py:303] (2/4) Using DynamicBucketingSampler.
+ 2023-03-30 10:00:46,777 INFO [asr_datamodule.py:320] (2/4) About to create train dataloader
+ 2023-03-30 10:00:46,778 INFO [asr_datamodule.py:414] (2/4) About to get dev cuts
+ 2023-03-30 10:00:46,781 INFO [train.py:1102] (2/4) Tokenizing and encoding texts in valid cuts.
+ 2023-03-30 10:00:46,781 INFO [asr_datamodule.py:351] (2/4) About to create dev dataset
+ 2023-03-30 10:00:47,703 INFO [asr_datamodule.py:370] (2/4) About to create dev dataloader
+ 2023-03-30 10:00:47,703 INFO [train.py:1119] (2/4) Loading grad scaler state dict
+ 2023-03-30 10:01:32,650 INFO [train.py:1188] (2/4) Saving batch to pruned_transducer_stateless7_bbpe/exp/batch-b406dd29-9b57-6d64-c490-5c0914c25b99.pt
+ 2023-03-30 10:01:33,056 INFO [train.py:1194] (2/4) features shape: torch.Size([62, 1289, 80])
+ 2023-03-30 10:01:33,063 INFO [train.py:1198] (2/4) num tokens: 4120
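The "Saving batch to ..." line above is what train.py emits when a training step raises (typically CUDA OOM): the offending batch is dumped so it can be replayed offline. A minimal sketch for inspecting such a dump, assuming it holds the raw dataloader batch with "inputs" and "supervisions" keys as in icefall's display_and_save_batch (verify against your checkout):

import torch

# Load the batch dumped by train.py; the key layout ("inputs", "supervisions")
# is an assumption based on icefall's dataset code.
batch = torch.load(
    "pruned_transducer_stateless7_bbpe/exp/batch-b406dd29-9b57-6d64-c490-5c0914c25b99.pt",
    map_location="cpu",
)
print(batch["inputs"].shape)              # e.g. torch.Size([62, 1289, 80])
print(batch["supervisions"]["text"][:3])  # a few reference transcripts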
exp/log/log-train-2023-03-30-10-00-09-3 ADDED
@@ -0,0 +1,29 @@
+ 2023-03-30 10:00:09,172 INFO [train.py:962] (3/4) Training started
+ 2023-03-30 10:00:09,172 INFO [train.py:972] (3/4) Device: cuda:3
+ 2023-03-30 10:00:09,176 INFO [train.py:981] (3/4) {'frame_shift_ms': 10.0, 'allowed_excess_duration_ratio': 0.1, 'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.23.4', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '9426c9f730820d291f5dcb06be337662595fa7b4', 'k2-git-date': 'Sun Feb 5 17:35:01 2023', 'lhotse-version': '1.13.0.dev+git.4cbd1bde.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'bbpe', 'icefall-git-sha1': 'a7e0d24-dirty', 'icefall-git-date': 'Tue Mar 28 18:53:54 2023', 'icefall-path': '/ceph-kw/kangwei/code/icefall_bbpe', 'k2-path': '/ceph-hw/kangwei/code/k2_release/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-hw/kangwei/dev_tools/anaconda3/envs/rnnt2/lib/python3.8/site-packages/lhotse-1.13.0.dev0+git.4cbd1bde.clean-py3.8.egg/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq', 'IP address': '10.177.13.150'}, 'world_size': 4, 'master_port': 12535, 'tensorboard': True, 'num_epochs': 50, 'start_epoch': 48, 'start_batch': 0, 'exp_dir': PosixPath('pruned_transducer_stateless7_bbpe/exp'), 'bbpe_model': 'data/lang_bbpe_500/bbpe.model', 'base_lr': 0.05, 'lr_batches': 5000, 'lr_epochs': 3.5, 'context_size': 2, 'prune_range': 5, 'lm_scale': 0.25, 'am_scale': 0.0, 'simple_loss_scale': 0.5, 'seed': 42, 'print_diagnostics': False, 'inf_check': False, 'save_every_n': 2000, 'keep_last_k': 30, 'average_period': 200, 'use_fp16': True, 'num_encoder_layers': '2,4,3,2,4', 'feedforward_dims': '1024,1024,2048,2048,1024', 'nhead': '8,8,8,8,8', 'encoder_dims': '384,384,384,384,384', 'attention_dims': '192,192,192,192,192', 'encoder_unmasked_dims': '256,256,256,256,256', 'zipformer_downsampling_factors': '1,2,4,8,2', 'cnn_module_kernels': '31,31,31,31,31', 'decoder_dim': 512, 'joiner_dim': 512, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 800, 'bucketing_sampler': True, 'num_buckets': 300, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'blank_id': 0, 'vocab_size': 500}
+ 2023-03-30 10:00:09,176 INFO [train.py:983] (3/4) About to create model
+ 2023-03-30 10:00:10,427 INFO [zipformer.py:178] (3/4) At encoder stack 4, which has downsampling_factor=2, we will combine the outputs of layers 1 and 3, with downsampling_factors=2 and 8.
+ 2023-03-30 10:00:10,452 INFO [train.py:987] (3/4) Number of model parameters: 70369391
+ 2023-03-30 10:00:10,454 INFO [checkpoint.py:112] (3/4) Loading checkpoint from pruned_transducer_stateless7_bbpe/exp/epoch-47.pt
+ 2023-03-30 10:00:28,886 INFO [train.py:1002] (3/4) Using DDP
+ 2023-03-30 10:00:29,382 INFO [train.py:1019] (3/4) Loading optimizer state dict
+ 2023-03-30 10:00:30,510 INFO [train.py:1027] (3/4) Loading scheduler state dict
+ 2023-03-30 10:00:30,510 INFO [asr_datamodule.py:407] (3/4) About to get train cuts
+ 2023-03-30 10:00:30,514 INFO [train.py:1083] (3/4) Filtering short and long utterances.
+ 2023-03-30 10:00:30,514 INFO [train.py:1086] (3/4) Tokenizing and encoding texts in train cuts.
+ 2023-03-30 10:00:30,514 INFO [asr_datamodule.py:224] (3/4) About to get Musan cuts
+ 2023-03-30 10:00:34,230 INFO [asr_datamodule.py:229] (3/4) Enable MUSAN
+ 2023-03-30 10:00:34,230 INFO [asr_datamodule.py:252] (3/4) Enable SpecAugment
+ 2023-03-30 10:00:34,230 INFO [asr_datamodule.py:253] (3/4) Time warp factor: 80
+ 2023-03-30 10:00:34,230 INFO [asr_datamodule.py:263] (3/4) Num frame mask: 10
+ 2023-03-30 10:00:34,231 INFO [asr_datamodule.py:276] (3/4) About to create train dataset
+ 2023-03-30 10:00:34,231 INFO [asr_datamodule.py:303] (3/4) Using DynamicBucketingSampler.
+ 2023-03-30 10:00:46,645 INFO [asr_datamodule.py:320] (3/4) About to create train dataloader
+ 2023-03-30 10:00:46,647 INFO [asr_datamodule.py:414] (3/4) About to get dev cuts
+ 2023-03-30 10:00:46,649 INFO [train.py:1102] (3/4) Tokenizing and encoding texts in valid cuts.
+ 2023-03-30 10:00:46,649 INFO [asr_datamodule.py:351] (3/4) About to create dev dataset
+ 2023-03-30 10:00:47,573 INFO [asr_datamodule.py:370] (3/4) About to create dev dataloader
+ 2023-03-30 10:00:47,574 INFO [train.py:1119] (3/4) Loading grad scaler state dict
+ 2023-03-30 10:01:32,411 INFO [train.py:1188] (3/4) Saving batch to pruned_transducer_stateless7_bbpe/exp/batch-b406dd29-9b57-6d64-c490-5c0914c25b99.pt
+ 2023-03-30 10:01:32,531 INFO [train.py:1194] (3/4) features shape: torch.Size([55, 1440, 80])
+ 2023-03-30 10:01:32,538 INFO [train.py:1198] (3/4) num tokens: 3873
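All ranks report the same parameter count (70369391), as expected under DDP, where every rank holds a full model replica. The figure logged by train.py amounts to the standard PyTorch reduction sketched below (illustrative only; `model` would be the Zipformer transducer built from the hyperparameters above):

import torch.nn as nn

def count_parameters(model: nn.Module) -> int:
    # Mirrors the "Number of model parameters" line in the logs.
    return sum(p.numel() for p in model.parameters())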
exp/pretrained.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:761e7827ef1b168cf7e1c35394e61474882a266dc66b82be44f2bd7643ea2484
+ size 281766253
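Like the other large artifacts, exp/pretrained.pt is committed as a Git LFS pointer; the ~282 MB payload is fetched with `git lfs pull`. A sketch for peeking inside it once downloaded, assuming the usual icefall layout of a dict whose "model" key holds the state_dict (an assumption worth verifying):

import torch

ckpt = torch.load("exp/pretrained.pt", map_location="cpu")
print(ckpt.keys())                    # expected to include "model"
state_dict = ckpt.get("model", ckpt)  # fall back if the file is a bare state_dict
print(sum(t.numel() for t in state_dict.values()))  # should match 70369391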
exp/tensorboard/events.out.tfevents.1679894804.de-74279-k2-train-7-1218101249-5d97868c7c-v8ngc.234117.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d89e4d3f5e31e07e90a4dedc6606bd89260087391eb421ef2bb23e48e345b421
+ size 40
exp/tensorboard/events.out.tfevents.1679894852.de-74279-k2-train-7-1218101249-5d97868c7c-v8ngc.235039.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef31d55a13b77d0ca54c2cfc278d8afe8c4ebc8ee20d3fd12e912c9c7295525e
+ size 40
exp/tensorboard/events.out.tfevents.1679896587.de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq.3200367.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:234f100c553af9e792652fbd95cd8634c5bff2952521f62c5457d0ac8d2aed76
+ size 1652
exp/tensorboard/events.out.tfevents.1679898106.de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq.3202112.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:346139952f458eef654696e5c20af104416a7a39b0788048338a5e9f7a646f0c
+ size 40
exp/tensorboard/events.out.tfevents.1679899206.de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq.3202877.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f6ae2e7ffb5f3b5e561bf3819772b2b8f4d660a2da52b326a619484a73f13d9
+ size 40
exp/tensorboard/events.out.tfevents.1679899640.de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq.3203328.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a98234779ebbf701ef04c1354c086d499559a269e26016977293fb36a553f9fd
+ size 872115
exp/tensorboard/events.out.tfevents.1680141609.de-74279-k2-train-9-0208143539-7dcb6bfd79-b6fdq.1268684.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bacfb4f0d4abf2818c837cf03e0bf5f9b8c19263596890ffbfebc306b47e9232
+ size 91
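The event files above are LFS pointers as well. The 40-byte ones are effectively empty, presumably left by the restarted runs visible in the log filenames; the 872 kB file carries the bulk of the training curves. Once fetched, they can be read programmatically with TensorBoard's event accumulator, along these lines (the scalar tag name is an assumption):

from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("exp/tensorboard")
acc.Reload()                  # scan all event files in the directory
print(acc.Tags()["scalars"])  # available scalar tags
# for ev in acc.Scalars("train/tot_loss"):  # hypothetical tag name
#     print(ev.step, ev.value)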
test_waves/210_36476_210_8341_1_1533271973_7057520_132.wav ADDED
Binary file (163 kB).
test_waves/210_36476_210_8341_1_1533271973_7057520_138.wav ADDED
Binary file (150 kB).
test_waves/210_36476_210_8341_1_1533271973_7057520_145.wav ADDED
Binary file (283 kB).
test_waves/210_36476_210_8341_1_1533271973_7057520_148.wav ADDED
Binary file (565 kB).
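The four test waves can be pushed through the usual feature pipeline. A sketch of computing the 80-dim fbank features the model expects (feature_dim=80 and frame_shift_ms=10.0 in the config above), using torchaudio's Kaldi-compatible fbank as a stand-in for the precomputed lhotse features used in training:

import torchaudio
import torchaudio.compliance.kaldi as kaldi

wave, sr = torchaudio.load("test_waves/210_36476_210_8341_1_1533271973_7057520_132.wav")
wave = wave * 32768  # Kaldi-style fbank assumes int16-range samples
feats = kaldi.fbank(
    wave, num_mel_bins=80, frame_length=25.0, frame_shift=10.0, sample_frequency=sr
)
print(feats.shape)  # (num_frames, 80)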