initial commit
- train_960h_hubert_large/epoch-12-avg-7.pt +3 -0
- train_960h_hubert_large/modified_beam_search/errs-test-clean-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt +0 -0
- train_960h_hubert_large/modified_beam_search/errs-test-other-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt +0 -0
- train_960h_hubert_large/modified_beam_search/log-decode-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model-2022-09-29-15-45-49 +18 -0
- train_960h_hubert_large/modified_beam_search/log-decode-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model-2022-09-29-15-48-40 +19 -0
- train_960h_hubert_large/modified_beam_search/log-decode-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model-2022-09-29-15-50-44 +47 -0
- train_960h_hubert_large/modified_beam_search/log-decode-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model-2022-09-29-17-18-28 +23 -0
- train_960h_hubert_large/modified_beam_search/recogs-test-clean-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt +0 -0
- train_960h_hubert_large/modified_beam_search/recogs-test-other-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt +0 -0
- train_960h_hubert_large/modified_beam_search/wer-summary-test-clean-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt +2 -0
- train_960h_hubert_large/modified_beam_search/wer-summary-test-other-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt +2 -0
- train_960h_hubert_large/tensorboard/events.out.tfevents.1662251451.de-74279-k2-train-1-0307195509-567fcb96d6-kdztg.296923.0 +3 -0
- train_960h_hubert_large/tensorboard/events.out.tfevents.1662356744.de-74279-k2-train-1-0307195509-567fcb96d6-kdztg.3343040.0 +3 -0
- train_960h_hubert_large/tensorboard/events.out.tfevents.1662453430.de-74279-k2-train-1-0307195509-567fcb96d6-kdztg.2420747.0 +3 -0
- train_960h_hubert_large/tensorboard/events.out.tfevents.1662459882.de-74279-k2-train-1-0307195509-567fcb96d6-kdztg.2572016.0 +3 -0
- train_960h_hubert_large/tensorboard/events.out.tfevents.1662473223.de-74279-k2-train-1-0307195509-567fcb96d6-kdztg.2752641.0 +3 -0
- train_960h_hubert_large/tensorboard/events.out.tfevents.1662597179.de-74279-k2-train-1-0307195509-567fcb96d6-kdztg.2059958.0 +3 -0
- train_960h_hubert_large/tensorboard/events.out.tfevents.1662792497.de-74279-k2-train-6-0701202559-8476c48f5f-xmr4s.274438.0 +3 -0
train_960h_hubert_large/epoch-12-avg-7.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4e7e6c9ccc9b3326e0aeeb75c4c6a3ed180787368ada6fc007c9f73bdfa08291
size 1278592845
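
Note: epoch-12-avg-7.pt is stored as a Git LFS pointer (the version/oid/size lines above); the roughly 1.2 GB checkpoint itself lives in LFS storage and is only materialized after running "git lfs pull". Below is a minimal sketch of inspecting it once fetched, assuming it is an ordinary PyTorch checkpoint holding the averaged model's state_dict (the usual icefall convention for epoch-X-avg-Y.pt files); the key layout is an assumption, so adjust it if it differs.

    import torch

    # Hypothetical local path after "git lfs pull"; the checkpoint layout
    # ({"model": state_dict} vs. a bare state_dict) is an assumption.
    ckpt = torch.load("train_960h_hubert_large/epoch-12-avg-7.pt", map_location="cpu")
    state_dict = ckpt.get("model", ckpt) if isinstance(ckpt, dict) else ckpt
    num_params = sum(v.numel() for v in state_dict.values() if hasattr(v, "numel"))
    print(num_params)  # the decode logs report 319603304 model parameters
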
train_960h_hubert_large/modified_beam_search/errs-test-clean-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt
ADDED
The diff for this file is too large to render.
train_960h_hubert_large/modified_beam_search/errs-test-other-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt
ADDED
The diff for this file is too large to render.
train_960h_hubert_large/modified_beam_search/log-decode-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model-2022-09-29-15-45-49
ADDED
@@ -0,0 +1,18 @@
2022-09-29 15:45:49,073 INFO [decode.py:463] Decoding started
2022-09-29 15:45:49,073 INFO [decode.py:469] Device: cuda:0
2022-09-29 15:45:49,075 INFO [decode.py:479] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'encoder_dim': 1024, 'decoder_dim': 640, 'joiner_dim': 640, 'env_info': {'k2-version': '1.19', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '13f26eaae8ce253e229189b830c01d1c65cd8516', 'k2-git-date': 'Tue Aug 23 03:56:40 2022', 'lhotse-version': '1.5.0', 'torch-version': '1.12.0', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.9', 'icefall-git-branch': 'finetune_hubert', 'icefall-git-sha1': '4078aff-dirty', 'icefall-git-date': 'Tue Sep 27 16:19:22 2022', 'icefall-path': '/ceph-data4/yangxiaoyu/softwares/icefall_development/icefall_forked', 'k2-path': '/ceph-data4/yangxiaoyu/softwares/anaconda3/envs/k2/lib/python3.9/site-packages/k2/__init__.py', 'lhotse-path': '/ceph-data4/yangxiaoyu/softwares/anaconda3/envs/k2/lib/python3.9/site-packages/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-1-0307195509-567fcb96d6-kdztg', 'IP address': '10.177.22.10'}, 'epoch': 12, 'iter': 0, 'avg': 7, 'use_averaged_model': True, 'exp_dir': PosixPath('finetune_hubert_transducer/exp_960h_TSA_freeze10k_normalized_total32k_with_musan'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'decoding_method': 'modified_beam_search', 'beam_size': 4, 'beam': 4, 'max_contexts': 4, 'max_states': 8, 'context_size': 2, 'max_sym_per_frame': 1, 'full_libri': True, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 600, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'AudioSamples', 'hubert_model_dir': '/ceph-data4/yangxiaoyu/pretrained_models/hubert_large_ll60k.pt', 'hubert_freeze_finetune_updates': 0, 'hubert_mask_prob': 0.65, 'hubert_mask_channel_prob': 0.5, 'hubert_mask_channel_length': 64, 'hubert_subsample_output': True, 'hubert_subsample_mode': 'concat_tanh', 'res_dir': PosixPath('finetune_hubert_transducer/exp_960h_TSA_freeze10k_normalized_total32k_with_musan/modified_beam_search'), 'suffix': 'epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model', 'blank_id': 0, 'unk_id': 2, 'vocab_size': 500}
2022-09-29 15:45:49,076 INFO [decode.py:481] About to create model
2022-09-29 15:45:51,596 INFO [hubert_pretraining.py:116] current directory is /ceph-data4/yangxiaoyu/softwares/icefall_development/icefall_forked/egs/librispeech/ASR
2022-09-29 15:45:51,596 INFO [hubert_pretraining.py:117] HubertPretrainingTask Config {'_name': 'hubert_pretraining', 'data': '/checkpoint/wnhsu/data/librivox', 'fine_tuning': False, 'labels': ['lyr9.km500'], 'label_dir': '/checkpoint/wnhsu/experiments/hubert/kmeans_20210121/km_dataset_librivox.model_iter_2.all', 'label_rate': 50.0, 'sample_rate': 16000, 'normalize': True, 'enable_padding': False, 'max_keep_size': None, 'max_sample_size': 250000, 'min_sample_size': 32000, 'single_target': False, 'random_crop': True, 'pad_audio': False}
2022-09-29 15:45:51,613 INFO [hubert.py:250] HubertModel Config: {'_name': 'hubert', 'label_rate': 50.0, 'extractor_mode': layer_norm, 'encoder_layers': 24, 'encoder_embed_dim': 1024, 'encoder_ffn_embed_dim': 4096, 'encoder_attention_heads': 16, 'activation_fn': gelu, 'layer_type': transformer, 'dropout': 0.0, 'attention_dropout': 0.0, 'activation_dropout': 0.0, 'encoder_layerdrop': 0.0, 'dropout_input': 0.0, 'dropout_features': 0.0, 'final_dim': 768, 'untie_final_proj': True, 'layer_norm_first': True, 'conv_feature_layers': '[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2', 'conv_bias': False, 'logit_temp': 0.1, 'target_glu': False, 'feature_grad_mult': 1.0, 'mask_length': 10, 'mask_prob': 0.8, 'mask_selection': static, 'mask_other': 0.0, 'no_mask_overlap': False, 'mask_min_space': 1, 'mask_channel_length': 10, 'mask_channel_prob': 0.0, 'mask_channel_selection': static, 'mask_channel_other': 0.0, 'no_mask_channel_overlap': False, 'mask_channel_min_space': 1, 'conv_pos': 128, 'conv_pos_groups': 16, 'latent_temp': [2.0, 0.5, 0.999995], 'skip_masked': False, 'skip_nomask': True, 'checkpoint_activations': False, 'required_seq_len_multiple': 2, 'depthwise_conv_kernel_size': 31, 'attn_type': '', 'pos_enc_type': 'abs', 'fp16': False}
2022-09-29 15:45:59,200 WARNING [hubert_encoder.py:51] Overwriting mask channel length to 64
2022-09-29 15:45:59,201 WARNING [hubert_encoder.py:58] Overwriting mask channel prob to 0.5. Original ckpt: 0.0
2022-09-29 15:45:59,201 WARNING [hubert_encoder.py:65] Overwriting mask prob to 0.65. Original ckpt to 0.8
2022-09-29 15:45:59,221 INFO [hubert_encoder.py:85] Subsample output!}
2022-09-29 15:45:59,221 INFO [hubert_encoder.py:86] Sequential(
  (0): Dropout(p=0.1, inplace=False)
  (1): Linear(in_features=2048, out_features=1024, bias=True)
  (2): Tanh()
)
2022-09-29 15:45:59,268 INFO [decode.py:484] Set hubert encoder training to false
2022-09-29 15:45:59,269 INFO [decode.py:552] Calculating the averaged model over epoch range from 5 (excluded) to 12
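
The last line above corresponds to --epoch 12 --avg 7 with --use-averaged-model: the decoded model is averaged over the epoch range from 5 (excluded) to 12, i.e. seven epochs' worth of parameters. A simplified sketch of plain checkpoint averaging over epoch-6.pt through epoch-12.pt is shown below. Note that use_averaged_model=True in icefall actually combines the running "model_avg" tensors stored in the epoch-5 and epoch-12 checkpoints rather than naively averaging seven files, and integer buffers cannot be averaged at all, which is why later logs report "Skipping loading a Long tensor encoder.num_updates". Paths and key names here are illustrative assumptions.

    import torch

    paths = [f"exp/epoch-{e}.pt" for e in range(6, 13)]  # hypothetical exp dir
    avg = None
    for p in paths:
        sd = torch.load(p, map_location="cpu")["model"]  # assumed checkpoint layout
        if avg is None:
            # keep only floating-point tensors; Long buffers such as
            # encoder.num_updates cannot be meaningfully averaged
            avg = {k: v.float().clone() for k, v in sd.items() if v.is_floating_point()}
        else:
            for k in avg:
                avg[k] += sd[k].float()
    avg = {k: v / len(paths) for k, v in avg.items()}  # parameter-wise mean
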
train_960h_hubert_large/modified_beam_search/log-decode-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model-2022-09-29-15-48-40
ADDED
@@ -0,0 +1,19 @@
2022-09-29 15:48:40,404 INFO [decode.py:463] Decoding started
2022-09-29 15:48:40,405 INFO [decode.py:469] Device: cuda:0
2022-09-29 15:48:40,408 INFO [decode.py:479] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'encoder_dim': 1024, 'decoder_dim': 640, 'joiner_dim': 640, 'env_info': {'k2-version': '1.19', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '13f26eaae8ce253e229189b830c01d1c65cd8516', 'k2-git-date': 'Tue Aug 23 03:56:40 2022', 'lhotse-version': '1.5.0', 'torch-version': '1.12.0', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.9', 'icefall-git-branch': 'finetune_hubert', 'icefall-git-sha1': '4078aff-dirty', 'icefall-git-date': 'Tue Sep 27 16:19:22 2022', 'icefall-path': '/ceph-data4/yangxiaoyu/softwares/icefall_development/icefall_forked', 'k2-path': '/ceph-data4/yangxiaoyu/softwares/anaconda3/envs/k2/lib/python3.9/site-packages/k2/__init__.py', 'lhotse-path': '/ceph-data4/yangxiaoyu/softwares/anaconda3/envs/k2/lib/python3.9/site-packages/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-7-0616225511-78bf4545d8-tv52r', 'IP address': '10.177.77.9'}, 'epoch': 12, 'iter': 0, 'avg': 7, 'use_averaged_model': True, 'exp_dir': PosixPath('finetune_hubert_transducer/exp_960h_TSA_freeze10k_normalized_total32k_with_musan'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'decoding_method': 'modified_beam_search', 'beam_size': 4, 'beam': 4, 'max_contexts': 4, 'max_states': 8, 'context_size': 2, 'max_sym_per_frame': 1, 'full_libri': True, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 600, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'AudioSamples', 'hubert_model_dir': '/ceph-data4/yangxiaoyu/pretrained_models/hubert_large_ll60k.pt', 'hubert_freeze_finetune_updates': 0, 'hubert_mask_prob': 0.65, 'hubert_mask_channel_prob': 0.5, 'hubert_mask_channel_length': 64, 'hubert_subsample_output': True, 'hubert_subsample_mode': 'concat_tanh', 'res_dir': PosixPath('finetune_hubert_transducer/exp_960h_TSA_freeze10k_normalized_total32k_with_musan/modified_beam_search'), 'suffix': 'epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model', 'blank_id': 0, 'unk_id': 2, 'vocab_size': 500}
2022-09-29 15:48:40,408 INFO [decode.py:481] About to create model
2022-09-29 15:48:48,797 INFO [hubert_pretraining.py:116] current directory is /ceph-data4/yangxiaoyu/softwares/icefall_development/icefall_forked/egs/librispeech/ASR
2022-09-29 15:48:48,797 INFO [hubert_pretraining.py:117] HubertPretrainingTask Config {'_name': 'hubert_pretraining', 'data': '/checkpoint/wnhsu/data/librivox', 'fine_tuning': False, 'labels': ['lyr9.km500'], 'label_dir': '/checkpoint/wnhsu/experiments/hubert/kmeans_20210121/km_dataset_librivox.model_iter_2.all', 'label_rate': 50.0, 'sample_rate': 16000, 'normalize': True, 'enable_padding': False, 'max_keep_size': None, 'max_sample_size': 250000, 'min_sample_size': 32000, 'single_target': False, 'random_crop': True, 'pad_audio': False}
2022-09-29 15:48:48,829 INFO [hubert.py:250] HubertModel Config: {'_name': 'hubert', 'label_rate': 50.0, 'extractor_mode': layer_norm, 'encoder_layers': 24, 'encoder_embed_dim': 1024, 'encoder_ffn_embed_dim': 4096, 'encoder_attention_heads': 16, 'activation_fn': gelu, 'layer_type': transformer, 'dropout': 0.0, 'attention_dropout': 0.0, 'activation_dropout': 0.0, 'encoder_layerdrop': 0.0, 'dropout_input': 0.0, 'dropout_features': 0.0, 'final_dim': 768, 'untie_final_proj': True, 'layer_norm_first': True, 'conv_feature_layers': '[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2', 'conv_bias': False, 'logit_temp': 0.1, 'target_glu': False, 'feature_grad_mult': 1.0, 'mask_length': 10, 'mask_prob': 0.8, 'mask_selection': static, 'mask_other': 0.0, 'no_mask_overlap': False, 'mask_min_space': 1, 'mask_channel_length': 10, 'mask_channel_prob': 0.0, 'mask_channel_selection': static, 'mask_channel_other': 0.0, 'no_mask_channel_overlap': False, 'mask_channel_min_space': 1, 'conv_pos': 128, 'conv_pos_groups': 16, 'latent_temp': [2.0, 0.5, 0.999995], 'skip_masked': False, 'skip_nomask': True, 'checkpoint_activations': False, 'required_seq_len_multiple': 2, 'depthwise_conv_kernel_size': 31, 'attn_type': '', 'pos_enc_type': 'abs', 'fp16': False}
2022-09-29 15:48:59,600 WARNING [hubert_encoder.py:51] Overwriting mask channel length to 64
2022-09-29 15:48:59,600 WARNING [hubert_encoder.py:58] Overwriting mask channel prob to 0.5. Original ckpt: 0.0
2022-09-29 15:48:59,600 WARNING [hubert_encoder.py:65] Overwriting mask prob to 0.65. Original ckpt to 0.8
2022-09-29 15:48:59,625 INFO [hubert_encoder.py:85] Subsample output!}
2022-09-29 15:48:59,625 INFO [hubert_encoder.py:86] Sequential(
  (0): Dropout(p=0.1, inplace=False)
  (1): Linear(in_features=2048, out_features=1024, bias=True)
  (2): Tanh()
)
2022-09-29 15:48:59,683 INFO [decode.py:484] Set hubert encoder training to false
2022-09-29 15:48:59,683 INFO [decode.py:552] Calculating the averaged model over epoch range from 5 (excluded) to 12
2022-09-29 15:49:39,142 INFO [checkpoint.py:470] Skipping loading a Long tensor encoder.num_updates
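
Each log also prints the subsampling head applied to the 50 Hz HuBERT encoder output: Dropout -> Linear(2048, 1024) -> Tanh, selected by hubert_subsample_mode='concat_tanh'. The 2048-dimensional input suggests that pairs of adjacent 1024-dim frames are concatenated and projected back to 1024, halving the frame rate before the transducer. The actual pairing logic lives in hubert_encoder.py, which is not part of this commit, so the sketch below is an assumption rather than the recipe's real code.

    import torch
    import torch.nn as nn

    class ConcatTanhSubsample(nn.Module):
        """Assumed 'concat_tanh' subsampling: pair adjacent frames, project, Tanh."""

        def __init__(self, dim: int = 1024, dropout: float = 0.1):
            super().__init__()
            self.proj = nn.Sequential(
                nn.Dropout(p=dropout),
                nn.Linear(2 * dim, dim),
                nn.Tanh(),
            )

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # x: (batch, T, dim); drop a trailing frame if T is odd, then pair frames
            b, t, d = x.shape
            x = x[:, : t - (t % 2), :].reshape(b, t // 2, 2 * d)
            return self.proj(x)

    y = ConcatTanhSubsample()(torch.randn(2, 100, 1024))
    print(y.shape)  # torch.Size([2, 50, 1024])
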
train_960h_hubert_large/modified_beam_search/log-decode-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model-2022-09-29-15-50-44
ADDED
@@ -0,0 +1,47 @@
2022-09-29 15:50:44,861 INFO [decode.py:463] Decoding started
2022-09-29 15:50:44,862 INFO [decode.py:469] Device: cuda:0
2022-09-29 15:50:44,866 INFO [decode.py:479] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'encoder_dim': 1024, 'decoder_dim': 512, 'joiner_dim': 512, 'env_info': {'k2-version': '1.19', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '13f26eaae8ce253e229189b830c01d1c65cd8516', 'k2-git-date': 'Tue Aug 23 03:56:40 2022', 'lhotse-version': '1.5.0', 'torch-version': '1.12.0', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.9', 'icefall-git-branch': 'finetune_hubert', 'icefall-git-sha1': '4078aff-dirty', 'icefall-git-date': 'Tue Sep 27 16:19:22 2022', 'icefall-path': '/ceph-data4/yangxiaoyu/softwares/icefall_development/icefall_forked', 'k2-path': '/ceph-data4/yangxiaoyu/softwares/anaconda3/envs/k2/lib/python3.9/site-packages/k2/__init__.py', 'lhotse-path': '/ceph-data4/yangxiaoyu/softwares/anaconda3/envs/k2/lib/python3.9/site-packages/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-7-0616225511-78bf4545d8-tv52r', 'IP address': '10.177.77.9'}, 'epoch': 12, 'iter': 0, 'avg': 7, 'use_averaged_model': True, 'exp_dir': PosixPath('finetune_hubert_transducer/exp_960h_TSA_freeze10k_normalized_total32k_with_musan'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'decoding_method': 'modified_beam_search', 'beam_size': 4, 'beam': 4, 'max_contexts': 4, 'max_states': 8, 'context_size': 2, 'max_sym_per_frame': 1, 'full_libri': True, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 600, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'AudioSamples', 'hubert_model_dir': '/ceph-data4/yangxiaoyu/pretrained_models/hubert_large_ll60k.pt', 'hubert_freeze_finetune_updates': 0, 'hubert_mask_prob': 0.65, 'hubert_mask_channel_prob': 0.5, 'hubert_mask_channel_length': 64, 'hubert_subsample_output': True, 'hubert_subsample_mode': 'concat_tanh', 'res_dir': PosixPath('finetune_hubert_transducer/exp_960h_TSA_freeze10k_normalized_total32k_with_musan/modified_beam_search'), 'suffix': 'epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model', 'blank_id': 0, 'unk_id': 2, 'vocab_size': 500}
2022-09-29 15:50:44,866 INFO [decode.py:481] About to create model
2022-09-29 15:50:48,361 INFO [hubert_pretraining.py:116] current directory is /ceph-data4/yangxiaoyu/softwares/icefall_development/icefall_forked/egs/librispeech/ASR
2022-09-29 15:50:48,361 INFO [hubert_pretraining.py:117] HubertPretrainingTask Config {'_name': 'hubert_pretraining', 'data': '/checkpoint/wnhsu/data/librivox', 'fine_tuning': False, 'labels': ['lyr9.km500'], 'label_dir': '/checkpoint/wnhsu/experiments/hubert/kmeans_20210121/km_dataset_librivox.model_iter_2.all', 'label_rate': 50.0, 'sample_rate': 16000, 'normalize': True, 'enable_padding': False, 'max_keep_size': None, 'max_sample_size': 250000, 'min_sample_size': 32000, 'single_target': False, 'random_crop': True, 'pad_audio': False}
2022-09-29 15:50:48,384 INFO [hubert.py:250] HubertModel Config: {'_name': 'hubert', 'label_rate': 50.0, 'extractor_mode': layer_norm, 'encoder_layers': 24, 'encoder_embed_dim': 1024, 'encoder_ffn_embed_dim': 4096, 'encoder_attention_heads': 16, 'activation_fn': gelu, 'layer_type': transformer, 'dropout': 0.0, 'attention_dropout': 0.0, 'activation_dropout': 0.0, 'encoder_layerdrop': 0.0, 'dropout_input': 0.0, 'dropout_features': 0.0, 'final_dim': 768, 'untie_final_proj': True, 'layer_norm_first': True, 'conv_feature_layers': '[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2', 'conv_bias': False, 'logit_temp': 0.1, 'target_glu': False, 'feature_grad_mult': 1.0, 'mask_length': 10, 'mask_prob': 0.8, 'mask_selection': static, 'mask_other': 0.0, 'no_mask_overlap': False, 'mask_min_space': 1, 'mask_channel_length': 10, 'mask_channel_prob': 0.0, 'mask_channel_selection': static, 'mask_channel_other': 0.0, 'no_mask_channel_overlap': False, 'mask_channel_min_space': 1, 'conv_pos': 128, 'conv_pos_groups': 16, 'latent_temp': [2.0, 0.5, 0.999995], 'skip_masked': False, 'skip_nomask': True, 'checkpoint_activations': False, 'required_seq_len_multiple': 2, 'depthwise_conv_kernel_size': 31, 'attn_type': '', 'pos_enc_type': 'abs', 'fp16': False}
2022-09-29 15:50:57,395 WARNING [hubert_encoder.py:51] Overwriting mask channel length to 64
2022-09-29 15:50:57,395 WARNING [hubert_encoder.py:58] Overwriting mask channel prob to 0.5. Original ckpt: 0.0
2022-09-29 15:50:57,395 WARNING [hubert_encoder.py:65] Overwriting mask prob to 0.65. Original ckpt to 0.8
2022-09-29 15:50:57,420 INFO [hubert_encoder.py:85] Subsample output!}
2022-09-29 15:50:57,420 INFO [hubert_encoder.py:86] Sequential(
  (0): Dropout(p=0.1, inplace=False)
  (1): Linear(in_features=2048, out_features=1024, bias=True)
  (2): Tanh()
)
2022-09-29 15:50:57,494 INFO [decode.py:484] Set hubert encoder training to false
2022-09-29 15:50:57,494 INFO [decode.py:552] Calculating the averaged model over epoch range from 5 (excluded) to 12
2022-09-29 15:51:26,843 INFO [checkpoint.py:470] Skipping loading a Long tensor encoder.num_updates
2022-09-29 15:51:26,988 INFO [decode.py:576] Number of model parameters: 319603304
2022-09-29 15:51:26,988 INFO [asr_datamodule.py:445] About to get test-clean cuts
2022-09-29 15:51:26,991 INFO [asr_datamodule.py:452] About to get test-other cuts
2022-09-29 15:51:35,733 INFO [decode.py:374] batch 0/?, cuts processed until now is 43
2022-09-29 15:52:34,361 INFO [decode.py:374] batch 10/?, cuts processed until now is 638
2022-09-29 15:53:30,011 INFO [decode.py:374] batch 20/?, cuts processed until now is 1432
2022-09-29 15:54:24,723 INFO [decode.py:374] batch 30/?, cuts processed until now is 2090
2022-09-29 15:55:01,686 INFO [decode.py:374] batch 40/?, cuts processed until now is 2574
2022-09-29 15:55:05,379 INFO [decode.py:392] The transcripts are stored in finetune_hubert_transducer/exp_960h_TSA_freeze10k_normalized_total32k_with_musan/modified_beam_search/recogs-test-clean-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt
2022-09-29 15:55:05,467 INFO [utils.py:429] [test-clean-beam_size_4] %WER 1.93% [1015 / 52576, 98 ins, 81 del, 836 sub ]
2022-09-29 15:55:05,680 INFO [decode.py:405] Wrote detailed error stats to finetune_hubert_transducer/exp_960h_TSA_freeze10k_normalized_total32k_with_musan/modified_beam_search/errs-test-clean-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt
2022-09-29 15:55:05,681 INFO [decode.py:422]
For test-clean, WER of different settings are:
beam_size_4 1.93 best for test-clean

2022-09-29 15:55:13,047 INFO [decode.py:374] batch 0/?, cuts processed until now is 52
2022-09-29 15:56:08,870 INFO [decode.py:374] batch 10/?, cuts processed until now is 742
2022-09-29 15:57:02,236 INFO [decode.py:374] batch 20/?, cuts processed until now is 1647
2022-09-29 15:57:58,256 INFO [decode.py:374] batch 30/?, cuts processed until now is 2388
2022-09-29 15:58:32,252 INFO [decode.py:374] batch 40/?, cuts processed until now is 2870
2022-09-29 15:58:35,985 INFO [decode.py:392] The transcripts are stored in finetune_hubert_transducer/exp_960h_TSA_freeze10k_normalized_total32k_with_musan/modified_beam_search/recogs-test-other-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt
2022-09-29 15:58:36,076 INFO [utils.py:429] [test-other-beam_size_4] %WER 3.93% [2058 / 52343, 186 ins, 168 del, 1704 sub ]
2022-09-29 15:58:36,376 INFO [decode.py:405] Wrote detailed error stats to finetune_hubert_transducer/exp_960h_TSA_freeze10k_normalized_total32k_with_musan/modified_beam_search/errs-test-other-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt
2022-09-29 15:58:36,377 INFO [decode.py:422]
For test-other, WER of different settings are:
beam_size_4 3.93 best for test-other

2022-09-29 15:58:36,377 INFO [decode.py:606] Done!
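
The bracketed error counts above are enough to reproduce the summary numbers: WER = (insertions + deletions + substitutions) / reference words, so test-clean is (98 + 81 + 836) / 52576 = 1.93% and test-other is (186 + 168 + 1704) / 52343 = 3.93%, matching the wer-summary files further below.

    # Reproducing the reported WERs from the error counts in this log.
    def wer_percent(ins: int, dels: int, subs: int, ref_words: int) -> float:
        return 100.0 * (ins + dels + subs) / ref_words

    print(round(wer_percent(98, 81, 836, 52576), 2))     # 1.93  (test-clean)
    print(round(wer_percent(186, 168, 1704, 52343), 2))  # 3.93  (test-other)
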
train_960h_hubert_large/modified_beam_search/log-decode-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model-2022-09-29-17-18-28
ADDED
@@ -0,0 +1,23 @@
2022-09-29 17:18:28,616 INFO [decode.py:463] Decoding started
2022-09-29 17:18:28,617 INFO [decode.py:469] Device: cuda:0
2022-09-29 17:18:28,619 INFO [decode.py:479] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'encoder_dim': 1024, 'decoder_dim': 512, 'joiner_dim': 512, 'env_info': {'k2-version': '1.19', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '13f26eaae8ce253e229189b830c01d1c65cd8516', 'k2-git-date': 'Tue Aug 23 03:56:40 2022', 'lhotse-version': '1.5.0', 'torch-version': '1.12.0', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.9', 'icefall-git-branch': 'finetune_hubert', 'icefall-git-sha1': '4078aff-dirty', 'icefall-git-date': 'Tue Sep 27 16:19:22 2022', 'icefall-path': '/ceph-data4/yangxiaoyu/softwares/icefall_development/icefall_forked', 'k2-path': '/ceph-data4/yangxiaoyu/softwares/anaconda3/envs/k2/lib/python3.9/site-packages/k2/__init__.py', 'lhotse-path': '/ceph-data4/yangxiaoyu/softwares/anaconda3/envs/k2/lib/python3.9/site-packages/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-7-0616225511-78bf4545d8-tv52r', 'IP address': '10.177.77.9'}, 'epoch': 12, 'iter': 0, 'avg': 7, 'use_averaged_model': True, 'exp_dir': PosixPath('finetune_hubert_transducer/exp_960h_TSA_freeze10k_normalized_total32k_with_musan'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'decoding_method': 'modified_beam_search', 'beam_size': 4, 'beam': 4, 'max_contexts': 4, 'max_states': 8, 'context_size': 2, 'max_sym_per_frame': 1, 'full_libri': True, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 600, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'AudioSamples', 'hubert_model_dir': '/ceph-data4/yangxiaoyu/pretrained_models/hubert_large_ll60k.pt', 'hubert_freeze_finetune_updates': 0, 'hubert_mask_prob': 0.65, 'hubert_mask_channel_prob': 0.5, 'hubert_mask_channel_length': 64, 'hubert_subsample_output': True, 'hubert_subsample_mode': 'concat_tanh', 'res_dir': PosixPath('finetune_hubert_transducer/exp_960h_TSA_freeze10k_normalized_total32k_with_musan/modified_beam_search'), 'suffix': 'epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model', 'blank_id': 0, 'unk_id': 2, 'vocab_size': 500}
2022-09-29 17:18:28,620 INFO [decode.py:481] About to create model
2022-09-29 17:18:35,266 INFO [hubert_pretraining.py:116] current directory is /ceph-data4/yangxiaoyu/softwares/icefall_development/icefall_forked/egs/librispeech/ASR
2022-09-29 17:18:35,266 INFO [hubert_pretraining.py:117] HubertPretrainingTask Config {'_name': 'hubert_pretraining', 'data': '/checkpoint/wnhsu/data/librivox', 'fine_tuning': False, 'labels': ['lyr9.km500'], 'label_dir': '/checkpoint/wnhsu/experiments/hubert/kmeans_20210121/km_dataset_librivox.model_iter_2.all', 'label_rate': 50.0, 'sample_rate': 16000, 'normalize': True, 'enable_padding': False, 'max_keep_size': None, 'max_sample_size': 250000, 'min_sample_size': 32000, 'single_target': False, 'random_crop': True, 'pad_audio': False}
2022-09-29 17:18:35,287 INFO [hubert.py:250] HubertModel Config: {'_name': 'hubert', 'label_rate': 50.0, 'extractor_mode': layer_norm, 'encoder_layers': 24, 'encoder_embed_dim': 1024, 'encoder_ffn_embed_dim': 4096, 'encoder_attention_heads': 16, 'activation_fn': gelu, 'layer_type': transformer, 'dropout': 0.0, 'attention_dropout': 0.0, 'activation_dropout': 0.0, 'encoder_layerdrop': 0.0, 'dropout_input': 0.0, 'dropout_features': 0.0, 'final_dim': 768, 'untie_final_proj': True, 'layer_norm_first': True, 'conv_feature_layers': '[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2', 'conv_bias': False, 'logit_temp': 0.1, 'target_glu': False, 'feature_grad_mult': 1.0, 'mask_length': 10, 'mask_prob': 0.8, 'mask_selection': static, 'mask_other': 0.0, 'no_mask_overlap': False, 'mask_min_space': 1, 'mask_channel_length': 10, 'mask_channel_prob': 0.0, 'mask_channel_selection': static, 'mask_channel_other': 0.0, 'no_mask_channel_overlap': False, 'mask_channel_min_space': 1, 'conv_pos': 128, 'conv_pos_groups': 16, 'latent_temp': [2.0, 0.5, 0.999995], 'skip_masked': False, 'skip_nomask': True, 'checkpoint_activations': False, 'required_seq_len_multiple': 2, 'depthwise_conv_kernel_size': 31, 'attn_type': '', 'pos_enc_type': 'abs', 'fp16': False}
2022-09-29 17:18:45,573 WARNING [hubert_encoder.py:51] Overwriting mask channel length to 64
2022-09-29 17:18:45,574 WARNING [hubert_encoder.py:58] Overwriting mask channel prob to 0.5. Original ckpt: 0.0
2022-09-29 17:18:45,574 WARNING [hubert_encoder.py:65] Overwriting mask prob to 0.65. Original ckpt to 0.8
2022-09-29 17:18:45,598 INFO [hubert_encoder.py:85] Subsample output!}
2022-09-29 17:18:45,598 INFO [hubert_encoder.py:86] Sequential(
  (0): Dropout(p=0.1, inplace=False)
  (1): Linear(in_features=2048, out_features=1024, bias=True)
  (2): Tanh()
)
2022-09-29 17:18:45,647 INFO [decode.py:484] Set hubert encoder training to false
2022-09-29 17:18:45,647 INFO [decode.py:552] Calculating the averaged model over epoch range from 5 (excluded) to 12
2022-09-29 17:19:12,522 INFO [checkpoint.py:470] Skipping loading a Long tensor encoder.num_updates
2022-09-29 17:19:15,792 INFO [decode.py:565] Saved
2022-09-29 17:19:15,800 INFO [decode.py:576] Number of model parameters: 319603304
2022-09-29 17:19:15,801 INFO [asr_datamodule.py:445] About to get test-clean cuts
2022-09-29 17:19:15,803 INFO [asr_datamodule.py:452] About to get test-other cuts
train_960h_hubert_large/modified_beam_search/recogs-test-clean-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt
ADDED
The diff for this file is too large to render.
train_960h_hubert_large/modified_beam_search/recogs-test-other-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt
ADDED
The diff for this file is too large to render.
train_960h_hubert_large/modified_beam_search/wer-summary-test-clean-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt
ADDED
@@ -0,0 +1,2 @@
settings WER
beam_size_4 1.93
train_960h_hubert_large/modified_beam_search/wer-summary-test-other-beam_size_4-epoch-12-avg-7-modified_beam_search-beam-size-4-use-averaged-model.txt
ADDED
@@ -0,0 +1,2 @@
settings WER
beam_size_4 3.93
train_960h_hubert_large/tensorboard/events.out.tfevents.1662251451.de-74279-k2-train-1-0307195509-567fcb96d6-kdztg.296923.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2fe393b7d776578625de1be60723a4884457cc1f38c717ffba29c5624f236ac8
size 1119373
train_960h_hubert_large/tensorboard/events.out.tfevents.1662356744.de-74279-k2-train-1-0307195509-567fcb96d6-kdztg.3343040.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3f11a6c8eb5c788d13b024e9c1d462e1a012a92ee2fa44117c2fbdabb9cefa4c
size 1171038
train_960h_hubert_large/tensorboard/events.out.tfevents.1662453430.de-74279-k2-train-1-0307195509-567fcb96d6-kdztg.2420747.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3d74368f6646f0dcc7a8647e0fe5242364057038be187ab0f76afb2422113623
size 55687
train_960h_hubert_large/tensorboard/events.out.tfevents.1662459882.de-74279-k2-train-1-0307195509-567fcb96d6-kdztg.2572016.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2e78680e2c4b00a6bfae056bc0ed14e2071730f88d696d6d23d93e4dbd59b058
size 61906
train_960h_hubert_large/tensorboard/events.out.tfevents.1662473223.de-74279-k2-train-1-0307195509-567fcb96d6-kdztg.2752641.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1cec4ba203d51e2ce766a308144ba8d229b7322619b719a0f9034cb55a065d6f
size 1223870
train_960h_hubert_large/tensorboard/events.out.tfevents.1662597179.de-74279-k2-train-1-0307195509-567fcb96d6-kdztg.2059958.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:774b516c34f121051bc75be77bd895cea25c0b619f64ebbbcb59aa3f6d2ceeda
size 1537134
train_960h_hubert_large/tensorboard/events.out.tfevents.1662792497.de-74279-k2-train-6-0701202559-8476c48f5f-xmr4s.274438.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4e6215aa6eaa213926cfe729cebd500a1673905712c4816ad5d5329cd7edf617
size 40
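
The events.out.tfevents.* files are likewise Git LFS pointers; once fetched they can be browsed with "tensorboard --logdir train_960h_hubert_large/tensorboard" or read programmatically, as in the sketch below. The scalar tag name is an assumption: list the available tags first and use whichever ones were actually logged during fine-tuning.

    from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

    acc = EventAccumulator("train_960h_hubert_large/tensorboard")
    acc.Reload()
    print(acc.Tags()["scalars"])              # see which scalar tags exist
    for ev in acc.Scalars("train/tot_loss"):  # hypothetical tag name
        print(ev.step, ev.value)
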