Siddhant committed on
Commit
e00ed6b
1 Parent(s): 10f3d01

import from zenodo

Browse files
Files changed (29) hide show
  1. README.md +50 -0
  2. exp/asr_stats_raw/train/feats_stats.npz +0 -0
  3. exp/asr_train_asr_transformer_raw_char/RESULTS.md +37 -0
  4. exp/asr_train_asr_transformer_raw_char/config.yaml +207 -0
  5. exp/asr_train_asr_transformer_raw_char/images/acc.png +0 -0
  6. exp/asr_train_asr_transformer_raw_char/images/backward_time.png +0 -0
  7. exp/asr_train_asr_transformer_raw_char/images/cer.png +0 -0
  8. exp/asr_train_asr_transformer_raw_char/images/cer_ctc.png +0 -0
  9. exp/asr_train_asr_transformer_raw_char/images/forward_time.png +0 -0
  10. exp/asr_train_asr_transformer_raw_char/images/iter_time.png +0 -0
  11. exp/asr_train_asr_transformer_raw_char/images/loss.png +0 -0
  12. exp/asr_train_asr_transformer_raw_char/images/loss_att.png +0 -0
  13. exp/asr_train_asr_transformer_raw_char/images/loss_ctc.png +0 -0
  14. exp/asr_train_asr_transformer_raw_char/images/lr_0.png +0 -0
  15. exp/asr_train_asr_transformer_raw_char/images/optim_step_time.png +0 -0
  16. exp/asr_train_asr_transformer_raw_char/images/train_time.png +0 -0
  17. exp/asr_train_asr_transformer_raw_char/images/wer.png +0 -0
  18. exp/asr_train_asr_transformer_raw_char/valid.acc.ave_10best.pth +3 -0
  19. exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/40epoch.pth +3 -0
  20. exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/config.yaml +166 -0
  21. exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/images/backward_time.png +0 -0
  22. exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/images/forward_time.png +0 -0
  23. exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/images/iter_time.png +0 -0
  24. exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/images/loss.png +0 -0
  25. exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/images/lr_0.png +0 -0
  26. exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/images/optim_step_time.png +0 -0
  27. exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/images/train_time.png +0 -0
  28. exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/perplexity_test/ppl +1 -0
  29. meta.yaml +10 -0
README.md ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ tags:
3
+ - espnet
4
+ - audio
5
+ - automatic-speech-recognition
6
+ language: en
7
+ datasets:
8
+ - wsj
9
+ license: cc-by-4.0
10
+ ---
11
+ ## Example ESPnet2 ASR model
12
+ ### `kamo-naoyuki/wsj`
13
+ ♻️ Imported from https://zenodo.org/record/4003381/
14
+
15
+ This model was trained by kamo-naoyuki using wsj/asr1 recipe in [espnet](https://github.com/espnet/espnet/).
16
+ ### Demo: How to use in ESPnet2
17
+ ```python
18
+ # coming soon
19
+ ```
20
+ ### Citing ESPnet
21
+ ```BibTeX
22
+ @inproceedings{watanabe2018espnet,
23
+ author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson {Enrique Yalta Soplin} and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
24
+ title={{ESPnet}: End-to-End Speech Processing Toolkit},
25
+ year={2018},
26
+ booktitle={Proceedings of Interspeech},
27
+ pages={2207--2211},
28
+ doi={10.21437/Interspeech.2018-1456},
29
+ url={http://dx.doi.org/10.21437/Interspeech.2018-1456}
30
+ }
31
+ @inproceedings{hayashi2020espnet,
32
+ title={{Espnet-TTS}: Unified, reproducible, and integratable open source end-to-end text-to-speech toolkit},
33
+ author={Hayashi, Tomoki and Yamamoto, Ryuichi and Inoue, Katsuki and Yoshimura, Takenori and Watanabe, Shinji and Toda, Tomoki and Takeda, Kazuya and Zhang, Yu and Tan, Xu},
34
+ booktitle={Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
35
+ pages={7654--7658},
36
+ year={2020},
37
+ organization={IEEE}
38
+ }
39
+ ```
40
+ or arXiv:
41
+ ```bibtex
42
+ @misc{watanabe2018espnet,
43
+ title={ESPnet: End-to-End Speech Processing Toolkit},
44
+ author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Enrique Yalta Soplin and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
45
+ year={2018},
46
+ eprint={1804.00015},
47
+ archivePrefix={arXiv},
48
+ primaryClass={cs.CL}
49
+ }
50
+ ```
exp/asr_stats_raw/train/feats_stats.npz ADDED
Binary file (1.4 kB). View file
 
exp/asr_train_asr_transformer_raw_char/RESULTS.md ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!-- Generated by scripts/utils/show_asr_result.sh -->
2
+ # RESULTS
3
+ ## Environments
4
+ - date: `Thu Aug 27 09:50:23 JST 2020`
5
+ - python version: `3.7.3 (default, Mar 27 2019, 22:11:17) [GCC 7.3.0]`
6
+ - espnet version: `espnet 0.9.0`
7
+ - pytorch version: `pytorch 1.6.0`
8
+ - Git hash: `4b040785069f114fe046ffa9acdd8698fdeb7f21`
9
+ - Commit date: `Tue Aug 25 17:46:25 2020 +0900`
10
+
11
+ ## asr_train_asr_transformer_raw_char
12
+ ### WER
13
+
14
+ |dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err|
15
+ |---|---|---|---|---|---|---|---|---|
16
+ |inference_lm_lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4_valid.loss.ave_asr_model_valid.acc.ave/test_dev93|503|8234|93.3|5.9|0.8|0.8|7.5|58.1|
17
+ |inference_lm_lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4_valid.loss.ave_asr_model_valid.acc.ave/test_eval92|333|5643|95.8|4.0|0.3|0.7|5.0|44.7|
18
+ |inference_lm_lm_train_lm_char_optimadam_optim_conflr0.0005_lm_confnlayers4_keep_nbest_models235810_batch_size512_valid.loss.ave_asr_model_valid.acc.ave/test_dev93|503|8234|93.2|5.9|0.8|0.9|7.6|59.0|
19
+ |inference_lm_lm_train_lm_char_optimadam_optim_conflr0.0005_lm_confnlayers4_keep_nbest_models235810_batch_size512_valid.loss.ave_asr_model_valid.acc.ave/test_eval92|333|5643|95.8|4.0|0.2|0.7|4.9|45.6|
20
+ |inference_lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4_valid.loss.best_asr_model_valid.acc.best/test_dev93|503|8234|92.3|6.8|1.0|1.0|8.8|62.2|
21
+ |inference_lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4_valid.loss.best_asr_model_valid.acc.best/test_eval92|333|5643|94.8|4.8|0.5|0.9|6.1|48.6|
22
+ |inference_lm_train_lm_char_valid.loss.best_asr_model_valid.acc.best/test_dev93|503|8234|80.9|17.4|1.7|2.6|21.7|90.7|
23
+ |inference_lm_train_lm_char_valid.loss.best_asr_model_valid.acc.best/test_eval92|333|5643|84.3|14.6|1.1|2.4|18.1|86.8|
24
+
25
+ ### CER
26
+
27
+ |dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err|
28
+ |---|---|---|---|---|---|---|---|---|
29
+ |inference_lm_lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4_valid.loss.ave_asr_model_valid.acc.ave/test_dev93|503|48634|97.6|1.1|1.3|0.6|3.0|62.8|
30
+ |inference_lm_lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4_valid.loss.ave_asr_model_valid.acc.ave/test_eval92|333|33341|98.5|0.7|0.8|0.5|2.0|53.2|
31
+ |inference_lm_lm_train_lm_char_optimadam_optim_conflr0.0005_lm_confnlayers4_keep_nbest_models235810_batch_size512_valid.loss.ave_asr_model_valid.acc.ave/test_dev93|503|48634|97.5|1.2|1.3|0.5|3.1|63.6|
32
+ |inference_lm_lm_train_lm_char_optimadam_optim_conflr0.0005_lm_confnlayers4_keep_nbest_models235810_batch_size512_valid.loss.ave_asr_model_valid.acc.ave/test_eval92|333|33341|98.5|0.7|0.8|0.5|2.0|53.2|
33
+ |inference_lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4_valid.loss.best_asr_model_valid.acc.best/test_dev93|503|48634|97.0|1.4|1.6|0.7|3.6|67.2|
34
+ |inference_lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4_valid.loss.best_asr_model_valid.acc.best/test_eval92|333|33341|98.0|0.9|1.0|0.5|2.5|56.8|
35
+ |inference_lm_train_lm_char_valid.loss.best_asr_model_valid.acc.best/test_dev93|503|48634|94.4|2.8|2.9|1.5|7.1|91.3|
36
+ |inference_lm_train_lm_char_valid.loss.best_asr_model_valid.acc.best/test_eval92|333|33341|95.6|2.2|2.1|1.3|5.7|88.9|
37
+
exp/asr_train_asr_transformer_raw_char/config.yaml ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ config: conf/train_asr_transformer.yaml
2
+ print_config: false
3
+ log_level: INFO
4
+ dry_run: false
5
+ iterator_type: sequence
6
+ output_dir: exp/asr_train_asr_transformer_raw_char
7
+ ngpu: 1
8
+ seed: 0
9
+ num_workers: 1
10
+ num_att_plot: 3
11
+ dist_backend: nccl
12
+ dist_init_method: env://
13
+ dist_world_size: null
14
+ dist_rank: null
15
+ local_rank: 0
16
+ dist_master_addr: null
17
+ dist_master_port: null
18
+ dist_launcher: null
19
+ multiprocessing_distributed: false
20
+ cudnn_enabled: true
21
+ cudnn_benchmark: false
22
+ cudnn_deterministic: true
23
+ collect_stats: false
24
+ write_collected_feats: false
25
+ max_epoch: 100
26
+ patience: null
27
+ val_scheduler_criterion:
28
+ - valid
29
+ - loss
30
+ early_stopping_criterion:
31
+ - valid
32
+ - loss
33
+ - min
34
+ best_model_criterion:
35
+ - - valid
36
+ - acc
37
+ - max
38
+ keep_nbest_models: 10
39
+ grad_clip: 5.0
40
+ grad_noise: false
41
+ accum_grad: 8
42
+ no_forward_run: false
43
+ resume: true
44
+ train_dtype: float32
45
+ use_amp: false
46
+ log_interval: null
47
+ pretrain_path: []
48
+ pretrain_key: []
49
+ num_iters_per_epoch: null
50
+ batch_size: 32
51
+ valid_batch_size: null
52
+ batch_bins: 1000000
53
+ valid_batch_bins: null
54
+ train_shape_file:
55
+ - exp/asr_stats_raw/train/speech_shape
56
+ - exp/asr_stats_raw/train/text_shape.char
57
+ valid_shape_file:
58
+ - exp/asr_stats_raw/valid/speech_shape
59
+ - exp/asr_stats_raw/valid/text_shape.char
60
+ batch_type: folded
61
+ valid_batch_type: null
62
+ fold_length:
63
+ - 80000
64
+ - 150
65
+ sort_in_batch: descending
66
+ sort_batch: descending
67
+ multiple_iterator: false
68
+ chunk_length: 500
69
+ chunk_shift_ratio: 0.5
70
+ num_cache_chunks: 1024
71
+ train_data_path_and_name_and_type:
72
+ - - dump/raw/train_si284/wav.scp
73
+ - speech
74
+ - sound
75
+ - - dump/raw/train_si284/text
76
+ - text
77
+ - text
78
+ valid_data_path_and_name_and_type:
79
+ - - dump/raw/test_dev93/wav.scp
80
+ - speech
81
+ - sound
82
+ - - dump/raw/test_dev93/text
83
+ - text
84
+ - text
85
+ allow_variable_data_keys: false
86
+ max_cache_size: 0.0
87
+ valid_max_cache_size: null
88
+ optim: adam
89
+ optim_conf:
90
+ lr: 0.005
91
+ scheduler: warmuplr
92
+ scheduler_conf:
93
+ warmup_steps: 30000
94
+ token_list:
95
+ - <blank>
96
+ - <unk>
97
+ - <space>
98
+ - E
99
+ - T
100
+ - A
101
+ - N
102
+ - I
103
+ - O
104
+ - S
105
+ - R
106
+ - H
107
+ - L
108
+ - D
109
+ - C
110
+ - U
111
+ - M
112
+ - P
113
+ - F
114
+ - G
115
+ - Y
116
+ - W
117
+ - B
118
+ - V
119
+ - K
120
+ - .
121
+ - X
122
+ - ''''
123
+ - J
124
+ - Q
125
+ - Z
126
+ - <NOISE>
127
+ - ','
128
+ - '-'
129
+ - '"'
130
+ - '*'
131
+ - ':'
132
+ - (
133
+ - )
134
+ - '?'
135
+ - '!'
136
+ - '&'
137
+ - ;
138
+ - '1'
139
+ - '2'
140
+ - '0'
141
+ - /
142
+ - $
143
+ - '{'
144
+ - '}'
145
+ - '8'
146
+ - '9'
147
+ - '6'
148
+ - '3'
149
+ - '5'
150
+ - '7'
151
+ - '4'
152
+ - '~'
153
+ - '`'
154
+ - _
155
+ - <*IN*>
156
+ - <*MR.*>
157
+ - \
158
+ - ^
159
+ - <sos/eos>
160
+ init: xavier_uniform
161
+ input_size: null
162
+ ctc_conf:
163
+ dropout_rate: 0.0
164
+ ctc_type: builtin
165
+ reduce: true
166
+ model_conf:
167
+ ctc_weight: 0.3
168
+ lsm_weight: 0.1
169
+ length_normalized_loss: false
170
+ use_preprocessor: true
171
+ token_type: char
172
+ bpemodel: null
173
+ non_linguistic_symbols: data/nlsyms.txt
174
+ cleaner: null
175
+ g2p: null
176
+ frontend: default
177
+ frontend_conf:
178
+ fs: 16k
179
+ specaug: null
180
+ specaug_conf: {}
181
+ normalize: global_mvn
182
+ normalize_conf:
183
+ stats_file: exp/asr_stats_raw/train/feats_stats.npz
184
+ encoder: transformer
185
+ encoder_conf:
186
+ output_size: 256
187
+ attention_heads: 4
188
+ linear_units: 2048
189
+ num_blocks: 12
190
+ dropout_rate: 0.1
191
+ positional_dropout_rate: 0.1
192
+ attention_dropout_rate: 0.0
193
+ input_layer: conv2d
194
+ normalize_before: true
195
+ decoder: transformer
196
+ decoder_conf:
197
+ attention_heads: 4
198
+ linear_units: 2048
199
+ num_blocks: 6
200
+ dropout_rate: 0.1
201
+ positional_dropout_rate: 0.1
202
+ self_attention_dropout_rate: 0.0
203
+ src_attention_dropout_rate: 0.0
204
+ required:
205
+ - output_dir
206
+ - token_list
207
+ distributed: false
exp/asr_train_asr_transformer_raw_char/images/acc.png ADDED
exp/asr_train_asr_transformer_raw_char/images/backward_time.png ADDED
exp/asr_train_asr_transformer_raw_char/images/cer.png ADDED
exp/asr_train_asr_transformer_raw_char/images/cer_ctc.png ADDED
exp/asr_train_asr_transformer_raw_char/images/forward_time.png ADDED
exp/asr_train_asr_transformer_raw_char/images/iter_time.png ADDED
exp/asr_train_asr_transformer_raw_char/images/loss.png ADDED
exp/asr_train_asr_transformer_raw_char/images/loss_att.png ADDED
exp/asr_train_asr_transformer_raw_char/images/loss_ctc.png ADDED
exp/asr_train_asr_transformer_raw_char/images/lr_0.png ADDED
exp/asr_train_asr_transformer_raw_char/images/optim_step_time.png ADDED
exp/asr_train_asr_transformer_raw_char/images/train_time.png ADDED
exp/asr_train_asr_transformer_raw_char/images/wer.png ADDED
exp/asr_train_asr_transformer_raw_char/valid.acc.ave_10best.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d946539cd717cf931d8cd22b3776daa3a23fbf2673fe7fcfb27b79cb8bd2986
3
+ size 108794253
exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/40epoch.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f96407e0f8c318a56d51d24b92203abb66c94a4ec6cad1f9c44a0a982db044d2
3
+ size 54504915
exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/config.yaml ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ config: conf/train_lm.yaml
2
+ print_config: false
3
+ log_level: INFO
4
+ dry_run: false
5
+ iterator_type: sequence
6
+ output_dir: exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4
7
+ ngpu: 1
8
+ seed: 0
9
+ num_workers: 1
10
+ num_att_plot: 3
11
+ dist_backend: nccl
12
+ dist_init_method: env://
13
+ dist_world_size: null
14
+ dist_rank: null
15
+ local_rank: 0
16
+ dist_master_addr: null
17
+ dist_master_port: null
18
+ dist_launcher: null
19
+ multiprocessing_distributed: false
20
+ cudnn_enabled: true
21
+ cudnn_benchmark: false
22
+ cudnn_deterministic: true
23
+ collect_stats: false
24
+ write_collected_feats: false
25
+ max_epoch: 40
26
+ patience: null
27
+ val_scheduler_criterion:
28
+ - valid
29
+ - loss
30
+ early_stopping_criterion:
31
+ - valid
32
+ - loss
33
+ - min
34
+ best_model_criterion:
35
+ - - valid
36
+ - loss
37
+ - min
38
+ keep_nbest_models: 1
39
+ grad_clip: 5.0
40
+ grad_noise: false
41
+ accum_grad: 1
42
+ no_forward_run: false
43
+ resume: true
44
+ train_dtype: float32
45
+ use_amp: false
46
+ log_interval: null
47
+ pretrain_path: []
48
+ pretrain_key: []
49
+ num_iters_per_epoch: null
50
+ batch_size: 512
51
+ valid_batch_size: null
52
+ batch_bins: 1000000
53
+ valid_batch_bins: null
54
+ train_shape_file:
55
+ - exp/lm_stats/train/text_shape.char
56
+ valid_shape_file:
57
+ - exp/lm_stats/valid/text_shape.char
58
+ batch_type: folded
59
+ valid_batch_type: null
60
+ fold_length:
61
+ - 150
62
+ sort_in_batch: descending
63
+ sort_batch: descending
64
+ multiple_iterator: false
65
+ chunk_length: 500
66
+ chunk_shift_ratio: 0.5
67
+ num_cache_chunks: 1024
68
+ train_data_path_and_name_and_type:
69
+ - - dump/raw/srctexts
70
+ - text
71
+ - text
72
+ valid_data_path_and_name_and_type:
73
+ - - dump/raw/test_dev93/text
74
+ - text
75
+ - text
76
+ allow_variable_data_keys: false
77
+ max_cache_size: 0.0
78
+ valid_max_cache_size: null
79
+ optim: adam
80
+ optim_conf:
81
+ lr: 0.0005
82
+ scheduler: null
83
+ scheduler_conf: {}
84
+ token_list:
85
+ - <blank>
86
+ - <unk>
87
+ - <space>
88
+ - E
89
+ - T
90
+ - A
91
+ - N
92
+ - I
93
+ - O
94
+ - S
95
+ - R
96
+ - H
97
+ - L
98
+ - D
99
+ - C
100
+ - U
101
+ - M
102
+ - P
103
+ - F
104
+ - G
105
+ - Y
106
+ - W
107
+ - B
108
+ - V
109
+ - K
110
+ - .
111
+ - X
112
+ - ''''
113
+ - J
114
+ - Q
115
+ - Z
116
+ - <NOISE>
117
+ - ','
118
+ - '-'
119
+ - '"'
120
+ - '*'
121
+ - ':'
122
+ - (
123
+ - )
124
+ - '?'
125
+ - '!'
126
+ - '&'
127
+ - ;
128
+ - '1'
129
+ - '2'
130
+ - '0'
131
+ - /
132
+ - $
133
+ - '{'
134
+ - '}'
135
+ - '8'
136
+ - '9'
137
+ - '6'
138
+ - '3'
139
+ - '5'
140
+ - '7'
141
+ - '4'
142
+ - '~'
143
+ - '`'
144
+ - _
145
+ - <*IN*>
146
+ - <*MR.*>
147
+ - \
148
+ - ^
149
+ - <sos/eos>
150
+ init: null
151
+ model_conf:
152
+ ignore_id: 0
153
+ use_preprocessor: true
154
+ token_type: char
155
+ bpemodel: null
156
+ non_linguistic_symbols: data/nlsyms.txt
157
+ cleaner: null
158
+ g2p: null
159
+ lm: seq_rnn
160
+ lm_conf:
161
+ unit: 650
162
+ nlayers: 4
163
+ required:
164
+ - output_dir
165
+ - token_list
166
+ distributed: false
exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/images/backward_time.png ADDED
exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/images/forward_time.png ADDED
exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/images/iter_time.png ADDED
exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/images/loss.png ADDED
exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/images/lr_0.png ADDED
exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/images/optim_step_time.png ADDED
exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/images/train_time.png ADDED
exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/perplexity_test/ppl ADDED
@@ -0,0 +1 @@
 
 
1
+ 2.413982977013579
meta.yaml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ espnet: 0.9.0
2
+ files:
3
+ asr_model_file: exp/asr_train_asr_transformer_raw_char/valid.acc.ave_10best.pth
4
+ lm_file: exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/40epoch.pth
5
+ python: "3.7.3 (default, Mar 27 2019, 22:11:17) \n[GCC 7.3.0]"
6
+ timestamp: 1598508133.366529
7
+ torch: 1.6.0
8
+ yaml_files:
9
+ asr_train_config: exp/asr_train_asr_transformer_raw_char/config.yaml
10
+ lm_train_config: exp/lm_train_lm_char_optimadam_batch_size512_optim_conflr0.0005_lm_confnlayers4/config.yaml