Siddhant committed
Commit f7f1668 (1 Parent: 98535ad)

import from zenodo

README.md ADDED
@@ -0,0 +1,50 @@
+ ---
+ tags:
+ - espnet
+ - audio
+ - automatic-speech-recognition
+ language: es
+ datasets:
+ - mls
+ license: cc-by-4.0
+ ---
+ ## Example ESPnet2 ASR model
+ ### `ftshijt/mls_asr_transformer_valid.acc.best`
+ ♻️ Imported from https://zenodo.org/record/4458452/
+
+ This model was trained by ftshijt using the mls/asr1 recipe in [espnet](https://github.com/espnet/espnet/).
+ ### Demo: How to use in ESPnet2
+ ```python
+ # coming soon
+ ```
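Until the official demo lands, here is a minimal sketch of the usual ESPnet2 inference pattern, assuming `espnet`, `espnet_model_zoo` and `soundfile` are installed. The model tag is taken from the heading above; whether it resolves directly depends on your `espnet_model_zoo` version, and `example_es.wav` is a placeholder for any 16 kHz mono recording.

```python
import soundfile
from espnet2.bin.asr_inference import Speech2Text
from espnet_model_zoo.downloader import ModelDownloader

# Download and unpack the packed model, then build the inference wrapper.
d = ModelDownloader()
speech2text = Speech2Text(
    **d.download_and_unpack("ftshijt/mls_asr_transformer_valid.acc.best"),
    device="cpu",  # or "cuda" if a GPU is available
)

# The frontend expects 16 kHz mono audio (fs: 16k in config.yaml).
speech, rate = soundfile.read("example_es.wav")  # placeholder file name
text, tokens, token_ids, hypothesis = speech2text(speech)[0]
print(text)
```

The call returns an n-best list; the first entry holds the best hypothesis text along with its subword tokens and IDs.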
+ ### Citing ESPnet
+ ```bibtex
+ @inproceedings{watanabe2018espnet,
+ author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson {Enrique Yalta Soplin} and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
+ title={{ESPnet}: End-to-End Speech Processing Toolkit},
+ year={2018},
+ booktitle={Proceedings of Interspeech},
+ pages={2207--2211},
+ doi={10.21437/Interspeech.2018-1456},
+ url={http://dx.doi.org/10.21437/Interspeech.2018-1456}
+ }
+ @inproceedings{hayashi2020espnet,
+ title={{Espnet-TTS}: Unified, reproducible, and integratable open source end-to-end text-to-speech toolkit},
+ author={Hayashi, Tomoki and Yamamoto, Ryuichi and Inoue, Katsuki and Yoshimura, Takenori and Watanabe, Shinji and Toda, Tomoki and Takeda, Kazuya and Zhang, Yu and Tan, Xu},
+ booktitle={Proceedings of IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
+ pages={7654--7658},
+ year={2020},
+ organization={IEEE}
+ }
+ ```
+ or arXiv:
+ ```bibtex
+ @misc{watanabe2018espnet,
+ title={ESPnet: End-to-End Speech Processing Toolkit},
+ author={Shinji Watanabe and Takaaki Hori and Shigeki Karita and Tomoki Hayashi and Jiro Nishitoba and Yuya Unno and Nelson Enrique Yalta Soplin and Jahn Heymann and Matthew Wiesner and Nanxin Chen and Adithya Renduchintala and Tsubasa Ochiai},
+ year={2018},
+ eprint={1804.00015},
+ archivePrefix={arXiv},
+ primaryClass={cs.CL}
+ }
+ ```
data/token_list/bpe_unigram150/bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f633bfbf3f726a1166cebdaddb9c4253ba3690d9d4461ad998fdd70211e2847f
+ size 239455
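The file above is a Git LFS pointer to the SentencePiece model behind the `bpe_unigram150` token list. A hedged sketch of inspecting it, assuming the `sentencepiece` package is installed and the LFS object has been pulled; the sample Spanish phrase is just an illustration:

```python
import sentencepiece as spm

# Load the unigram BPE model used by both the ASR and LM configs.
sp = spm.SentencePieceProcessor(model_file="data/token_list/bpe_unigram150/bpe.model")
print(sp.get_piece_size())                            # expect 150 pieces
print(sp.encode("esta es una prueba", out_type=str))  # subword pieces for a Spanish phrase
```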
exp/asr_stats_raw_bpe150/train/feats_stats.npz ADDED
Binary file (1.4 kB).
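This .npz holds the statistics that the `global_mvn` normalizer in config.yaml points at. A small sketch, assuming only that the file follows numpy's .npz format (the exact key names are not guaranteed here, so it simply enumerates them):

```python
import numpy as np

# List the arrays stored for global mean/variance normalization.
stats = np.load("exp/asr_stats_raw_bpe150/train/feats_stats.npz")
for name in stats.files:
    print(name, stats[name].shape)
```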
 
exp/asr_transformer/29epoch.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e6f7f6542a536cd7658ab0539d1c23ba91c93ae66230649c63405898eb8ac0d
+ size 109062285
exp/asr_transformer/RESULTS.md ADDED
@@ -0,0 +1,29 @@
+ <!-- Generated by scripts/utils/show_asr_result.sh -->
+ # RESULTS
+ ## Environments
+ - date: `Fri Jan 22 04:56:26 EST 2021`
+ - python version: `3.8.3 (default, May 19 2020, 18:47:26) [GCC 7.3.0]`
+ - espnet version: `espnet 0.9.2`
+ - pytorch version: `pytorch 1.6.0`
+ - Git hash: `c0c3724fe660abd205dbca9c9bbdffed1d2c79db`
+ - Commit date: `Tue Jan 12 23:00:11 2021 -0500`
+
+ ## asr_transformer
+ ### WER
+
+ |dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err|
+ |---|---|---|---|---|---|---|---|---|
+ |decode_asr_lm_lm_train_bpe150_valid.loss.ave_asr_model_valid.acc.best/es_test|2385|88499|81.3|15.6|3.1|2.5|21.2|98.6|
+
+ ### CER
+
+ |dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err|
+ |---|---|---|---|---|---|---|---|---|
+ |decode_asr_lm_lm_train_bpe150_valid.loss.ave_asr_model_valid.acc.best/es_test|2385|474976|94.3|2.9|2.7|1.4|7.1|98.6|
+
+ ### TER
+
+ |dataset|Snt|Wrd|Corr|Sub|Del|Ins|Err|S.Err|
+ |---|---|---|---|---|---|---|---|---|
+ |decode_asr_lm_lm_train_bpe150_valid.loss.ave_asr_model_valid.acc.best/es_test|2385|251160|88.6|7.9|3.5|2.1|13.6|98.6|
+
exp/asr_transformer/config.yaml ADDED
@@ -0,0 +1,297 @@
+ config: conf/tuning/train_asr_transformer.yaml
+ print_config: false
+ log_level: INFO
+ dry_run: false
+ iterator_type: sequence
+ output_dir: exp/asr_transformer
+ ngpu: 1
+ seed: 0
+ num_workers: 1
+ num_att_plot: 3
+ dist_backend: nccl
+ dist_init_method: env://
+ dist_world_size: null
+ dist_rank: null
+ local_rank: 0
+ dist_master_addr: null
+ dist_master_port: null
+ dist_launcher: null
+ multiprocessing_distributed: false
+ cudnn_enabled: true
+ cudnn_benchmark: false
+ cudnn_deterministic: true
+ collect_stats: false
+ write_collected_feats: false
+ max_epoch: 100
+ patience: 0
+ val_scheduler_criterion:
+ - valid
+ - loss
+ early_stopping_criterion:
+ - valid
+ - loss
+ - min
+ best_model_criterion:
+ - - valid
+   - acc
+   - max
+ keep_nbest_models: 10
+ grad_clip: 5
+ grad_clip_type: 2.0
+ grad_noise: false
+ accum_grad: 2
+ no_forward_run: false
+ resume: true
+ train_dtype: float32
+ use_amp: false
+ log_interval: null
+ unused_parameters: false
+ use_tensorboard: true
+ use_wandb: false
+ wandb_project: null
+ wandb_id: null
+ pretrain_path: null
+ init_param: []
+ num_iters_per_epoch: null
+ batch_size: 32
+ valid_batch_size: null
+ batch_bins: 1000000
+ valid_batch_bins: null
+ train_shape_file:
+ - exp/asr_stats_raw_bpe150/train/speech_shape
+ - exp/asr_stats_raw_bpe150/train/text_shape.bpe
+ valid_shape_file:
+ - exp/asr_stats_raw_bpe150/valid/speech_shape
+ - exp/asr_stats_raw_bpe150/valid/text_shape.bpe
+ batch_type: folded
+ valid_batch_type: null
+ fold_length:
+ - 80000
+ - 150
+ sort_in_batch: descending
+ sort_batch: descending
+ multiple_iterator: false
+ chunk_length: 500
+ chunk_shift_ratio: 0.5
+ num_cache_chunks: 1024
+ train_data_path_and_name_and_type:
+ - - dump/raw/es_train/wav.scp
+   - speech
+   - sound
+ - - dump/raw/es_train/text
+   - text
+   - text
+ valid_data_path_and_name_and_type:
+ - - dump/raw/es_dev/wav.scp
+   - speech
+   - sound
+ - - dump/raw/es_dev/text
+   - text
+   - text
+ allow_variable_data_keys: false
+ max_cache_size: 0.0
+ max_cache_fd: 32
+ valid_max_cache_size: null
+ optim: adam
+ optim_conf:
+   lr: 1.0
+ scheduler: noamlr
+ scheduler_conf:
+   warmup_steps: 25000
+ token_list:
+ - <blank>
+ - <unk>
+ - ▁
+ - s
+ - n
+ - r
+ - o
+ - a
+ - ▁de
+ - e
+ - l
+ - ▁a
+ - u
+ - ▁y
+ - ▁que
+ - ra
+ - ta
+ - do
+ - ▁la
+ - i
+ - ▁en
+ - re
+ - to
+ - ▁el
+ - d
+ - p
+ - da
+ - la
+ - c
+ - b
+ - t
+ - ro
+ - ó
+ - en
+ - ri
+ - g
+ - ba
+ - ▁se
+ - os
+ - er
+ - te
+ - ▁con
+ - ci
+ - ▁es
+ - es
+ - ▁no
+ - ▁su
+ - h
+ - ti
+ - é
+ - mo
+ - á
+ - ▁ca
+ - ▁ha
+ - na
+ - ▁los
+ - lo
+ - í
+ - ía
+ - de
+ - me
+ - ca
+ - ▁al
+ - le
+ - ce
+ - v
+ - ma
+ - nte
+ - ▁di
+ - ▁ma
+ - ▁por
+ - y
+ - di
+ - m
+ - ▁pa
+ - sa
+ - ▁si
+ - ▁pe
+ - gu
+ - z
+ - ▁mi
+ - ▁co
+ - ▁me
+ - ▁o
+ - ▁e
+ - ▁un
+ - tra
+ - ▁re
+ - li
+ - ▁f
+ - co
+ - ▁á
+ - ndo
+ - se
+ - mi
+ - ga
+ - ni
+ - ▁cu
+ - ▁le
+ - jo
+ - ▁ve
+ - mp
+ - bi
+ - f
+ - va
+ - ▁mu
+ - go
+ - ▁so
+ - ñ
+ - tu
+ - si
+ - ▁lo
+ - ▁pu
+ - ▁vi
+ - ▁b
+ - ▁las
+ - ▁c
+ - ▁sa
+ - za
+ - ▁del
+ - ▁po
+ - ▁in
+ - vi
+ - ▁te
+ - tro
+ - cia
+ - ▁una
+ - qui
+ - pi
+ - que
+ - ja
+ - pa
+ - ▁para
+ - cu
+ - pe
+ - ▁como
+ - ▁esta
+ - ve
+ - je
+ - lle
+ - x
+ - ú
+ - j
+ - q
+ - ''''
+ - k
+ - w
+ - ü
+ - '-'
+ - <sos/eos>
+ init: chainer
+ input_size: null
+ ctc_conf:
+   dropout_rate: 0.0
+   ctc_type: builtin
+   reduce: true
+   ignore_nan_grad: false
+ model_conf:
+   ctc_weight: 0.3
+   lsm_weight: 0.1
+   length_normalized_loss: false
+ use_preprocessor: true
+ token_type: bpe
+ bpemodel: data/token_list/bpe_unigram150/bpe.model
+ non_linguistic_symbols: null
+ cleaner: null
+ g2p: null
+ frontend: default
+ frontend_conf:
+   fs: 16k
+ specaug: null
+ specaug_conf: {}
+ normalize: global_mvn
+ normalize_conf:
+   stats_file: exp/asr_stats_raw_bpe150/train/feats_stats.npz
+ preencoder: null
+ preencoder_conf: {}
+ encoder: transformer
+ encoder_conf:
+   input_layer: conv2d
+   num_blocks: 12
+   linear_units: 2048
+   dropout_rate: 0.1
+   output_size: 256
+   attention_heads: 4
+   attention_dropout_rate: 0.0
+ decoder: transformer
+ decoder_conf:
+   input_layer: embed
+   num_blocks: 6
+   linear_units: 2048
+   dropout_rate: 0.1
+ required:
+ - output_dir
+ - token_list
+ distributed: false
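The config above records the full training setup. A hedged sketch of reading back the architecture fields it contains, assuming PyYAML is installed and the path is relative to the unpacked repo:

```python
import yaml

# Load the dumped training config and print the transformer sizes it records.
with open("exp/asr_transformer/config.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["encoder"], cfg["encoder_conf"])  # transformer: 12 blocks, 256 dims, 4 heads
print(cfg["decoder"], cfg["decoder_conf"])  # transformer: 6 blocks
print(cfg["model_conf"]["ctc_weight"])      # 0.3 hybrid CTC/attention weight
print(len(cfg["token_list"]))               # 150 tokens, matching bpe_unigram150
```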
exp/asr_transformer/images/acc.png ADDED
exp/asr_transformer/images/backward_time.png ADDED
exp/asr_transformer/images/cer.png ADDED
exp/asr_transformer/images/cer_ctc.png ADDED
exp/asr_transformer/images/forward_time.png ADDED
exp/asr_transformer/images/iter_time.png ADDED
exp/asr_transformer/images/loss.png ADDED
exp/asr_transformer/images/loss_att.png ADDED
exp/asr_transformer/images/loss_ctc.png ADDED
exp/asr_transformer/images/lr_0.png ADDED
exp/asr_transformer/images/optim_step_time.png ADDED
exp/asr_transformer/images/train_time.png ADDED
exp/asr_transformer/images/wer.png ADDED
exp/lm_train_bpe150/config.yaml ADDED
@@ -0,0 +1,265 @@
+ config: null
+ print_config: false
+ log_level: INFO
+ dry_run: false
+ iterator_type: sequence
+ output_dir: exp/lm_train_bpe150
+ ngpu: 1
+ seed: 0
+ num_workers: 1
+ num_att_plot: 3
+ dist_backend: nccl
+ dist_init_method: env://
+ dist_world_size: null
+ dist_rank: null
+ local_rank: 0
+ dist_master_addr: null
+ dist_master_port: null
+ dist_launcher: null
+ multiprocessing_distributed: false
+ cudnn_enabled: true
+ cudnn_benchmark: false
+ cudnn_deterministic: true
+ collect_stats: false
+ write_collected_feats: false
+ max_epoch: 40
+ patience: null
+ val_scheduler_criterion:
+ - valid
+ - loss
+ early_stopping_criterion:
+ - valid
+ - loss
+ - min
+ best_model_criterion:
+ - - train
+   - loss
+   - min
+ - - valid
+   - loss
+   - min
+ - - train
+   - acc
+   - max
+ - - valid
+   - acc
+   - max
+ keep_nbest_models:
+ - 10
+ grad_clip: 5.0
+ grad_clip_type: 2.0
+ grad_noise: false
+ accum_grad: 1
+ no_forward_run: false
+ resume: true
+ train_dtype: float32
+ use_amp: false
+ log_interval: null
+ unused_parameters: false
+ use_tensorboard: true
+ use_wandb: false
+ wandb_project: null
+ wandb_id: null
+ pretrain_path: null
+ init_param: []
+ num_iters_per_epoch: null
+ batch_size: 20
+ valid_batch_size: null
+ batch_bins: 1000000
+ valid_batch_bins: null
+ train_shape_file:
+ - exp/lm_stats_bpe150/train/text_shape.bpe
+ valid_shape_file:
+ - exp/lm_stats_bpe150/valid/text_shape.bpe
+ batch_type: folded
+ valid_batch_type: null
+ fold_length:
+ - 150
+ sort_in_batch: descending
+ sort_batch: descending
+ multiple_iterator: false
+ chunk_length: 500
+ chunk_shift_ratio: 0.5
+ num_cache_chunks: 1024
+ train_data_path_and_name_and_type:
+ - - dump/raw/lm_train.txt
+   - text
+   - text
+ valid_data_path_and_name_and_type:
+ - - dump/raw/es_dev/text
+   - text
+   - text
+ allow_variable_data_keys: false
+ max_cache_size: 0.0
+ max_cache_fd: 32
+ valid_max_cache_size: null
+ optim: adadelta
+ optim_conf: {}
+ scheduler: null
+ scheduler_conf: {}
+ token_list:
+ - <blank>
+ - <unk>
+ - ▁
+ - s
+ - n
+ - r
+ - o
+ - a
+ - ▁de
+ - e
+ - l
+ - ▁a
+ - u
+ - ▁y
+ - ▁que
+ - ra
+ - ta
+ - do
+ - ▁la
+ - i
+ - ▁en
+ - re
+ - to
+ - ▁el
+ - d
+ - p
+ - da
+ - la
+ - c
+ - b
+ - t
+ - ro
+ - ó
+ - en
+ - ri
+ - g
+ - ba
+ - ▁se
+ - os
+ - er
+ - te
+ - ▁con
+ - ci
+ - ▁es
+ - es
+ - ▁no
+ - ▁su
+ - h
+ - ti
+ - é
+ - mo
+ - á
+ - ▁ca
+ - ▁ha
+ - na
+ - ▁los
+ - lo
+ - í
+ - ía
+ - de
+ - me
+ - ca
+ - ▁al
+ - le
+ - ce
+ - v
+ - ma
+ - nte
+ - ▁di
+ - ▁ma
+ - ▁por
+ - y
+ - di
+ - m
+ - ▁pa
+ - sa
+ - ▁si
+ - ▁pe
+ - gu
+ - z
+ - ▁mi
+ - ▁co
+ - ▁me
+ - ▁o
+ - ▁e
+ - ▁un
+ - tra
+ - ▁re
+ - li
+ - ▁f
+ - co
+ - ▁á
+ - ndo
+ - se
+ - mi
+ - ga
+ - ni
+ - ▁cu
+ - ▁le
+ - jo
+ - ▁ve
+ - mp
+ - bi
+ - f
+ - va
+ - ▁mu
+ - go
+ - ▁so
+ - ñ
+ - tu
+ - si
+ - ▁lo
+ - ▁pu
+ - ▁vi
+ - ▁b
+ - ▁las
+ - ▁c
+ - ▁sa
+ - za
+ - ▁del
+ - ▁po
+ - ▁in
+ - vi
+ - ▁te
+ - tro
+ - cia
+ - ▁una
+ - qui
+ - pi
+ - que
+ - ja
+ - pa
+ - ▁para
+ - cu
+ - pe
+ - ▁como
+ - ▁esta
+ - ve
+ - je
+ - lle
+ - x
+ - ú
+ - j
+ - q
+ - ''''
+ - k
+ - w
+ - ü
+ - '-'
+ - <sos/eos>
+ init: null
+ model_conf:
+   ignore_id: 0
+ use_preprocessor: true
+ token_type: bpe
+ bpemodel: data/token_list/bpe_unigram150/bpe.model
+ non_linguistic_symbols: null
+ cleaner: null
+ g2p: null
+ lm: seq_rnn
+ lm_conf: {}
+ required:
+ - output_dir
+ - token_list
+ distributed: false
exp/lm_train_bpe150/images/backward_time.png ADDED
exp/lm_train_bpe150/images/forward_time.png ADDED
exp/lm_train_bpe150/images/iter_time.png ADDED
exp/lm_train_bpe150/images/loss.png ADDED
exp/lm_train_bpe150/images/lr_0.png ADDED
exp/lm_train_bpe150/images/optim_step_time.png ADDED
exp/lm_train_bpe150/images/train_time.png ADDED
exp/lm_train_bpe150/perplexity_test/ppl ADDED
@@ -0,0 +1 @@
+ 8.779000113188225
exp/lm_train_bpe150/valid.loss.ave_10best.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b44c99e0c9ba7be39a8234c384d302f686b4058a93f748d72165686ce73ed01b
+ size 27865874
meta.yaml ADDED
@@ -0,0 +1,10 @@
+ espnet: 0.9.2
+ files:
+   asr_model_file: exp/asr_transformer/29epoch.pth
+   lm_file: exp/lm_train_bpe150/valid.loss.ave_10best.pth
+ python: "3.8.3 (default, May 19 2020, 18:47:26) \n[GCC 7.3.0]"
+ timestamp: 1611336069.625217
+ torch: 1.6.0
+ yaml_files:
+   asr_train_config: exp/asr_transformer/config.yaml
+   lm_train_config: exp/lm_train_bpe150/config.yaml
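meta.yaml ties each checkpoint to its training config. A hedged sketch of wiring these four paths directly into `espnet2.bin.asr_inference.Speech2Text` for local decoding, assuming the repo is cloned with its LFS files and a compatible espnet version; keyword names may differ slightly across releases, the `lm_weight` value is an assumption to tune on the dev set, and `example_es.wav` is a placeholder 16 kHz mono file:

```python
import soundfile
from espnet2.bin.asr_inference import Speech2Text

# Build the decoder from the files listed in meta.yaml instead of the downloader.
speech2text = Speech2Text(
    asr_train_config="exp/asr_transformer/config.yaml",
    asr_model_file="exp/asr_transformer/29epoch.pth",
    lm_train_config="exp/lm_train_bpe150/config.yaml",
    lm_file="exp/lm_train_bpe150/valid.loss.ave_10best.pth",
    lm_weight=0.3,  # assumed value; tune on the dev set
    device="cpu",
)

speech, rate = soundfile.read("example_es.wav")  # placeholder recording
print(speech2text(speech)[0][0])                 # best hypothesis text
```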