Zengwei committed
Commit fc43572
Parent: 1215f7f

upload files

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full set.
Files changed (50)
  1. README.md +1 -0
  2. data/lang_bpe_500/HLG.pt +3 -0
  3. data/lang_bpe_500/L.pt +3 -0
  4. data/lang_bpe_500/LG.pt +3 -0
  5. data/lang_bpe_500/Linv.pt +3 -0
  6. data/lang_bpe_500/bpe.model +3 -0
  7. data/lang_bpe_500/tokens.txt +502 -0
  8. data/lang_bpe_500/words.txt +0 -0
  9. data/lm/G_4_gram.pt +3 -0
  10. decoding_results/1best/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt +0 -0
  11. decoding_results/1best/errs-test-other-epoch-40-avg-16-use-averaged-model.txt +0 -0
  12. decoding_results/1best/log-decode-epoch-40-avg-16-use-averaged-model-2023-06-13-19-45-46 +26 -0
  13. decoding_results/1best/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt +0 -0
  14. decoding_results/1best/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt +0 -0
  15. decoding_results/1best/wer-summary-test-clean-epoch-40-avg-16-use-averaged-model.txt +2 -0
  16. decoding_results/1best/wer-summary-test-other-epoch-40-avg-16-use-averaged-model.txt +2 -0
  17. decoding_results/ctc-decoding/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt +0 -0
  18. decoding_results/ctc-decoding/errs-test-other-epoch-40-avg-16-use-averaged-model.txt +0 -0
  19. decoding_results/ctc-decoding/log-decode-epoch-40-avg-16-use-averaged-model-2023-06-13-19-42-20 +31 -0
  20. decoding_results/ctc-decoding/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt +0 -0
  21. decoding_results/ctc-decoding/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt +0 -0
  22. decoding_results/ctc-decoding/wer-summary-test-clean-epoch-40-avg-16-use-averaged-model.txt +2 -0
  23. decoding_results/ctc-decoding/wer-summary-test-other-epoch-40-avg-16-use-averaged-model.txt +2 -0
  24. decoding_results/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt +0 -0
  25. decoding_results/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt +0 -0
  26. decoding_results/nbest-rescoring/log-decode-epoch-40-avg-16-use-averaged-model-2023-06-13-17-24-42 +6 -0
  27. decoding_results/nbest-rescoring/log-decode-epoch-40-avg-16-use-averaged-model-2023-06-13-17-28-30 +180 -0
  28. decoding_results/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt +0 -0
  29. decoding_results/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt +0 -0
  30. decoding_results/nbest-rescoring/wer-summary-test-clean-epoch-40-avg-16-use-averaged-model.txt +21 -0
  31. decoding_results/nbest-rescoring/wer-summary-test-other-epoch-40-avg-16-use-averaged-model.txt +21 -0
  32. decoding_results/nbest/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt +0 -0
  33. decoding_results/nbest/errs-test-other-epoch-40-avg-16-use-averaged-model.txt +0 -0
  34. decoding_results/nbest/log-decode-epoch-40-avg-16-use-averaged-model-2023-06-13-17-21-29 +35 -0
  35. decoding_results/nbest/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt +0 -0
  36. decoding_results/nbest/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt +0 -0
  37. decoding_results/nbest/wer-summary-test-clean-epoch-40-avg-16-use-averaged-model.txt +2 -0
  38. decoding_results/nbest/wer-summary-test-other-epoch-40-avg-16-use-averaged-model.txt +2 -0
  39. decoding_results/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt +0 -0
  40. decoding_results/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt +0 -0
  41. decoding_results/whole-lattice-rescoring/log-decode-epoch-40-avg-16-use-averaged-model-2023-06-13-17-33-26 +251 -0
  42. decoding_results/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt +0 -0
  43. decoding_results/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt +0 -0
  44. decoding_results/whole-lattice-rescoring/wer-summary-test-clean-epoch-40-avg-16-use-averaged-model.txt +21 -0
  45. decoding_results/whole-lattice-rescoring/wer-summary-test-other-epoch-40-avg-16-use-averaged-model.txt +21 -0
  46. exp/decode.sh +16 -0
  47. exp/epoch-40.pt +3 -0
  48. exp/export.sh +10 -0
  49. exp/jit_script.pt +3 -0
  50. exp/log/log-train-2023-06-01-20-30-01-0 +0 -0
README.md ADDED
@@ -0,0 +1 @@
+ See https://github.com/k2-fsa/icefall/pull/1111
data/lang_bpe_500/HLG.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b5dbbe8b485c0cb37d11e07e8e734990f1e40a2d00fe9689d8da2e7b6fe72883
+ size 845007583
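
The large .pt and model files in this commit are stored with Git LFS, so each diff shows only a three-line pointer (spec version, SHA-256 object id, and size in bytes) instead of the binary payload. A minimal sketch of reading such a pointer, assuming the standard layout shown above (the path is only an example):

```python
# Minimal sketch: parse a Git LFS pointer file such as HLG.pt above.
# Each line is "<key> <value>"; "size" is the payload size in bytes.
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, value = line.strip().split(" ", 1)
            fields[key] = value
    fields["size"] = int(fields["size"])  # stored as a decimal string
    return fields

# parse_lfs_pointer("data/lang_bpe_500/HLG.pt")
# -> {'version': 'https://git-lfs.github.com/spec/v1',
#     'oid': 'sha256:b5dbbe8b...', 'size': 845007583}
```
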
data/lang_bpe_500/L.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1b88996f918737fba67fbd29152018b51a537c16ce0718a2b43d5140583224e
+ size 19025703
data/lang_bpe_500/LG.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bb9f021c7aad79d45dc275ba8154a430c4f660a319dcb872cd52500f25553d6
+ size 249852195
data/lang_bpe_500/Linv.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbc8b3687a1b8f0811a84106b3b310642566c7b1bc282a929878f9269507a2c6
+ size 19025703
data/lang_bpe_500/bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c53433de083c4a6ad12d034550ef22de68cec62c4f58932a7b6b8b2f1e743fa5
+ size 244865
data/lang_bpe_500/tokens.txt ADDED
@@ -0,0 +1,502 @@
+ <blk> 0
+ <sos/eos> 1
+ <unk> 2
+ S 3
+ ▁THE 4
+ ▁A 5
+ T 6
+ ▁AND 7
+ ED 8
+ ▁OF 9
+ ▁TO 10
+ E 11
+ D 12
+ N 13
+ ING 14
+ ▁IN 15
+ Y 16
+ M 17
+ C 18
+ ▁I 19
+ A 20
+ P 21
+ ▁HE 22
+ R 23
+ O 24
+ L 25
+ RE 26
+ I 27
+ U 28
+ ER 29
+ ▁IT 30
+ LY 31
+ ▁THAT 32
+ ▁WAS 33
+ ▁ 34
+ ▁S 35
+ AR 36
+ ▁BE 37
+ F 38
+ ▁C 39
+ IN 40
+ B 41
+ ▁FOR 42
+ OR 43
+ LE 44
+ ' 45
+ ▁HIS 46
+ ▁YOU 47
+ AL 48
+ ▁RE 49
+ V 50
+ ▁B 51
+ G 52
+ RI 53
+ ▁E 54
+ ▁WITH 55
+ ▁T 56
+ ▁AS 57
+ LL 58
+ ▁P 59
+ ▁HER 60
+ ST 61
+ ▁HAD 62
+ ▁SO 63
+ ▁F 64
+ W 65
+ CE 66
+ ▁IS 67
+ ND 68
+ ▁NOT 69
+ TH 70
+ ▁BUT 71
+ EN 72
+ ▁SHE 73
+ ▁ON 74
+ VE 75
+ ON 76
+ SE 77
+ ▁DE 78
+ UR 79
+ ▁G 80
+ CH 81
+ K 82
+ TER 83
+ ▁AT 84
+ IT 85
+ ▁ME 86
+ RO 87
+ NE 88
+ RA 89
+ ES 90
+ IL 91
+ NG 92
+ IC 93
+ ▁NO 94
+ ▁HIM 95
+ ENT 96
+ IR 97
+ ▁WE 98
+ H 99
+ ▁DO 100
+ ▁ALL 101
+ ▁HAVE 102
+ LO 103
+ ▁BY 104
+ ▁MY 105
+ ▁MO 106
+ ▁THIS 107
+ LA 108
+ ▁ST 109
+ ▁WHICH 110
+ ▁CON 111
+ ▁THEY 112
+ CK 113
+ TE 114
+ ▁SAID 115
+ ▁FROM 116
+ ▁GO 117
+ ▁WHO 118
+ ▁TH 119
+ ▁OR 120
+ ▁D 121
+ ▁W 122
+ VER 123
+ LI 124
+ ▁SE 125
+ ▁ONE 126
+ ▁CA 127
+ ▁AN 128
+ ▁LA 129
+ ▁WERE 130
+ EL 131
+ ▁HA 132
+ ▁MAN 133
+ ▁FA 134
+ ▁EX 135
+ AD 136
+ ▁SU 137
+ RY 138
+ ▁MI 139
+ AT 140
+ ▁BO 141
+ ▁WHEN 142
+ AN 143
+ THER 144
+ PP 145
+ ATION 146
+ ▁FI 147
+ ▁WOULD 148
+ ▁PRO 149
+ OW 150
+ ET 151
+ ▁O 152
+ ▁THERE 153
+ ▁HO 154
+ ION 155
+ ▁WHAT 156
+ ▁FE 157
+ ▁PA 158
+ US 159
+ MENT 160
+ ▁MA 161
+ UT 162
+ ▁OUT 163
+ ▁THEIR 164
+ ▁IF 165
+ ▁LI 166
+ ▁K 167
+ ▁WILL 168
+ ▁ARE 169
+ ID 170
+ ▁RO 171
+ DE 172
+ TION 173
+ ▁WA 174
+ PE 175
+ ▁UP 176
+ ▁SP 177
+ ▁PO 178
+ IGHT 179
+ ▁UN 180
+ RU 181
+ ▁LO 182
+ AS 183
+ OL 184
+ ▁LE 185
+ ▁BEEN 186
+ ▁SH 187
+ ▁RA 188
+ ▁SEE 189
+ KE 190
+ UL 191
+ TED 192
+ ▁SA 193
+ UN 194
+ UND 195
+ ANT 196
+ ▁NE 197
+ IS 198
+ ▁THEM 199
+ CI 200
+ GE 201
+ ▁COULD 202
+ ▁DIS 203
+ OM 204
+ ISH 205
+ HE 206
+ EST 207
+ ▁SOME 208
+ ENCE 209
+ ITY 210
+ IVE 211
+ ▁US 212
+ ▁MORE 213
+ ▁EN 214
+ ARD 215
+ ATE 216
+ ▁YOUR 217
+ ▁INTO 218
+ ▁KNOW 219
+ ▁CO 220
+ ANCE 221
+ ▁TIME 222
+ ▁WI 223
+ ▁YE 224
+ AGE 225
+ ▁NOW 226
+ TI 227
+ FF 228
+ ABLE 229
+ ▁VERY 230
+ ▁LIKE 231
+ AM 232
+ HI 233
+ Z 234
+ ▁OTHER 235
+ ▁THAN 236
+ ▁LITTLE 237
+ ▁DID 238
+ ▁LOOK 239
+ TY 240
+ ERS 241
+ ▁CAN 242
+ ▁CHA 243
+ ▁AR 244
+ X 245
+ FUL 246
+ UGH 247
+ ▁BA 248
+ ▁DAY 249
+ ▁ABOUT 250
+ TEN 251
+ IM 252
+ ▁ANY 253
+ ▁PRE 254
+ ▁OVER 255
+ IES 256
+ NESS 257
+ ME 258
+ BLE 259
+ ▁M 260
+ ROW 261
+ ▁HAS 262
+ ▁GREAT 263
+ ▁VI 264
+ TA 265
+ ▁AFTER 266
+ PER 267
+ ▁AGAIN 268
+ HO 269
+ SH 270
+ ▁UPON 271
+ ▁DI 272
+ ▁HAND 273
+ ▁COM 274
+ IST 275
+ TURE 276
+ ▁STA 277
+ ▁THEN 278
+ ▁SHOULD 279
+ ▁GA 280
+ OUS 281
+ OUR 282
+ ▁WELL 283
+ ▁ONLY 284
+ MAN 285
+ ▁GOOD 286
+ ▁TWO 287
+ ▁MAR 288
+ ▁SAY 289
+ ▁HU 290
+ TING 291
+ ▁OUR 292
+ RESS 293
+ ▁DOWN 294
+ IOUS 295
+ ▁BEFORE 296
+ ▁DA 297
+ ▁NA 298
+ QUI 299
+ ▁MADE 300
+ ▁EVERY 301
+ ▁OLD 302
+ ▁EVEN 303
+ IG 304
+ ▁COME 305
+ ▁GRA 306
+ ▁RI 307
+ ▁LONG 308
+ OT 309
+ SIDE 310
+ WARD 311
+ ▁FO 312
+ ▁WHERE 313
+ MO 314
+ LESS 315
+ ▁SC 316
+ ▁MUST 317
+ ▁NEVER 318
+ ▁HOW 319
+ ▁CAME 320
+ ▁SUCH 321
+ ▁RU 322
+ ▁TAKE 323
+ ▁WO 324
+ ▁CAR 325
+ UM 326
+ AK 327
+ ▁THINK 328
+ ▁MUCH 329
+ ▁MISTER 330
+ ▁MAY 331
+ ▁JO 332
+ ▁WAY 333
+ ▁COMP 334
+ ▁THOUGHT 335
+ ▁STO 336
+ ▁MEN 337
+ ▁BACK 338
+ ▁DON 339
+ J 340
+ ▁LET 341
+ ▁TRA 342
+ ▁FIRST 343
+ ▁JUST 344
+ ▁VA 345
+ ▁OWN 346
+ ▁PLA 347
+ ▁MAKE 348
+ ATED 349
+ ▁HIMSELF 350
+ ▁WENT 351
+ ▁PI 352
+ GG 353
+ RING 354
+ ▁DU 355
+ ▁MIGHT 356
+ ▁PART 357
+ ▁GIVE 358
+ ▁IMP 359
+ ▁BU 360
+ ▁PER 361
+ ▁PLACE 362
+ ▁HOUSE 363
+ ▁THROUGH 364
+ IAN 365
+ ▁SW 366
+ ▁UNDER 367
+ QUE 368
+ ▁AWAY 369
+ ▁LOVE 370
+ QUA 371
+ ▁LIFE 372
+ ▁GET 373
+ ▁WITHOUT 374
+ ▁PASS 375
+ ▁TURN 376
+ IGN 377
+ ▁HEAD 378
+ ▁MOST 379
+ ▁THOSE 380
+ ▁SHALL 381
+ ▁EYES 382
+ ▁COL 383
+ ▁STILL 384
+ ▁NIGHT 385
+ ▁NOTHING 386
+ ITION 387
+ HA 388
+ ▁TELL 389
+ ▁WORK 390
+ ▁LAST 391
+ ▁NEW 392
+ ▁FACE 393
+ ▁HI 394
+ ▁WORD 395
+ ▁FOUND 396
+ ▁COUNT 397
+ ▁OB 398
+ ▁WHILE 399
+ ▁SHA 400
+ ▁MEAN 401
+ ▁SAW 402
+ ▁PEOPLE 403
+ ▁FRIEND 404
+ ▁THREE 405
+ ▁ROOM 406
+ ▁SAME 407
+ ▁THOUGH 408
+ ▁RIGHT 409
+ ▁CHILD 410
+ ▁FATHER 411
+ ▁ANOTHER 412
+ ▁HEART 413
+ ▁WANT 414
+ ▁TOOK 415
+ OOK 416
+ ▁LIGHT 417
+ ▁MISSUS 418
+ ▁OPEN 419
+ ▁JU 420
+ ▁ASKED 421
+ PORT 422
+ ▁LEFT 423
+ ▁JA 424
+ ▁WORLD 425
+ ▁HOME 426
+ ▁WHY 427
+ ▁ALWAYS 428
+ ▁ANSWER 429
+ ▁SEEMED 430
+ ▁SOMETHING 431
+ ▁GIRL 432
+ ▁BECAUSE 433
+ ▁NAME 434
+ ▁TOLD 435
+ ▁NI 436
+ ▁HIGH 437
+ IZE 438
+ ▁WOMAN 439
+ ▁FOLLOW 440
+ ▁RETURN 441
+ ▁KNEW 442
+ ▁EACH 443
+ ▁KIND 444
+ ▁JE 445
+ ▁ACT 446
+ ▁LU 447
+ ▁CERTAIN 448
+ ▁YEARS 449
+ ▁QUITE 450
+ ▁APPEAR 451
+ ▁BETTER 452
+ ▁HALF 453
+ ▁PRESENT 454
+ ▁PRINCE 455
+ SHIP 456
+ ▁ALSO 457
+ ▁BEGAN 458
+ ▁HAVING 459
+ ▁ENOUGH 460
+ ▁PERSON 461
+ ▁LADY 462
+ ▁WHITE 463
+ ▁COURSE 464
+ ▁VOICE 465
+ ▁SPEAK 466
+ ▁POWER 467
+ ▁MORNING 468
+ ▁BETWEEN 469
+ ▁AMONG 470
+ ▁KEEP 471
+ ▁WALK 472
+ ▁MATTER 473
+ ▁TEA 474
+ ▁BELIEVE 475
+ ▁SMALL 476
+ ▁TALK 477
+ ▁FELT 478
+ ▁HORSE 479
+ ▁MYSELF 480
+ ▁SIX 481
+ ▁HOWEVER 482
+ ▁FULL 483
+ ▁HERSELF 484
+ ▁POINT 485
+ ▁STOOD 486
+ ▁HUNDRED 487
+ ▁ALMOST 488
+ ▁SINCE 489
+ ▁LARGE 490
+ ▁LEAVE 491
+ ▁PERHAPS 492
+ ▁DARK 493
+ ▁SUDDEN 494
+ ▁REPLIED 495
+ ▁ANYTHING 496
+ ▁WONDER 497
+ ▁UNTIL 498
+ Q 499
+ #0 500
+ #1 501
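
tokens.txt maps each BPE piece to an integer id: <blk>, <sos/eos>, and <unk> are special symbols, ▁ marks a SentencePiece word boundary, and #0/#1 (ids 500-501) are disambiguation symbols used only in the decoding graphs. A minimal sketch of loading the table, assuming the two-column layout above:

```python
# Minimal sketch: load tokens.txt into token<->id maps.
# rsplit keeps tokens like the bare "▁" (id 34) intact.
token2id = {}
with open("data/lang_bpe_500/tokens.txt", encoding="utf-8") as f:
    for line in f:
        token, idx = line.rstrip("\n").rsplit(" ", 1)
        token2id[token] = int(idx)
id2token = {i: t for t, i in token2id.items()}

assert token2id["<blk>"] == 0 and id2token[501] == "#1"
```
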
data/lang_bpe_500/words.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/lm/G_4_gram.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c91581bc632f9c72c557ccdf726298255b9627a6ac38270c51891459b82630e9
+ size 3700956587
decoding_results/1best/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/1best/errs-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/1best/log-decode-epoch-40-avg-16-use-averaged-model-2023-06-13-19-45-46 ADDED
@@ -0,0 +1,26 @@
+ 2023-06-13 19:45:46,339 INFO [ctc_decode.py:641] Decoding started
+ 2023-06-13 19:45:46,340 INFO [ctc_decode.py:647] Device: cuda:0
+ 2023-06-13 19:45:46,340 INFO [ctc_decode.py:648] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.1', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'c51a0b9684442a88ee37f3ce0af686a04b66855b', 'k2-git-date': 'Mon May 1 21:38:03 2023', 'lhotse-version': '1.12.0.dev+git.891bad1.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'new-zipformer-add-ctc', 'icefall-git-sha1': '046b6cb6-dirty', 'icefall-git-date': 'Fri Jun 2 15:51:49 2023', 'icefall-path': '/ceph-zw/workspace/zipformer/icefall_zipformer', 'k2-path': '/ceph-zw/workspace/k2/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-zw/workspace/share/lhotse/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-1-1220091118-57c4d55446-mlpzc', 'IP address': '10.177.22.19'}, 'frame_shift_ms': 10, 'search_beam': 20, 'output_beam': 8, 'min_active_states': 30, 'max_active_states': 10000, 'use_double_scores': True, 'epoch': 40, 'iter': 0, 'avg': 16, 'use_averaged_model': True, 'exp_dir': PosixPath('zipformer/exp-ctc-rnnt'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'lang_dir': PosixPath('data/lang_bpe_500'), 'context_size': 2, 'decoding_method': '1best', 'num_paths': 100, 'nbest_scale': 1.0, 'hlg_scale': 0.6, 'lm_dir': PosixPath('data/my_lm'), 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 300, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'res_dir': PosixPath('zipformer/exp-ctc-rnnt/1best'), 'suffix': 'epoch-40-avg-16-use-averaged-model'}
+ 2023-06-13 19:45:46,609 INFO [lexicon.py:168] Loading pre-compiled data/lang_bpe_500/Linv.pt
+ 2023-06-13 19:45:51,497 INFO [ctc_decode.py:726] About to create model
+ 2023-06-13 19:45:52,095 INFO [ctc_decode.py:793] Calculating the averaged model over epoch range from 24 (excluded) to 40
+ 2023-06-13 19:45:54,495 INFO [ctc_decode.py:810] Number of model parameters: 65805511
+ 2023-06-13 19:45:54,496 INFO [asr_datamodule.py:465] About to get test-clean cuts
+ 2023-06-13 19:45:54,500 INFO [asr_datamodule.py:472] About to get test-other cuts
+ 2023-06-13 19:45:55,448 INFO [ctc_decode.py:558] batch 0/?, cuts processed until now is 21
+ 2023-06-13 19:46:22,063 INFO [ctc_decode.py:572] The transcripts are stored in zipformer/exp-ctc-rnnt/1best/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 19:46:22,133 INFO [utils.py:561] [test-clean-no_rescore] %WER 2.46% [1294 / 52576, 181 ins, 93 del, 1020 sub ]
+ 2023-06-13 19:46:22,308 INFO [ctc_decode.py:581] Wrote detailed error stats to zipformer/exp-ctc-rnnt/1best/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 19:46:22,309 INFO [ctc_decode.py:595]
+ For test-clean, WER of different settings are:
+ no_rescore 2.46 best for test-clean
+
+ 2023-06-13 19:46:23,023 INFO [ctc_decode.py:558] batch 0/?, cuts processed until now is 26
+ 2023-06-13 19:46:50,469 INFO [ctc_decode.py:572] The transcripts are stored in zipformer/exp-ctc-rnnt/1best/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 19:46:50,541 INFO [utils.py:561] [test-other-no_rescore] %WER 5.11% [2674 / 52343, 288 ins, 256 del, 2130 sub ]
+ 2023-06-13 19:46:50,716 INFO [ctc_decode.py:581] Wrote detailed error stats to zipformer/exp-ctc-rnnt/1best/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 19:46:50,717 INFO [ctc_decode.py:595]
+ For test-other, WER of different settings are:
+ no_rescore 5.11 best for test-other
+
+ 2023-06-13 19:46:50,717 INFO [ctc_decode.py:843] Done!
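
For reading these logs: each %WER line is total errors divided by reference words, and the bracketed counts split the errors into insertions, deletions, and substitutions. A small illustrative check of the arithmetic (the helper below is hypothetical, not a function in ctc_decode.py):

```python
# Illustrative only: %WER = 100 * (ins + del + sub) / reference words.
def wer_percent(ins: int, dels: int, subs: int, ref_words: int) -> float:
    return 100.0 * (ins + dels + subs) / ref_words

# test-clean, 1best: 181 + 93 + 1020 = 1294 errors over 52576 words
print(f"{wer_percent(181, 93, 1020, 52576):.2f}%")  # 2.46%
```
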
decoding_results/1best/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/1best/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/1best/wer-summary-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
@@ -0,0 +1,2 @@
+ settings WER
+ no_rescore 2.46
decoding_results/1best/wer-summary-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
@@ -0,0 +1,2 @@
+ settings WER
+ no_rescore 5.11
decoding_results/ctc-decoding/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/ctc-decoding/errs-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/ctc-decoding/log-decode-epoch-40-avg-16-use-averaged-model-2023-06-13-19-42-20 ADDED
@@ -0,0 +1,31 @@
+ 2023-06-13 19:42:20,348 INFO [ctc_decode.py:641] Decoding started
+ 2023-06-13 19:42:20,349 INFO [ctc_decode.py:647] Device: cuda:0
+ 2023-06-13 19:42:20,349 INFO [ctc_decode.py:648] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.1', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'c51a0b9684442a88ee37f3ce0af686a04b66855b', 'k2-git-date': 'Mon May 1 21:38:03 2023', 'lhotse-version': '1.12.0.dev+git.891bad1.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'new-zipformer-add-ctc', 'icefall-git-sha1': '046b6cb6-dirty', 'icefall-git-date': 'Fri Jun 2 15:51:49 2023', 'icefall-path': '/ceph-zw/workspace/zipformer/icefall_zipformer', 'k2-path': '/ceph-zw/workspace/k2/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-zw/workspace/share/lhotse/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-1-1220091118-57c4d55446-mlpzc', 'IP address': '10.177.22.19'}, 'frame_shift_ms': 10, 'search_beam': 20, 'output_beam': 8, 'min_active_states': 30, 'max_active_states': 10000, 'use_double_scores': True, 'epoch': 40, 'iter': 0, 'avg': 16, 'use_averaged_model': True, 'exp_dir': PosixPath('zipformer/exp-ctc-rnnt'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'lang_dir': PosixPath('data/lang_bpe_500'), 'context_size': 2, 'decoding_method': 'ctc-decoding', 'num_paths': 100, 'nbest_scale': 1.0, 'hlg_scale': 0.6, 'lm_dir': PosixPath('data/my_lm'), 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 300, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'res_dir': PosixPath('zipformer/exp-ctc-rnnt/ctc-decoding'), 'suffix': 'epoch-40-avg-16-use-averaged-model'}
+ 2023-06-13 19:42:20,615 INFO [lexicon.py:168] Loading pre-compiled data/lang_bpe_500/Linv.pt
+ 2023-06-13 19:42:24,473 INFO [ctc_decode.py:726] About to create model
+ 2023-06-13 19:42:25,063 INFO [ctc_decode.py:793] Calculating the averaged model over epoch range from 24 (excluded) to 40
+ 2023-06-13 19:42:27,461 INFO [ctc_decode.py:810] Number of model parameters: 65805511
+ 2023-06-13 19:42:27,462 INFO [asr_datamodule.py:465] About to get test-clean cuts
+ 2023-06-13 19:42:27,466 INFO [asr_datamodule.py:472] About to get test-other cuts
+ 2023-06-13 19:42:28,473 INFO [ctc_decode.py:558] batch 0/?, cuts processed until now is 21
+ 2023-06-13 19:42:54,474 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([4.6098, 4.0538, 4.2424, 4.1121], device='cuda:0')
+ 2023-06-13 19:42:57,548 INFO [ctc_decode.py:572] The transcripts are stored in zipformer/exp-ctc-rnnt/ctc-decoding/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 19:42:57,621 INFO [utils.py:561] [test-clean-ctc-decoding] %WER 2.40% [1262 / 52576, 133 ins, 89 del, 1040 sub ]
+ 2023-06-13 19:42:57,796 INFO [ctc_decode.py:581] Wrote detailed error stats to zipformer/exp-ctc-rnnt/ctc-decoding/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 19:42:57,796 INFO [ctc_decode.py:595]
+ For test-clean, WER of different settings are:
+ ctc-decoding 2.4 best for test-clean
+
+ 2023-06-13 19:42:58,556 INFO [ctc_decode.py:558] batch 0/?, cuts processed until now is 26
+ 2023-06-13 19:43:06,459 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([4.0038, 3.6233, 3.5348, 3.5192], device='cuda:0')
+ 2023-06-13 19:43:10,237 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([4.2716, 2.5148, 3.4661, 1.9089], device='cuda:0')
+ 2023-06-13 19:43:19,177 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([2.3797, 3.3571, 3.0888, 2.5630], device='cuda:0')
+ 2023-06-13 19:43:25,600 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([4.5188, 4.0134, 3.8820, 4.2079], device='cuda:0')
+ 2023-06-13 19:43:27,974 INFO [ctc_decode.py:572] The transcripts are stored in zipformer/exp-ctc-rnnt/ctc-decoding/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 19:43:28,074 INFO [utils.py:561] [test-other-ctc-decoding] %WER 5.66% [2961 / 52343, 278 ins, 224 del, 2459 sub ]
+ 2023-06-13 19:43:28,356 INFO [ctc_decode.py:581] Wrote detailed error stats to zipformer/exp-ctc-rnnt/ctc-decoding/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 19:43:28,357 INFO [ctc_decode.py:595]
+ For test-other, WER of different settings are:
+ ctc-decoding 5.66 best for test-other
+
+ 2023-06-13 19:43:28,357 INFO [ctc_decode.py:843] Done!
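
The line "Calculating the averaged model over epoch range from 24 (excluded) to 40" in each log corresponds to --epoch 40 --avg 16 --use-averaged-model True: the parameters of the 16 checkpoints for epochs 25 through 40 are combined element-wise. A naive sketch of the idea (illustrative only; icefall's actual implementation in icefall/checkpoint.py works from running-average checkpoints rather than loading every epoch, and only exp/epoch-40.pt ships in this commit):

```python
# Naive element-wise checkpoint averaging; illustrative, not icefall's code.
import torch

def average_checkpoints(paths):
    avg = {k: v.to(torch.float64)
           for k, v in torch.load(paths[0], map_location="cpu")["model"].items()}
    for p in paths[1:]:
        state = torch.load(p, map_location="cpu")["model"]
        for k in avg:
            avg[k] += state[k].to(torch.float64)
    return {k: (v / len(paths)).to(torch.float32) for k, v in avg.items()}

# hypothetical paths: [f"exp/epoch-{e}.pt" for e in range(25, 41)]  # 16 files
```
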
decoding_results/ctc-decoding/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/ctc-decoding/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/ctc-decoding/wer-summary-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
@@ -0,0 +1,2 @@
+ settings WER
+ ctc-decoding 2.4
decoding_results/ctc-decoding/wer-summary-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
@@ -0,0 +1,2 @@
+ settings WER
+ ctc-decoding 5.66
decoding_results/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/nbest-rescoring/log-decode-epoch-40-avg-16-use-averaged-model-2023-06-13-17-24-42 ADDED
@@ -0,0 +1,6 @@
+ 2023-06-13 17:24:42,946 INFO [ctc_decode.py:633] Decoding started
+ 2023-06-13 17:24:42,946 INFO [ctc_decode.py:639] Device: cuda:0
+ 2023-06-13 17:24:42,946 INFO [ctc_decode.py:640] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.1', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'c51a0b9684442a88ee37f3ce0af686a04b66855b', 'k2-git-date': 'Mon May 1 21:38:03 2023', 'lhotse-version': '1.12.0.dev+git.891bad1.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'new-zipformer-add-ctc', 'icefall-git-sha1': '046b6cb6-clean', 'icefall-git-date': 'Fri Jun 2 15:51:49 2023', 'icefall-path': '/ceph-zw/workspace/zipformer/icefall_zipformer', 'k2-path': '/ceph-zw/workspace/k2/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-zw/workspace/share/lhotse/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-1-1220091118-57c4d55446-mlpzc', 'IP address': '10.177.22.19'}, 'frame_shift_ms': 10, 'search_beam': 20, 'output_beam': 8, 'min_active_states': 30, 'max_active_states': 10000, 'use_double_scores': True, 'epoch': 40, 'iter': 0, 'avg': 16, 'use_averaged_model': True, 'exp_dir': PosixPath('zipformer/exp-ctc-rnnt'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'lang_dir': PosixPath('data/lang_bpe_500'), 'context_size': 2, 'decoding_method': 'nbest-rescoring', 'num_paths': 100, 'nbest_scale': 1.0, 'hlg_scale': 0.6, 'lm_dir': PosixPath('data/lm'), 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 300, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'res_dir': PosixPath('zipformer/exp-ctc-rnnt/nbest-rescoring'), 'suffix': 'epoch-40-avg-16-use-averaged-model'}
+ 2023-06-13 17:24:43,219 INFO [lexicon.py:168] Loading pre-compiled data/lang_bpe_500/Linv.pt
+ 2023-06-13 17:24:48,038 INFO [ctc_decode.py:676] Loading G_4_gram.fst.txt
+ 2023-06-13 17:24:48,038 WARNING [ctc_decode.py:677] It may take 8 minutes.
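
The two nbest-rescoring logs show the usual two-phase pattern: this first run compiles the 4-gram LM from G_4_gram.fst.txt (the slow "may take 8 minutes" step) and caches the result, while the rerun below loads the pre-compiled G_4_gram.pt directly. A sketch of the fast path, assuming the FSA was cached with torch.save(G.as_dict(), ...) as icefall does:

```python
# Sketch: load the cached 4-gram LM back as a k2 FSA.
import torch
import k2

d = torch.load("data/lm/G_4_gram.pt", map_location="cpu")
G = k2.Fsa.from_dict(d)
```
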
decoding_results/nbest-rescoring/log-decode-epoch-40-avg-16-use-averaged-model-2023-06-13-17-28-30 ADDED
@@ -0,0 +1,180 @@
+ 2023-06-13 17:28:30,904 INFO [ctc_decode.py:633] Decoding started
+ 2023-06-13 17:28:30,904 INFO [ctc_decode.py:639] Device: cuda:0
+ 2023-06-13 17:28:30,904 INFO [ctc_decode.py:640] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.1', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'c51a0b9684442a88ee37f3ce0af686a04b66855b', 'k2-git-date': 'Mon May 1 21:38:03 2023', 'lhotse-version': '1.12.0.dev+git.891bad1.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'new-zipformer-add-ctc', 'icefall-git-sha1': '046b6cb6-clean', 'icefall-git-date': 'Fri Jun 2 15:51:49 2023', 'icefall-path': '/ceph-zw/workspace/zipformer/icefall_zipformer', 'k2-path': '/ceph-zw/workspace/k2/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-zw/workspace/share/lhotse/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-1-1220091118-57c4d55446-mlpzc', 'IP address': '10.177.22.19'}, 'frame_shift_ms': 10, 'search_beam': 20, 'output_beam': 8, 'min_active_states': 30, 'max_active_states': 10000, 'use_double_scores': True, 'epoch': 40, 'iter': 0, 'avg': 16, 'use_averaged_model': True, 'exp_dir': PosixPath('zipformer/exp-ctc-rnnt'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'lang_dir': PosixPath('data/lang_bpe_500'), 'context_size': 2, 'decoding_method': 'nbest-rescoring', 'num_paths': 100, 'nbest_scale': 1.0, 'hlg_scale': 0.6, 'lm_dir': PosixPath('data/my_lm'), 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 300, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'res_dir': PosixPath('zipformer/exp-ctc-rnnt/nbest-rescoring'), 'suffix': 'epoch-40-avg-16-use-averaged-model'}
+ 2023-06-13 17:28:31,179 INFO [lexicon.py:168] Loading pre-compiled data/lang_bpe_500/Linv.pt
+ 2023-06-13 17:28:35,887 INFO [ctc_decode.py:701] Loading pre-compiled G_4_gram.pt
+ 2023-06-13 17:28:58,167 INFO [ctc_decode.py:718] About to create model
+ 2023-06-13 17:28:58,818 INFO [ctc_decode.py:785] Calculating the averaged model over epoch range from 24 (excluded) to 40
+ 2023-06-13 17:29:02,258 INFO [ctc_decode.py:802] Number of model parameters: 65805511
+ 2023-06-13 17:29:02,259 INFO [asr_datamodule.py:465] About to get test-clean cuts
+ 2023-06-13 17:29:02,262 INFO [asr_datamodule.py:472] About to get test-other cuts
+ 2023-06-13 17:29:03,502 INFO [ctc_decode.py:550] batch 0/?, cuts processed until now is 21
+ 2023-06-13 17:29:39,698 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([4.6109, 4.3406, 4.1379, 4.6409], device='cuda:0')
+ 2023-06-13 17:29:53,312 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:53,387 INFO [utils.py:561] [test-clean-lm_scale_0.1] %WER 2.49% [1309 / 52576, 219 ins, 68 del, 1022 sub ]
+ 2023-06-13 17:29:53,562 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:53,585 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:53,654 INFO [utils.py:561] [test-clean-lm_scale_0.2] %WER 2.45% [1289 / 52576, 211 ins, 68 del, 1010 sub ]
+ 2023-06-13 17:29:53,833 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:53,858 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:54,096 INFO [utils.py:561] [test-clean-lm_scale_0.3] %WER 2.42% [1272 / 52576, 202 ins, 72 del, 998 sub ]
+ 2023-06-13 17:29:54,266 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:54,289 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:54,356 INFO [utils.py:561] [test-clean-lm_scale_0.4] %WER 2.39% [1254 / 52576, 192 ins, 74 del, 988 sub ]
+ 2023-06-13 17:29:54,529 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:54,552 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:54,622 INFO [utils.py:561] [test-clean-lm_scale_0.5] %WER 2.37% [1248 / 52576, 184 ins, 81 del, 983 sub ]
+ 2023-06-13 17:29:54,795 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:54,817 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:54,886 INFO [utils.py:561] [test-clean-lm_scale_0.6] %WER 2.38% [1250 / 52576, 178 ins, 87 del, 985 sub ]
+ 2023-06-13 17:29:55,057 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:55,080 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:55,147 INFO [utils.py:561] [test-clean-lm_scale_0.7] %WER 2.40% [1261 / 52576, 172 ins, 97 del, 992 sub ]
+ 2023-06-13 17:29:55,320 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:55,342 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:55,409 INFO [utils.py:561] [test-clean-lm_scale_0.8] %WER 2.42% [1272 / 52576, 167 ins, 108 del, 997 sub ]
+ 2023-06-13 17:29:55,577 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:55,599 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:55,822 INFO [utils.py:561] [test-clean-lm_scale_0.9] %WER 2.44% [1284 / 52576, 158 ins, 123 del, 1003 sub ]
+ 2023-06-13 17:29:55,994 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:56,016 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:56,082 INFO [utils.py:561] [test-clean-lm_scale_1.0] %WER 2.50% [1312 / 52576, 154 ins, 140 del, 1018 sub ]
+ 2023-06-13 17:29:56,252 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:56,275 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:56,343 INFO [utils.py:561] [test-clean-lm_scale_1.1] %WER 2.56% [1348 / 52576, 156 ins, 155 del, 1037 sub ]
+ 2023-06-13 17:29:56,511 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:56,534 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:56,601 INFO [utils.py:561] [test-clean-lm_scale_1.2] %WER 2.60% [1367 / 52576, 153 ins, 162 del, 1052 sub ]
+ 2023-06-13 17:29:56,773 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:56,795 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:56,864 INFO [utils.py:561] [test-clean-lm_scale_1.3] %WER 2.66% [1396 / 52576, 153 ins, 179 del, 1064 sub ]
+ 2023-06-13 17:29:57,033 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:57,056 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:57,123 INFO [utils.py:561] [test-clean-lm_scale_1.4] %WER 2.69% [1414 / 52576, 150 ins, 191 del, 1073 sub ]
+ 2023-06-13 17:29:57,294 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:57,316 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:57,383 INFO [utils.py:561] [test-clean-lm_scale_1.5] %WER 2.73% [1436 / 52576, 149 ins, 203 del, 1084 sub ]
+ 2023-06-13 17:29:57,707 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:57,730 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:57,798 INFO [utils.py:561] [test-clean-lm_scale_1.6] %WER 2.80% [1471 / 52576, 149 ins, 220 del, 1102 sub ]
+ 2023-06-13 17:29:57,974 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:57,996 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:58,062 INFO [utils.py:561] [test-clean-lm_scale_1.7] %WER 2.85% [1501 / 52576, 149 ins, 227 del, 1125 sub ]
+ 2023-06-13 17:29:58,237 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:58,259 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:58,326 INFO [utils.py:561] [test-clean-lm_scale_1.8] %WER 2.89% [1520 / 52576, 149 ins, 233 del, 1138 sub ]
+ 2023-06-13 17:29:58,494 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:58,516 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:58,583 INFO [utils.py:561] [test-clean-lm_scale_1.9] %WER 2.94% [1545 / 52576, 147 ins, 242 del, 1156 sub ]
+ 2023-06-13 17:29:58,750 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:58,773 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:58,841 INFO [utils.py:561] [test-clean-lm_scale_2.0] %WER 2.98% [1566 / 52576, 147 ins, 252 del, 1167 sub ]
+ 2023-06-13 17:29:59,014 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:29:59,015 INFO [ctc_decode.py:587]
+ For test-clean, WER of different settings are:
+ lm_scale_0.5 2.37 best for test-clean
+ lm_scale_0.6 2.38
+ lm_scale_0.4 2.39
+ lm_scale_0.7 2.4
+ lm_scale_0.3 2.42
+ lm_scale_0.8 2.42
+ lm_scale_0.9 2.44
+ lm_scale_0.2 2.45
+ lm_scale_0.1 2.49
+ lm_scale_1.0 2.5
+ lm_scale_1.1 2.56
+ lm_scale_1.2 2.6
+ lm_scale_1.3 2.66
+ lm_scale_1.4 2.69
+ lm_scale_1.5 2.73
+ lm_scale_1.6 2.8
+ lm_scale_1.7 2.85
+ lm_scale_1.8 2.89
+ lm_scale_1.9 2.94
+ lm_scale_2.0 2.98
+
+ 2023-06-13 17:30:00,580 INFO [ctc_decode.py:550] batch 0/?, cuts processed until now is 26
+ 2023-06-13 17:31:00,699 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:00,770 INFO [utils.py:561] [test-other-lm_scale_0.1] %WER 5.20% [2721 / 52343, 362 ins, 183 del, 2176 sub ]
+ 2023-06-13 17:31:00,946 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:00,970 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:01,039 INFO [utils.py:561] [test-other-lm_scale_0.2] %WER 5.10% [2670 / 52343, 355 ins, 183 del, 2132 sub ]
+ 2023-06-13 17:31:01,212 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:01,235 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:01,306 INFO [utils.py:561] [test-other-lm_scale_0.3] %WER 5.04% [2638 / 52343, 342 ins, 187 del, 2109 sub ]
+ 2023-06-13 17:31:01,481 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:01,503 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:01,573 INFO [utils.py:561] [test-other-lm_scale_0.4] %WER 4.99% [2610 / 52343, 323 ins, 198 del, 2089 sub ]
+ 2023-06-13 17:31:01,745 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:01,769 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:01,842 INFO [utils.py:561] [test-other-lm_scale_0.5] %WER 4.95% [2592 / 52343, 316 ins, 214 del, 2062 sub ]
+ 2023-06-13 17:31:02,020 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:02,042 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:02,110 INFO [utils.py:561] [test-other-lm_scale_0.6] %WER 4.93% [2583 / 52343, 290 ins, 234 del, 2059 sub ]
+ 2023-06-13 17:31:02,283 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:02,306 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:02,378 INFO [utils.py:561] [test-other-lm_scale_0.7] %WER 4.94% [2584 / 52343, 279 ins, 250 del, 2055 sub ]
+ 2023-06-13 17:31:02,550 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:02,573 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:02,641 INFO [utils.py:561] [test-other-lm_scale_0.8] %WER 4.99% [2614 / 52343, 265 ins, 288 del, 2061 sub ]
+ 2023-06-13 17:31:02,975 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:02,998 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:03,067 INFO [utils.py:561] [test-other-lm_scale_0.9] %WER 5.03% [2635 / 52343, 252 ins, 305 del, 2078 sub ]
+ 2023-06-13 17:31:03,244 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:03,268 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:03,338 INFO [utils.py:561] [test-other-lm_scale_1.0] %WER 5.08% [2660 / 52343, 250 ins, 331 del, 2079 sub ]
+ 2023-06-13 17:31:03,513 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:03,537 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:03,608 INFO [utils.py:561] [test-other-lm_scale_1.1] %WER 5.16% [2701 / 52343, 244 ins, 350 del, 2107 sub ]
+ 2023-06-13 17:31:03,782 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:03,805 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:03,874 INFO [utils.py:561] [test-other-lm_scale_1.2] %WER 5.25% [2748 / 52343, 241 ins, 383 del, 2124 sub ]
+ 2023-06-13 17:31:04,060 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:04,085 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:04,153 INFO [utils.py:561] [test-other-lm_scale_1.3] %WER 5.30% [2772 / 52343, 237 ins, 404 del, 2131 sub ]
+ 2023-06-13 17:31:04,327 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:04,350 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:04,418 INFO [utils.py:561] [test-other-lm_scale_1.4] %WER 5.38% [2815 / 52343, 231 ins, 422 del, 2162 sub ]
+ 2023-06-13 17:31:04,591 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:04,615 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:04,683 INFO [utils.py:561] [test-other-lm_scale_1.5] %WER 5.45% [2855 / 52343, 230 ins, 452 del, 2173 sub ]
+ 2023-06-13 17:31:05,037 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:05,060 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:05,129 INFO [utils.py:561] [test-other-lm_scale_1.6] %WER 5.52% [2887 / 52343, 226 ins, 469 del, 2192 sub ]
+ 2023-06-13 17:31:05,304 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:05,327 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:05,395 INFO [utils.py:561] [test-other-lm_scale_1.7] %WER 5.58% [2919 / 52343, 227 ins, 492 del, 2200 sub ]
+ 2023-06-13 17:31:05,572 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:05,595 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:05,664 INFO [utils.py:561] [test-other-lm_scale_1.8] %WER 5.63% [2947 / 52343, 227 ins, 506 del, 2214 sub ]
+ 2023-06-13 17:31:05,840 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:05,863 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:05,938 INFO [utils.py:561] [test-other-lm_scale_1.9] %WER 5.70% [2984 / 52343, 227 ins, 520 del, 2237 sub ]
+ 2023-06-13 17:31:06,113 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:06,136 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:06,204 INFO [utils.py:561] [test-other-lm_scale_2.0] %WER 5.74% [3007 / 52343, 224 ins, 532 del, 2251 sub ]
+ 2023-06-13 17:31:06,379 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:31:06,380 INFO [ctc_decode.py:587]
+ For test-other, WER of different settings are:
+ lm_scale_0.6 4.93 best for test-other
+ lm_scale_0.7 4.94
+ lm_scale_0.5 4.95
+ lm_scale_0.4 4.99
+ lm_scale_0.8 4.99
+ lm_scale_0.9 5.03
+ lm_scale_0.3 5.04
+ lm_scale_1.0 5.08
+ lm_scale_0.2 5.1
+ lm_scale_1.1 5.16
+ lm_scale_0.1 5.2
+ lm_scale_1.2 5.25
+ lm_scale_1.3 5.3
+ lm_scale_1.4 5.38
+ lm_scale_1.5 5.45
+ lm_scale_1.6 5.52
+ lm_scale_1.7 5.58
+ lm_scale_1.8 5.63
+ lm_scale_1.9 5.7
+ lm_scale_2.0 5.74
+
+ 2023-06-13 17:31:06,380 INFO [ctc_decode.py:835] Done!
decoding_results/nbest-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/nbest-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/nbest-rescoring/wer-summary-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
@@ -0,0 +1,21 @@
+ settings WER
+ lm_scale_0.5 2.37
+ lm_scale_0.6 2.38
+ lm_scale_0.4 2.39
+ lm_scale_0.7 2.4
+ lm_scale_0.3 2.42
+ lm_scale_0.8 2.42
+ lm_scale_0.9 2.44
+ lm_scale_0.2 2.45
+ lm_scale_0.1 2.49
+ lm_scale_1.0 2.5
+ lm_scale_1.1 2.56
+ lm_scale_1.2 2.6
+ lm_scale_1.3 2.66
+ lm_scale_1.4 2.69
+ lm_scale_1.5 2.73
+ lm_scale_1.6 2.8
+ lm_scale_1.7 2.85
+ lm_scale_1.8 2.89
+ lm_scale_1.9 2.94
+ lm_scale_2.0 2.98
decoding_results/nbest-rescoring/wer-summary-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
@@ -0,0 +1,21 @@
+ settings WER
+ lm_scale_0.6 4.93
+ lm_scale_0.7 4.94
+ lm_scale_0.5 4.95
+ lm_scale_0.4 4.99
+ lm_scale_0.8 4.99
+ lm_scale_0.9 5.03
+ lm_scale_0.3 5.04
+ lm_scale_1.0 5.08
+ lm_scale_0.2 5.1
+ lm_scale_1.1 5.16
+ lm_scale_0.1 5.2
+ lm_scale_1.2 5.25
+ lm_scale_1.3 5.3
+ lm_scale_1.4 5.38
+ lm_scale_1.5 5.45
+ lm_scale_1.6 5.52
+ lm_scale_1.7 5.58
+ lm_scale_1.8 5.63
+ lm_scale_1.9 5.7
+ lm_scale_2.0 5.74
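
Both summaries above are already sorted by WER, so the first data row is the best operating point: lm_scale 0.5 (2.37) on test-clean and lm_scale 0.6 (4.93) on test-other. A minimal sketch of recovering that programmatically, assuming the whitespace-separated two-column layout above:

```python
# Minimal sketch: pick the lowest-WER setting from a wer-summary file.
def best_setting(summary_path: str):
    best = None
    with open(summary_path) as f:
        next(f)  # skip the "settings WER" header row
        for line in f:
            setting, wer = line.split()
            if best is None or float(wer) < best[1]:
                best = (setting, float(wer))
    return best

# best_setting("decoding_results/nbest-rescoring/"
#              "wer-summary-test-other-epoch-40-avg-16-use-averaged-model.txt")
# -> ('lm_scale_0.6', 4.93)
```
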
decoding_results/nbest/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/nbest/errs-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/nbest/log-decode-epoch-40-avg-16-use-averaged-model-2023-06-13-17-21-29 ADDED
@@ -0,0 +1,35 @@
+ 2023-06-13 17:21:29,434 INFO [ctc_decode.py:633] Decoding started
+ 2023-06-13 17:21:29,434 INFO [ctc_decode.py:639] Device: cuda:0
+ 2023-06-13 17:21:29,434 INFO [ctc_decode.py:640] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.1', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'c51a0b9684442a88ee37f3ce0af686a04b66855b', 'k2-git-date': 'Mon May 1 21:38:03 2023', 'lhotse-version': '1.12.0.dev+git.891bad1.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'new-zipformer-add-ctc', 'icefall-git-sha1': '046b6cb6-clean', 'icefall-git-date': 'Fri Jun 2 15:51:49 2023', 'icefall-path': '/ceph-zw/workspace/zipformer/icefall_zipformer', 'k2-path': '/ceph-zw/workspace/k2/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-zw/workspace/share/lhotse/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-1-1220091118-57c4d55446-mlpzc', 'IP address': '10.177.22.19'}, 'frame_shift_ms': 10, 'search_beam': 20, 'output_beam': 8, 'min_active_states': 30, 'max_active_states': 10000, 'use_double_scores': True, 'epoch': 40, 'iter': 0, 'avg': 16, 'use_averaged_model': True, 'exp_dir': PosixPath('zipformer/exp-ctc-rnnt'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'lang_dir': PosixPath('data/lang_bpe_500'), 'context_size': 2, 'decoding_method': 'nbest', 'num_paths': 100, 'nbest_scale': 1.0, 'hlg_scale': 0.6, 'lm_dir': PosixPath('data/lm'), 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 300, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'res_dir': PosixPath('zipformer/exp-ctc-rnnt/nbest'), 'suffix': 'epoch-40-avg-16-use-averaged-model'}
+ 2023-06-13 17:21:29,725 INFO [lexicon.py:168] Loading pre-compiled data/lang_bpe_500/Linv.pt
+ 2023-06-13 17:21:34,644 INFO [ctc_decode.py:718] About to create model
+ 2023-06-13 17:21:35,244 INFO [ctc_decode.py:785] Calculating the averaged model over epoch range from 24 (excluded) to 40
+ 2023-06-13 17:21:39,252 INFO [ctc_decode.py:802] Number of model parameters: 65805511
+ 2023-06-13 17:21:39,252 INFO [asr_datamodule.py:465] About to get test-clean cuts
+ 2023-06-13 17:21:39,255 INFO [asr_datamodule.py:472] About to get test-other cuts
+ 2023-06-13 17:21:40,545 INFO [ctc_decode.py:550] batch 0/?, cuts processed until now is 21
+ 2023-06-13 17:21:42,976 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([5.0315, 4.4660, 4.2767, 4.5975], device='cuda:0')
+ 2023-06-13 17:21:55,627 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([4.4431, 2.6692, 3.6222, 2.0333], device='cuda:0')
+ 2023-06-13 17:22:11,994 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([2.2852, 3.4389, 3.4427, 3.4871, 3.2347, 2.9755, 2.4241, 2.8685],
+ device='cuda:0')
+ 2023-06-13 17:22:16,053 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([4.5884, 2.7881, 3.7633, 2.0764], device='cuda:0')
+ 2023-06-13 17:22:20,226 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([3.9332, 4.9863, 4.5516, 4.1826], device='cuda:0')
+ 2023-06-13 17:22:26,654 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([2.8336, 4.2831, 4.1287, 3.3007, 3.3161, 3.7073, 4.0190, 3.6861],
+ device='cuda:0')
+ 2023-06-13 17:22:28,090 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:22:28,169 INFO [utils.py:561] [test-clean-no_rescore-nbest-scale-1.0-100] %WER 2.46% [1294 / 52576, 181 ins, 93 del, 1020 sub ]
+ 2023-06-13 17:22:28,344 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:22:28,345 INFO [ctc_decode.py:587]
+ For test-clean, WER of different settings are:
+ no_rescore-nbest-scale-1.0-100	2.46	best for test-clean
+
+ 2023-06-13 17:22:29,623 INFO [ctc_decode.py:550] batch 0/?, cuts processed until now is 26
+ 2023-06-13 17:22:59,061 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([3.7139, 3.3612, 2.7698, 3.3808], device='cuda:0')
+ 2023-06-13 17:23:25,555 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/nbest/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:23:25,630 INFO [utils.py:561] [test-other-no_rescore-nbest-scale-1.0-100] %WER 5.11% [2674 / 52343, 288 ins, 256 del, 2130 sub ]
+ 2023-06-13 17:23:25,808 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/nbest/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:23:25,808 INFO [ctc_decode.py:587]
+ For test-other, WER of different settings are:
+ no_rescore-nbest-scale-1.0-100	5.11	best for test-other
+
+ 2023-06-13 17:23:25,808 INFO [ctc_decode.py:835] Done!
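
The setting tag "no_rescore-nbest-scale-1.0-100" encodes the two n-best knobs passed in exp/decode.sh further down: --num-paths 100 and --nbest-scale 1.0. A hedged sketch of the sampling step these settings feed into, assuming the usual scale-then-sample pattern around k2's path-sampling primitive (the wrapper name and the scale/restore dance are illustrative, not the recipe's exact code):

import k2

def sample_nbest(lattice: "k2.Fsa", num_paths: int = 100, nbest_scale: float = 1.0):
    # Scores are scaled before sampling; scales < 1.0 flatten the
    # distribution so the sampled paths are more diverse. 1.0 is a no-op.
    saved = lattice.scores.clone()
    lattice.scores = saved * nbest_scale
    # Each row of the returned ragged tensor holds the arc indexes
    # of one sampled path through the lattice.
    paths = k2.random_paths(lattice, use_double_scores=True, num_paths=num_paths)
    lattice.scores = saved
    return paths
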
decoding_results/nbest/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/nbest/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/nbest/wer-summary-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
@@ -0,0 +1,2 @@
+ settings	WER
+ no_rescore-nbest-scale-1.0-100 2.46
decoding_results/nbest/wer-summary-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
@@ -0,0 +1,2 @@
+ settings	WER
+ no_rescore-nbest-scale-1.0-100 5.11
decoding_results/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/whole-lattice-rescoring/log-decode-epoch-40-avg-16-use-averaged-model-2023-06-13-17-33-26 ADDED
@@ -0,0 +1,251 @@
+ 2023-06-13 17:33:26,582 INFO [ctc_decode.py:633] Decoding started
+ 2023-06-13 17:33:26,582 INFO [ctc_decode.py:639] Device: cuda:0
+ 2023-06-13 17:33:26,582 INFO [ctc_decode.py:640] {'best_train_loss': inf, 'best_valid_loss': inf, 'best_train_epoch': -1, 'best_valid_epoch': -1, 'batch_idx_train': 0, 'log_interval': 50, 'reset_interval': 200, 'valid_interval': 3000, 'feature_dim': 80, 'subsampling_factor': 4, 'warm_step': 2000, 'env_info': {'k2-version': '1.24.1', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': 'c51a0b9684442a88ee37f3ce0af686a04b66855b', 'k2-git-date': 'Mon May 1 21:38:03 2023', 'lhotse-version': '1.12.0.dev+git.891bad1.clean', 'torch-version': '1.10.0+cu102', 'torch-cuda-available': True, 'torch-cuda-version': '10.2', 'python-version': '3.8', 'icefall-git-branch': 'new-zipformer-add-ctc', 'icefall-git-sha1': '046b6cb6-clean', 'icefall-git-date': 'Fri Jun 2 15:51:49 2023', 'icefall-path': '/ceph-zw/workspace/zipformer/icefall_zipformer', 'k2-path': '/ceph-zw/workspace/k2/k2/k2/python/k2/__init__.py', 'lhotse-path': '/ceph-zw/workspace/share/lhotse/lhotse/__init__.py', 'hostname': 'de-74279-k2-train-1-1220091118-57c4d55446-mlpzc', 'IP address': '10.177.22.19'}, 'frame_shift_ms': 10, 'search_beam': 20, 'output_beam': 8, 'min_active_states': 30, 'max_active_states': 10000, 'use_double_scores': True, 'epoch': 40, 'iter': 0, 'avg': 16, 'use_averaged_model': True, 'exp_dir': PosixPath('zipformer/exp-ctc-rnnt'), 'bpe_model': 'data/lang_bpe_500/bpe.model', 'lang_dir': PosixPath('data/lang_bpe_500'), 'context_size': 2, 'decoding_method': 'whole-lattice-rescoring', 'num_paths': 100, 'nbest_scale': 1.0, 'hlg_scale': 0.6, 'lm_dir': PosixPath('data/my_lm'), 'num_encoder_layers': '2,2,3,4,3,2', 'downsampling_factor': '1,2,4,8,4,2', 'feedforward_dim': '512,768,1024,1536,1024,768', 'num_heads': '4,4,4,8,4,4', 'encoder_dim': '192,256,384,512,384,256', 'query_head_dim': '32', 'value_head_dim': '12', 'pos_head_dim': '4', 'pos_dim': 48, 'encoder_unmasked_dim': '192,192,256,256,256,192', 'cnn_module_kernel': '31,31,15,15,15,31', 'decoder_dim': 512, 'joiner_dim': 512, 'causal': False, 'chunk_size': '16,32,64,-1', 'left_context_frames': '64,128,256,-1', 'use_transducer': True, 'use_ctc': True, 'full_libri': True, 'mini_libri': False, 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 300, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'drop_last': True, 'return_cuts': True, 'num_workers': 2, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'input_strategy': 'PrecomputedFeatures', 'res_dir': PosixPath('zipformer/exp-ctc-rnnt/whole-lattice-rescoring'), 'suffix': 'epoch-40-avg-16-use-averaged-model'}
+ 2023-06-13 17:33:26,852 INFO [lexicon.py:168] Loading pre-compiled data/lang_bpe_500/Linv.pt
+ 2023-06-13 17:33:31,836 INFO [ctc_decode.py:701] Loading pre-compiled G_4_gram.pt
+ 2023-06-13 17:33:45,493 INFO [ctc_decode.py:718] About to create model
+ 2023-06-13 17:33:46,067 INFO [ctc_decode.py:785] Calculating the averaged model over epoch range from 24 (excluded) to 40
+ 2023-06-13 17:33:48,190 INFO [ctc_decode.py:802] Number of model parameters: 65805511
+ 2023-06-13 17:33:48,190 INFO [asr_datamodule.py:465] About to get test-clean cuts
+ 2023-06-13 17:33:48,193 INFO [asr_datamodule.py:472] About to get test-other cuts
+ 2023-06-13 17:33:49,550 INFO [ctc_decode.py:550] batch 0/?, cuts processed until now is 21
+ 2023-06-13 17:34:04,381 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([4.1588, 2.7219, 2.7286, 3.4695], device='cuda:0')
+ 2023-06-13 17:34:43,425 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:43,497 INFO [utils.py:561] [test-clean-lm_scale_0.1] %WER 2.51% [1318 / 52576, 228 ins, 66 del, 1024 sub ]
+ 2023-06-13 17:34:43,669 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:43,693 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:43,761 INFO [utils.py:561] [test-clean-lm_scale_0.2] %WER 2.47% [1299 / 52576, 216 ins, 67 del, 1016 sub ]
+ 2023-06-13 17:34:44,120 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:44,143 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:44,214 INFO [utils.py:561] [test-clean-lm_scale_0.3] %WER 2.42% [1272 / 52576, 203 ins, 71 del, 998 sub ]
+ 2023-06-13 17:34:44,391 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:44,413 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:44,482 INFO [utils.py:561] [test-clean-lm_scale_0.4] %WER 2.39% [1254 / 52576, 192 ins, 74 del, 988 sub ]
+ 2023-06-13 17:34:44,653 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:44,675 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:44,744 INFO [utils.py:561] [test-clean-lm_scale_0.5] %WER 2.37% [1245 / 52576, 184 ins, 81 del, 980 sub ]
+ 2023-06-13 17:34:44,920 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:44,943 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:45,011 INFO [utils.py:561] [test-clean-lm_scale_0.6] %WER 2.38% [1250 / 52576, 178 ins, 87 del, 985 sub ]
+ 2023-06-13 17:34:45,180 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:45,202 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:45,272 INFO [utils.py:561] [test-clean-lm_scale_0.7] %WER 2.39% [1259 / 52576, 171 ins, 98 del, 990 sub ]
+ 2023-06-13 17:34:45,440 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:45,462 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:45,530 INFO [utils.py:561] [test-clean-lm_scale_0.8] %WER 2.43% [1277 / 52576, 164 ins, 113 del, 1000 sub ]
+ 2023-06-13 17:34:45,698 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:45,720 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:45,790 INFO [utils.py:561] [test-clean-lm_scale_0.9] %WER 2.49% [1307 / 52576, 155 ins, 137 del, 1015 sub ]
+ 2023-06-13 17:34:45,961 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:45,983 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:46,202 INFO [utils.py:561] [test-clean-lm_scale_1.0] %WER 2.57% [1350 / 52576, 151 ins, 162 del, 1037 sub ]
+ 2023-06-13 17:34:46,373 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:46,396 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:46,464 INFO [utils.py:561] [test-clean-lm_scale_1.1] %WER 2.72% [1432 / 52576, 151 ins, 202 del, 1079 sub ]
+ 2023-06-13 17:34:46,635 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:46,657 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:46,726 INFO [utils.py:561] [test-clean-lm_scale_1.2] %WER 2.84% [1493 / 52576, 148 ins, 236 del, 1109 sub ]
+ 2023-06-13 17:34:46,898 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:46,920 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:46,988 INFO [utils.py:561] [test-clean-lm_scale_1.3] %WER 3.03% [1594 / 52576, 147 ins, 293 del, 1154 sub ]
+ 2023-06-13 17:34:47,159 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:47,181 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:47,250 INFO [utils.py:561] [test-clean-lm_scale_1.4] %WER 3.22% [1695 / 52576, 144 ins, 360 del, 1191 sub ]
+ 2023-06-13 17:34:47,421 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:47,443 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:47,513 INFO [utils.py:561] [test-clean-lm_scale_1.5] %WER 3.44% [1810 / 52576, 136 ins, 432 del, 1242 sub ]
+ 2023-06-13 17:34:47,683 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:47,705 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:47,923 INFO [utils.py:561] [test-clean-lm_scale_1.6] %WER 3.69% [1939 / 52576, 136 ins, 510 del, 1293 sub ]
+ 2023-06-13 17:34:48,094 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:48,117 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:48,185 INFO [utils.py:561] [test-clean-lm_scale_1.7] %WER 3.93% [2065 / 52576, 133 ins, 576 del, 1356 sub ]
+ 2023-06-13 17:34:48,360 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:48,382 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:48,451 INFO [utils.py:561] [test-clean-lm_scale_1.8] %WER 4.11% [2163 / 52576, 131 ins, 629 del, 1403 sub ]
+ 2023-06-13 17:34:48,621 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:48,644 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:48,711 INFO [utils.py:561] [test-clean-lm_scale_1.9] %WER 4.35% [2285 / 52576, 129 ins, 701 del, 1455 sub ]
+ 2023-06-13 17:34:48,889 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:48,911 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:48,980 INFO [utils.py:561] [test-clean-lm_scale_2.0] %WER 4.58% [2409 / 52576, 130 ins, 767 del, 1512 sub ]
+ 2023-06-13 17:34:49,152 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-clean-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:34:49,153 INFO [ctc_decode.py:587]
+ For test-clean, WER of different settings are:
+ lm_scale_0.5	2.37	best for test-clean
+ lm_scale_0.6	2.38
+ lm_scale_0.4	2.39
+ lm_scale_0.7	2.39
+ lm_scale_0.3	2.42
+ lm_scale_0.8	2.43
+ lm_scale_0.2	2.47
+ lm_scale_0.9	2.49
+ lm_scale_0.1	2.51
+ lm_scale_1.0	2.57
+ lm_scale_1.1	2.72
+ lm_scale_1.2	2.84
+ lm_scale_1.3	3.03
+ lm_scale_1.4	3.22
+ lm_scale_1.5	3.44
+ lm_scale_1.6	3.69
+ lm_scale_1.7	3.93
+ lm_scale_1.8	4.11
+ lm_scale_1.9	4.35
+ lm_scale_2.0	4.58
+
+ 2023-06-13 17:34:50,631 INFO [ctc_decode.py:550] batch 0/?, cuts processed until now is 26
+ 2023-06-13 17:35:10,848 INFO [decode.py:893] Caught exception:
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 23.78 GiB already allocated; 181.69 MiB free; 30.20 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation.  See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
+ Exception raised from malloc at ../c10/cuda/CUDACachingAllocator.cpp:536 (most recent call first):
+ frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x42 (0x7f34d9c0fd62 in /ceph-jb/yaozengwei/env/k2_icefall/lib/python3.8/site-packages/torch/lib/libc10.so)
+ frame #1: <unknown function> + 0x25358 (0x7f34d9e7b358 in /ceph-jb/yaozengwei/env/k2_icefall/lib/python3.8/site-packages/torch/lib/libc10_cuda.so)
+ frame #2: <unknown function> + 0x25d72 (0x7f34d9e7bd72 in /ceph-jb/yaozengwei/env/k2_icefall/lib/python3.8/site-packages/torch/lib/libc10_cuda.so)
+ frame #3: <unknown function> + 0x261a2 (0x7f34d9e7c1a2 in /ceph-jb/yaozengwei/env/k2_icefall/lib/python3.8/site-packages/torch/lib/libc10_cuda.so)
+ frame #4: k2::PytorchCudaContext::Allocate(unsigned long, void**) + 0x32 (0x7f34328d10d2 in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #5: k2::NewRegion(std::shared_ptr<k2::Context>, unsigned long) + 0x112 (0x7f34325f6392 in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #6: k2::Hash::Hash(std::shared_ptr<k2::Context>, int, int, int) + 0x2f7 (0x7f34326cb3a7 in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #7: k2::Hash::Resize(int, int, int, bool) + 0x1b4 (0x7f34326c0eb4 in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #8: k2::DeviceIntersector::ForwardSortedA() + 0x53e (0x7f34326f5fae in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #9: k2::IntersectDevice(k2::Ragged<k2::Arc>&, int, k2::Ragged<k2::Arc>&, int, k2::Array1<int> const&, k2::Array1<int>*, k2::Array1<int>*, bool) + 0x4cd (0x7f34326d81fd in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #10: <unknown function> + 0x75936 (0x7f3433715936 in /ceph-zw/workspace/k2/k2/build_release/lib/_k2.cpython-38-x86_64-linux-gnu.so)
+ frame #11: <unknown function> + 0x377ec (0x7f34336d77ec in /ceph-zw/workspace/k2/k2/build_release/lib/_k2.cpython-38-x86_64-linux-gnu.so)
+ <omitting python frames>
+ frame #34: python3() [0x662c2e]
+ frame #39: __libc_start_main + 0xe7 (0x7f353b2a7bf7 in /lib/x86_64-linux-gnu/libc.so.6)
+
+
+ 2023-06-13 17:35:10,848 INFO [decode.py:897] num_arcs before pruning: 262949
+ 2023-06-13 17:35:10,848 INFO [decode.py:898] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
+ 2023-06-13 17:35:10,857 INFO [decode.py:909] num_arcs after pruning: 45772
+ 2023-06-13 17:35:36,889 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([3.4643, 2.3418, 2.5677, 2.0390], device='cuda:0')
+ 2023-06-13 17:35:37,983 INFO [decode.py:893] Caught exception:
+ CUDA out of memory. Tried to allocate 2.00 GiB (GPU 0; 31.75 GiB total capacity; 29.74 GiB already allocated; 27.69 MiB free; 30.35 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation.  See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
+ Exception raised from malloc at ../c10/cuda/CUDACachingAllocator.cpp:536 (most recent call first):
+ frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x42 (0x7f34d9c0fd62 in /ceph-jb/yaozengwei/env/k2_icefall/lib/python3.8/site-packages/torch/lib/libc10.so)
+ frame #1: <unknown function> + 0x25358 (0x7f34d9e7b358 in /ceph-jb/yaozengwei/env/k2_icefall/lib/python3.8/site-packages/torch/lib/libc10_cuda.so)
+ frame #2: <unknown function> + 0x25d72 (0x7f34d9e7bd72 in /ceph-jb/yaozengwei/env/k2_icefall/lib/python3.8/site-packages/torch/lib/libc10_cuda.so)
+ frame #3: <unknown function> + 0x261a2 (0x7f34d9e7c1a2 in /ceph-jb/yaozengwei/env/k2_icefall/lib/python3.8/site-packages/torch/lib/libc10_cuda.so)
+ frame #4: k2::PytorchCudaContext::Allocate(unsigned long, void**) + 0x32 (0x7f34328d10d2 in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #5: k2::Region::Extend(unsigned long) + 0x7a (0x7f34326dcaea in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #6: k2::Array1<k2::intersect_internal::StateInfo>::Resize(int, bool) + 0xb5 (0x7f34326dd435 in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #7: void k2::DeviceIntersector::ForwardSortedAOneIter<k2::Hash::PackedAccessor>(int, k2::Array1<int> const&, k2::Array1<int> const&, k2::Array1<int> const&, k2::Array1<int> const&, int) + 0x9da (0x7f34326f4d8a in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #8: k2::DeviceIntersector::ForwardSortedA() + 0xadb (0x7f34326f654b in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #9: k2::IntersectDevice(k2::Ragged<k2::Arc>&, int, k2::Ragged<k2::Arc>&, int, k2::Array1<int> const&, k2::Array1<int>*, k2::Array1<int>*, bool) + 0x4cd (0x7f34326d81fd in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #10: <unknown function> + 0x75936 (0x7f3433715936 in /ceph-zw/workspace/k2/k2/build_release/lib/_k2.cpython-38-x86_64-linux-gnu.so)
+ frame #11: <unknown function> + 0x377ec (0x7f34336d77ec in /ceph-zw/workspace/k2/k2/build_release/lib/_k2.cpython-38-x86_64-linux-gnu.so)
+ <omitting python frames>
+ frame #34: python3() [0x662c2e]
+ frame #39: __libc_start_main + 0xe7 (0x7f353b2a7bf7 in /lib/x86_64-linux-gnu/libc.so.6)
+
+
+ 2023-06-13 17:35:37,983 INFO [decode.py:897] num_arcs before pruning: 305119
+ 2023-06-13 17:35:37,983 INFO [decode.py:898] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
+ 2023-06-13 17:35:38,002 INFO [decode.py:909] num_arcs after pruning: 45058
+ 2023-06-13 17:35:49,399 INFO [zipformer.py:1711] name=None, attn_weights_entropy = tensor([4.0697, 2.1705, 3.1589, 1.7420], device='cuda:0')
+ 2023-06-13 17:35:51,043 INFO [decode.py:893] Caught exception:
+ CUDA out of memory. Tried to allocate 2.00 GiB (GPU 0; 31.75 GiB total capacity; 28.59 GiB already allocated; 813.69 MiB free; 29.58 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation.  See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
+ Exception raised from malloc at ../c10/cuda/CUDACachingAllocator.cpp:536 (most recent call first):
+ frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x42 (0x7f34d9c0fd62 in /ceph-jb/yaozengwei/env/k2_icefall/lib/python3.8/site-packages/torch/lib/libc10.so)
+ frame #1: <unknown function> + 0x25358 (0x7f34d9e7b358 in /ceph-jb/yaozengwei/env/k2_icefall/lib/python3.8/site-packages/torch/lib/libc10_cuda.so)
+ frame #2: <unknown function> + 0x25d72 (0x7f34d9e7bd72 in /ceph-jb/yaozengwei/env/k2_icefall/lib/python3.8/site-packages/torch/lib/libc10_cuda.so)
+ frame #3: <unknown function> + 0x261a2 (0x7f34d9e7c1a2 in /ceph-jb/yaozengwei/env/k2_icefall/lib/python3.8/site-packages/torch/lib/libc10_cuda.so)
+ frame #4: k2::PytorchCudaContext::Allocate(unsigned long, void**) + 0x32 (0x7f34328d10d2 in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #5: k2::Region::Extend(unsigned long) + 0x7a (0x7f34326dcaea in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #6: k2::Array1<k2::intersect_internal::StateInfo>::Resize(int, bool) + 0xb5 (0x7f34326dd435 in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #7: void k2::DeviceIntersector::ForwardSortedAOneIter<k2::Hash::PackedAccessor>(int, k2::Array1<int> const&, k2::Array1<int> const&, k2::Array1<int> const&, k2::Array1<int> const&, int) + 0x9da (0x7f34326f4d8a in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #8: k2::DeviceIntersector::ForwardSortedA() + 0xadb (0x7f34326f654b in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #9: k2::IntersectDevice(k2::Ragged<k2::Arc>&, int, k2::Ragged<k2::Arc>&, int, k2::Array1<int> const&, k2::Array1<int>*, k2::Array1<int>*, bool) + 0x4cd (0x7f34326d81fd in /ceph-zw/workspace/k2/k2/build_release/lib/libk2context.so)
+ frame #10: <unknown function> + 0x75936 (0x7f3433715936 in /ceph-zw/workspace/k2/k2/build_release/lib/_k2.cpython-38-x86_64-linux-gnu.so)
+ frame #11: <unknown function> + 0x377ec (0x7f34336d77ec in /ceph-zw/workspace/k2/k2/build_release/lib/_k2.cpython-38-x86_64-linux-gnu.so)
+ <omitting python frames>
+ frame #34: python3() [0x662c2e]
+ frame #39: __libc_start_main + 0xe7 (0x7f353b2a7bf7 in /lib/x86_64-linux-gnu/libc.so.6)
+
+
+ 2023-06-13 17:35:51,043 INFO [decode.py:897] num_arcs before pruning: 347421
+ 2023-06-13 17:35:51,043 INFO [decode.py:898] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
+ 2023-06-13 17:35:51,064 INFO [decode.py:909] num_arcs after pruning: 22726
+ 2023-06-13 17:35:52,877 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:52,951 INFO [utils.py:561] [test-other-lm_scale_0.1] %WER 5.29% [2768 / 52343, 385 ins, 176 del, 2207 sub ]
+ 2023-06-13 17:35:53,126 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:53,150 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:53,226 INFO [utils.py:561] [test-other-lm_scale_0.2] %WER 5.13% [2686 / 52343, 365 ins, 176 del, 2145 sub ]
+ 2023-06-13 17:35:53,401 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:53,424 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:53,493 INFO [utils.py:561] [test-other-lm_scale_0.3] %WER 5.04% [2639 / 52343, 346 ins, 185 del, 2108 sub ]
+ 2023-06-13 17:35:53,666 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:53,688 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:53,758 INFO [utils.py:561] [test-other-lm_scale_0.4] %WER 4.98% [2608 / 52343, 325 ins, 196 del, 2087 sub ]
+ 2023-06-13 17:35:53,933 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:53,955 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:54,025 INFO [utils.py:561] [test-other-lm_scale_0.5] %WER 4.93% [2581 / 52343, 315 ins, 212 del, 2054 sub ]
+ 2023-06-13 17:35:54,198 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:54,221 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:54,291 INFO [utils.py:561] [test-other-lm_scale_0.6] %WER 4.91% [2568 / 52343, 290 ins, 231 del, 2047 sub ]
+ 2023-06-13 17:35:54,464 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:54,487 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:54,557 INFO [utils.py:561] [test-other-lm_scale_0.7] %WER 4.88% [2556 / 52343, 272 ins, 256 del, 2028 sub ]
+ 2023-06-13 17:35:54,728 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:54,751 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:54,821 INFO [utils.py:561] [test-other-lm_scale_0.8] %WER 4.97% [2600 / 52343, 259 ins, 303 del, 2038 sub ]
+ 2023-06-13 17:35:54,995 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:55,017 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:55,086 INFO [utils.py:561] [test-other-lm_scale_0.9] %WER 5.07% [2652 / 52343, 251 ins, 347 del, 2054 sub ]
+ 2023-06-13 17:35:55,260 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:55,283 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:55,353 INFO [utils.py:561] [test-other-lm_scale_1.0] %WER 5.16% [2703 / 52343, 239 ins, 400 del, 2064 sub ]
+ 2023-06-13 17:35:55,679 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:55,702 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:55,772 INFO [utils.py:561] [test-other-lm_scale_1.1] %WER 5.36% [2805 / 52343, 230 ins, 461 del, 2114 sub ]
+ 2023-06-13 17:35:55,945 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:55,968 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:56,039 INFO [utils.py:561] [test-other-lm_scale_1.2] %WER 5.60% [2933 / 52343, 220 ins, 570 del, 2143 sub ]
+ 2023-06-13 17:35:56,214 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:56,237 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:56,313 INFO [utils.py:561] [test-other-lm_scale_1.3] %WER 5.88% [3079 / 52343, 214 ins, 691 del, 2174 sub ]
+ 2023-06-13 17:35:56,489 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:56,512 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:56,584 INFO [utils.py:561] [test-other-lm_scale_1.4] %WER 6.16% [3226 / 52343, 214 ins, 780 del, 2232 sub ]
+ 2023-06-13 17:35:56,759 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:56,782 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:56,854 INFO [utils.py:561] [test-other-lm_scale_1.5] %WER 6.52% [3411 / 52343, 215 ins, 898 del, 2298 sub ]
+ 2023-06-13 17:35:57,029 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:57,052 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:57,120 INFO [utils.py:561] [test-other-lm_scale_1.6] %WER 6.80% [3559 / 52343, 206 ins, 1009 del, 2344 sub ]
+ 2023-06-13 17:35:57,294 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:57,317 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:57,386 INFO [utils.py:561] [test-other-lm_scale_1.7] %WER 7.13% [3731 / 52343, 204 ins, 1139 del, 2388 sub ]
+ 2023-06-13 17:35:57,712 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:57,734 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:57,805 INFO [utils.py:561] [test-other-lm_scale_1.8] %WER 7.42% [3886 / 52343, 198 ins, 1248 del, 2440 sub ]
+ 2023-06-13 17:35:57,980 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:58,003 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:58,072 INFO [utils.py:561] [test-other-lm_scale_1.9] %WER 7.75% [4059 / 52343, 194 ins, 1359 del, 2506 sub ]
+ 2023-06-13 17:35:58,245 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:58,268 INFO [ctc_decode.py:564] The transcripts are stored in zipformer/exp-ctc-rnnt/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:58,338 INFO [utils.py:561] [test-other-lm_scale_2.0] %WER 8.04% [4206 / 52343, 190 ins, 1459 del, 2557 sub ]
+ 2023-06-13 17:35:58,511 INFO [ctc_decode.py:573] Wrote detailed error stats to zipformer/exp-ctc-rnnt/whole-lattice-rescoring/errs-test-other-epoch-40-avg-16-use-averaged-model.txt
+ 2023-06-13 17:35:58,512 INFO [ctc_decode.py:587]
+ For test-other, WER of different settings are:
+ lm_scale_0.7	4.88	best for test-other
+ lm_scale_0.6	4.91
+ lm_scale_0.5	4.93
+ lm_scale_0.8	4.97
+ lm_scale_0.4	4.98
+ lm_scale_0.3	5.04
+ lm_scale_0.9	5.07
+ lm_scale_0.2	5.13
+ lm_scale_1.0	5.16
+ lm_scale_0.1	5.29
+ lm_scale_1.1	5.36
+ lm_scale_1.2	5.6
+ lm_scale_1.3	5.88
+ lm_scale_1.4	6.16
+ lm_scale_1.5	6.52
+ lm_scale_1.6	6.8
+ lm_scale_1.7	7.13
+ lm_scale_1.8	7.42
+ lm_scale_1.9	7.75
+ lm_scale_2.0	8.04
+
+ 2023-06-13 17:35:58,512 INFO [ctc_decode.py:835] Done!
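
The three "Caught exception / CUDA out of memory" blocks in this log are expected: when intersecting the inverted lattice with the 4-gram G overflows GPU memory, the decoder prunes the lattice on arc posteriors and retries, which is why every OOM is immediately followed by "num_arcs before pruning" and "num_arcs after pruning" lines. A hedged sketch of that prune-and-retry pattern, built on the k2 primitives visible in the tracebacks (the wrapper name and threshold schedule are assumptions for illustration, not the decode.py source):

import logging
import k2

def intersect_with_prune_retry(G, inv_lattice, b_to_a_map,
                               prune_thresholds=(1e-9, 1e-8, 1e-7)):
    # Try the full intersection first; on CUDA OOM, prune low-posterior
    # arcs from inv_lattice and retry with a coarser threshold each time.
    for th in prune_thresholds:
        try:
            return k2.intersect_device(
                G, inv_lattice, b_to_a_map=b_to_a_map, sorted_match_a=True
            )
        except RuntimeError as e:
            logging.info(f"Caught exception:\n{e}\n")
            logging.info(f"num_arcs before pruning: {inv_lattice.arcs.num_elements()}")
            inv_lattice = k2.prune_on_arc_post(inv_lattice, th, True)
            logging.info(f"num_arcs after pruning: {inv_lattice.arcs.num_elements()}")
    raise RuntimeError("intersection still out of memory after pruning")
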
decoding_results/whole-lattice-rescoring/recogs-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/whole-lattice-rescoring/recogs-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
The diff for this file is too large to render. See raw diff
 
decoding_results/whole-lattice-rescoring/wer-summary-test-clean-epoch-40-avg-16-use-averaged-model.txt ADDED
@@ -0,0 +1,21 @@
+ settings	WER
+ lm_scale_0.5	2.37
+ lm_scale_0.6	2.38
+ lm_scale_0.4	2.39
+ lm_scale_0.7	2.39
+ lm_scale_0.3	2.42
+ lm_scale_0.8	2.43
+ lm_scale_0.2	2.47
+ lm_scale_0.9	2.49
+ lm_scale_0.1	2.51
+ lm_scale_1.0	2.57
+ lm_scale_1.1	2.72
+ lm_scale_1.2	2.84
+ lm_scale_1.3	3.03
+ lm_scale_1.4	3.22
+ lm_scale_1.5	3.44
+ lm_scale_1.6	3.69
+ lm_scale_1.7	3.93
+ lm_scale_1.8	4.11
+ lm_scale_1.9	4.35
+ lm_scale_2.0 4.58
decoding_results/whole-lattice-rescoring/wer-summary-test-other-epoch-40-avg-16-use-averaged-model.txt ADDED
@@ -0,0 +1,21 @@
+ settings	WER
+ lm_scale_0.7	4.88
+ lm_scale_0.6	4.91
+ lm_scale_0.5	4.93
+ lm_scale_0.8	4.97
+ lm_scale_0.4	4.98
+ lm_scale_0.3	5.04
+ lm_scale_0.9	5.07
+ lm_scale_0.2	5.13
+ lm_scale_1.0	5.16
+ lm_scale_0.1	5.29
+ lm_scale_1.1	5.36
+ lm_scale_1.2	5.6
+ lm_scale_1.3	5.88
+ lm_scale_1.4	6.16
+ lm_scale_1.5	6.52
+ lm_scale_1.6	6.8
+ lm_scale_1.7	7.13
+ lm_scale_1.8	7.42
+ lm_scale_1.9	7.75
+ lm_scale_2.0 8.04
exp/decode.sh ADDED
@@ -0,0 +1,16 @@
+ export CUDA_VISIBLE_DEVICES="0"
+ for m in ctc-decoding 1best nbest nbest-rescoring whole-lattice-rescoring; do
+ ./zipformer/ctc_decode.py \
+ --epoch 40 \
+ --avg 16 \
+ --exp-dir zipformer/exp-ctc-rnnt \
+ --use-transducer 1 \
+ --use-ctc 1 \
+ --max-duration 300 \
+ --causal 0 \
+ --num-paths 100 \
+ --nbest-scale 1.0 \
+ --hlg-scale 0.6 \
+ --decoding-method $m
+ done
+
exp/epoch-40.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fee348148cdb5910ad116ad0fcfa6469f5976e0b1552d9b1899a3b3093c18d39
+ size 1053871436
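
exp/epoch-40.pt is a Git LFS pointer to the raw training checkpoint; unlike the TorchScript export that follows, using it requires the icefall model code to rebuild the network first. A hedged inspection sketch (the "model" key follows the usual icefall checkpoint layout, which is an assumption worth verifying per recipe; the model-construction step is deliberately elided):

import torch

# Inspect the checkpoint without any model code; in icefall-style
# checkpoints the weights live under the "model" key.
ckpt = torch.load("exp/epoch-40.pt", map_location="cpu")
print(sorted(ckpt.keys()))
# model = build_zipformer(params)      # hypothetical: construct the model first
# model.load_state_dict(ckpt["model"])
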
exp/export.sh ADDED
@@ -0,0 +1,10 @@
+ ./zipformer/export.py \
+ --exp-dir ./zipformer/exp-ctc-rnnt \
+ --use-transducer 1 \
+ --use-ctc 1 \
+ --bpe-model data/lang_bpe_500/bpe.model \
+ --epoch 40 \
+ --avg 16 \
+ --jit 1
+
+
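
Because export.sh passes --jit 1, the result is the TorchScript archive exp/jit_script.pt below, which can be loaded for inference without the icefall training code. A minimal usage sketch; the scripted model's call signature and expected feature tensors depend on the recipe and are not shown here:

import torch

# Load the exported TorchScript module on CPU (or pass a CUDA device).
model = torch.jit.load("exp/jit_script.pt", map_location="cpu")
model.eval()
# The module bundles the averaged zipformer (epoch 40, avg 16) with both
# the transducer and CTC branches enabled at export time.
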
exp/jit_script.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a877a5d18386311fe069dec68bb7e0ca956129b90f8a4ce4e956dad1ac98e154
+ size 265955146
exp/log/log-train-2023-06-01-20-30-01-0 ADDED
The diff for this file is too large to render. See raw diff