wgb14 committed on
Commit
fe00c53
1 Parent(s): 280882c

upload model, lexicon and log

.gitattributes CHANGED
@@ -26,3 +26,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
26
  *.zip filter=lfs diff=lfs merge=lfs -text
27
  *.zstandard filter=lfs diff=lfs merge=lfs -text
28
  *tfevents* filter=lfs diff=lfs merge=lfs -text
29
+ *.txt filter=lfs diff=lfs merge=lfs -text
data/lang_bpe_500/HLG.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:456cf93a8085c04b5562eaa0b7ea93677e261afa3fe67f39f39e3b150d73402b
3
+ size 504576927
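
Each of the ADDED binaries in this commit is stored through Git LFS, so the diff shows only the three-line pointer (spec version, sha256 oid, and size in bytes) rather than the payload itself. As a minimal sketch (not part of this repo; paths are illustrative), a downloaded object could be checked against such a pointer like this:

import hashlib
import os

def verify_lfs_object(pointer_path, object_path):
    # An LFS pointer has three "key value" lines: version, oid, size.
    fields = dict(
        line.strip().split(" ", 1) for line in open(pointer_path) if line.strip()
    )
    expected_oid = fields["oid"].split(":", 1)[1]  # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])
    if os.path.getsize(object_path) != expected_size:
        return False
    h = hashlib.sha256()
    with open(object_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest() == expected_oid

For example, HLG.pt should hash to the oid above (456cf93a…) and weigh in at 504576927 bytes (~481 MiB).
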
data/lang_bpe_500/L.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be1fe4e2d70b3bec970a7bc737895e2d86f9a847fbf1ab9b3c013c5d9d5fe54f
3
+ size 45300967
data/lang_bpe_500/L_disambig.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b5b54665ac47e1b999e2ecd43ac0d56c662bb65fc7c0926d8c602c15ff19bac
3
+ size 46950503
data/lang_bpe_500/Linv.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f474dfccce72cd2b0e11c98f808212ccac396025d9ea9fc396ebd607b66dae53
3
+ size 45300967
data/lang_bpe_500/P.arpa ADDED
The diff for this file is too large to render. See raw diff
 
data/lang_bpe_500/P.fst.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e29392f10149001b141ed7eb71f6ef2d4415cee80b9de508f5d57faf7f1a0d76
3
+ size 3857427
data/lang_bpe_500/bpe.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bcae393dbc5611be5ffa4c7ae0841558978a5a4f484008cb9dff3a2cc97ebe01
3
+ size 244836
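
bpe.model is a SentencePiece model file; note that its oid and size are identical to unigram_500.model further down, so the two are byte-for-byte copies of the same 500-piece unigram model. A hedged sketch of loading it with the sentencepiece package (the transcripts in this recipe are uppercase, matching the vocabulary):

import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="data/lang_bpe_500/bpe.model")
print(sp.get_piece_size())                     # expected: 500
print(sp.encode("HELLO WORLD", out_type=str))  # pieces; the exact split depends on the model
print(sp.encode("HELLO WORLD", out_type=int))  # corresponding token ids
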
data/lang_bpe_500/lexicon.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d938da3733725009af4758207ebec5f12335d4f84bf42fc7757140cd6edc87bd
3
+ size 11120754
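
lexicon.txt maps each word to its BPE piece sequence and is the source for L.pt; the lexicon_disambig.txt entry that follows is the same lexicon with #1, #2, … disambiguation symbols appended where one entry is a prefix of another. Assuming the usual icefall whitespace-separated format ("WORD piece1 piece2 …"), a small reader could look like:

def read_lexicon(path):
    # Returns a list of (word, [pieces]) pairs, preserving duplicate entries.
    entries = []
    with open(path, encoding="utf-8") as f:
        for line in f:
            parts = line.split()
            if parts:
                entries.append((parts[0], parts[1:]))
    return entries
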
data/lang_bpe_500/lexicon_disambig.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7d4dfa97c8e771cb39ba9d775559854238a858e25ede5ca5031f6d20c111a6b7
3
+ size 11368176
data/lang_bpe_500/tokens.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ff15a58ee60af64de84c98e3d6a1cc2318f384bc7c84ddb8dc277ea4f3ac4f5
3
+ size 5019
data/lang_bpe_500/transcript_tokens.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e1ab14de896b234d6fbc317da7760214907bd92656c605bd8b121c14e8a09d2
3
+ size 990859978
data/lang_bpe_500/transcript_words.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0146610da4fcf115995dac855f9826f1c918993e8dc022b0b33dd210fd2ac682
3
+ size 577903494
data/lang_bpe_500/unigram_500.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bcae393dbc5611be5ffa4c7ae0841558978a5a4f484008cb9dff3a2cc97ebe01
3
+ size 244836
data/lang_bpe_500/unigram_500.vocab ADDED
@@ -0,0 +1,500 @@
1
+ <blk> 0
2
+ <sos/eos> 0
3
+ <unk> 0
4
+ S -2.91936
5
+ T -3.62226
6
+ ▁THE -3.63907
7
+ ▁A -3.91998
8
+ E -3.9774
9
+ ▁AND -4.03888
10
+ ▁TO -4.11528
11
+ N -4.13926
12
+ D -4.23828
13
+ ▁OF -4.26126
14
+ ' -4.32705
15
+ ING -4.36266
16
+ ▁I -4.43026
17
+ Y -4.47664
18
+ ▁IN -4.5039
19
+ ED -4.56388
20
+ ▁THAT -4.56493
21
+ ▁ -4.61161
22
+ P -4.68867
23
+ R -4.69194
24
+ ▁YOU -4.70013
25
+ M -4.73468
26
+ RE -4.77896
27
+ ER -4.79222
28
+ C -4.80441
29
+ O -4.84437
30
+ ▁IT -4.84599
31
+ L -4.92876
32
+ A -5.01304
33
+ U -5.04339
34
+ G -5.07981
35
+ ▁WE -5.15716
36
+ ▁IS -5.16592
37
+ ▁SO -5.20473
38
+ AL -5.20761
39
+ I -5.20892
40
+ ▁S -5.21941
41
+ ▁RE -5.25961
42
+ AR -5.26464
43
+ B -5.27126
44
+ ▁FOR -5.28788
45
+ ▁C -5.28918
46
+ ▁BE -5.30209
47
+ LE -5.3038
48
+ F -5.31727
49
+ W -5.3626
50
+ ▁E -5.39058
51
+ ▁HE -5.3984
52
+ LL -5.39936
53
+ ▁WAS -5.41947
54
+ LY -5.41982
55
+ OR -5.451
56
+ IN -5.51245
57
+ ▁F -5.51686
58
+ VE -5.54551
59
+ ▁THIS -5.54604
60
+ TH -5.55438
61
+ K -5.59782
62
+ ▁ON -5.60492
63
+ IT -5.63051
64
+ ▁B -5.64315
65
+ ▁WITH -5.6692
66
+ ▁BUT -5.69372
67
+ EN -5.69464
68
+ CE -5.70105
69
+ RI -5.74079
70
+ ▁DO -5.75023
71
+ UR -5.75089
72
+ ▁HAVE -5.75784
73
+ ▁DE -5.78198
74
+ ▁ME -5.78744
75
+ ▁T -5.79218
76
+ ENT -5.84937
77
+ CH -5.86931
78
+ ▁THEY -5.8741
79
+ ▁NOT -5.88022
80
+ ES -5.90245
81
+ V -5.90528
82
+ ▁AS -5.90745
83
+ RA -5.91436
84
+ ▁P -5.91784
85
+ ON -5.91788
86
+ TER -5.92587
87
+ ▁ARE -5.94781
88
+ ▁WHAT -5.95664
89
+ IC -5.96785
90
+ ▁ST -5.97911
91
+ ▁LIKE -6.02232
92
+ ATION -6.04962
93
+ ▁OR -6.05223
94
+ ▁CA -6.05876
95
+ ▁AT -6.0815
96
+ H -6.08965
97
+ ▁KNOW -6.09389
98
+ ▁G -6.09914
99
+ AN -6.10082
100
+ ▁CON -6.1086
101
+ IL -6.12259
102
+ ND -6.12748
103
+ RO -6.13926
104
+ ▁HIS -6.1401
105
+ ▁CAN -6.14445
106
+ ▁ALL -6.14626
107
+ TE -6.15654
108
+ ▁THERE -6.16263
109
+ ▁SU -6.18963
110
+ ▁MO -6.19822
111
+ ▁MA -6.21735
112
+ LI -6.24568
113
+ ▁ONE -6.25177
114
+ ▁ABOUT -6.253
115
+ LA -6.25309
116
+ ▁CO -6.25588
117
+ - -6.26085
118
+ ▁MY -6.27134
119
+ ▁HAD -6.2739
120
+ CK -6.27737
121
+ NG -6.2891
122
+ ▁NO -6.29472
123
+ MENT -6.30738
124
+ AD -6.31071
125
+ LO -6.31339
126
+ ME -6.3139
127
+ ▁AN -6.31782
128
+ ▁FROM -6.3267
129
+ NE -6.32822
130
+ ▁IF -6.3309
131
+ VER -6.33734
132
+ ▁JUST -6.34705
133
+ ▁PRO -6.38157
134
+ ION -6.38203
135
+ ▁PA -6.38311
136
+ ▁WHO -6.389
137
+ ▁SE -6.39145
138
+ EL -6.39577
139
+ IR -6.39902
140
+ ▁US -6.40366
141
+ ▁UP -6.40929
142
+ ▁YOUR -6.41111
143
+ CI -6.41571
144
+ RY -6.42878
145
+ ▁GO -6.43225
146
+ ▁SHE -6.44576
147
+ ▁LE -6.45467
148
+ ▁OUT -6.4618
149
+ ▁PO -6.46959
150
+ ▁HO -6.47283
151
+ ATE -6.48248
152
+ ▁BO -6.48598
153
+ ▁BY -6.48743
154
+ ▁FA -6.48779
155
+ ▁MI -6.48972
156
+ AS -6.49552
157
+ MP -6.50825
158
+ ▁HER -6.51444
159
+ VI -6.51584
160
+ ▁THINK -6.51821
161
+ ▁SOME -6.51966
162
+ ▁WHEN -6.53955
163
+ ▁AH -6.54789
164
+ ▁PEOPLE -6.56579
165
+ IG -6.56619
166
+ ▁WA -6.58446
167
+ ▁TE -6.58708
168
+ ▁LA -6.5924
169
+ ▁WERE -6.59558
170
+ ▁LI -6.59617
171
+ ▁WOULD -6.60333
172
+ ▁SEE -6.60421
173
+ ▁WHICH -6.60769
174
+ DE -6.61146
175
+ GE -6.61214
176
+ ▁K -6.61725
177
+ IGHT -6.63987
178
+ ▁HA -6.64097
179
+ ▁OUR -6.67441
180
+ UN -6.67953
181
+ ▁HOW -6.68709
182
+ ▁GET -6.6874
183
+ IS -6.69453
184
+ UT -6.70334
185
+ Z -6.70922
186
+ CO -6.71316
187
+ ET -6.71592
188
+ UL -6.71664
189
+ IES -6.72788
190
+ IVE -6.73038
191
+ AT -6.73107
192
+ ▁O -6.73302
193
+ ▁DON -6.73526
194
+ LU -6.73936
195
+ ▁TIME -6.74787
196
+ ▁WILL -6.75714
197
+ ▁MORE -6.75772
198
+ ▁SP -6.75795
199
+ ▁NOW -6.76281
200
+ RU -6.76441
201
+ ▁THEIR -6.76701
202
+ ▁UN -6.77337
203
+ ITY -6.78846
204
+ OL -6.78991
205
+ X -6.79216
206
+ TI -6.79565
207
+ US -6.79815
208
+ ▁VERY -6.81318
209
+ TION -6.81516
210
+ ▁FI -6.81739
211
+ ▁SAY -6.81764
212
+ ▁BECAUSE -6.83879
213
+ ▁EX -6.83974
214
+ ▁RO -6.84322
215
+ ERS -6.8685
216
+ IST -6.87091
217
+ ▁DA -6.87303
218
+ TING -6.87408
219
+ ▁EN -6.88514
220
+ OM -6.88529
221
+ ▁BA -6.8895
222
+ ▁BEEN -6.89199
223
+ ▁LO -6.89575
224
+ ▁UM -6.89988
225
+ AGE -6.9046
226
+ ABLE -6.90556
227
+ ▁WO -6.90762
228
+ ▁RA -6.90807
229
+ ▁OTHER -6.92762
230
+ ▁REALLY -6.92772
231
+ ENCE -6.93464
232
+ ▁GOING -6.93644
233
+ ▁HIM -6.93782
234
+ ▁HAS -6.9382
235
+ ▁THEM -6.93852
236
+ ▁DIS -6.94051
237
+ ▁WANT -6.94824
238
+ ID -6.95197
239
+ TA -6.95466
240
+ ▁LOOK -6.95859
241
+ KE -6.96683
242
+ ▁DID -6.96973
243
+ ▁SA -6.97504
244
+ ▁VI -6.97815
245
+ ▁SAID -6.98116
246
+ ▁RIGHT -6.98233
247
+ ▁THESE -6.98239
248
+ ▁WORK -6.98267
249
+ ▁COM -7.00591
250
+ ALLY -7.02101
251
+ FF -7.02191
252
+ QU -7.02982
253
+ AC -7.032
254
+ ▁DR -7.03706
255
+ ▁WAY -7.03766
256
+ ▁INTO -7.04973
257
+ MO -7.05136
258
+ TED -7.05518
259
+ EST -7.06036
260
+ ▁HERE -7.06279
261
+ OK -7.06659
262
+ ▁COULD -7.07484
263
+ ▁WELL -7.07681
264
+ MA -7.0798
265
+ ▁PRE -7.08375
266
+ ▁DI -7.08411
267
+ MAN -7.09183
268
+ ▁COMP -7.09251
269
+ ▁THEN -7.09311
270
+ IM -7.09454
271
+ ▁PER -7.09471
272
+ ▁NA -7.09814
273
+ ▁WHERE -7.0995
274
+ ▁TWO -7.10374
275
+ ▁WI -7.10433
276
+ ▁FE -7.12258
277
+ INE -7.13542
278
+ ▁ANY -7.13685
279
+ TURE -7.13787
280
+ ▁OVER -7.13806
281
+ BO -7.1436
282
+ ACH -7.15075
283
+ OW -7.16294
284
+ ▁MAKE -7.16967
285
+ ▁TRA -7.17924
286
+ HE -7.19414
287
+ UND -7.19438
288
+ ▁EVEN -7.1973
289
+ ANCE -7.19936
290
+ ▁YEAR -7.20074
291
+ HO -7.20126
292
+ AM -7.20803
293
+ ▁CHA -7.20944
294
+ ▁BACK -7.21247
295
+ VO -7.22034
296
+ ANT -7.24148
297
+ DI -7.24414
298
+ ▁ALSO -7.24441
299
+ ▁THOSE -7.2492
300
+ ▁MAN -7.25828
301
+ CTION -7.25994
302
+ ICAL -7.27095
303
+ ▁JO -7.27522
304
+ ▁OP -7.27657
305
+ ▁NEW -7.28026
306
+ ▁MU -7.28457
307
+ ▁HU -7.28719
308
+ ▁KIND -7.29056
309
+ ▁NE -7.29057
310
+ CA -7.29455
311
+ END -7.29991
312
+ TIC -7.30338
313
+ FUL -7.308
314
+ ▁YEAH -7.30971
315
+ SH -7.31052
316
+ ▁APP -7.31863
317
+ ▁THINGS -7.31873
318
+ SIDE -7.31876
319
+ ▁GOOD -7.32149
320
+ ONE -7.32385
321
+ ▁TAKE -7.33148
322
+ CU -7.33525
323
+ ▁EVERY -7.33606
324
+ ▁MEAN -7.33916
325
+ ▁FIRST -7.35633
326
+ OP -7.35807
327
+ ▁TH -7.35816
328
+ ▁MUCH -7.36035
329
+ ▁PART -7.36303
330
+ UGH -7.38075
331
+ ▁COME -7.39025
332
+ J -7.40206
333
+ ▁THAN -7.4027
334
+ ▁EXP -7.40635
335
+ ▁AGAIN -7.40638
336
+ ▁LITTLE -7.4123
337
+ MB -7.42061
338
+ ▁NEED -7.42982
339
+ ▁TALK -7.42993
340
+ IF -7.43499
341
+ FOR -7.43685
342
+ ▁SH -7.43707
343
+ ISH -7.43834
344
+ ▁STA -7.44406
345
+ ATED -7.44429
346
+ ▁GU -7.4457
347
+ ▁LET -7.44843
348
+ IA -7.45208
349
+ ▁MAR -7.46218
350
+ ▁DOWN -7.46252
351
+ ▁DAY -7.46636
352
+ ▁GA -7.46831
353
+ ▁SOMETHING -7.47373
354
+ ▁BU -7.47952
355
+ DUC -7.47968
356
+ HA -7.48577
357
+ ▁LOT -7.48964
358
+ ▁RU -7.49641
359
+ ▁THOUGH -7.50103
360
+ ▁GREAT -7.51571
361
+ AIN -7.53755
362
+ ▁THROUGH -7.53814
363
+ ▁THING -7.5397
364
+ OUS -7.54298
365
+ ▁PRI -7.54663
366
+ ▁GOT -7.54668
367
+ ▁SHOULD -7.54854
368
+ ▁AFTER -7.54887
369
+ ▁HEAR -7.54921
370
+ ▁TA -7.5583
371
+ ▁ONLY -7.56318
372
+ ▁CHI -7.56667
373
+ IOUS -7.58401
374
+ ▁SHA -7.58711
375
+ ▁MOST -7.5956
376
+ ▁ACTUALLY -7.59878
377
+ ▁START -7.60046
378
+ LIC -7.60951
379
+ ▁VA -7.61021
380
+ ▁RI -7.6118
381
+ DAY -7.61429
382
+ IAN -7.6194
383
+ ▁DOES -7.62247
384
+ ROW -7.62941
385
+ ▁GRA -7.63287
386
+ ITION -7.64608
387
+ ▁MANY -7.6498
388
+ ▁BEFORE -7.67839
389
+ ▁GIVE -7.67892
390
+ PORT -7.68357
391
+ QUI -7.70117
392
+ ▁LIFE -7.70155
393
+ ▁WORLD -7.70833
394
+ ▁PI -7.71249
395
+ ▁LONG -7.71586
396
+ ▁THREE -7.72143
397
+ IZE -7.72871
398
+ NESS -7.73046
399
+ ▁SHOW -7.73117
400
+ PH -7.74408
401
+ ▁WHY -7.75452
402
+ ▁QUESTION -7.7659
403
+ WARD -7.76633
404
+ ▁THANK -7.77763
405
+ ▁PH -7.78092
406
+ ▁DIFFERENT -7.79798
407
+ ▁OWN -7.8008
408
+ ▁FEEL -7.80762
409
+ ▁MIGHT -7.81212
410
+ ▁HAPPEN -7.82257
411
+ ▁MADE -7.82361
412
+ ▁BRO -7.83505
413
+ IBLE -7.83692
414
+ ▁HI -7.83977
415
+ ▁STATE -7.8432
416
+ ▁HAND -7.85373
417
+ ▁NEVER -7.8582
418
+ ▁PLACE -7.8697
419
+ ▁LOVE -7.87056
420
+ ▁DU -7.87177
421
+ ▁POINT -7.87926
422
+ ▁HELP -7.88273
423
+ ▁COUNT -7.88305
424
+ ▁STILL -7.88446
425
+ ▁MR -7.90123
426
+ ▁FIND -7.90288
427
+ ▁PERSON -7.92879
428
+ ▁CAME -7.93
429
+ ▁SAME -7.95337
430
+ ▁LAST -7.97701
431
+ ▁HIGH -7.97875
432
+ ▁OLD -8.00689
433
+ ▁UNDER -8.01208
434
+ ▁FOUR -8.01237
435
+ ▁AROUND -8.0163
436
+ ▁SORT -8.0255
437
+ ▁CHANGE -8.0293
438
+ ▁YES -8.03364
439
+ SHIP -8.04202
440
+ ▁ANOTHER -8.04909
441
+ ATIVE -8.07714
442
+ ▁FOUND -8.09227
443
+ ▁JA -8.09351
444
+ ▁ALWAYS -8.10023
445
+ ▁NEXT -8.10913
446
+ ▁TURN -8.11079
447
+ ▁JU -8.12961
448
+ ▁SIX -8.13018
449
+ ▁FACT -8.13809
450
+ ▁INTEREST -8.14199
451
+ ▁WORD -8.15103
452
+ ▁THOUSAND -8.16173
453
+ ▁HUNDRED -8.19128
454
+ ▁NUMBER -8.19271
455
+ ▁IDEA -8.2011
456
+ ▁PLAN -8.21386
457
+ ▁COURSE -8.21699
458
+ ▁SCHOOL -8.22226
459
+ ▁HOUSE -8.22885
460
+ ▁TWENTY -8.23354
461
+ ▁JE -8.23872
462
+ ▁PLAY -8.2479
463
+ ▁AWAY -8.25259
464
+ ▁LEARN -8.25403
465
+ ▁HARD -8.25523
466
+ ▁WEEK -8.25715
467
+ ▁BETTER -8.26711
468
+ ▁WHILE -8.27626
469
+ ▁FRIEND -8.28156
470
+ ▁OKAY -8.28222
471
+ ▁NINE -8.28927
472
+ ▁UNDERSTAND -8.30685
473
+ ▁KEEP -8.30703
474
+ ▁GONNA -8.30908
475
+ ▁SYSTEM -8.33115
476
+ ▁AMERICA -8.33344
477
+ ▁POWER -8.34085
478
+ ▁IMPORTANT -8.34508
479
+ ▁WITHOUT -8.35783
480
+ ▁MAYBE -8.36369
481
+ ▁SEVEN -8.36468
482
+ ▁BETWEEN -8.36808
483
+ ▁BUILD -8.38309
484
+ ▁CERTAIN -8.38571
485
+ ▁PROBLEM -8.40003
486
+ ▁MONEY -8.40454
487
+ ▁BELIEVE -8.42698
488
+ ▁SECOND -8.43244
489
+ ▁REASON -8.45837
490
+ ▁TOGETHER -8.49857
491
+ ▁PUBLIC -8.50598
492
+ ▁ANYTHING -8.50789
493
+ ▁SPEAK -8.51154
494
+ ▁BUSINESS -8.5218
495
+ ▁EVERYTHING -8.52868
496
+ ▁CLOSE -8.52921
497
+ ▁QUITE -8.5317
498
+ ▁ANSWER -8.5436
499
+ ▁ENOUGH -8.54754
500
+ Q -9.75572
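
The second column of unigram_500.vocab is the SentencePiece unigram score, i.e. the natural-log probability of each piece; the special symbols <blk>, <sos/eos> and <unk> are pinned at 0, and Q is the rarest real piece at -9.75572. Converting a score back to a probability is a one-liner; a sketch, assuming the tab-separated dump format SentencePiece writes:

import math

with open("data/lang_bpe_500/unigram_500.vocab", encoding="utf-8") as f:
    vocab = dict(line.rstrip("\n").split("\t") for line in f)

print(math.exp(float(vocab["▁THE"])))  # exp(-3.63907) ≈ 0.026, so '▁THE' covers ~2.6% of pieces
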
data/lang_bpe_500/words.txt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f51338a57551ee4d7bd41d679a4890087fc13c41af8c266742cb1b94095a03c5
3
+ size 6749578
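
words.txt is the word-level symbol table shared by HLG.pt and G_4_gram.pt; in k2/icefall it is a plain text file with one "SYMBOL ID" pair per line (starting with <eps> 0). A sketch of loading it in both directions, assuming that format:

def read_symbol_table(path):
    sym2id = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            sym, idx = line.split()
            sym2id[sym] = int(idx)
    id2sym = {i: s for s, i in sym2id.items()}
    return sym2id, id2sym

k2 also ships a helper for this format (k2.SymbolTable.from_file), which the recipes normally use instead.
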
data/lm/G_4_gram.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbd55286dcd73575b22bba203cc0c53fcad9891196760348bd8438dfbc3083ab
3
+ size 668693803
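
G_4_gram.pt holds the word-level 4-gram LM as a serialized k2 FSA; the decode log below picks it up as "Loading pre-compiled G_4_gram.pt" for whole-lattice rescoring. A hedged sketch of the usual icefall loading pattern:

import torch
import k2

G = k2.Fsa.from_dict(torch.load("data/lm/G_4_gram.pt", map_location="cpu"))
# G carries word ids from words.txt on its arcs, with 4-gram LM costs as scores.
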
exp/cpu_jit.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdcb94b47038797a1f18e76c312d2755706277bcd8dceb8a36d887589a3a3965
3
+ size 457683746
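
exp/cpu_jit.pt is a TorchScript export of the conformer CTC model, so it can be run without the icefall class definitions on the import path; a sketch:

import torch

model = torch.jit.load("exp/cpu_jit.pt", map_location="cpu")
model.eval()  # inference mode; feed it 80-dim fbank features as in the recipe
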
exp/pretrained.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2e1e15835f1f5f3f73f0bce3fda061157db0ddd1546ebafd5d332b6b19bb62f
3
+ size 437153185
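
exp/pretrained.pt stores plain weights instead, presumably the same averaged model the log decodes with (epochs 13-18 averaged, 109226120 parameters), so the matching architecture has to be constructed first. A hedged sketch, assuming icefall's usual habit of nesting the state dict under a "model" key:

import torch

ckpt = torch.load("exp/pretrained.pt", map_location="cpu")
state_dict = ckpt.get("model", ckpt)  # fall back if the weights are stored flat
# model = Conformer(...)              # hypothetical: build the conformer_ctc model first
# model.load_state_dict(state_dict)
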
log/log-decode-2022-04-08-22-02-12 ADDED
@@ -0,0 +1,778 @@
1
+ 2022-04-08 22:02:12,850 INFO [decode.py:583] Decoding started
2
+ 2022-04-08 22:02:12,851 INFO [decode.py:584] {'subsampling_factor': 4, 'vgg_frontend': False, 'use_feat_batchnorm': True, 'feature_dim': 80, 'nhead': 8, 'attention_dim': 512, 'num_decoder_layers': 6, 'search_beam': 20, 'output_beam': 8, 'min_active_states': 30, 'max_active_states': 10000, 'use_double_scores': True, 'env_info': {'k2-version': '1.14', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '6833270cb228aba7bf9681fccd41e2b52f7d984c', 'k2-git-date': 'Wed Mar 16 11:16:05 2022', 'lhotse-version': '1.0.0.dev+git.d917411.clean', 'torch-cuda-available': True, 'torch-cuda-version': '11.1', 'python-version': '3.7', 'icefall-git-branch': 'gigaspeech_recipe', 'icefall-git-sha1': 'c3993a5-dirty', 'icefall-git-date': 'Mon Mar 21 13:49:39 2022', 'icefall-path': '/userhome/user/guanbo/icefall_decode', 'k2-path': '/opt/conda/lib/python3.7/site-packages/k2-1.14.dev20220408+cuda11.1.torch1.10.0-py3.7-linux-x86_64.egg/k2/__init__.py', 'lhotse-path': '/userhome/user/guanbo/lhotse/lhotse/__init__.py', 'hostname': 'd7b02ab00b70c011ec0a3ee069db84328338-chenx8564-0', 'IP address': '10.9.150.18'}, 'epoch': 18, 'avg': 6, 'method': 'attention-decoder', 'num_paths': 1000, 'nbest_scale': 0.5, 'exp_dir': PosixPath('conformer_ctc/exp_500_8_2'), 'lang_dir': PosixPath('data/lang_bpe_500'), 'lm_dir': PosixPath('data/lm'), 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 20, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'return_cuts': True, 'num_workers': 1, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'subset': 'XL', 'lazy_load': True, 'small_dev': False}
3
+ 2022-04-08 22:02:13,611 INFO [lexicon.py:176] Loading pre-compiled data/lang_bpe_500/Linv.pt
4
+ 2022-04-08 22:02:13,897 INFO [decode.py:594] device: cuda:0
5
+ 2022-04-08 22:02:19,463 INFO [decode.py:656] Loading pre-compiled G_4_gram.pt
6
+ 2022-04-08 22:02:23,064 INFO [decode.py:692] averaging ['conformer_ctc/exp_500_8_2/epoch-13.pt', 'conformer_ctc/exp_500_8_2/epoch-14.pt', 'conformer_ctc/exp_500_8_2/epoch-15.pt', 'conformer_ctc/exp_500_8_2/epoch-16.pt', 'conformer_ctc/exp_500_8_2/epoch-17.pt', 'conformer_ctc/exp_500_8_2/epoch-18.pt']
7
+ 2022-04-08 22:04:17,302 INFO [decode.py:699] Number of model parameters: 109226120
8
+ 2022-04-08 22:04:17,303 INFO [asr_datamodule.py:372] About to get dev cuts
9
+ 2022-04-08 22:04:21,114 INFO [decode.py:497] batch 0/?, cuts processed until now is 3
10
+ 2022-04-08 22:06:56,367 INFO [decode.py:497] batch 100/?, cuts processed until now is 243
11
+ 2022-04-08 22:09:33,967 INFO [decode.py:497] batch 200/?, cuts processed until now is 464
12
+ 2022-04-08 22:12:05,730 INFO [decode.py:497] batch 300/?, cuts processed until now is 665
13
+ 2022-04-08 22:13:23,989 INFO [decode.py:736] Caught exception:
14
+ CUDA out of memory. Tried to allocate 4.93 GiB (GPU 0; 31.75 GiB total capacity; 24.54 GiB already allocated; 3.87 GiB free; 26.53 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
15
+
16
+ 2022-04-08 22:13:23,989 INFO [decode.py:743] num_arcs before pruning: 333034
17
+ 2022-04-08 22:13:23,989 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
18
+ 2022-04-08 22:13:24,010 INFO [decode.py:757] num_arcs after pruning: 7258
19
+ 2022-04-08 22:14:38,171 INFO [decode.py:497] batch 400/?, cuts processed until now is 891
20
+ 2022-04-08 22:17:05,640 INFO [decode.py:497] batch 500/?, cuts processed until now is 1098
21
+ 2022-04-08 22:19:29,901 INFO [decode.py:497] batch 600/?, cuts processed until now is 1363
22
+ 2022-04-08 22:20:05,953 INFO [decode.py:736] Caught exception:
23
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.51 GiB already allocated; 7.07 GiB free; 23.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
24
+
25
+ 2022-04-08 22:20:05,954 INFO [decode.py:743] num_arcs before pruning: 514392
26
+ 2022-04-08 22:20:05,954 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
27
+ 2022-04-08 22:20:05,966 INFO [decode.py:757] num_arcs after pruning: 13888
28
+ 2022-04-08 22:22:02,765 INFO [decode.py:497] batch 700/?, cuts processed until now is 1626
29
+ 2022-04-08 22:24:05,393 INFO [decode.py:736] Caught exception:
30
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 14.24 GiB already allocated; 7.07 GiB free; 23.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
31
+
32
+ 2022-04-08 22:24:05,393 INFO [decode.py:743] num_arcs before pruning: 164808
33
+ 2022-04-08 22:24:05,393 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
34
+ 2022-04-08 22:24:05,404 INFO [decode.py:757] num_arcs after pruning: 8771
35
+ 2022-04-08 22:24:40,652 INFO [decode.py:497] batch 800/?, cuts processed until now is 1870
36
+ 2022-04-08 22:25:03,574 INFO [decode.py:736] Caught exception:
37
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 14.28 GiB already allocated; 7.07 GiB free; 23.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
38
+
39
+ 2022-04-08 22:25:03,575 INFO [decode.py:743] num_arcs before pruning: 267824
40
+ 2022-04-08 22:25:03,575 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
41
+ 2022-04-08 22:25:03,582 INFO [decode.py:757] num_arcs after pruning: 9250
42
+ 2022-04-08 22:27:25,872 INFO [decode.py:497] batch 900/?, cuts processed until now is 2134
43
+ 2022-04-08 22:29:45,824 INFO [decode.py:736] Caught exception:
44
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 14.45 GiB already allocated; 7.06 GiB free; 23.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
45
+
46
+ 2022-04-08 22:29:45,825 INFO [decode.py:743] num_arcs before pruning: 236799
47
+ 2022-04-08 22:29:45,825 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
48
+ 2022-04-08 22:29:45,837 INFO [decode.py:757] num_arcs after pruning: 7885
49
+ 2022-04-08 22:30:03,747 INFO [decode.py:497] batch 1000/?, cuts processed until now is 2380
50
+ 2022-04-08 22:30:44,532 INFO [decode.py:736] Caught exception:
51
+
52
+ Some bad things happened. Please read the above error messages and stack
53
+ trace. If you are using Python, the following command may be helpful:
54
+
55
+ gdb --args python /path/to/your/code.py
56
+
57
+ (You can use `gdb` to debug the code. Please consider compiling
58
+ a debug version of k2.).
59
+
60
+ If you are unable to fix it, please open an issue at:
61
+
62
+ https://github.com/k2-fsa/k2/issues/new
63
+
64
+
65
+ 2022-04-08 22:30:44,532 INFO [decode.py:743] num_arcs before pruning: 632546
66
+ 2022-04-08 22:30:44,533 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
67
+ 2022-04-08 22:30:44,585 INFO [decode.py:757] num_arcs after pruning: 10602
68
+ 2022-04-08 22:32:41,978 INFO [decode.py:497] batch 1100/?, cuts processed until now is 2624
69
+ 2022-04-08 22:34:54,199 INFO [decode.py:736] Caught exception:
70
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.67 GiB already allocated; 5.68 GiB free; 24.72 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
71
+
72
+ 2022-04-08 22:34:54,200 INFO [decode.py:743] num_arcs before pruning: 227558
73
+ 2022-04-08 22:34:54,200 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
74
+ 2022-04-08 22:34:54,218 INFO [decode.py:757] num_arcs after pruning: 8505
75
+ 2022-04-08 22:35:25,806 INFO [decode.py:497] batch 1200/?, cuts processed until now is 2889
76
+ 2022-04-08 22:38:28,827 INFO [decode.py:497] batch 1300/?, cuts processed until now is 3182
77
+ 2022-04-08 22:39:35,318 INFO [decode.py:736] Caught exception:
78
+ CUDA out of memory. Tried to allocate 2.65 GiB (GPU 0; 31.75 GiB total capacity; 27.28 GiB already allocated; 1.20 GiB free; 29.19 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
79
+
80
+ 2022-04-08 22:39:35,318 INFO [decode.py:743] num_arcs before pruning: 348294
81
+ 2022-04-08 22:39:35,318 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
82
+ 2022-04-08 22:39:35,324 INFO [decode.py:757] num_arcs after pruning: 4422
83
+ 2022-04-08 22:41:48,886 INFO [decode.py:497] batch 1400/?, cuts processed until now is 3491
84
+ 2022-04-08 22:42:03,583 INFO [decode.py:736] Caught exception:
85
+ CUDA out of memory. Tried to allocate 4.53 GiB (GPU 0; 31.75 GiB total capacity; 24.43 GiB already allocated; 1.20 GiB free; 29.19 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
86
+
87
+ 2022-04-08 22:42:03,584 INFO [decode.py:743] num_arcs before pruning: 446338
88
+ 2022-04-08 22:42:03,584 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
89
+ 2022-04-08 22:42:03,592 INFO [decode.py:757] num_arcs after pruning: 13422
90
+ 2022-04-08 22:44:41,081 INFO [decode.py:497] batch 1500/?, cuts processed until now is 3738
91
+ 2022-04-08 22:44:48,819 INFO [decode.py:736] Caught exception:
92
+ CUDA out of memory. Tried to allocate 1.94 GiB (GPU 0; 31.75 GiB total capacity; 29.06 GiB already allocated; 231.75 MiB free; 30.17 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
93
+
94
+ 2022-04-08 22:44:48,820 INFO [decode.py:743] num_arcs before pruning: 263598
95
+ 2022-04-08 22:44:48,820 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
96
+ 2022-04-08 22:44:48,833 INFO [decode.py:757] num_arcs after pruning: 7847
97
+ 2022-04-08 22:47:10,728 INFO [decode.py:497] batch 1600/?, cuts processed until now is 3970
98
+ 2022-04-08 22:47:52,235 INFO [decode.py:736] Caught exception:
99
+ CUDA out of memory. Tried to allocate 5.20 GiB (GPU 0; 31.75 GiB total capacity; 24.71 GiB already allocated; 231.75 MiB free; 30.17 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
100
+
101
+ 2022-04-08 22:47:52,236 INFO [decode.py:743] num_arcs before pruning: 317009
102
+ 2022-04-08 22:47:52,236 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
103
+ 2022-04-08 22:47:52,252 INFO [decode.py:757] num_arcs after pruning: 9354
104
+ 2022-04-08 22:49:32,370 INFO [decode.py:736] Caught exception:
105
+ CUDA out of memory. Tried to allocate 4.55 GiB (GPU 0; 31.75 GiB total capacity; 24.05 GiB already allocated; 231.75 MiB free; 30.17 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
106
+
107
+ 2022-04-08 22:49:32,371 INFO [decode.py:743] num_arcs before pruning: 136624
108
+ 2022-04-08 22:49:32,371 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
109
+ 2022-04-08 22:49:32,402 INFO [decode.py:757] num_arcs after pruning: 5456
110
+ 2022-04-08 22:49:36,398 INFO [decode.py:497] batch 1700/?, cuts processed until now is 4192
111
+ 2022-04-08 22:50:50,382 INFO [decode.py:736] Caught exception:
112
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.56 GiB already allocated; 2.10 GiB free; 28.29 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
113
+
114
+ 2022-04-08 22:50:50,383 INFO [decode.py:743] num_arcs before pruning: 303893
115
+ 2022-04-08 22:50:50,383 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
116
+ 2022-04-08 22:50:50,400 INFO [decode.py:757] num_arcs after pruning: 9312
117
+ 2022-04-08 22:52:09,335 INFO [decode.py:497] batch 1800/?, cuts processed until now is 4416
118
+ 2022-04-08 22:52:51,744 INFO [decode.py:736] Caught exception:
119
+ CUDA out of memory. Tried to allocate 5.02 GiB (GPU 0; 31.75 GiB total capacity; 26.25 GiB already allocated; 2.10 GiB free; 28.29 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
120
+
121
+ 2022-04-08 22:52:51,745 INFO [decode.py:743] num_arcs before pruning: 379292
122
+ 2022-04-08 22:52:51,745 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
123
+ 2022-04-08 22:52:51,751 INFO [decode.py:757] num_arcs after pruning: 14317
124
+ 2022-04-08 22:54:33,478 INFO [decode.py:497] batch 1900/?, cuts processed until now is 4619
125
+ 2022-04-08 22:56:34,371 INFO [decode.py:736] Caught exception:
126
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.32 GiB already allocated; 3.07 GiB free; 27.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
127
+
128
+ 2022-04-08 22:56:34,372 INFO [decode.py:743] num_arcs before pruning: 294097
129
+ 2022-04-08 22:56:34,372 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
130
+ 2022-04-08 22:56:34,389 INFO [decode.py:757] num_arcs after pruning: 5895
131
+ 2022-04-08 22:56:47,967 INFO [decode.py:497] batch 2000/?, cuts processed until now is 4816
132
+ 2022-04-08 22:58:06,236 INFO [decode.py:736] Caught exception:
133
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.41 GiB already allocated; 3.06 GiB free; 27.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
134
+
135
+ 2022-04-08 22:58:06,236 INFO [decode.py:743] num_arcs before pruning: 253855
136
+ 2022-04-08 22:58:06,236 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
137
+ 2022-04-08 22:58:06,253 INFO [decode.py:757] num_arcs after pruning: 9191
138
+ 2022-04-08 22:58:17,534 INFO [decode.py:736] Caught exception:
139
+ CUDA out of memory. Tried to allocate 2.17 GiB (GPU 0; 31.75 GiB total capacity; 26.06 GiB already allocated; 1.56 GiB free; 28.83 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
140
+
141
+ 2022-04-08 22:58:17,535 INFO [decode.py:743] num_arcs before pruning: 242689
142
+ 2022-04-08 22:58:17,535 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
143
+ 2022-04-08 22:58:17,549 INFO [decode.py:757] num_arcs after pruning: 4733
144
+ 2022-04-08 22:58:32,154 INFO [decode.py:736] Caught exception:
145
+ CUDA out of memory. Tried to allocate 2.38 GiB (GPU 0; 31.75 GiB total capacity; 26.65 GiB already allocated; 1.57 GiB free; 28.82 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
146
+
147
+ 2022-04-08 22:58:32,155 INFO [decode.py:743] num_arcs before pruning: 288302
148
+ 2022-04-08 22:58:32,155 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
149
+ 2022-04-08 22:58:32,164 INFO [decode.py:757] num_arcs after pruning: 5472
150
+ 2022-04-08 22:59:15,988 INFO [decode.py:497] batch 2100/?, cuts processed until now is 4981
151
+ 2022-04-08 23:00:31,937 INFO [decode.py:736] Caught exception:
152
+
153
+ Some bad things happened. Please read the above error messages and stack
154
+ trace. If you are using Python, the following command may be helpful:
155
+
156
+ gdb --args python /path/to/your/code.py
157
+
158
+ (You can use `gdb` to debug the code. Please consider compiling
159
+ a debug version of k2.).
160
+
161
+ If you are unable to fix it, please open an issue at:
162
+
163
+ https://github.com/k2-fsa/k2/issues/new
164
+
165
+
166
+ 2022-04-08 23:00:31,937 INFO [decode.py:743] num_arcs before pruning: 745182
167
+ 2022-04-08 23:00:31,937 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
168
+ 2022-04-08 23:00:31,989 INFO [decode.py:757] num_arcs after pruning: 13933
169
+ 2022-04-08 23:01:49,408 INFO [decode.py:497] batch 2200/?, cuts processed until now is 5132
170
+ 2022-04-08 23:04:08,911 INFO [decode.py:497] batch 2300/?, cuts processed until now is 5273
171
+ 2022-04-08 23:06:50,854 INFO [decode.py:497] batch 2400/?, cuts processed until now is 5388
172
+ 2022-04-08 23:06:53,493 INFO [decode.py:736] Caught exception:
173
+
174
+ Some bad things happened. Please read the above error messages and stack
175
+ trace. If you are using Python, the following command may be helpful:
176
+
177
+ gdb --args python /path/to/your/code.py
178
+
179
+ (You can use `gdb` to debug the code. Please consider compiling
180
+ a debug version of k2.).
181
+
182
+ If you are unable to fix it, please open an issue at:
183
+
184
+ https://github.com/k2-fsa/k2/issues/new
185
+
186
+
187
+ 2022-04-08 23:06:53,493 INFO [decode.py:743] num_arcs before pruning: 203946
188
+ 2022-04-08 23:06:53,493 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
189
+ 2022-04-08 23:06:53,545 INFO [decode.py:757] num_arcs after pruning: 7172
190
+ 2022-04-08 23:09:08,764 INFO [decode.py:497] batch 2500/?, cuts processed until now is 5488
191
+ 2022-04-08 23:10:26,345 INFO [decode.py:841] Caught exception:
192
+ CUDA out of memory. Tried to allocate 5.79 GiB (GPU 0; 31.75 GiB total capacity; 24.31 GiB already allocated; 1.58 GiB free; 28.82 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
193
+
194
+ 2022-04-08 23:10:26,346 INFO [decode.py:843] num_paths before decreasing: 1000
195
+ 2022-04-08 23:10:26,346 INFO [decode.py:852] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
196
+ 2022-04-08 23:10:26,346 INFO [decode.py:858] num_paths after decreasing: 500
197
+ 2022-04-08 23:11:31,973 INFO [decode.py:497] batch 2600/?, cuts processed until now is 5588
198
+ 2022-04-08 23:13:41,208 INFO [decode.py:497] batch 2700/?, cuts processed until now is 5688
199
+ 2022-04-08 23:20:49,158 INFO [decode.py:567]
200
+ For dev, WER of different settings are:
201
+ ngram_lm_scale_0.6_attention_scale_1.5 10.46 best for dev
202
+ ngram_lm_scale_0.6_attention_scale_1.7 10.46
203
+ ngram_lm_scale_0.5_attention_scale_0.9 10.47
204
+ ngram_lm_scale_0.5_attention_scale_1.0 10.47
205
+ ngram_lm_scale_0.5_attention_scale_1.1 10.47
206
+ ngram_lm_scale_0.5_attention_scale_1.2 10.47
207
+ ngram_lm_scale_0.5_attention_scale_1.3 10.47
208
+ ngram_lm_scale_0.5_attention_scale_1.5 10.47
209
+ ngram_lm_scale_0.5_attention_scale_1.7 10.47
210
+ ngram_lm_scale_0.6_attention_scale_1.3 10.47
211
+ ngram_lm_scale_0.6_attention_scale_1.9 10.47
212
+ ngram_lm_scale_0.6_attention_scale_2.0 10.47
213
+ ngram_lm_scale_0.6_attention_scale_2.1 10.47
214
+ ngram_lm_scale_0.7_attention_scale_1.9 10.47
215
+ ngram_lm_scale_0.7_attention_scale_2.0 10.47
216
+ ngram_lm_scale_0.7_attention_scale_2.1 10.47
217
+ ngram_lm_scale_0.7_attention_scale_2.2 10.47
218
+ ngram_lm_scale_0.5_attention_scale_1.9 10.48
219
+ ngram_lm_scale_0.6_attention_scale_1.1 10.48
220
+ ngram_lm_scale_0.6_attention_scale_1.2 10.48
221
+ ngram_lm_scale_0.6_attention_scale_2.2 10.48
222
+ ngram_lm_scale_0.6_attention_scale_2.3 10.48
223
+ ngram_lm_scale_0.7_attention_scale_1.5 10.48
224
+ ngram_lm_scale_0.7_attention_scale_1.7 10.48
225
+ ngram_lm_scale_0.7_attention_scale_2.3 10.48
226
+ ngram_lm_scale_0.7_attention_scale_2.5 10.48
227
+ ngram_lm_scale_0.9_attention_scale_4.0 10.48
228
+ ngram_lm_scale_0.3_attention_scale_1.1 10.49
229
+ ngram_lm_scale_0.5_attention_scale_0.6 10.49
230
+ ngram_lm_scale_0.5_attention_scale_0.7 10.49
231
+ ngram_lm_scale_0.5_attention_scale_2.0 10.49
232
+ ngram_lm_scale_0.5_attention_scale_2.1 10.49
233
+ ngram_lm_scale_0.5_attention_scale_2.5 10.49
234
+ ngram_lm_scale_0.5_attention_scale_3.0 10.49
235
+ ngram_lm_scale_0.6_attention_scale_1.0 10.49
236
+ ngram_lm_scale_0.6_attention_scale_2.5 10.49
237
+ ngram_lm_scale_0.6_attention_scale_3.0 10.49
238
+ ngram_lm_scale_0.7_attention_scale_1.3 10.49
239
+ ngram_lm_scale_0.7_attention_scale_3.0 10.49
240
+ ngram_lm_scale_0.7_attention_scale_4.0 10.49
241
+ ngram_lm_scale_0.9_attention_scale_3.0 10.49
242
+ ngram_lm_scale_0.9_attention_scale_5.0 10.49
243
+ ngram_lm_scale_1.0_attention_scale_4.0 10.49
244
+ ngram_lm_scale_1.0_attention_scale_5.0 10.49
245
+ ngram_lm_scale_1.1_attention_scale_4.0 10.49
246
+ ngram_lm_scale_1.1_attention_scale_5.0 10.49
247
+ ngram_lm_scale_1.2_attention_scale_4.0 10.49
248
+ ngram_lm_scale_1.2_attention_scale_5.0 10.49
249
+ ngram_lm_scale_1.3_attention_scale_5.0 10.49
250
+ ngram_lm_scale_1.5_attention_scale_5.0 10.49
251
+ ngram_lm_scale_0.3_attention_scale_0.7 10.5
252
+ ngram_lm_scale_0.3_attention_scale_0.9 10.5
253
+ ngram_lm_scale_0.3_attention_scale_1.0 10.5
254
+ ngram_lm_scale_0.3_attention_scale_1.2 10.5
255
+ ngram_lm_scale_0.3_attention_scale_1.3 10.5
256
+ ngram_lm_scale_0.3_attention_scale_1.5 10.5
257
+ ngram_lm_scale_0.5_attention_scale_2.2 10.5
258
+ ngram_lm_scale_0.5_attention_scale_2.3 10.5
259
+ ngram_lm_scale_0.6_attention_scale_0.7 10.5
260
+ ngram_lm_scale_0.6_attention_scale_0.9 10.5
261
+ ngram_lm_scale_0.7_attention_scale_1.0 10.5
262
+ ngram_lm_scale_0.7_attention_scale_1.1 10.5
263
+ ngram_lm_scale_0.7_attention_scale_5.0 10.5
264
+ ngram_lm_scale_0.9_attention_scale_2.1 10.5
265
+ ngram_lm_scale_1.0_attention_scale_3.0 10.5
266
+ ngram_lm_scale_1.3_attention_scale_4.0 10.5
267
+ ngram_lm_scale_1.5_attention_scale_4.0 10.5
268
+ ngram_lm_scale_0.3_attention_scale_1.7 10.51
269
+ ngram_lm_scale_0.3_attention_scale_1.9 10.51
270
+ ngram_lm_scale_0.3_attention_scale_2.0 10.51
271
+ ngram_lm_scale_0.3_attention_scale_2.1 10.51
272
+ ngram_lm_scale_0.3_attention_scale_2.2 10.51
273
+ ngram_lm_scale_0.3_attention_scale_2.3 10.51
274
+ ngram_lm_scale_0.3_attention_scale_2.5 10.51
275
+ ngram_lm_scale_0.3_attention_scale_3.0 10.51
276
+ ngram_lm_scale_0.3_attention_scale_4.0 10.51
277
+ ngram_lm_scale_0.5_attention_scale_0.5 10.51
278
+ ngram_lm_scale_0.5_attention_scale_4.0 10.51
279
+ ngram_lm_scale_0.5_attention_scale_5.0 10.51
280
+ ngram_lm_scale_0.6_attention_scale_4.0 10.51
281
+ ngram_lm_scale_0.6_attention_scale_5.0 10.51
282
+ ngram_lm_scale_0.7_attention_scale_1.2 10.51
283
+ ngram_lm_scale_0.9_attention_scale_2.0 10.51
284
+ ngram_lm_scale_0.9_attention_scale_2.2 10.51
285
+ ngram_lm_scale_0.9_attention_scale_2.3 10.51
286
+ ngram_lm_scale_0.9_attention_scale_2.5 10.51
287
+ ngram_lm_scale_1.0_attention_scale_2.2 10.51
288
+ ngram_lm_scale_1.0_attention_scale_2.3 10.51
289
+ ngram_lm_scale_1.0_attention_scale_2.5 10.51
290
+ ngram_lm_scale_1.1_attention_scale_2.5 10.51
291
+ ngram_lm_scale_1.2_attention_scale_3.0 10.51
292
+ ngram_lm_scale_1.7_attention_scale_5.0 10.51
293
+ ngram_lm_scale_0.05_attention_scale_2.5 10.52
294
+ ngram_lm_scale_0.05_attention_scale_3.0 10.52
295
+ ngram_lm_scale_0.08_attention_scale_2.5 10.52
296
+ ngram_lm_scale_0.08_attention_scale_4.0 10.52
297
+ ngram_lm_scale_0.08_attention_scale_5.0 10.52
298
+ ngram_lm_scale_0.1_attention_scale_2.5 10.52
299
+ ngram_lm_scale_0.1_attention_scale_3.0 10.52
300
+ ngram_lm_scale_0.1_attention_scale_4.0 10.52
301
+ ngram_lm_scale_0.1_attention_scale_5.0 10.52
302
+ ngram_lm_scale_0.3_attention_scale_0.5 10.52
303
+ ngram_lm_scale_0.3_attention_scale_0.6 10.52
304
+ ngram_lm_scale_0.3_attention_scale_5.0 10.52
305
+ ngram_lm_scale_0.6_attention_scale_0.6 10.52
306
+ ngram_lm_scale_0.7_attention_scale_0.9 10.52
307
+ ngram_lm_scale_0.9_attention_scale_1.7 10.52
308
+ ngram_lm_scale_0.9_attention_scale_1.9 10.52
309
+ ngram_lm_scale_1.0_attention_scale_2.0 10.52
310
+ ngram_lm_scale_1.0_attention_scale_2.1 10.52
311
+ ngram_lm_scale_1.1_attention_scale_2.3 10.52
312
+ ngram_lm_scale_1.1_attention_scale_3.0 10.52
313
+ ngram_lm_scale_1.9_attention_scale_5.0 10.52
314
+ ngram_lm_scale_0.01_attention_scale_2.5 10.53
315
+ ngram_lm_scale_0.01_attention_scale_3.0 10.53
316
+ ngram_lm_scale_0.01_attention_scale_4.0 10.53
317
+ ngram_lm_scale_0.01_attention_scale_5.0 10.53
318
+ ngram_lm_scale_0.05_attention_scale_1.9 10.53
319
+ ngram_lm_scale_0.05_attention_scale_2.1 10.53
320
+ ngram_lm_scale_0.05_attention_scale_2.3 10.53
321
+ ngram_lm_scale_0.05_attention_scale_4.0 10.53
322
+ ngram_lm_scale_0.05_attention_scale_5.0 10.53
323
+ ngram_lm_scale_0.08_attention_scale_1.9 10.53
324
+ ngram_lm_scale_0.08_attention_scale_2.1 10.53
325
+ ngram_lm_scale_0.08_attention_scale_2.2 10.53
326
+ ngram_lm_scale_0.08_attention_scale_2.3 10.53
327
+ ngram_lm_scale_0.08_attention_scale_3.0 10.53
328
+ ngram_lm_scale_0.1_attention_scale_2.2 10.53
329
+ ngram_lm_scale_0.1_attention_scale_2.3 10.53
330
+ ngram_lm_scale_0.3_attention_scale_0.3 10.53
331
+ ngram_lm_scale_0.9_attention_scale_1.5 10.53
332
+ ngram_lm_scale_1.0_attention_scale_1.9 10.53
333
+ ngram_lm_scale_1.1_attention_scale_2.1 10.53
334
+ ngram_lm_scale_1.1_attention_scale_2.2 10.53
335
+ ngram_lm_scale_1.2_attention_scale_2.5 10.53
336
+ ngram_lm_scale_1.3_attention_scale_3.0 10.53
337
+ ngram_lm_scale_1.7_attention_scale_4.0 10.53
338
+ ngram_lm_scale_2.0_attention_scale_5.0 10.53
339
+ ngram_lm_scale_0.01_attention_scale_2.2 10.54
340
+ ngram_lm_scale_0.01_attention_scale_2.3 10.54
341
+ ngram_lm_scale_0.05_attention_scale_1.7 10.54
342
+ ngram_lm_scale_0.05_attention_scale_2.0 10.54
343
+ ngram_lm_scale_0.05_attention_scale_2.2 10.54
344
+ ngram_lm_scale_0.08_attention_scale_1.2 10.54
345
+ ngram_lm_scale_0.08_attention_scale_1.3 10.54
346
+ ngram_lm_scale_0.08_attention_scale_1.7 10.54
347
+ ngram_lm_scale_0.08_attention_scale_2.0 10.54
348
+ ngram_lm_scale_0.1_attention_scale_1.5 10.54
349
+ ngram_lm_scale_0.1_attention_scale_1.7 10.54
350
+ ngram_lm_scale_0.1_attention_scale_1.9 10.54
351
+ ngram_lm_scale_0.1_attention_scale_2.0 10.54
352
+ ngram_lm_scale_0.1_attention_scale_2.1 10.54
353
+ ngram_lm_scale_0.9_attention_scale_1.2 10.54
354
+ ngram_lm_scale_1.0_attention_scale_1.7 10.54
355
+ ngram_lm_scale_1.2_attention_scale_2.3 10.54
356
+ ngram_lm_scale_1.3_attention_scale_2.3 10.54
357
+ ngram_lm_scale_1.5_attention_scale_3.0 10.54
358
+ ngram_lm_scale_0.01_attention_scale_1.9 10.55
359
+ ngram_lm_scale_0.01_attention_scale_2.0 10.55
360
+ ngram_lm_scale_0.01_attention_scale_2.1 10.55
361
+ ngram_lm_scale_0.05_attention_scale_1.2 10.55
362
+ ngram_lm_scale_0.05_attention_scale_1.3 10.55
363
+ ngram_lm_scale_0.08_attention_scale_1.1 10.55
364
+ ngram_lm_scale_0.08_attention_scale_1.5 10.55
365
+ ngram_lm_scale_0.1_attention_scale_1.1 10.55
366
+ ngram_lm_scale_0.1_attention_scale_1.2 10.55
367
+ ngram_lm_scale_0.1_attention_scale_1.3 10.55
368
+ ngram_lm_scale_0.6_attention_scale_0.5 10.55
369
+ ngram_lm_scale_0.7_attention_scale_0.7 10.55
370
+ ngram_lm_scale_0.9_attention_scale_1.3 10.55
371
+ ngram_lm_scale_1.0_attention_scale_1.5 10.55
372
+ ngram_lm_scale_1.1_attention_scale_2.0 10.55
373
+ ngram_lm_scale_1.2_attention_scale_2.0 10.55
374
+ ngram_lm_scale_1.2_attention_scale_2.1 10.55
375
+ ngram_lm_scale_1.2_attention_scale_2.2 10.55
376
+ ngram_lm_scale_1.3_attention_scale_2.2 10.55
377
+ ngram_lm_scale_1.3_attention_scale_2.5 10.55
378
+ ngram_lm_scale_2.1_attention_scale_5.0 10.55
379
+ ngram_lm_scale_0.01_attention_scale_1.1 10.56
380
+ ngram_lm_scale_0.01_attention_scale_1.3 10.56
381
+ ngram_lm_scale_0.01_attention_scale_1.7 10.56
382
+ ngram_lm_scale_0.05_attention_scale_1.1 10.56
383
+ ngram_lm_scale_0.05_attention_scale_1.5 10.56
384
+ ngram_lm_scale_0.08_attention_scale_1.0 10.56
385
+ ngram_lm_scale_0.1_attention_scale_1.0 10.56
386
+ ngram_lm_scale_0.7_attention_scale_0.6 10.56
387
+ ngram_lm_scale_0.9_attention_scale_1.1 10.56
388
+ ngram_lm_scale_1.0_attention_scale_1.3 10.56
389
+ ngram_lm_scale_1.1_attention_scale_1.7 10.56
390
+ ngram_lm_scale_1.1_attention_scale_1.9 10.56
391
+ ngram_lm_scale_1.2_attention_scale_1.9 10.56
392
+ ngram_lm_scale_1.3_attention_scale_2.0 10.56
393
+ ngram_lm_scale_1.9_attention_scale_4.0 10.56
394
+ ngram_lm_scale_2.2_attention_scale_5.0 10.56
395
+ ngram_lm_scale_0.01_attention_scale_1.2 10.57
396
+ ngram_lm_scale_0.01_attention_scale_1.5 10.57
397
+ ngram_lm_scale_0.05_attention_scale_1.0 10.57
398
+ ngram_lm_scale_0.1_attention_scale_0.5 10.57
399
+ ngram_lm_scale_0.1_attention_scale_0.7 10.57
400
+ ngram_lm_scale_0.1_attention_scale_0.9 10.57
401
+ ngram_lm_scale_0.5_attention_scale_0.3 10.57
402
+ ngram_lm_scale_0.9_attention_scale_1.0 10.57
403
+ ngram_lm_scale_1.1_attention_scale_1.5 10.57
404
+ ngram_lm_scale_1.2_attention_scale_1.7 10.57
405
+ ngram_lm_scale_1.3_attention_scale_2.1 10.57
406
+ ngram_lm_scale_0.01_attention_scale_1.0 10.58
407
+ ngram_lm_scale_0.05_attention_scale_0.9 10.58
408
+ ngram_lm_scale_0.08_attention_scale_0.7 10.58
409
+ ngram_lm_scale_0.08_attention_scale_0.9 10.58
410
+ ngram_lm_scale_0.1_attention_scale_0.6 10.58
411
+ ngram_lm_scale_0.3_attention_scale_0.1 10.58
412
+ ngram_lm_scale_0.9_attention_scale_0.9 10.58
413
+ ngram_lm_scale_1.0_attention_scale_1.2 10.58
414
+ ngram_lm_scale_1.3_attention_scale_1.9 10.58
415
+ ngram_lm_scale_1.5_attention_scale_2.5 10.58
416
+ ngram_lm_scale_2.0_attention_scale_4.0 10.58
417
+ ngram_lm_scale_0.01_attention_scale_0.9 10.59
418
+ ngram_lm_scale_0.08_attention_scale_0.5 10.59
419
+ ngram_lm_scale_0.08_attention_scale_0.6 10.59
420
+ ngram_lm_scale_0.1_attention_scale_0.3 10.59
421
+ ngram_lm_scale_0.3_attention_scale_0.08 10.59
422
+ ngram_lm_scale_0.6_attention_scale_0.3 10.59
423
+ ngram_lm_scale_0.7_attention_scale_0.5 10.59
424
+ ngram_lm_scale_1.7_attention_scale_3.0 10.59
425
+ ngram_lm_scale_2.3_attention_scale_5.0 10.59
426
+ ngram_lm_scale_0.05_attention_scale_0.6 10.6
427
+ ngram_lm_scale_0.05_attention_scale_0.7 10.6
428
+ ngram_lm_scale_0.08_attention_scale_0.3 10.6
429
+ ngram_lm_scale_0.3_attention_scale_0.05 10.6
430
+ ngram_lm_scale_1.0_attention_scale_1.1 10.6
431
+ ngram_lm_scale_1.1_attention_scale_1.3 10.6
432
+ ngram_lm_scale_1.2_attention_scale_1.5 10.6
433
+ ngram_lm_scale_1.5_attention_scale_2.3 10.6
434
+ ngram_lm_scale_0.01_attention_scale_0.7 10.61
435
+ ngram_lm_scale_1.3_attention_scale_1.7 10.61
436
+ ngram_lm_scale_0.01_attention_scale_0.6 10.62
437
+ ngram_lm_scale_0.05_attention_scale_0.3 10.62
438
+ ngram_lm_scale_0.05_attention_scale_0.5 10.62
439
+ ngram_lm_scale_0.1_attention_scale_0.1 10.62
440
+ ngram_lm_scale_2.1_attention_scale_4.0 10.62
441
+ ngram_lm_scale_0.01_attention_scale_0.5 10.63
442
+ ngram_lm_scale_1.0_attention_scale_1.0 10.63
443
+ ngram_lm_scale_1.5_attention_scale_2.2 10.63
444
+ ngram_lm_scale_2.5_attention_scale_5.0 10.63
445
+ ngram_lm_scale_0.08_attention_scale_0.1 10.64
446
+ ngram_lm_scale_0.1_attention_scale_0.08 10.64
447
+ ngram_lm_scale_0.3_attention_scale_0.01 10.64
448
+ ngram_lm_scale_1.1_attention_scale_1.2 10.64
449
+ ngram_lm_scale_0.01_attention_scale_0.3 10.65
450
+ ngram_lm_scale_0.5_attention_scale_0.1 10.65
451
+ ngram_lm_scale_0.7_attention_scale_0.3 10.65
452
+ ngram_lm_scale_1.5_attention_scale_2.1 10.65
453
+ ngram_lm_scale_0.08_attention_scale_0.08 10.66
454
+ ngram_lm_scale_0.1_attention_scale_0.05 10.66
455
+ ngram_lm_scale_0.5_attention_scale_0.08 10.66
456
+ ngram_lm_scale_0.9_attention_scale_0.7 10.66
457
+ ngram_lm_scale_2.2_attention_scale_4.0 10.66
458
+ ngram_lm_scale_0.1_attention_scale_0.01 10.67
459
+ ngram_lm_scale_1.0_attention_scale_0.9 10.67
460
+ ngram_lm_scale_1.1_attention_scale_1.1 10.67
461
+ ngram_lm_scale_1.7_attention_scale_2.5 10.67
462
+ ngram_lm_scale_0.05_attention_scale_0.1 10.68
463
+ ngram_lm_scale_0.5_attention_scale_0.05 10.68
464
+ ngram_lm_scale_1.5_attention_scale_2.0 10.68
465
+ ngram_lm_scale_0.05_attention_scale_0.08 10.69
466
+ ngram_lm_scale_0.08_attention_scale_0.05 10.69
467
+ ngram_lm_scale_1.2_attention_scale_1.3 10.69
468
+ ngram_lm_scale_1.9_attention_scale_3.0 10.69
469
+ ngram_lm_scale_0.08_attention_scale_0.01 10.7
470
+ ngram_lm_scale_0.6_attention_scale_0.1 10.7
471
+ ngram_lm_scale_1.3_attention_scale_1.5 10.7
472
+ ngram_lm_scale_2.3_attention_scale_4.0 10.7
473
+ ngram_lm_scale_0.05_attention_scale_0.05 10.71
474
+ ngram_lm_scale_0.5_attention_scale_0.01 10.71
475
+ ngram_lm_scale_0.9_attention_scale_0.6 10.71
476
+ ngram_lm_scale_1.1_attention_scale_1.0 10.71
477
+ ngram_lm_scale_1.5_attention_scale_1.9 10.71
478
+ ngram_lm_scale_0.01_attention_scale_0.1 10.72
479
+ ngram_lm_scale_0.01_attention_scale_0.08 10.73
480
+ ngram_lm_scale_0.05_attention_scale_0.01 10.73
481
+ ngram_lm_scale_0.6_attention_scale_0.08 10.73
482
+ ngram_lm_scale_1.2_attention_scale_1.2 10.73
483
+ ngram_lm_scale_0.01_attention_scale_0.05 10.75
484
+ ngram_lm_scale_0.9_attention_scale_0.5 10.75
485
+ ngram_lm_scale_1.0_attention_scale_0.7 10.75
486
+ ngram_lm_scale_1.1_attention_scale_0.9 10.75
487
+ ngram_lm_scale_1.2_attention_scale_1.1 10.75
488
+ ngram_lm_scale_1.3_attention_scale_1.3 10.76
489
+ ngram_lm_scale_1.7_attention_scale_2.3 10.76
490
+ ngram_lm_scale_2.0_attention_scale_3.0 10.77
491
+ ngram_lm_scale_0.6_attention_scale_0.05 10.78
492
+ ngram_lm_scale_0.01_attention_scale_0.01 10.79
493
+ ngram_lm_scale_1.5_attention_scale_1.7 10.79
494
+ ngram_lm_scale_1.7_attention_scale_2.2 10.79
495
+ ngram_lm_scale_1.2_attention_scale_1.0 10.8
496
+ ngram_lm_scale_1.3_attention_scale_1.2 10.8
497
+ ngram_lm_scale_2.5_attention_scale_4.0 10.81
498
+ ngram_lm_scale_1.7_attention_scale_2.1 10.82
499
+ ngram_lm_scale_1.0_attention_scale_0.6 10.83
500
+ ngram_lm_scale_2.1_attention_scale_3.0 10.84
501
+ ngram_lm_scale_0.6_attention_scale_0.01 10.85
502
+ ngram_lm_scale_1.7_attention_scale_2.0 10.85
503
+ ngram_lm_scale_1.9_attention_scale_2.5 10.85
504
+ ngram_lm_scale_3.0_attention_scale_5.0 10.86
505
+ ngram_lm_scale_1.3_attention_scale_1.1 10.87
506
+ ngram_lm_scale_0.7_attention_scale_0.1 10.88
507
+ ngram_lm_scale_1.5_attention_scale_1.5 10.88
508
+ ngram_lm_scale_1.2_attention_scale_0.9 10.89
509
+ ngram_lm_scale_1.7_attention_scale_1.9 10.89
510
+ ngram_lm_scale_2.2_attention_scale_3.0 10.9
511
+ ngram_lm_scale_1.1_attention_scale_0.7 10.91
512
+ ngram_lm_scale_1.9_attention_scale_2.3 10.91
513
+ ngram_lm_scale_2.0_attention_scale_2.5 10.91
514
+ ngram_lm_scale_0.7_attention_scale_0.08 10.92
515
+ ngram_lm_scale_0.7_attention_scale_0.05 10.96
516
+ ngram_lm_scale_1.0_attention_scale_0.5 10.96
517
+ ngram_lm_scale_1.9_attention_scale_2.2 10.97
518
+ ngram_lm_scale_2.3_attention_scale_3.0 10.97
519
+ ngram_lm_scale_1.3_attention_scale_1.0 10.99
520
+ ngram_lm_scale_1.7_attention_scale_1.7 11.01
521
+ ngram_lm_scale_2.1_attention_scale_2.5 11.02
522
+ ngram_lm_scale_0.9_attention_scale_0.3 11.03
523
+ ngram_lm_scale_1.9_attention_scale_2.1 11.03
524
+ ngram_lm_scale_0.7_attention_scale_0.01 11.04
525
+ ngram_lm_scale_1.5_attention_scale_1.3 11.04
526
+ ngram_lm_scale_2.0_attention_scale_2.3 11.04
527
+ ngram_lm_scale_1.1_attention_scale_0.6 11.05
528
+ ngram_lm_scale_1.9_attention_scale_2.0 11.1
529
+ ngram_lm_scale_2.0_attention_scale_2.2 11.1
530
+ ngram_lm_scale_1.3_attention_scale_0.9 11.11
531
+ ngram_lm_scale_1.2_attention_scale_0.7 11.14
532
+ ngram_lm_scale_1.5_attention_scale_1.2 11.15
533
+ ngram_lm_scale_2.2_attention_scale_2.5 11.16
534
+ ngram_lm_scale_2.1_attention_scale_2.3 11.17
535
+ ngram_lm_scale_3.0_attention_scale_4.0 11.17
536
+ ngram_lm_scale_1.9_attention_scale_1.9 11.18
537
+ ngram_lm_scale_2.0_attention_scale_2.1 11.18
538
+ ngram_lm_scale_1.1_attention_scale_0.5 11.19
539
+ ngram_lm_scale_2.5_attention_scale_3.0 11.19
540
+ ngram_lm_scale_1.7_attention_scale_1.5 11.21
541
+ ngram_lm_scale_2.1_attention_scale_2.2 11.25
542
+ ngram_lm_scale_1.2_attention_scale_0.6 11.26
543
+ ngram_lm_scale_1.5_attention_scale_1.1 11.26
544
+ ngram_lm_scale_2.0_attention_scale_2.0 11.26
545
+ ngram_lm_scale_1.0_attention_scale_0.3 11.29
546
+ ngram_lm_scale_2.3_attention_scale_2.5 11.3
547
+ ngram_lm_scale_2.2_attention_scale_2.3 11.31
548
+ ngram_lm_scale_2.1_attention_scale_2.1 11.32
549
+ ngram_lm_scale_2.0_attention_scale_1.9 11.34
550
+ ngram_lm_scale_1.3_attention_scale_0.7 11.36
551
+ ngram_lm_scale_1.9_attention_scale_1.7 11.37
552
+ ngram_lm_scale_1.5_attention_scale_1.0 11.4
553
+ ngram_lm_scale_2.2_attention_scale_2.2 11.4
554
+ ngram_lm_scale_2.1_attention_scale_2.0 11.41
555
+ ngram_lm_scale_0.9_attention_scale_0.1 11.42
556
+ ngram_lm_scale_1.7_attention_scale_1.3 11.44
557
+ ngram_lm_scale_1.2_attention_scale_0.5 11.45
558
+ ngram_lm_scale_0.9_attention_scale_0.08 11.47
559
+ ngram_lm_scale_2.3_attention_scale_2.3 11.48
560
+ ngram_lm_scale_2.2_attention_scale_2.1 11.51
561
+ ngram_lm_scale_2.1_attention_scale_1.9 11.54
562
+ ngram_lm_scale_1.3_attention_scale_0.6 11.55
563
+ ngram_lm_scale_1.5_attention_scale_0.9 11.56
564
+ ngram_lm_scale_0.9_attention_scale_0.05 11.57
565
+ ngram_lm_scale_2.0_attention_scale_1.7 11.57
566
+ ngram_lm_scale_2.3_attention_scale_2.2 11.58
567
+ ngram_lm_scale_1.1_attention_scale_0.3 11.59
568
+ ngram_lm_scale_1.7_attention_scale_1.2 11.59
569
+ ngram_lm_scale_1.9_attention_scale_1.5 11.63
570
+ ngram_lm_scale_2.2_attention_scale_2.0 11.63
571
+ ngram_lm_scale_2.5_attention_scale_2.5 11.63
572
+ ngram_lm_scale_4.0_attention_scale_5.0 11.67
573
+ ngram_lm_scale_2.3_attention_scale_2.1 11.7
574
+ ngram_lm_scale_0.9_attention_scale_0.01 11.71
575
+ ngram_lm_scale_2.2_attention_scale_1.9 11.73
576
+ ngram_lm_scale_1.3_attention_scale_0.5 11.76
577
+ ngram_lm_scale_1.7_attention_scale_1.1 11.76
578
+ ngram_lm_scale_1.0_attention_scale_0.1 11.78
579
+ ngram_lm_scale_2.1_attention_scale_1.7 11.8
580
+ ngram_lm_scale_2.3_attention_scale_2.0 11.8
581
+ ngram_lm_scale_2.5_attention_scale_2.3 11.83
582
+ ngram_lm_scale_2.0_attention_scale_1.5 11.86
583
+ ngram_lm_scale_1.0_attention_scale_0.08 11.89
584
+ ngram_lm_scale_1.9_attention_scale_1.3 11.93
585
+ ngram_lm_scale_3.0_attention_scale_3.0 11.94
586
+ ngram_lm_scale_1.2_attention_scale_0.3 11.95
587
+ ngram_lm_scale_1.7_attention_scale_1.0 11.95
588
+ ngram_lm_scale_2.3_attention_scale_1.9 11.95
589
+ ngram_lm_scale_2.5_attention_scale_2.2 11.96
590
+ ngram_lm_scale_1.5_attention_scale_0.7 11.98
591
+ ngram_lm_scale_1.0_attention_scale_0.05 12.0
592
+ ngram_lm_scale_2.2_attention_scale_1.7 12.02
593
+ ngram_lm_scale_2.1_attention_scale_1.5 12.09
594
+ ngram_lm_scale_2.5_attention_scale_2.1 12.09
595
+ ngram_lm_scale_1.9_attention_scale_1.2 12.12
596
+ ngram_lm_scale_1.7_attention_scale_0.9 12.16
597
+ ngram_lm_scale_1.0_attention_scale_0.01 12.19
598
+ ngram_lm_scale_2.0_attention_scale_1.3 12.2
599
+ ngram_lm_scale_2.5_attention_scale_2.0 12.22
600
+ ngram_lm_scale_1.5_attention_scale_0.6 12.24
601
+ ngram_lm_scale_2.3_attention_scale_1.7 12.24
602
+ ngram_lm_scale_1.1_attention_scale_0.1 12.27
603
+ ngram_lm_scale_1.9_attention_scale_1.1 12.3
604
+ ngram_lm_scale_4.0_attention_scale_4.0 12.31
605
+ ngram_lm_scale_2.2_attention_scale_1.5 12.32
606
+ ngram_lm_scale_2.5_attention_scale_1.9 12.35
607
+ ngram_lm_scale_1.1_attention_scale_0.08 12.36
608
+ ngram_lm_scale_2.0_attention_scale_1.2 12.37
609
+ ngram_lm_scale_1.3_attention_scale_0.3 12.4
610
+ ngram_lm_scale_2.1_attention_scale_1.3 12.43
611
+ ngram_lm_scale_3.0_attention_scale_2.5 12.46
612
+ ngram_lm_scale_1.1_attention_scale_0.05 12.51
613
+ ngram_lm_scale_1.9_attention_scale_1.0 12.52
614
+ ngram_lm_scale_2.3_attention_scale_1.5 12.53
615
+ ngram_lm_scale_1.5_attention_scale_0.5 12.54
616
+ ngram_lm_scale_2.0_attention_scale_1.1 12.58
617
+ ngram_lm_scale_5.0_attention_scale_5.0 12.62
618
+ ngram_lm_scale_2.1_attention_scale_1.2 12.63
619
+ ngram_lm_scale_2.5_attention_scale_1.7 12.64
620
+ ngram_lm_scale_1.7_attention_scale_0.7 12.68
621
+ ngram_lm_scale_2.2_attention_scale_1.3 12.68
622
+ ngram_lm_scale_1.1_attention_scale_0.01 12.72
623
+ ngram_lm_scale_3.0_attention_scale_2.3 12.72
624
+ ngram_lm_scale_1.9_attention_scale_0.9 12.78
625
+ ngram_lm_scale_1.2_attention_scale_0.1 12.79
626
+ ngram_lm_scale_2.0_attention_scale_1.0 12.82
627
+ ngram_lm_scale_2.1_attention_scale_1.1 12.86
628
+ ngram_lm_scale_3.0_attention_scale_2.2 12.87
629
+ ngram_lm_scale_1.2_attention_scale_0.08 12.88
630
+ ngram_lm_scale_2.2_attention_scale_1.2 12.92
631
+ ngram_lm_scale_2.3_attention_scale_1.3 12.97
632
+ ngram_lm_scale_1.7_attention_scale_0.6 12.98
633
+ ngram_lm_scale_3.0_attention_scale_2.1 13.03
634
+ ngram_lm_scale_2.5_attention_scale_1.5 13.04
635
+ ngram_lm_scale_1.2_attention_scale_0.05 13.05
636
+ ngram_lm_scale_2.0_attention_scale_0.9 13.11
637
+ ngram_lm_scale_2.1_attention_scale_1.0 13.17
638
+ ngram_lm_scale_2.2_attention_scale_1.1 13.2
639
+ ngram_lm_scale_3.0_attention_scale_2.0 13.2
640
+ ngram_lm_scale_2.3_attention_scale_1.2 13.24
641
+ ngram_lm_scale_1.2_attention_scale_0.01 13.27
642
+ ngram_lm_scale_1.3_attention_scale_0.1 13.3
643
+ ngram_lm_scale_1.5_attention_scale_0.3 13.32
644
+ ngram_lm_scale_1.7_attention_scale_0.5 13.33
645
+ ngram_lm_scale_1.3_attention_scale_0.08 13.4
646
+ ngram_lm_scale_4.0_attention_scale_3.0 13.41
647
+ ngram_lm_scale_1.9_attention_scale_0.7 13.42
648
+ ngram_lm_scale_3.0_attention_scale_1.9 13.42
649
+ ngram_lm_scale_2.1_attention_scale_0.9 13.45
650
+ ngram_lm_scale_2.2_attention_scale_1.0 13.46
651
+ ngram_lm_scale_2.3_attention_scale_1.1 13.47
652
+ ngram_lm_scale_2.5_attention_scale_1.3 13.53
653
+ ngram_lm_scale_1.3_attention_scale_0.05 13.56
654
+ ngram_lm_scale_5.0_attention_scale_4.0 13.57
655
+ ngram_lm_scale_2.0_attention_scale_0.7 13.73
656
+ ngram_lm_scale_2.2_attention_scale_0.9 13.74
657
+ ngram_lm_scale_1.9_attention_scale_0.6 13.75
658
+ ngram_lm_scale_2.3_attention_scale_1.0 13.75
659
+ ngram_lm_scale_2.5_attention_scale_1.2 13.78
660
+ ngram_lm_scale_1.3_attention_scale_0.01 13.81
661
+ ngram_lm_scale_3.0_attention_scale_1.7 13.84
662
+ ngram_lm_scale_2.5_attention_scale_1.1 14.05
663
+ ngram_lm_scale_2.1_attention_scale_0.7 14.07
664
+ ngram_lm_scale_2.3_attention_scale_0.9 14.07
665
+ ngram_lm_scale_2.0_attention_scale_0.6 14.1
666
+ ngram_lm_scale_1.9_attention_scale_0.5 14.14
667
+ ngram_lm_scale_1.7_attention_scale_0.3 14.18
668
+ ngram_lm_scale_4.0_attention_scale_2.5 14.2
669
+ ngram_lm_scale_3.0_attention_scale_1.5 14.28
670
+ ngram_lm_scale_1.5_attention_scale_0.1 14.3
671
+ ngram_lm_scale_2.5_attention_scale_1.0 14.35
672
+ ngram_lm_scale_1.5_attention_scale_0.08 14.41
673
+ ngram_lm_scale_2.2_attention_scale_0.7 14.42
674
+ ngram_lm_scale_2.1_attention_scale_0.6 14.47
675
+ ngram_lm_scale_2.0_attention_scale_0.5 14.51
676
+ ngram_lm_scale_4.0_attention_scale_2.3 14.56
677
+ ngram_lm_scale_1.5_attention_scale_0.05 14.57
678
+ ngram_lm_scale_2.5_attention_scale_0.9 14.66
679
+ ngram_lm_scale_2.3_attention_scale_0.7 14.72
680
+ ngram_lm_scale_4.0_attention_scale_2.2 14.75
681
+ ngram_lm_scale_2.2_attention_scale_0.6 14.76
682
+ ngram_lm_scale_3.0_attention_scale_1.3 14.76
683
+ ngram_lm_scale_2.1_attention_scale_0.5 14.8
684
+ ngram_lm_scale_1.5_attention_scale_0.01 14.82
685
+ ngram_lm_scale_5.0_attention_scale_3.0 14.84
686
+ ngram_lm_scale_4.0_attention_scale_2.1 14.9
687
+ ngram_lm_scale_1.9_attention_scale_0.3 14.93
688
+ ngram_lm_scale_3.0_attention_scale_1.2 14.98
689
+ ngram_lm_scale_2.3_attention_scale_0.6 15.04
690
+ ngram_lm_scale_4.0_attention_scale_2.0 15.07
691
+ ngram_lm_scale_2.2_attention_scale_0.5 15.13
692
+ ngram_lm_scale_1.7_attention_scale_0.1 15.2
693
+ ngram_lm_scale_3.0_attention_scale_1.1 15.24
694
+ ngram_lm_scale_4.0_attention_scale_1.9 15.25
695
+ ngram_lm_scale_2.5_attention_scale_0.7 15.26
696
+ ngram_lm_scale_1.7_attention_scale_0.08 15.3
697
+ ngram_lm_scale_2.0_attention_scale_0.3 15.31
698
+ ngram_lm_scale_2.3_attention_scale_0.5 15.41
699
+ ngram_lm_scale_1.7_attention_scale_0.05 15.48
700
+ ngram_lm_scale_3.0_attention_scale_1.0 15.54
701
+ ngram_lm_scale_2.5_attention_scale_0.6 15.59
702
+ ngram_lm_scale_5.0_attention_scale_2.5 15.61
703
+ ngram_lm_scale_2.1_attention_scale_0.3 15.62
704
+ ngram_lm_scale_4.0_attention_scale_1.7 15.66
705
+ ngram_lm_scale_1.7_attention_scale_0.01 15.73
706
+ ngram_lm_scale_3.0_attention_scale_0.9 15.8
707
+ ngram_lm_scale_5.0_attention_scale_2.3 15.9
708
+ ngram_lm_scale_1.9_attention_scale_0.1 15.91
709
+ ngram_lm_scale_2.2_attention_scale_0.3 15.93
710
+ ngram_lm_scale_2.5_attention_scale_0.5 15.96
711
+ ngram_lm_scale_1.9_attention_scale_0.08 16.02
712
+ ngram_lm_scale_4.0_attention_scale_1.5 16.04
713
+ ngram_lm_scale_5.0_attention_scale_2.2 16.04
714
+ ngram_lm_scale_1.9_attention_scale_0.05 16.18
715
+ ngram_lm_scale_5.0_attention_scale_2.1 16.2
716
+ ngram_lm_scale_2.3_attention_scale_0.3 16.21
717
+ ngram_lm_scale_2.0_attention_scale_0.1 16.25
718
+ ngram_lm_scale_3.0_attention_scale_0.7 16.34
719
+ ngram_lm_scale_2.0_attention_scale_0.08 16.35
720
+ ngram_lm_scale_5.0_attention_scale_2.0 16.37
721
+ ngram_lm_scale_1.9_attention_scale_0.01 16.42
722
+ ngram_lm_scale_4.0_attention_scale_1.3 16.45
723
+ ngram_lm_scale_2.0_attention_scale_0.05 16.5
724
+ ngram_lm_scale_5.0_attention_scale_1.9 16.52
725
+ ngram_lm_scale_2.1_attention_scale_0.1 16.55
726
+ ngram_lm_scale_4.0_attention_scale_1.2 16.62
727
+ ngram_lm_scale_2.1_attention_scale_0.08 16.64
728
+ ngram_lm_scale_3.0_attention_scale_0.6 16.64
729
+ ngram_lm_scale_2.5_attention_scale_0.3 16.67
730
+ ngram_lm_scale_2.0_attention_scale_0.01 16.71
731
+ ngram_lm_scale_2.1_attention_scale_0.05 16.77
732
+ ngram_lm_scale_2.2_attention_scale_0.1 16.8
733
+ ngram_lm_scale_5.0_attention_scale_1.7 16.82
734
+ ngram_lm_scale_4.0_attention_scale_1.1 16.84
735
+ ngram_lm_scale_2.2_attention_scale_0.08 16.89
736
+ ngram_lm_scale_3.0_attention_scale_0.5 16.95
737
+ ngram_lm_scale_2.1_attention_scale_0.01 16.99
738
+ ngram_lm_scale_2.2_attention_scale_0.05 17.02
739
+ ngram_lm_scale_2.3_attention_scale_0.1 17.02
740
+ ngram_lm_scale_4.0_attention_scale_1.0 17.07
741
+ ngram_lm_scale_2.3_attention_scale_0.08 17.09
742
+ ngram_lm_scale_5.0_attention_scale_1.5 17.16
743
+ ngram_lm_scale_2.2_attention_scale_0.01 17.18
744
+ ngram_lm_scale_2.3_attention_scale_0.05 17.2
745
+ ngram_lm_scale_4.0_attention_scale_0.9 17.24
746
+ ngram_lm_scale_2.3_attention_scale_0.01 17.38
747
+ ngram_lm_scale_2.5_attention_scale_0.1 17.4
748
+ ngram_lm_scale_5.0_attention_scale_1.3 17.45
749
+ ngram_lm_scale_2.5_attention_scale_0.08 17.47
750
+ ngram_lm_scale_3.0_attention_scale_0.3 17.53
751
+ ngram_lm_scale_2.5_attention_scale_0.05 17.58
752
+ ngram_lm_scale_5.0_attention_scale_1.2 17.63
753
+ ngram_lm_scale_2.5_attention_scale_0.01 17.7
754
+ ngram_lm_scale_4.0_attention_scale_0.7 17.7
755
+ ngram_lm_scale_5.0_attention_scale_1.1 17.8
756
+ ngram_lm_scale_4.0_attention_scale_0.6 17.89
757
+ ngram_lm_scale_5.0_attention_scale_1.0 17.94
758
+ ngram_lm_scale_3.0_attention_scale_0.1 18.09
759
+ ngram_lm_scale_4.0_attention_scale_0.5 18.09
760
+ ngram_lm_scale_5.0_attention_scale_0.9 18.09
761
+ ngram_lm_scale_3.0_attention_scale_0.08 18.14
762
+ ngram_lm_scale_3.0_attention_scale_0.05 18.21
763
+ ngram_lm_scale_3.0_attention_scale_0.01 18.31
764
+ ngram_lm_scale_5.0_attention_scale_0.7 18.41
765
+ ngram_lm_scale_4.0_attention_scale_0.3 18.49
766
+ ngram_lm_scale_5.0_attention_scale_0.6 18.57
767
+ ngram_lm_scale_5.0_attention_scale_0.5 18.71
768
+ ngram_lm_scale_4.0_attention_scale_0.1 18.85
769
+ ngram_lm_scale_4.0_attention_scale_0.08 18.88
770
+ ngram_lm_scale_4.0_attention_scale_0.05 18.95
771
+ ngram_lm_scale_5.0_attention_scale_0.3 19.01
772
+ ngram_lm_scale_4.0_attention_scale_0.01 19.02
773
+ ngram_lm_scale_5.0_attention_scale_0.1 19.3
774
+ ngram_lm_scale_5.0_attention_scale_0.08 19.32
775
+ ngram_lm_scale_5.0_attention_scale_0.05 19.37
776
+ ngram_lm_scale_5.0_attention_scale_0.01 19.43
777
+
778
+ 2022-04-08 23:20:49,165 INFO [decode.py:730] Done!
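+
+ The table above is a grid search over ngram_lm_scale and attention_scale for attention-decoder rescoring, sorted by WER. A minimal sketch (not part of this repo; the file name wer-summary.txt is a hypothetical placeholder) of picking the best pair out of such lines:
+
+ ```python
+ import re
+
+ # Matches lines like "ngram_lm_scale_0.6_attention_scale_0.08 10.73".
+ PATTERN = re.compile(
+     r"ngram_lm_scale_([\d.]+)_attention_scale_([\d.]+)\s+([\d.]+)"
+ )
+
+ def best_scales(lines):
+     """Return (ngram_lm_scale, attention_scale, wer) with the lowest WER."""
+     results = []
+     for line in lines:
+         m = PATTERN.search(line)
+         if m:
+             lm, attn, wer = map(float, m.groups())
+             results.append((wer, lm, attn))
+     wer, lm, attn = min(results)  # min over (wer, ...) tuples -> lowest WER
+     return lm, attn, wer
+
+ with open("wer-summary.txt") as f:  # hypothetical results file
+     lm, attn, wer = best_scales(f)
+ print(f"best: ngram_lm_scale={lm} attention_scale={attn} -> WER {wer}")
+ ```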
log/log-decode-2022-04-09-01-40-41 ADDED
@@ -0,0 +1,1176 @@
1
+ 2022-04-09 01:40:41,909 INFO [decode_test.py:583] Decoding started
2
+ 2022-04-09 01:40:41,910 INFO [decode_test.py:584] {'subsampling_factor': 4, 'vgg_frontend': False, 'use_feat_batchnorm': True, 'feature_dim': 80, 'nhead': 8, 'attention_dim': 512, 'num_decoder_layers': 6, 'search_beam': 20, 'output_beam': 8, 'min_active_states': 30, 'max_active_states': 10000, 'use_double_scores': True, 'env_info': {'k2-version': '1.14', 'k2-build-type': 'Release', 'k2-with-cuda': True, 'k2-git-sha1': '6833270cb228aba7bf9681fccd41e2b52f7d984c', 'k2-git-date': 'Wed Mar 16 11:16:05 2022', 'lhotse-version': '1.0.0.dev+git.d917411.clean', 'torch-cuda-available': True, 'torch-cuda-version': '11.1', 'python-version': '3.7', 'icefall-git-branch': 'gigaspeech_recipe', 'icefall-git-sha1': 'c3993a5-dirty', 'icefall-git-date': 'Mon Mar 21 13:49:39 2022', 'icefall-path': '/userhome/user/guanbo/icefall_decode', 'k2-path': '/opt/conda/lib/python3.7/site-packages/k2-1.14.dev20220408+cuda11.1.torch1.10.0-py3.7-linux-x86_64.egg/k2/__init__.py', 'lhotse-path': '/userhome/user/guanbo/lhotse/lhotse/__init__.py', 'hostname': 'c8861f400b70d011ec0a3ee069db84328338-chenx8564-0', 'IP address': '10.9.150.55'}, 'epoch': 18, 'avg': 6, 'method': 'attention-decoder', 'num_paths': 1000, 'nbest_scale': 0.5, 'exp_dir': PosixPath('conformer_ctc/exp_500_8_2'), 'lang_dir': PosixPath('data/lang_bpe_500'), 'lm_dir': PosixPath('data/lm'), 'manifest_dir': PosixPath('data/fbank'), 'max_duration': 20, 'bucketing_sampler': True, 'num_buckets': 30, 'concatenate_cuts': False, 'duration_factor': 1.0, 'gap': 1.0, 'on_the_fly_feats': False, 'shuffle': True, 'return_cuts': True, 'num_workers': 1, 'enable_spec_aug': True, 'spec_aug_time_warp_factor': 80, 'enable_musan': True, 'subset': 'XL', 'lazy_load': True, 'small_dev': False}
3
+ 2022-04-09 01:40:42,371 INFO [lexicon.py:176] Loading pre-compiled data/lang_bpe_500/Linv.pt
4
+ 2022-04-09 01:40:42,473 INFO [decode_test.py:594] device: cuda:0
5
+ 2022-04-09 01:40:46,249 INFO [decode_test.py:656] Loading pre-compiled G_4_gram.pt
6
+ 2022-04-09 01:40:47,406 INFO [decode_test.py:692] averaging ['conformer_ctc/exp_500_8_2/epoch-13.pt', 'conformer_ctc/exp_500_8_2/epoch-14.pt', 'conformer_ctc/exp_500_8_2/epoch-15.pt', 'conformer_ctc/exp_500_8_2/epoch-16.pt', 'conformer_ctc/exp_500_8_2/epoch-17.pt', 'conformer_ctc/exp_500_8_2/epoch-18.pt']
7
+ 2022-04-09 01:40:53,065 INFO [decode_test.py:699] Number of model parameters: 109226120
8
+ 2022-04-09 01:40:53,065 INFO [asr_datamodule.py:381] About to get test cuts
9
+ 2022-04-09 01:40:56,361 INFO [decode_test.py:497] batch 0/?, cuts processed until now is 3
10
+ 2022-04-09 01:41:24,462 INFO [decode.py:736] Caught exception:
11
+ CUDA out of memory. Tried to allocate 5.93 GiB (GPU 0; 31.75 GiB total capacity; 27.23 GiB already allocated; 1.90 GiB free; 28.49 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
12
+
13
+ 2022-04-09 01:41:24,462 INFO [decode.py:743] num_arcs before pruning: 324363
14
+ 2022-04-09 01:41:24,462 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
15
+ 2022-04-09 01:41:24,473 INFO [decode.py:757] num_arcs after pruning: 7174
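+
+ The OOM message above quotes PyTorch's own allocator hint about max_split_size_mb. One way to act on it is to set PYTORCH_CUDA_ALLOC_CONF before any CUDA allocation happens; a minimal sketch, where the 128 MiB split size is an illustrative value, not something taken from this log:
+
+ ```python
+ import os
+
+ # Must be set before the CUDA caching allocator is initialized,
+ # i.e. before the first CUDA tensor is created.
+ os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:128")
+
+ import torch  # imported after the env var so the setting takes effect
+ ```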
16
+ 2022-04-09 01:41:40,284 INFO [decode.py:736] Caught exception:
17
+ CUDA out of memory. Tried to allocate 4.67 GiB (GPU 0; 31.75 GiB total capacity; 25.69 GiB already allocated; 2.92 GiB free; 27.47 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
18
+
19
+ 2022-04-09 01:41:40,285 INFO [decode.py:743] num_arcs before pruning: 368362
20
+ 2022-04-09 01:41:40,285 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
21
+ 2022-04-09 01:41:40,305 INFO [decode.py:757] num_arcs after pruning: 8521
22
+ 2022-04-09 01:42:38,727 INFO [decode.py:736] Caught exception:
23
+ CUDA out of memory. Tried to allocate 2.18 GiB (GPU 0; 31.75 GiB total capacity; 26.05 GiB already allocated; 1.42 GiB free; 28.98 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
24
+
25
+ 2022-04-09 01:42:38,727 INFO [decode.py:743] num_arcs before pruning: 432616
26
+ 2022-04-09 01:42:38,728 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
27
+ 2022-04-09 01:42:38,736 INFO [decode.py:757] num_arcs after pruning: 9233
28
+ 2022-04-09 01:43:13,573 INFO [decode_test.py:497] batch 100/?, cuts processed until now is 297
29
+ 2022-04-09 01:43:48,362 INFO [decode.py:736] Caught exception:
30
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 25.34 GiB already allocated; 2.20 GiB free; 28.20 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
31
+
32
+ 2022-04-09 01:43:48,363 INFO [decode.py:743] num_arcs before pruning: 319907
33
+ 2022-04-09 01:43:48,363 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
34
+ 2022-04-09 01:43:48,372 INFO [decode.py:757] num_arcs after pruning: 6358
35
+ 2022-04-09 01:43:59,713 INFO [decode.py:736] Caught exception:
36
+ CUDA out of memory. Tried to allocate 2.74 GiB (GPU 0; 31.75 GiB total capacity; 27.51 GiB already allocated; 2.19 GiB free; 28.20 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
37
+
38
+ 2022-04-09 01:43:59,713 INFO [decode.py:743] num_arcs before pruning: 313596
39
+ 2022-04-09 01:43:59,713 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
40
+ 2022-04-09 01:43:59,724 INFO [decode.py:757] num_arcs after pruning: 8252
41
+ 2022-04-09 01:44:54,463 INFO [decode.py:736] Caught exception:
42
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 25.25 GiB already allocated; 3.07 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
43
+
44
+ 2022-04-09 01:44:54,463 INFO [decode.py:743] num_arcs before pruning: 353355
45
+ 2022-04-09 01:44:54,463 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
46
+ 2022-04-09 01:44:54,485 INFO [decode.py:757] num_arcs after pruning: 7520
47
+ 2022-04-09 01:45:20,716 INFO [decode_test.py:497] batch 200/?, cuts processed until now is 570
48
+ 2022-04-09 01:47:19,457 INFO [decode_test.py:497] batch 300/?, cuts processed until now is 806
49
+ 2022-04-09 01:47:38,292 INFO [decode.py:736] Caught exception:
50
+ CUDA out of memory. Tried to allocate 2.28 GiB (GPU 0; 31.75 GiB total capacity; 26.28 GiB already allocated; 1.48 GiB free; 28.92 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
51
+
52
+ 2022-04-09 01:47:38,293 INFO [decode.py:743] num_arcs before pruning: 596002
53
+ 2022-04-09 01:47:38,293 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
54
+ 2022-04-09 01:47:38,312 INFO [decode.py:757] num_arcs after pruning: 10745
55
+ 2022-04-09 01:49:18,493 INFO [decode.py:736] Caught exception:
56
+
57
+ Some bad things happened. Please read the above error messages and stack
58
+ trace. If you are using Python, the following command may be helpful:
59
+
60
+ gdb --args python /path/to/your/code.py
61
+
62
+ (You can use `gdb` to debug the code. Please consider compiling
63
+ a debug version of k2.).
64
+
65
+ If you are unable to fix it, please open an issue at:
66
+
67
+ https://github.com/k2-fsa/k2/issues/new
68
+
69
+
70
+ 2022-04-09 01:49:18,494 INFO [decode.py:743] num_arcs before pruning: 398202
71
+ 2022-04-09 01:49:18,494 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
72
+ 2022-04-09 01:49:18,541 INFO [decode.py:757] num_arcs after pruning: 14003
73
+ 2022-04-09 01:49:21,800 INFO [decode_test.py:497] batch 400/?, cuts processed until now is 1082
74
+ 2022-04-09 01:50:58,700 INFO [decode.py:736] Caught exception:
75
+ CUDA out of memory. Tried to allocate 4.85 GiB (GPU 0; 31.75 GiB total capacity; 25.89 GiB already allocated; 1.48 GiB free; 28.92 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
76
+
77
+ 2022-04-09 01:50:58,701 INFO [decode.py:743] num_arcs before pruning: 398349
78
+ 2022-04-09 01:50:58,701 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
79
+ 2022-04-09 01:50:58,709 INFO [decode.py:757] num_arcs after pruning: 10321
80
+ 2022-04-09 01:51:31,627 INFO [decode_test.py:497] batch 500/?, cuts processed until now is 1334
81
+ 2022-04-09 01:52:05,232 INFO [decode.py:736] Caught exception:
82
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.62 GiB already allocated; 1.47 GiB free; 28.93 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
83
+
84
+ 2022-04-09 01:52:05,232 INFO [decode.py:743] num_arcs before pruning: 212665
85
+ 2022-04-09 01:52:05,232 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
86
+ 2022-04-09 01:52:05,241 INFO [decode.py:757] num_arcs after pruning: 6301
87
+ 2022-04-09 01:53:29,890 INFO [decode.py:736] Caught exception:
88
+ CUDA out of memory. Tried to allocate 1.91 GiB (GPU 0; 31.75 GiB total capacity; 25.66 GiB already allocated; 1.48 GiB free; 28.92 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
89
+
90
+ 2022-04-09 01:53:29,891 INFO [decode.py:743] num_arcs before pruning: 883555
91
+ 2022-04-09 01:53:29,891 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
92
+ 2022-04-09 01:53:29,905 INFO [decode.py:757] num_arcs after pruning: 14819
93
+ 2022-04-09 01:53:38,676 INFO [decode_test.py:497] batch 600/?, cuts processed until now is 1651
94
+ 2022-04-09 01:54:57,438 INFO [decode.py:736] Caught exception:
95
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 25.34 GiB already allocated; 1.48 GiB free; 28.92 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
96
+
97
+ 2022-04-09 01:54:57,438 INFO [decode.py:743] num_arcs before pruning: 515795
98
+ 2022-04-09 01:54:57,438 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
99
+ 2022-04-09 01:54:57,447 INFO [decode.py:757] num_arcs after pruning: 10132
100
+ 2022-04-09 01:55:28,356 INFO [decode.py:736] Caught exception:
101
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.46 GiB already allocated; 1.48 GiB free; 28.92 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
102
+
103
+ 2022-04-09 01:55:28,356 INFO [decode.py:743] num_arcs before pruning: 670748
104
+ 2022-04-09 01:55:28,356 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
105
+ 2022-04-09 01:55:28,365 INFO [decode.py:757] num_arcs after pruning: 10497
106
+ 2022-04-09 01:55:42,238 INFO [decode_test.py:497] batch 700/?, cuts processed until now is 1956
107
+ 2022-04-09 01:57:57,456 INFO [decode_test.py:497] batch 800/?, cuts processed until now is 2238
108
+ 2022-04-09 01:58:04,281 INFO [decode.py:736] Caught exception:
109
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.45 GiB already allocated; 3.07 GiB free; 27.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
110
+
111
+ 2022-04-09 01:58:04,282 INFO [decode.py:743] num_arcs before pruning: 175423
112
+ 2022-04-09 01:58:04,282 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
113
+ 2022-04-09 01:58:04,296 INFO [decode.py:757] num_arcs after pruning: 7926
114
+ 2022-04-09 01:59:07,916 INFO [decode.py:736] Caught exception:
115
+ CUDA out of memory. Tried to allocate 4.68 GiB (GPU 0; 31.75 GiB total capacity; 24.40 GiB already allocated; 3.06 GiB free; 27.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
116
+
117
+ 2022-04-09 01:59:07,917 INFO [decode.py:743] num_arcs before pruning: 259758
118
+ 2022-04-09 01:59:07,917 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
119
+ 2022-04-09 01:59:07,928 INFO [decode.py:757] num_arcs after pruning: 6026
120
+ 2022-04-09 02:00:00,623 INFO [decode_test.py:497] batch 900/?, cuts processed until now is 2536
121
+ 2022-04-09 02:01:22,959 INFO [decode.py:736] Caught exception:
122
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.44 GiB already allocated; 3.08 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
123
+
124
+ 2022-04-09 02:01:22,959 INFO [decode.py:743] num_arcs before pruning: 749228
125
+ 2022-04-09 02:01:22,959 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
126
+ 2022-04-09 02:01:22,968 INFO [decode.py:757] num_arcs after pruning: 23868
127
+ 2022-04-09 02:01:59,449 INFO [decode_test.py:497] batch 1000/?, cuts processed until now is 2824
128
+ 2022-04-09 02:03:05,494 INFO [decode.py:736] Caught exception:
129
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.38 GiB already allocated; 3.06 GiB free; 27.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
130
+
131
+ 2022-04-09 02:03:05,494 INFO [decode.py:743] num_arcs before pruning: 255135
132
+ 2022-04-09 02:03:05,494 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
133
+ 2022-04-09 02:03:05,504 INFO [decode.py:757] num_arcs after pruning: 5955
134
+ 2022-04-09 02:03:48,017 INFO [decode.py:736] Caught exception:
135
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.61 GiB already allocated; 3.08 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
136
+
137
+ 2022-04-09 02:03:48,017 INFO [decode.py:743] num_arcs before pruning: 517077
138
+ 2022-04-09 02:03:48,017 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
139
+ 2022-04-09 02:03:48,026 INFO [decode.py:757] num_arcs after pruning: 7695
140
+ 2022-04-09 02:04:09,806 INFO [decode_test.py:497] batch 1100/?, cuts processed until now is 3105
141
+ 2022-04-09 02:04:31,410 INFO [decode.py:736] Caught exception:
142
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.34 GiB already allocated; 3.08 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
143
+
144
+ 2022-04-09 02:04:31,411 INFO [decode.py:743] num_arcs before pruning: 859561
145
+ 2022-04-09 02:04:31,411 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
146
+ 2022-04-09 02:04:31,422 INFO [decode.py:757] num_arcs after pruning: 13014
147
+ 2022-04-09 02:06:11,496 INFO [decode_test.py:497] batch 1200/?, cuts processed until now is 3401
148
+ 2022-04-09 02:08:10,727 INFO [decode_test.py:497] batch 1300/?, cuts processed until now is 3730
149
+ 2022-04-09 02:10:17,677 INFO [decode_test.py:497] batch 1400/?, cuts processed until now is 4067
150
+ 2022-04-09 02:12:13,175 INFO [decode_test.py:497] batch 1500/?, cuts processed until now is 4329
151
+ 2022-04-09 02:13:02,842 INFO [decode.py:736] Caught exception:
152
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.55 GiB already allocated; 3.08 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
153
+
154
+ 2022-04-09 02:13:02,843 INFO [decode.py:743] num_arcs before pruning: 475511
155
+ 2022-04-09 02:13:02,843 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
156
+ 2022-04-09 02:13:02,849 INFO [decode.py:757] num_arcs after pruning: 8439
157
+ 2022-04-09 02:13:46,588 INFO [decode.py:736] Caught exception:
158
+ CUDA out of memory. Tried to allocate 2.37 GiB (GPU 0; 31.75 GiB total capacity; 26.83 GiB already allocated; 1.45 GiB free; 28.94 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
159
+
160
+ 2022-04-09 02:13:46,588 INFO [decode.py:743] num_arcs before pruning: 595488
161
+ 2022-04-09 02:13:46,588 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
162
+ 2022-04-09 02:13:46,598 INFO [decode.py:757] num_arcs after pruning: 13475
163
+ 2022-04-09 02:14:21,206 INFO [decode_test.py:497] batch 1600/?, cuts processed until now is 4598
164
+ 2022-04-09 02:16:42,740 INFO [decode_test.py:497] batch 1700/?, cuts processed until now is 4969
165
+ 2022-04-09 02:17:13,672 INFO [decode.py:736] Caught exception:
166
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 25.39 GiB already allocated; 1.45 GiB free; 28.94 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
167
+
168
+ 2022-04-09 02:17:13,673 INFO [decode.py:743] num_arcs before pruning: 615734
169
+ 2022-04-09 02:17:13,673 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
170
+ 2022-04-09 02:17:13,685 INFO [decode.py:757] num_arcs after pruning: 8684
171
+ 2022-04-09 02:18:54,514 INFO [decode_test.py:497] batch 1800/?, cuts processed until now is 5260
172
+ 2022-04-09 02:18:59,938 INFO [decode.py:736] Caught exception:
173
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.36 GiB already allocated; 3.08 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
174
+
175
+ 2022-04-09 02:18:59,938 INFO [decode.py:743] num_arcs before pruning: 360099
176
+ 2022-04-09 02:18:59,938 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
177
+ 2022-04-09 02:18:59,949 INFO [decode.py:757] num_arcs after pruning: 6898
178
+ 2022-04-09 02:19:48,186 INFO [decode.py:736] Caught exception:
179
+ CUDA out of memory. Tried to allocate 6.00 GiB (GPU 0; 31.75 GiB total capacity; 27.15 GiB already allocated; 967.75 MiB free; 29.45 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
180
+
181
+ 2022-04-09 02:19:48,186 INFO [decode.py:743] num_arcs before pruning: 168720
182
+ 2022-04-09 02:19:48,186 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
183
+ 2022-04-09 02:19:48,201 INFO [decode.py:757] num_arcs after pruning: 5346
184
+ 2022-04-09 02:20:52,049 INFO [decode_test.py:497] batch 1900/?, cuts processed until now is 5585
185
+ 2022-04-09 02:22:12,107 INFO [decode.py:736] Caught exception:
186
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.45 GiB already allocated; 973.75 MiB free; 29.44 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
187
+
188
+ 2022-04-09 02:22:12,107 INFO [decode.py:743] num_arcs before pruning: 1151735
189
+ 2022-04-09 02:22:12,107 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
190
+ 2022-04-09 02:22:12,120 INFO [decode.py:757] num_arcs after pruning: 8335
191
+ 2022-04-09 02:23:01,497 INFO [decode_test.py:497] batch 2000/?, cuts processed until now is 5902
192
+ 2022-04-09 02:25:26,356 INFO [decode_test.py:497] batch 2100/?, cuts processed until now is 6219
193
+ 2022-04-09 02:25:56,466 INFO [decode.py:736] Caught exception:
194
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.34 GiB already allocated; 973.75 MiB free; 29.44 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
195
+
196
+ 2022-04-09 02:25:56,467 INFO [decode.py:743] num_arcs before pruning: 612804
197
+ 2022-04-09 02:25:56,467 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
198
+ 2022-04-09 02:25:56,477 INFO [decode.py:757] num_arcs after pruning: 10853
199
+ 2022-04-09 02:27:26,441 INFO [decode_test.py:497] batch 2200/?, cuts processed until now is 6480
200
+ 2022-04-09 02:29:28,073 INFO [decode_test.py:497] batch 2300/?, cuts processed until now is 6768
201
+ 2022-04-09 02:31:41,553 INFO [decode_test.py:497] batch 2400/?, cuts processed until now is 7120
202
+ 2022-04-09 02:31:55,632 INFO [decode.py:736] Caught exception:
203
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.42 GiB already allocated; 3.07 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
204
+
205
+ 2022-04-09 02:31:55,632 INFO [decode.py:743] num_arcs before pruning: 411490
206
+ 2022-04-09 02:31:55,632 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
207
+ 2022-04-09 02:31:55,638 INFO [decode.py:757] num_arcs after pruning: 8626
208
+ 2022-04-09 02:33:22,034 INFO [decode.py:736] Caught exception:
209
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.42 GiB already allocated; 3.07 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
210
+
211
+ 2022-04-09 02:33:22,034 INFO [decode.py:743] num_arcs before pruning: 625728
212
+ 2022-04-09 02:33:22,035 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
213
+ 2022-04-09 02:33:22,043 INFO [decode.py:757] num_arcs after pruning: 9502
214
+ 2022-04-09 02:33:37,663 INFO [decode_test.py:497] batch 2500/?, cuts processed until now is 7387
215
+ 2022-04-09 02:34:18,300 INFO [decode.py:736] Caught exception:
216
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.51 GiB already allocated; 3.07 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
217
+
218
+ 2022-04-09 02:34:18,301 INFO [decode.py:743] num_arcs before pruning: 1015956
219
+ 2022-04-09 02:34:18,301 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
220
+ 2022-04-09 02:34:18,314 INFO [decode.py:757] num_arcs after pruning: 14404
221
+ 2022-04-09 02:34:20,220 INFO [decode.py:841] Caught exception:
222
+ CUDA out of memory. Tried to allocate 5.58 GiB (GPU 0; 31.75 GiB total capacity; 24.87 GiB already allocated; 3.07 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
223
+
224
+ 2022-04-09 02:34:20,221 INFO [decode.py:843] num_paths before decreasing: 1000
225
+ 2022-04-09 02:34:20,221 INFO [decode.py:852] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
226
+ 2022-04-09 02:34:20,221 INFO [decode.py:858] num_paths after decreasing: 500
227
+ 2022-04-09 02:34:40,089 INFO [decode.py:736] Caught exception:
228
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.38 GiB already allocated; 3.07 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
229
+
230
+ 2022-04-09 02:34:40,089 INFO [decode.py:743] num_arcs before pruning: 570686
231
+ 2022-04-09 02:34:40,089 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
232
+ 2022-04-09 02:34:40,098 INFO [decode.py:757] num_arcs after pruning: 9182
233
+ 2022-04-09 02:35:50,624 INFO [decode_test.py:497] batch 2600/?, cuts processed until now is 7764
234
+ 2022-04-09 02:36:44,519 INFO [decode.py:736] Caught exception:
235
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.61 GiB already allocated; 3.08 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
236
+
237
+ 2022-04-09 02:36:44,519 INFO [decode.py:743] num_arcs before pruning: 1066267
238
+ 2022-04-09 02:36:44,519 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
239
+ 2022-04-09 02:36:44,530 INFO [decode.py:757] num_arcs after pruning: 6963
240
+ 2022-04-09 02:38:18,717 INFO [decode_test.py:497] batch 2700/?, cuts processed until now is 8078
241
+ 2022-04-09 02:40:07,021 INFO [decode.py:736] Caught exception:
242
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.42 GiB already allocated; 3.07 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
243
+
244
+ 2022-04-09 02:40:07,022 INFO [decode.py:743] num_arcs before pruning: 1023667
245
+ 2022-04-09 02:40:07,022 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
246
+ 2022-04-09 02:40:07,034 INFO [decode.py:757] num_arcs after pruning: 13090
247
+ 2022-04-09 02:40:25,184 INFO [decode_test.py:497] batch 2800/?, cuts processed until now is 8444
248
+ 2022-04-09 02:41:27,080 INFO [decode.py:736] Caught exception:
249
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.32 GiB already allocated; 3.08 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
250
+
251
+ 2022-04-09 02:41:27,080 INFO [decode.py:743] num_arcs before pruning: 739744
252
+ 2022-04-09 02:41:27,080 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
253
+ 2022-04-09 02:41:27,093 INFO [decode.py:757] num_arcs after pruning: 9791
254
+ 2022-04-09 02:42:44,319 INFO [decode_test.py:497] batch 2900/?, cuts processed until now is 8765
255
+ 2022-04-09 02:42:44,656 INFO [decode.py:736] Caught exception:
256
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.73 GiB already allocated; 3.08 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
257
+
258
+ 2022-04-09 02:42:44,656 INFO [decode.py:743] num_arcs before pruning: 666168
259
+ 2022-04-09 02:42:44,656 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
260
+ 2022-04-09 02:42:44,665 INFO [decode.py:757] num_arcs after pruning: 17223
261
+ 2022-04-09 02:43:05,748 INFO [decode.py:736] Caught exception:
262
+ CUDA out of memory. Tried to allocate 5.60 GiB (GPU 0; 31.75 GiB total capacity; 26.18 GiB already allocated; 1.14 GiB free; 29.26 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
263
+
264
+ 2022-04-09 02:43:05,748 INFO [decode.py:743] num_arcs before pruning: 188729
265
+ 2022-04-09 02:43:05,748 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
266
+ 2022-04-09 02:43:05,762 INFO [decode.py:757] num_arcs after pruning: 8688
267
+ 2022-04-09 02:44:54,469 INFO [decode_test.py:497] batch 3000/?, cuts processed until now is 9050
268
+ 2022-04-09 02:46:55,167 INFO [decode_test.py:497] batch 3100/?, cuts processed until now is 9296
269
+ 2022-04-09 02:47:28,418 INFO [decode.py:736] Caught exception:
270
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 20.00 GiB already allocated; 3.07 GiB free; 27.33 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
271
+
272
+ 2022-04-09 02:47:28,419 INFO [decode.py:743] num_arcs before pruning: 160153
273
+ 2022-04-09 02:47:28,419 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
274
+ 2022-04-09 02:47:28,448 INFO [decode.py:757] num_arcs after pruning: 7778
275
+ 2022-04-09 02:49:21,448 INFO [decode_test.py:497] batch 3200/?, cuts processed until now is 9652
276
+ 2022-04-09 02:50:17,558 INFO [decode.py:736] Caught exception:
277
+ CUDA out of memory. Tried to allocate 6.13 GiB (GPU 0; 31.75 GiB total capacity; 27.60 GiB already allocated; 895.75 MiB free; 29.52 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
278
+
279
+ 2022-04-09 02:50:17,558 INFO [decode.py:743] num_arcs before pruning: 388116
280
+ 2022-04-09 02:50:17,559 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
281
+ 2022-04-09 02:50:17,565 INFO [decode.py:757] num_arcs after pruning: 10555
282
+ 2022-04-09 02:51:30,675 INFO [decode_test.py:497] batch 3300/?, cuts processed until now is 10071
283
+ 2022-04-09 02:53:49,565 INFO [decode_test.py:497] batch 3400/?, cuts processed until now is 10342
284
+ 2022-04-09 02:55:49,392 INFO [decode_test.py:497] batch 3500/?, cuts processed until now is 10642
285
+ 2022-04-09 02:58:07,518 INFO [decode_test.py:497] batch 3600/?, cuts processed until now is 10951
286
+ 2022-04-09 02:58:16,360 INFO [decode.py:736] Caught exception:
287
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.29 GiB already allocated; 3.07 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
288
+
289
+ 2022-04-09 02:58:16,361 INFO [decode.py:743] num_arcs before pruning: 396714
290
+ 2022-04-09 02:58:16,361 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
291
+ 2022-04-09 02:58:16,374 INFO [decode.py:757] num_arcs after pruning: 9543
292
+ 2022-04-09 03:00:00,485 INFO [decode_test.py:497] batch 3700/?, cuts processed until now is 11231
293
+ 2022-04-09 03:00:17,600 INFO [decode.py:736] Caught exception:
294
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.45 GiB already allocated; 3.07 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
295
+
296
+ 2022-04-09 03:00:17,601 INFO [decode.py:743] num_arcs before pruning: 854366
297
+ 2022-04-09 03:00:17,601 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
298
+ 2022-04-09 03:00:17,612 INFO [decode.py:757] num_arcs after pruning: 10487
299
+ 2022-04-09 03:00:20,098 INFO [decode.py:736] Caught exception:
300
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.68 GiB already allocated; 3.07 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
301
+
302
+ 2022-04-09 03:00:20,098 INFO [decode.py:743] num_arcs before pruning: 442824
303
+ 2022-04-09 03:00:20,098 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
304
+ 2022-04-09 03:00:20,108 INFO [decode.py:757] num_arcs after pruning: 5265
305
+ 2022-04-09 03:02:00,114 INFO [decode_test.py:497] batch 3800/?, cuts processed until now is 11509
306
+ 2022-04-09 03:02:11,570 INFO [decode.py:736] Caught exception:
307
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.19 GiB already allocated; 3.07 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
308
+
309
+ 2022-04-09 03:02:11,571 INFO [decode.py:743] num_arcs before pruning: 285638
310
+ 2022-04-09 03:02:11,571 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
311
+ 2022-04-09 03:02:11,579 INFO [decode.py:757] num_arcs after pruning: 5903
312
+ 2022-04-09 03:04:02,757 INFO [decode_test.py:497] batch 3900/?, cuts processed until now is 11774
313
+ 2022-04-09 03:05:19,989 INFO [decode.py:736] Caught exception:
314
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.73 GiB already allocated; 3.08 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
315
+
316
+ 2022-04-09 03:05:19,990 INFO [decode.py:743] num_arcs before pruning: 637327
317
+ 2022-04-09 03:05:19,990 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
318
+ 2022-04-09 03:05:19,999 INFO [decode.py:757] num_arcs after pruning: 6357
319
+ 2022-04-09 03:06:01,953 INFO [decode_test.py:497] batch 4000/?, cuts processed until now is 12045
320
+ 2022-04-09 03:07:49,854 INFO [decode_test.py:497] batch 4100/?, cuts processed until now is 12300
321
+ 2022-04-09 03:09:15,137 INFO [decode.py:736] Caught exception:
322
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.45 GiB already allocated; 3.08 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
323
+
324
+ 2022-04-09 03:09:15,138 INFO [decode.py:743] num_arcs before pruning: 507733
325
+ 2022-04-09 03:09:15,138 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
326
+ 2022-04-09 03:09:15,148 INFO [decode.py:757] num_arcs after pruning: 4196
327
+ 2022-04-09 03:09:47,397 INFO [decode.py:736] Caught exception:
328
+ CUDA out of memory. Tried to allocate 5.86 GiB (GPU 0; 31.75 GiB total capacity; 27.78 GiB already allocated; 925.75 MiB free; 29.49 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
329
+
330
+ 2022-04-09 03:09:47,397 INFO [decode.py:743] num_arcs before pruning: 514118
331
+ 2022-04-09 03:09:47,397 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
332
+ 2022-04-09 03:09:47,407 INFO [decode.py:757] num_arcs after pruning: 7168
333
+ 2022-04-09 03:10:00,013 INFO [decode_test.py:497] batch 4200/?, cuts processed until now is 12580
334
+ 2022-04-09 03:10:33,411 INFO [decode.py:736] Caught exception:
335
+ CUDA out of memory. Tried to allocate 2.80 GiB (GPU 0; 31.75 GiB total capacity; 27.70 GiB already allocated; 925.75 MiB free; 29.49 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
336
+
337
+ 2022-04-09 03:10:33,411 INFO [decode.py:743] num_arcs before pruning: 374935
338
+ 2022-04-09 03:10:33,411 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
339
+ 2022-04-09 03:10:33,418 INFO [decode.py:757] num_arcs after pruning: 10023
340
+ 2022-04-09 03:12:04,333 INFO [decode_test.py:497] batch 4300/?, cuts processed until now is 12807
341
+ 2022-04-09 03:14:06,889 INFO [decode_test.py:497] batch 4400/?, cuts processed until now is 13050
342
+ 2022-04-09 03:14:34,787 INFO [decode.py:736] Caught exception:
343
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.47 GiB already allocated; 925.75 MiB free; 29.49 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
344
+
345
+ 2022-04-09 03:14:34,788 INFO [decode.py:743] num_arcs before pruning: 767465
346
+ 2022-04-09 03:14:34,788 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
347
+ 2022-04-09 03:14:34,797 INFO [decode.py:757] num_arcs after pruning: 19151
348
+ 2022-04-09 03:15:08,864 INFO [decode.py:736] Caught exception:
349
+
350
+ Some bad things happened. Please read the above error messages and stack
351
+ trace. If you are using Python, the following command may be helpful:
352
+
353
+ gdb --args python /path/to/your/code.py
354
+
355
+ (You can use `gdb` to debug the code. Please consider compiling
356
+ a debug version of k2.).
357
+
358
+ If you are unable to fix it, please open an issue at:
359
+
360
+ https://github.com/k2-fsa/k2/issues/new
361
+
362
+
363
+ 2022-04-09 03:15:08,864 INFO [decode.py:743] num_arcs before pruning: 123833
364
+ 2022-04-09 03:15:08,864 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
365
+ 2022-04-09 03:15:08,913 INFO [decode.py:757] num_arcs after pruning: 4150
366
+ 2022-04-09 03:15:34,899 INFO [decode.py:736] Caught exception:
367
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 25.64 GiB already allocated; 3.07 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
368
+
369
+ 2022-04-09 03:15:34,899 INFO [decode.py:743] num_arcs before pruning: 444800
370
+ 2022-04-09 03:15:34,899 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
371
+ 2022-04-09 03:15:34,908 INFO [decode.py:757] num_arcs after pruning: 11839
372
+ 2022-04-09 03:16:08,462 INFO [decode_test.py:497] batch 4500/?, cuts processed until now is 13295
373
+ 2022-04-09 03:17:56,946 INFO [decode_test.py:497] batch 4600/?, cuts processed until now is 13593
374
+ 2022-04-09 03:18:16,099 INFO [decode.py:736] Caught exception:
375
+ CUDA out of memory. Tried to allocate 5.53 GiB (GPU 0; 31.75 GiB total capacity; 26.53 GiB already allocated; 1.12 GiB free; 29.28 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
376
+
377
+ 2022-04-09 03:18:16,099 INFO [decode.py:743] num_arcs before pruning: 350609
378
+ 2022-04-09 03:18:16,100 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
379
+ 2022-04-09 03:18:16,105 INFO [decode.py:757] num_arcs after pruning: 9262
380
+ 2022-04-09 03:19:57,230 INFO [decode_test.py:497] batch 4700/?, cuts processed until now is 13858
381
+ 2022-04-09 03:20:19,775 INFO [decode.py:736] Caught exception:
382
+ CUDA out of memory. Tried to allocate 4.87 GiB (GPU 0; 31.75 GiB total capacity; 25.78 GiB already allocated; 1.12 GiB free; 29.28 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
383
+
384
+ 2022-04-09 03:20:19,775 INFO [decode.py:743] num_arcs before pruning: 375071
385
+ 2022-04-09 03:20:19,775 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
386
+ 2022-04-09 03:20:19,785 INFO [decode.py:757] num_arcs after pruning: 6365
387
+ 2022-04-09 03:21:29,481 INFO [decode.py:736] Caught exception:
388
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.42 GiB already allocated; 1.12 GiB free; 29.27 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
389
+
390
+ 2022-04-09 03:21:29,481 INFO [decode.py:743] num_arcs before pruning: 872088
391
+ 2022-04-09 03:21:29,481 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
392
+ 2022-04-09 03:21:29,492 INFO [decode.py:757] num_arcs after pruning: 10043
393
+ 2022-04-09 03:22:01,760 INFO [decode_test.py:497] batch 4800/?, cuts processed until now is 14079
394
+ 2022-04-09 03:24:10,370 INFO [decode_test.py:497] batch 4900/?, cuts processed until now is 14298
395
+ 2022-04-09 03:26:10,811 INFO [decode_test.py:497] batch 5000/?, cuts processed until now is 14515
396
+ 2022-04-09 03:27:46,191 INFO [decode.py:736] Caught exception:
397
+
398
+ Some bad things happened. Please read the above error messages and stack
399
+ trace. If you are using Python, the following command may be helpful:
400
+
401
+ gdb --args python /path/to/your/code.py
402
+
403
+ (You can use `gdb` to debug the code. Please consider compiling
404
+ a debug version of k2.).
405
+
406
+ If you are unable to fix it, please open an issue at:
407
+
408
+ https://github.com/k2-fsa/k2/issues/new
409
+
410
+
411
+ 2022-04-09 03:27:46,192 INFO [decode.py:743] num_arcs before pruning: 246382
412
+ 2022-04-09 03:27:46,192 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
413
+ 2022-04-09 03:27:46,253 INFO [decode.py:757] num_arcs after pruning: 6775
414
+ 2022-04-09 03:28:15,199 INFO [decode_test.py:497] batch 5100/?, cuts processed until now is 14718
415
+ 2022-04-09 03:29:19,807 INFO [decode.py:736] Caught exception:
416
+ CUDA out of memory. Tried to allocate 6.15 GiB (GPU 0; 31.75 GiB total capacity; 26.67 GiB already allocated; 1.11 GiB free; 29.29 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
417
+
418
+ 2022-04-09 03:29:19,808 INFO [decode.py:743] num_arcs before pruning: 220820
419
+ 2022-04-09 03:29:19,808 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
420
+ 2022-04-09 03:29:19,815 INFO [decode.py:757] num_arcs after pruning: 13482
421
+ 2022-04-09 03:30:16,045 INFO [decode_test.py:497] batch 5200/?, cuts processed until now is 14930
422
+ 2022-04-09 03:32:12,235 INFO [decode_test.py:497] batch 5300/?, cuts processed until now is 15128
423
+ 2022-04-09 03:33:06,358 INFO [decode.py:736] Caught exception:
424
+
425
+ Some bad things happened. Please read the above error messages and stack
426
+ trace. If you are using Python, the following command may be helpful:
427
+
428
+ gdb --args python /path/to/your/code.py
429
+
430
+ (You can use `gdb` to debug the code. Please consider compiling
431
+ a debug version of k2.).
432
+
433
+ If you are unable to fix it, please open an issue at:
434
+
435
+ https://github.com/k2-fsa/k2/issues/new
436
+
437
+
438
+ 2022-04-09 03:33:06,359 INFO [decode.py:743] num_arcs before pruning: 190203
439
+ 2022-04-09 03:33:06,359 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
440
+ 2022-04-09 03:33:06,413 INFO [decode.py:757] num_arcs after pruning: 6202
441
+ 2022-04-09 03:34:14,862 INFO [decode_test.py:497] batch 5400/?, cuts processed until now is 15327
442
+ 2022-04-09 03:36:18,973 INFO [decode_test.py:497] batch 5500/?, cuts processed until now is 15531
443
+ 2022-04-09 03:38:18,633 INFO [decode_test.py:497] batch 5600/?, cuts processed until now is 15724
444
+ 2022-04-09 03:38:48,490 INFO [decode.py:736] Caught exception:
445
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.52 GiB already allocated; 3.07 GiB free; 27.32 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
446
+
447
+ 2022-04-09 03:38:48,491 INFO [decode.py:743] num_arcs before pruning: 554330
448
+ 2022-04-09 03:38:48,491 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
449
+ 2022-04-09 03:38:48,500 INFO [decode.py:757] num_arcs after pruning: 10730
450
+ 2022-04-09 03:39:51,281 INFO [decode.py:736] Caught exception:
451
+ CUDA out of memory. Tried to allocate 4.83 GiB (GPU 0; 31.75 GiB total capacity; 25.96 GiB already allocated; 1.31 GiB free; 29.08 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
452
+
453
+ 2022-04-09 03:39:51,281 INFO [decode.py:743] num_arcs before pruning: 160031
454
+ 2022-04-09 03:39:51,281 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
455
+ 2022-04-09 03:39:51,288 INFO [decode.py:757] num_arcs after pruning: 4270
456
+ 2022-04-09 03:40:28,016 INFO [decode_test.py:497] batch 5700/?, cuts processed until now is 15908
457
+ 2022-04-09 03:40:46,608 INFO [decode.py:736] Caught exception:
458
+ CUDA out of memory. Tried to allocate 2.58 GiB (GPU 0; 31.75 GiB total capacity; 27.28 GiB already allocated; 1.32 GiB free; 29.07 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
459
+
460
+ 2022-04-09 03:40:46,608 INFO [decode.py:743] num_arcs before pruning: 406026
461
+ 2022-04-09 03:40:46,608 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
462
+ 2022-04-09 03:40:46,616 INFO [decode.py:757] num_arcs after pruning: 11179
463
+ 2022-04-09 03:42:16,464 INFO [decode.py:736] Caught exception:
464
+ CUDA out of memory. Tried to allocate 2.29 GiB (GPU 0; 31.75 GiB total capacity; 26.71 GiB already allocated; 1.32 GiB free; 29.07 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
465
+
466
+ 2022-04-09 03:42:16,464 INFO [decode.py:743] num_arcs before pruning: 639824
467
+ 2022-04-09 03:42:16,464 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
468
+ 2022-04-09 03:42:16,476 INFO [decode.py:757] num_arcs after pruning: 5520
469
+ 2022-04-09 03:42:52,683 INFO [decode_test.py:497] batch 5800/?, cuts processed until now is 16094
470
+ 2022-04-09 03:44:51,754 INFO [decode_test.py:497] batch 5900/?, cuts processed until now is 16289
471
+ 2022-04-09 03:46:52,121 INFO [decode_test.py:497] batch 6000/?, cuts processed until now is 16488
472
+ 2022-04-09 03:48:54,739 INFO [decode_test.py:497] batch 6100/?, cuts processed until now is 16661
473
+ 2022-04-09 03:49:24,829 INFO [decode.py:736] Caught exception:
474
+ CUDA out of memory. Tried to allocate 1.84 GiB (GPU 0; 31.75 GiB total capacity; 28.87 GiB already allocated; 409.75 MiB free; 29.99 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
475
+
476
+ 2022-04-09 03:49:24,830 INFO [decode.py:743] num_arcs before pruning: 443401
477
+ 2022-04-09 03:49:24,830 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
478
+ 2022-04-09 03:49:24,837 INFO [decode.py:757] num_arcs after pruning: 5211
479
+ 2022-04-09 03:50:27,492 INFO [decode.py:736] Caught exception:
480
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.35 GiB already allocated; 2.15 GiB free; 28.24 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
481
+
482
+ 2022-04-09 03:50:27,493 INFO [decode.py:743] num_arcs before pruning: 361598
483
+ 2022-04-09 03:50:27,493 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
484
+ 2022-04-09 03:50:27,507 INFO [decode.py:757] num_arcs after pruning: 8660
485
+ 2022-04-09 03:51:02,856 INFO [decode_test.py:497] batch 6200/?, cuts processed until now is 16828
486
+ 2022-04-09 03:53:03,912 INFO [decode_test.py:497] batch 6300/?, cuts processed until now is 17002
487
+ 2022-04-09 03:55:04,964 INFO [decode_test.py:497] batch 6400/?, cuts processed until now is 17181
488
+ 2022-04-09 03:55:08,345 INFO [decode.py:736] Caught exception:
489
+ CUDA out of memory. Tried to allocate 4.89 GiB (GPU 0; 31.75 GiB total capacity; 26.28 GiB already allocated; 2.16 GiB free; 28.24 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
490
+
491
+ 2022-04-09 03:55:08,345 INFO [decode.py:743] num_arcs before pruning: 867262
492
+ 2022-04-09 03:55:08,345 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
493
+ 2022-04-09 03:55:08,356 INFO [decode.py:757] num_arcs after pruning: 6494
494
+ 2022-04-09 03:56:03,884 INFO [decode.py:736] Caught exception:
495
+ CUDA out of memory. Tried to allocate 1.90 GiB (GPU 0; 31.75 GiB total capacity; 28.97 GiB already allocated; 1.16 GiB free; 29.23 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
496
+
497
+ 2022-04-09 03:56:03,885 INFO [decode.py:743] num_arcs before pruning: 233755
498
+ 2022-04-09 03:56:03,885 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
499
+ 2022-04-09 03:56:03,910 INFO [decode.py:757] num_arcs after pruning: 5823
500
+ 2022-04-09 03:57:08,774 INFO [decode_test.py:497] batch 6500/?, cuts processed until now is 17347
501
+ 2022-04-09 03:59:01,245 INFO [decode_test.py:497] batch 6600/?, cuts processed until now is 17502
502
+ 2022-04-09 03:59:13,147 INFO [decode.py:736] Caught exception:
503
+ CUDA out of memory. Tried to allocate 5.80 GiB (GPU 0; 31.75 GiB total capacity; 26.73 GiB already allocated; 1.17 GiB free; 29.22 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
504
+
505
+ 2022-04-09 03:59:13,147 INFO [decode.py:743] num_arcs before pruning: 174004
506
+ 2022-04-09 03:59:13,147 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
507
+ 2022-04-09 03:59:13,155 INFO [decode.py:757] num_arcs after pruning: 6857
508
+ 2022-04-09 04:00:59,687 INFO [decode_test.py:497] batch 6700/?, cuts processed until now is 17661
509
+ 2022-04-09 04:03:01,660 INFO [decode_test.py:497] batch 6800/?, cuts processed until now is 17823
510
+ 2022-04-09 04:04:55,219 INFO [decode_test.py:497] batch 6900/?, cuts processed until now is 17997
511
+ 2022-04-09 04:07:05,841 INFO [decode_test.py:497] batch 7000/?, cuts processed until now is 18159
512
+ 2022-04-09 04:09:04,994 INFO [decode_test.py:497] batch 7100/?, cuts processed until now is 18299
513
+ 2022-04-09 04:11:07,439 INFO [decode_test.py:497] batch 7200/?, cuts processed until now is 18432
514
+ 2022-04-09 04:13:18,126 INFO [decode_test.py:497] batch 7300/?, cuts processed until now is 18552
515
+ 2022-04-09 04:15:23,102 INFO [decode_test.py:497] batch 7400/?, cuts processed until now is 18656
516
+ 2022-04-09 04:17:49,550 INFO [decode_test.py:497] batch 7500/?, cuts processed until now is 18798
517
+ 2022-04-09 04:19:16,128 INFO [decode.py:736] Caught exception:
518
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.34 GiB already allocated; 2.12 GiB free; 28.27 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
519
+
520
+ 2022-04-09 04:19:16,129 INFO [decode.py:743] num_arcs before pruning: 1155990
521
+ 2022-04-09 04:19:16,129 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
522
+ 2022-04-09 04:19:16,143 INFO [decode.py:757] num_arcs after pruning: 9141
523
+ 2022-04-09 04:20:19,961 INFO [decode_test.py:497] batch 7600/?, cuts processed until now is 18945
524
+ 2022-04-09 04:22:44,642 INFO [decode_test.py:497] batch 7700/?, cuts processed until now is 19084
525
+ 2022-04-09 04:23:18,184 INFO [decode.py:841] Caught exception:
526
+ CUDA out of memory. Tried to allocate 1.26 GiB (GPU 0; 31.75 GiB total capacity; 27.36 GiB already allocated; 881.75 MiB free; 29.53 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
527
+
528
+ 2022-04-09 04:23:18,184 INFO [decode.py:843] num_paths before decreasing: 1000
529
+ 2022-04-09 04:23:18,184 INFO [decode.py:852] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
530
+ 2022-04-09 04:23:18,184 INFO [decode.py:858] num_paths after decreasing: 500
531
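
[Editor's note] This block comes from a second, different fallback (decode.py:841 rather than :736): during n-best attention-decoder rescoring, an OOM is handled by decreasing the number of sampled paths (1000 -> 500) instead of pruning arcs. A sketch of that loop under the same hedges as above; attention_rescore is a hypothetical helper:

    import torch

    def nbest_rescore_with_fallback(lattice, attention_rescore,
                                    num_paths=1000, min_paths=125):
        """Halve num_paths on CUDA OOM until rescoring fits in memory."""
        while num_paths >= min_paths:
            try:
                return attention_rescore(lattice, num_paths=num_paths)
            except RuntimeError as exc:
                if "CUDA out of memory" not in str(exc):
                    raise
                torch.cuda.empty_cache()
                num_paths //= 2  # e.g. 1000 -> 500, as logged above
        raise RuntimeError("attention rescoring failed even with few paths")
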
+ 2022-04-09 04:24:52,959 INFO [decode.py:736] Caught exception:
532
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.53 GiB already allocated; 2.12 GiB free; 28.27 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
533
+
534
+ 2022-04-09 04:24:52,960 INFO [decode.py:743] num_arcs before pruning: 624026
535
+ 2022-04-09 04:24:52,960 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
536
+ 2022-04-09 04:24:52,972 INFO [decode.py:757] num_arcs after pruning: 10008
537
+ 2022-04-09 04:25:07,718 INFO [decode_test.py:497] batch 7800/?, cuts processed until now is 19232
538
+ 2022-04-09 04:25:31,876 INFO [decode.py:736] Caught exception:
539
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.51 GiB already allocated; 2.12 GiB free; 28.27 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
540
+
541
+ 2022-04-09 04:25:31,876 INFO [decode.py:743] num_arcs before pruning: 688909
542
+ 2022-04-09 04:25:31,877 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
543
+ 2022-04-09 04:25:31,887 INFO [decode.py:757] num_arcs after pruning: 8886
544
+ 2022-04-09 04:25:57,970 INFO [decode.py:736] Caught exception:
545
+ CUDA out of memory. Tried to allocate 5.04 GiB (GPU 0; 31.75 GiB total capacity; 25.95 GiB already allocated; 2.12 GiB free; 28.27 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
546
+
547
+ 2022-04-09 04:25:57,971 INFO [decode.py:743] num_arcs before pruning: 891176
548
+ 2022-04-09 04:25:57,971 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
549
+ 2022-04-09 04:25:57,982 INFO [decode.py:757] num_arcs after pruning: 10106
550
+ 2022-04-09 04:26:19,609 INFO [decode.py:736] Caught exception:
551
+ CUDA out of memory. Tried to allocate 2.63 GiB (GPU 0; 31.75 GiB total capacity; 27.60 GiB already allocated; 327.75 MiB free; 30.07 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
552
+
553
+ 2022-04-09 04:26:19,609 INFO [decode.py:743] num_arcs before pruning: 415376
554
+ 2022-04-09 04:26:19,609 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
555
+ 2022-04-09 04:26:19,620 INFO [decode.py:757] num_arcs after pruning: 7771
556
+ 2022-04-09 04:27:33,059 INFO [decode_test.py:497] batch 7900/?, cuts processed until now is 19375
557
+ 2022-04-09 04:29:43,649 INFO [decode_test.py:497] batch 8000/?, cuts processed until now is 19510
558
+ 2022-04-09 04:30:20,590 INFO [decode.py:736] Caught exception:
559
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.65 GiB already allocated; 2.12 GiB free; 28.27 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
560
+
561
+ 2022-04-09 04:30:20,591 INFO [decode.py:743] num_arcs before pruning: 330767
562
+ 2022-04-09 04:30:20,591 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
563
+ 2022-04-09 04:30:20,606 INFO [decode.py:757] num_arcs after pruning: 5820
564
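
[Editor's note] Every OOM message in this log suggests setting max_split_size_mb via PYTORCH_CUDA_ALLOC_CONF, and the gap between reserved (~28-30 GiB) and allocated (~19-27 GiB) memory does point at fragmentation. If one wanted to try the hint, the variable has to be set before the process makes its first CUDA allocation; the 512 MB value below is an illustrative assumption, not taken from this run:

    import os

    # Must be set before the first CUDA allocation in the process;
    # 512 is a starting point to tune, not a recommendation from this log.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:512"

    import torch  # import after setting the variable, to be safe
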
+ 2022-04-09 04:31:55,818 INFO [decode_test.py:497] batch 8100/?, cuts processed until now is 19643
565
+ 2022-04-09 04:34:11,720 INFO [decode_test.py:497] batch 8200/?, cuts processed until now is 19776
566
+ 2022-04-09 04:35:04,147 INFO [decode.py:736] Caught exception:
567
+ CUDA out of memory. Tried to allocate 4.49 GiB (GPU 0; 31.75 GiB total capacity; 24.38 GiB already allocated; 2.12 GiB free; 28.27 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
568
+
569
+ 2022-04-09 04:35:04,147 INFO [decode.py:743] num_arcs before pruning: 533967
570
+ 2022-04-09 04:35:04,147 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
571
+ 2022-04-09 04:35:04,157 INFO [decode.py:757] num_arcs after pruning: 3449
572
+ 2022-04-09 04:36:15,595 INFO [decode.py:736] Caught exception:
573
+ CUDA out of memory. Tried to allocate 8.00 GiB (GPU 0; 31.75 GiB total capacity; 19.67 GiB already allocated; 2.12 GiB free; 28.27 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
574
+
575
+ 2022-04-09 04:36:15,595 INFO [decode.py:743] num_arcs before pruning: 397138
576
+ 2022-04-09 04:36:15,596 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
577
+ 2022-04-09 04:36:15,605 INFO [decode.py:757] num_arcs after pruning: 6775
578
+ 2022-04-09 04:36:31,844 INFO [decode_test.py:497] batch 8300/?, cuts processed until now is 19882
579
+ 2022-04-09 04:37:04,130 INFO [decode.py:736] Caught exception:
580
+
581
+ Some bad things happened. Please read the above error messages and stack
582
+ trace. If you are using Python, the following command may be helpful:
583
+
584
+ gdb --args python /path/to/your/code.py
585
+
586
+ (You can use `gdb` to debug the code. Please consider compiling
587
+ a debug version of k2.).
588
+
589
+ If you are unable to fix it, please open an issue at:
590
+
591
+ https://github.com/k2-fsa/k2/issues/new
592
+
593
+
594
+ 2022-04-09 04:37:04,130 INFO [decode.py:743] num_arcs before pruning: 456591
595
+ 2022-04-09 04:37:04,130 INFO [decode.py:746] This OOM is not an error. You can ignore it. If your model does not converge well, or --max-duration is too large, or the input sound file is difficult to decode, you will meet this exception.
596
+ 2022-04-09 04:37:04,180 INFO [decode.py:757] num_arcs after pruning: 5275
597
+ 2022-04-09 04:57:33,432 INFO [decode_test.py:567]
598
+ For test, WER of different settings are:
599
+ ngram_lm_scale_0.3_attention_scale_0.7 10.58 best for test
600
+ ngram_lm_scale_0.5_attention_scale_1.3 10.58
601
+ ngram_lm_scale_0.3_attention_scale_0.5 10.59
602
+ ngram_lm_scale_0.3_attention_scale_0.6 10.59
603
+ ngram_lm_scale_0.3_attention_scale_0.9 10.59
604
+ ngram_lm_scale_0.3_attention_scale_1.0 10.59
605
+ ngram_lm_scale_0.3_attention_scale_1.1 10.59
606
+ ngram_lm_scale_0.3_attention_scale_1.2 10.59
607
+ ngram_lm_scale_0.3_attention_scale_1.3 10.59
608
+ ngram_lm_scale_0.5_attention_scale_1.0 10.59
609
+ ngram_lm_scale_0.5_attention_scale_1.1 10.59
610
+ ngram_lm_scale_0.5_attention_scale_1.2 10.59
611
+ ngram_lm_scale_0.5_attention_scale_1.5 10.59
612
+ ngram_lm_scale_0.5_attention_scale_1.7 10.59
613
+ ngram_lm_scale_0.5_attention_scale_1.9 10.59
614
+ ngram_lm_scale_0.5_attention_scale_2.0 10.59
615
+ ngram_lm_scale_0.5_attention_scale_2.1 10.59
616
+ ngram_lm_scale_0.5_attention_scale_2.2 10.59
617
+ ngram_lm_scale_0.5_attention_scale_2.3 10.59
618
+ ngram_lm_scale_0.6_attention_scale_1.9 10.59
619
+ ngram_lm_scale_0.6_attention_scale_2.0 10.59
620
+ ngram_lm_scale_0.6_attention_scale_2.1 10.59
621
+ ngram_lm_scale_0.6_attention_scale_2.2 10.59
622
+ ngram_lm_scale_0.6_attention_scale_2.3 10.59
623
+ ngram_lm_scale_0.6_attention_scale_2.5 10.59
624
+ ngram_lm_scale_0.3_attention_scale_1.5 10.6
625
+ ngram_lm_scale_0.3_attention_scale_1.7 10.6
626
+ ngram_lm_scale_0.3_attention_scale_1.9 10.6
627
+ ngram_lm_scale_0.3_attention_scale_2.0 10.6
628
+ ngram_lm_scale_0.3_attention_scale_2.1 10.6
629
+ ngram_lm_scale_0.3_attention_scale_2.2 10.6
630
+ ngram_lm_scale_0.3_attention_scale_2.3 10.6
631
+ ngram_lm_scale_0.3_attention_scale_2.5 10.6
632
+ ngram_lm_scale_0.5_attention_scale_0.9 10.6
633
+ ngram_lm_scale_0.5_attention_scale_2.5 10.6
634
+ ngram_lm_scale_0.5_attention_scale_3.0 10.6
635
+ ngram_lm_scale_0.6_attention_scale_1.3 10.6
636
+ ngram_lm_scale_0.6_attention_scale_1.5 10.6
637
+ ngram_lm_scale_0.6_attention_scale_1.7 10.6
638
+ ngram_lm_scale_0.6_attention_scale_3.0 10.6
639
+ ngram_lm_scale_0.3_attention_scale_0.3 10.61
640
+ ngram_lm_scale_0.3_attention_scale_3.0 10.61
641
+ ngram_lm_scale_0.5_attention_scale_4.0 10.61
642
+ ngram_lm_scale_0.5_attention_scale_5.0 10.61
643
+ ngram_lm_scale_0.6_attention_scale_1.2 10.61
644
+ ngram_lm_scale_0.6_attention_scale_4.0 10.61
645
+ ngram_lm_scale_0.6_attention_scale_5.0 10.61
646
+ ngram_lm_scale_0.7_attention_scale_1.7 10.61
647
+ ngram_lm_scale_0.7_attention_scale_1.9 10.61
648
+ ngram_lm_scale_0.7_attention_scale_2.0 10.61
649
+ ngram_lm_scale_0.7_attention_scale_2.1 10.61
650
+ ngram_lm_scale_0.7_attention_scale_2.2 10.61
651
+ ngram_lm_scale_0.7_attention_scale_2.3 10.61
652
+ ngram_lm_scale_0.7_attention_scale_2.5 10.61
653
+ ngram_lm_scale_0.7_attention_scale_3.0 10.61
654
+ ngram_lm_scale_0.7_attention_scale_4.0 10.61
655
+ ngram_lm_scale_0.7_attention_scale_5.0 10.61
656
+ ngram_lm_scale_0.1_attention_scale_1.1 10.62
657
+ ngram_lm_scale_0.3_attention_scale_4.0 10.62
658
+ ngram_lm_scale_0.3_attention_scale_5.0 10.62
659
+ ngram_lm_scale_0.5_attention_scale_0.7 10.62
660
+ ngram_lm_scale_0.6_attention_scale_1.0 10.62
661
+ ngram_lm_scale_0.6_attention_scale_1.1 10.62
662
+ ngram_lm_scale_0.7_attention_scale_1.5 10.62
663
+ ngram_lm_scale_0.9_attention_scale_3.0 10.62
664
+ ngram_lm_scale_0.9_attention_scale_4.0 10.62
665
+ ngram_lm_scale_0.9_attention_scale_5.0 10.62
666
+ ngram_lm_scale_1.0_attention_scale_4.0 10.62
667
+ ngram_lm_scale_1.1_attention_scale_5.0 10.62
668
+ ngram_lm_scale_0.05_attention_scale_1.1 10.63
669
+ ngram_lm_scale_0.05_attention_scale_1.2 10.63
670
+ ngram_lm_scale_0.08_attention_scale_0.9 10.63
671
+ ngram_lm_scale_0.08_attention_scale_1.0 10.63
672
+ ngram_lm_scale_0.08_attention_scale_1.1 10.63
673
+ ngram_lm_scale_0.08_attention_scale_1.2 10.63
674
+ ngram_lm_scale_0.08_attention_scale_1.3 10.63
675
+ ngram_lm_scale_0.08_attention_scale_1.9 10.63
676
+ ngram_lm_scale_0.08_attention_scale_2.0 10.63
677
+ ngram_lm_scale_0.08_attention_scale_2.1 10.63
678
+ ngram_lm_scale_0.08_attention_scale_2.2 10.63
679
+ ngram_lm_scale_0.08_attention_scale_2.3 10.63
680
+ ngram_lm_scale_0.08_attention_scale_3.0 10.63
681
+ ngram_lm_scale_0.1_attention_scale_0.5 10.63
682
+ ngram_lm_scale_0.1_attention_scale_0.6 10.63
683
+ ngram_lm_scale_0.1_attention_scale_0.7 10.63
684
+ ngram_lm_scale_0.1_attention_scale_0.9 10.63
685
+ ngram_lm_scale_0.1_attention_scale_1.0 10.63
686
+ ngram_lm_scale_0.1_attention_scale_1.2 10.63
687
+ ngram_lm_scale_0.1_attention_scale_1.3 10.63
688
+ ngram_lm_scale_0.1_attention_scale_1.7 10.63
689
+ ngram_lm_scale_0.1_attention_scale_1.9 10.63
690
+ ngram_lm_scale_0.1_attention_scale_2.0 10.63
691
+ ngram_lm_scale_0.1_attention_scale_2.1 10.63
692
+ ngram_lm_scale_0.1_attention_scale_2.2 10.63
693
+ ngram_lm_scale_0.1_attention_scale_2.3 10.63
694
+ ngram_lm_scale_0.1_attention_scale_2.5 10.63
695
+ ngram_lm_scale_0.1_attention_scale_3.0 10.63
696
+ ngram_lm_scale_0.1_attention_scale_5.0 10.63
697
+ ngram_lm_scale_0.5_attention_scale_0.6 10.63
698
+ ngram_lm_scale_0.6_attention_scale_0.9 10.63
699
+ ngram_lm_scale_0.9_attention_scale_2.3 10.63
700
+ ngram_lm_scale_0.9_attention_scale_2.5 10.63
701
+ ngram_lm_scale_1.0_attention_scale_5.0 10.63
702
+ ngram_lm_scale_1.2_attention_scale_5.0 10.63
703
+ ngram_lm_scale_0.01_attention_scale_0.9 10.64
704
+ ngram_lm_scale_0.01_attention_scale_1.0 10.64
705
+ ngram_lm_scale_0.01_attention_scale_1.1 10.64
706
+ ngram_lm_scale_0.01_attention_scale_1.2 10.64
707
+ ngram_lm_scale_0.01_attention_scale_4.0 10.64
708
+ ngram_lm_scale_0.01_attention_scale_5.0 10.64
709
+ ngram_lm_scale_0.05_attention_scale_0.5 10.64
710
+ ngram_lm_scale_0.05_attention_scale_0.6 10.64
711
+ ngram_lm_scale_0.05_attention_scale_0.7 10.64
712
+ ngram_lm_scale_0.05_attention_scale_0.9 10.64
713
+ ngram_lm_scale_0.05_attention_scale_1.0 10.64
714
+ ngram_lm_scale_0.05_attention_scale_1.3 10.64
715
+ ngram_lm_scale_0.05_attention_scale_1.5 10.64
716
+ ngram_lm_scale_0.05_attention_scale_1.7 10.64
717
+ ngram_lm_scale_0.05_attention_scale_1.9 10.64
718
+ ngram_lm_scale_0.05_attention_scale_2.0 10.64
719
+ ngram_lm_scale_0.05_attention_scale_2.1 10.64
720
+ ngram_lm_scale_0.05_attention_scale_2.2 10.64
721
+ ngram_lm_scale_0.05_attention_scale_2.3 10.64
722
+ ngram_lm_scale_0.05_attention_scale_2.5 10.64
723
+ ngram_lm_scale_0.05_attention_scale_3.0 10.64
724
+ ngram_lm_scale_0.05_attention_scale_4.0 10.64
725
+ ngram_lm_scale_0.05_attention_scale_5.0 10.64
726
+ ngram_lm_scale_0.08_attention_scale_0.5 10.64
727
+ ngram_lm_scale_0.08_attention_scale_0.6 10.64
728
+ ngram_lm_scale_0.08_attention_scale_0.7 10.64
729
+ ngram_lm_scale_0.08_attention_scale_1.5 10.64
730
+ ngram_lm_scale_0.08_attention_scale_1.7 10.64
731
+ ngram_lm_scale_0.08_attention_scale_2.5 10.64
732
+ ngram_lm_scale_0.08_attention_scale_4.0 10.64
733
+ ngram_lm_scale_0.08_attention_scale_5.0 10.64
734
+ ngram_lm_scale_0.1_attention_scale_0.3 10.64
735
+ ngram_lm_scale_0.1_attention_scale_1.5 10.64
736
+ ngram_lm_scale_0.1_attention_scale_4.0 10.64
737
+ ngram_lm_scale_0.7_attention_scale_1.3 10.64
738
+ ngram_lm_scale_0.9_attention_scale_2.2 10.64
739
+ ngram_lm_scale_1.0_attention_scale_3.0 10.64
740
+ ngram_lm_scale_1.1_attention_scale_4.0 10.64
741
+ ngram_lm_scale_1.3_attention_scale_5.0 10.64
742
+ ngram_lm_scale_0.01_attention_scale_0.6 10.65
743
+ ngram_lm_scale_0.01_attention_scale_0.7 10.65
744
+ ngram_lm_scale_0.01_attention_scale_1.3 10.65
745
+ ngram_lm_scale_0.01_attention_scale_1.5 10.65
746
+ ngram_lm_scale_0.01_attention_scale_1.7 10.65
747
+ ngram_lm_scale_0.01_attention_scale_1.9 10.65
748
+ ngram_lm_scale_0.01_attention_scale_2.0 10.65
749
+ ngram_lm_scale_0.01_attention_scale_2.1 10.65
750
+ ngram_lm_scale_0.01_attention_scale_2.2 10.65
751
+ ngram_lm_scale_0.01_attention_scale_2.3 10.65
752
+ ngram_lm_scale_0.01_attention_scale_2.5 10.65
753
+ ngram_lm_scale_0.01_attention_scale_3.0 10.65
754
+ ngram_lm_scale_0.08_attention_scale_0.3 10.65
755
+ ngram_lm_scale_0.5_attention_scale_0.5 10.65
756
+ ngram_lm_scale_0.6_attention_scale_0.7 10.65
757
+ ngram_lm_scale_0.7_attention_scale_1.1 10.65
758
+ ngram_lm_scale_0.7_attention_scale_1.2 10.65
759
+ ngram_lm_scale_0.9_attention_scale_2.1 10.65
760
+ ngram_lm_scale_1.2_attention_scale_4.0 10.65
761
+ ngram_lm_scale_0.05_attention_scale_0.3 10.66
762
+ ngram_lm_scale_0.7_attention_scale_1.0 10.66
763
+ ngram_lm_scale_0.9_attention_scale_1.9 10.66
764
+ ngram_lm_scale_0.9_attention_scale_2.0 10.66
765
+ ngram_lm_scale_1.0_attention_scale_2.5 10.66
766
+ ngram_lm_scale_1.1_attention_scale_3.0 10.66
767
+ ngram_lm_scale_0.01_attention_scale_0.5 10.67
768
+ ngram_lm_scale_0.1_attention_scale_0.08 10.67
769
+ ngram_lm_scale_0.1_attention_scale_0.1 10.67
770
+ ngram_lm_scale_0.6_attention_scale_0.6 10.67
771
+ ngram_lm_scale_0.9_attention_scale_1.7 10.67
772
+ ngram_lm_scale_1.0_attention_scale_2.2 10.67
773
+ ngram_lm_scale_1.0_attention_scale_2.3 10.67
774
+ ngram_lm_scale_1.3_attention_scale_4.0 10.67
775
+ ngram_lm_scale_1.5_attention_scale_5.0 10.67
776
+ ngram_lm_scale_0.01_attention_scale_0.3 10.68
777
+ ngram_lm_scale_0.08_attention_scale_0.08 10.68
778
+ ngram_lm_scale_0.08_attention_scale_0.1 10.68
779
+ ngram_lm_scale_0.3_attention_scale_0.08 10.68
780
+ ngram_lm_scale_0.3_attention_scale_0.1 10.68
781
+ ngram_lm_scale_0.7_attention_scale_0.9 10.68
782
+ ngram_lm_scale_1.0_attention_scale_2.0 10.68
783
+ ngram_lm_scale_1.0_attention_scale_2.1 10.68
784
+ ngram_lm_scale_1.1_attention_scale_2.5 10.68
785
+ ngram_lm_scale_1.2_attention_scale_3.0 10.68
786
+ ngram_lm_scale_0.1_attention_scale_0.05 10.69
787
+ ngram_lm_scale_0.5_attention_scale_0.3 10.69
788
+ ngram_lm_scale_0.9_attention_scale_1.5 10.69
789
+ ngram_lm_scale_1.0_attention_scale_1.9 10.69
790
+ ngram_lm_scale_1.1_attention_scale_2.3 10.69
791
+ ngram_lm_scale_0.05_attention_scale_0.1 10.7
792
+ ngram_lm_scale_0.08_attention_scale_0.05 10.7
793
+ ngram_lm_scale_0.3_attention_scale_0.05 10.7
794
+ ngram_lm_scale_0.6_attention_scale_0.5 10.7
795
+ ngram_lm_scale_1.1_attention_scale_2.2 10.7
796
+ ngram_lm_scale_1.5_attention_scale_4.0 10.7
797
+ ngram_lm_scale_1.7_attention_scale_5.0 10.7
798
+ ngram_lm_scale_0.05_attention_scale_0.08 10.71
799
+ ngram_lm_scale_1.1_attention_scale_2.1 10.71
800
+ ngram_lm_scale_1.2_attention_scale_2.5 10.71
801
+ ngram_lm_scale_1.3_attention_scale_3.0 10.71
802
+ ngram_lm_scale_0.01_attention_scale_0.1 10.72
803
+ ngram_lm_scale_0.05_attention_scale_0.05 10.72
804
+ ngram_lm_scale_0.08_attention_scale_0.01 10.72
805
+ ngram_lm_scale_0.1_attention_scale_0.01 10.72
806
+ ngram_lm_scale_0.3_attention_scale_0.01 10.72
807
+ ngram_lm_scale_0.7_attention_scale_0.7 10.72
808
+ ngram_lm_scale_0.9_attention_scale_1.3 10.72
809
+ ngram_lm_scale_1.0_attention_scale_1.7 10.72
810
+ ngram_lm_scale_1.1_attention_scale_2.0 10.72
811
+ ngram_lm_scale_0.01_attention_scale_0.08 10.73
812
+ ngram_lm_scale_0.9_attention_scale_1.2 10.73
813
+ ngram_lm_scale_1.1_attention_scale_1.9 10.73
814
+ ngram_lm_scale_1.2_attention_scale_2.3 10.73
815
+ ngram_lm_scale_1.0_attention_scale_1.5 10.74
816
+ ngram_lm_scale_1.2_attention_scale_2.2 10.74
817
+ ngram_lm_scale_1.3_attention_scale_2.5 10.74
818
+ ngram_lm_scale_1.9_attention_scale_5.0 10.74
819
+ ngram_lm_scale_0.01_attention_scale_0.05 10.75
820
+ ngram_lm_scale_0.05_attention_scale_0.01 10.75
821
+ ngram_lm_scale_0.7_attention_scale_0.6 10.75
822
+ ngram_lm_scale_0.9_attention_scale_1.1 10.75
823
+ ngram_lm_scale_1.1_attention_scale_1.7 10.75
824
+ ngram_lm_scale_1.2_attention_scale_2.1 10.75
825
+ ngram_lm_scale_1.7_attention_scale_4.0 10.75
826
+ ngram_lm_scale_1.2_attention_scale_2.0 10.76
827
+ ngram_lm_scale_1.3_attention_scale_2.3 10.76
828
+ ngram_lm_scale_2.0_attention_scale_5.0 10.76
829
+ ngram_lm_scale_1.0_attention_scale_1.3 10.77
830
+ ngram_lm_scale_1.2_attention_scale_1.9 10.77
831
+ ngram_lm_scale_1.5_attention_scale_3.0 10.77
832
+ ngram_lm_scale_0.01_attention_scale_0.01 10.78
833
+ ngram_lm_scale_0.6_attention_scale_0.3 10.78
834
+ ngram_lm_scale_0.7_attention_scale_0.5 10.78
835
+ ngram_lm_scale_0.9_attention_scale_1.0 10.78
836
+ ngram_lm_scale_2.1_attention_scale_5.0 10.78
837
+ ngram_lm_scale_1.1_attention_scale_1.5 10.79
838
+ ngram_lm_scale_1.3_attention_scale_2.2 10.79
839
+ ngram_lm_scale_0.5_attention_scale_0.1 10.8
840
+ ngram_lm_scale_1.0_attention_scale_1.2 10.8
841
+ ngram_lm_scale_1.3_attention_scale_2.1 10.8
842
+ ngram_lm_scale_1.9_attention_scale_4.0 10.8
843
+ ngram_lm_scale_2.2_attention_scale_5.0 10.8
844
+ ngram_lm_scale_0.5_attention_scale_0.08 10.81
845
+ ngram_lm_scale_0.9_attention_scale_0.9 10.81
846
+ ngram_lm_scale_1.2_attention_scale_1.7 10.81
847
+ ngram_lm_scale_1.3_attention_scale_2.0 10.81
848
+ ngram_lm_scale_1.0_attention_scale_1.1 10.82
849
+ ngram_lm_scale_0.5_attention_scale_0.05 10.83
850
+ ngram_lm_scale_1.1_attention_scale_1.3 10.83
851
+ ngram_lm_scale_1.3_attention_scale_1.9 10.83
852
+ ngram_lm_scale_1.5_attention_scale_2.5 10.84
853
+ ngram_lm_scale_2.3_attention_scale_5.0 10.84
854
+ ngram_lm_scale_1.0_attention_scale_1.0 10.85
855
+ ngram_lm_scale_1.2_attention_scale_1.5 10.85
856
+ ngram_lm_scale_2.0_attention_scale_4.0 10.85
857
+ ngram_lm_scale_1.1_attention_scale_1.2 10.86
858
+ ngram_lm_scale_1.7_attention_scale_3.0 10.86
859
+ ngram_lm_scale_0.5_attention_scale_0.01 10.87
860
+ ngram_lm_scale_1.5_attention_scale_2.3 10.87
861
+ ngram_lm_scale_0.7_attention_scale_0.3 10.88
862
+ ngram_lm_scale_0.9_attention_scale_0.7 10.88
863
+ ngram_lm_scale_1.3_attention_scale_1.7 10.88
864
+ ngram_lm_scale_1.0_attention_scale_0.9 10.89
865
+ ngram_lm_scale_1.5_attention_scale_2.2 10.89
866
+ ngram_lm_scale_2.1_attention_scale_4.0 10.89
867
+ ngram_lm_scale_1.1_attention_scale_1.1 10.91
868
+ ngram_lm_scale_0.6_attention_scale_0.1 10.92
869
+ ngram_lm_scale_0.9_attention_scale_0.6 10.92
870
+ ngram_lm_scale_1.5_attention_scale_2.1 10.92
871
+ ngram_lm_scale_1.2_attention_scale_1.3 10.93
872
+ ngram_lm_scale_2.5_attention_scale_5.0 10.93
873
+ ngram_lm_scale_0.6_attention_scale_0.08 10.94
874
+ ngram_lm_scale_2.2_attention_scale_4.0 10.94
875
+ ngram_lm_scale_1.1_attention_scale_1.0 10.95
876
+ ngram_lm_scale_1.3_attention_scale_1.5 10.95
877
+ ngram_lm_scale_1.5_attention_scale_2.0 10.96
878
+ ngram_lm_scale_1.2_attention_scale_1.2 10.97
879
+ ngram_lm_scale_1.7_attention_scale_2.5 10.97
880
+ ngram_lm_scale_0.6_attention_scale_0.05 10.98
881
+ ngram_lm_scale_1.9_attention_scale_3.0 10.98
882
+ ngram_lm_scale_1.0_attention_scale_0.7 10.99
883
+ ngram_lm_scale_1.5_attention_scale_1.9 10.99
884
+ ngram_lm_scale_2.3_attention_scale_4.0 10.99
885
+ ngram_lm_scale_0.9_attention_scale_0.5 11.0
886
+ ngram_lm_scale_1.1_attention_scale_0.9 11.0
887
+ ngram_lm_scale_0.6_attention_scale_0.01 11.02
888
+ ngram_lm_scale_1.2_attention_scale_1.1 11.02
889
+ ngram_lm_scale_1.7_attention_scale_2.3 11.03
890
+ ngram_lm_scale_1.3_attention_scale_1.3 11.05
891
+ ngram_lm_scale_2.0_attention_scale_3.0 11.05
892
+ ngram_lm_scale_1.7_attention_scale_2.2 11.07
893
+ ngram_lm_scale_1.0_attention_scale_0.6 11.08
894
+ ngram_lm_scale_1.5_attention_scale_1.7 11.08
895
+ ngram_lm_scale_1.2_attention_scale_1.0 11.09
896
+ ngram_lm_scale_0.7_attention_scale_0.1 11.1
897
+ ngram_lm_scale_1.3_attention_scale_1.2 11.1
898
+ ngram_lm_scale_1.7_attention_scale_2.1 11.11
899
+ ngram_lm_scale_2.1_attention_scale_3.0 11.12
900
+ ngram_lm_scale_2.5_attention_scale_4.0 11.12
901
+ ngram_lm_scale_0.7_attention_scale_0.08 11.13
902
+ ngram_lm_scale_1.9_attention_scale_2.5 11.13
903
+ ngram_lm_scale_1.7_attention_scale_2.0 11.14
904
+ ngram_lm_scale_1.2_attention_scale_0.9 11.16
905
+ ngram_lm_scale_1.1_attention_scale_0.7 11.17
906
+ ngram_lm_scale_1.3_attention_scale_1.1 11.17
907
+ ngram_lm_scale_3.0_attention_scale_5.0 11.17
908
+ ngram_lm_scale_0.7_attention_scale_0.05 11.18
909
+ ngram_lm_scale_1.5_attention_scale_1.5 11.18
910
+ ngram_lm_scale_1.0_attention_scale_0.5 11.19
911
+ ngram_lm_scale_1.7_attention_scale_1.9 11.2
912
+ ngram_lm_scale_2.2_attention_scale_3.0 11.21
913
+ ngram_lm_scale_1.9_attention_scale_2.3 11.22
914
+ ngram_lm_scale_2.0_attention_scale_2.5 11.23
915
+ ngram_lm_scale_0.9_attention_scale_0.3 11.25
916
+ ngram_lm_scale_1.3_attention_scale_1.0 11.26
917
+ ngram_lm_scale_0.7_attention_scale_0.01 11.27
918
+ ngram_lm_scale_1.9_attention_scale_2.2 11.27
919
+ ngram_lm_scale_1.1_attention_scale_0.6 11.29
920
+ ngram_lm_scale_2.3_attention_scale_3.0 11.31
921
+ ngram_lm_scale_1.7_attention_scale_1.7 11.33
922
+ ngram_lm_scale_1.5_attention_scale_1.3 11.34
923
+ ngram_lm_scale_1.9_attention_scale_2.1 11.34
924
+ ngram_lm_scale_2.0_attention_scale_2.3 11.34
925
+ ngram_lm_scale_2.1_attention_scale_2.5 11.35
926
+ ngram_lm_scale_1.3_attention_scale_0.9 11.36
927
+ ngram_lm_scale_1.2_attention_scale_0.7 11.39
928
+ ngram_lm_scale_1.9_attention_scale_2.0 11.4
929
+ ngram_lm_scale_2.0_attention_scale_2.2 11.4
930
+ ngram_lm_scale_1.5_attention_scale_1.2 11.43
931
+ ngram_lm_scale_1.1_attention_scale_0.5 11.44
932
+ ngram_lm_scale_2.0_attention_scale_2.1 11.47
933
+ ngram_lm_scale_2.1_attention_scale_2.3 11.47
934
+ ngram_lm_scale_2.2_attention_scale_2.5 11.47
935
+ ngram_lm_scale_1.9_attention_scale_1.9 11.48
936
+ ngram_lm_scale_1.7_attention_scale_1.5 11.5
937
+ ngram_lm_scale_2.5_attention_scale_3.0 11.51
938
+ ngram_lm_scale_3.0_attention_scale_4.0 11.51
939
+ ngram_lm_scale_1.0_attention_scale_0.3 11.53
940
+ ngram_lm_scale_1.2_attention_scale_0.6 11.53
941
+ ngram_lm_scale_1.5_attention_scale_1.1 11.54
942
+ ngram_lm_scale_2.1_attention_scale_2.2 11.54
943
+ ngram_lm_scale_2.0_attention_scale_2.0 11.55
944
+ ngram_lm_scale_2.3_attention_scale_2.5 11.59
945
+ ngram_lm_scale_2.2_attention_scale_2.3 11.61
946
+ ngram_lm_scale_2.1_attention_scale_2.1 11.62
947
+ ngram_lm_scale_1.3_attention_scale_0.7 11.63
948
+ ngram_lm_scale_2.0_attention_scale_1.9 11.63
949
+ ngram_lm_scale_1.9_attention_scale_1.7 11.66
950
+ ngram_lm_scale_1.5_attention_scale_1.0 11.67
951
+ ngram_lm_scale_2.2_attention_scale_2.2 11.69
952
+ ngram_lm_scale_0.9_attention_scale_0.1 11.7
953
+ ngram_lm_scale_2.1_attention_scale_2.0 11.71
954
+ ngram_lm_scale_1.2_attention_scale_0.5 11.72
955
+ ngram_lm_scale_1.7_attention_scale_1.3 11.72
956
+ ngram_lm_scale_2.3_attention_scale_2.3 11.75
957
+ ngram_lm_scale_0.9_attention_scale_0.08 11.77
958
+ ngram_lm_scale_2.2_attention_scale_2.1 11.78
959
+ ngram_lm_scale_2.1_attention_scale_1.9 11.82
960
+ ngram_lm_scale_1.3_attention_scale_0.6 11.83
961
+ ngram_lm_scale_1.5_attention_scale_0.9 11.85
962
+ ngram_lm_scale_2.0_attention_scale_1.7 11.85
963
+ ngram_lm_scale_2.3_attention_scale_2.2 11.86
964
+ ngram_lm_scale_0.9_attention_scale_0.05 11.87
965
+ ngram_lm_scale_1.1_attention_scale_0.3 11.87
966
+ ngram_lm_scale_1.7_attention_scale_1.2 11.88
967
+ ngram_lm_scale_1.9_attention_scale_1.5 11.9
968
+ ngram_lm_scale_2.2_attention_scale_2.0 11.9
969
+ ngram_lm_scale_2.5_attention_scale_2.5 11.9
970
+ ngram_lm_scale_4.0_attention_scale_5.0 11.93
971
+ ngram_lm_scale_2.3_attention_scale_2.1 11.97
972
+ ngram_lm_scale_0.9_attention_scale_0.01 12.0
973
+ ngram_lm_scale_2.2_attention_scale_1.9 12.02
974
+ ngram_lm_scale_1.7_attention_scale_1.1 12.05
975
+ ngram_lm_scale_1.3_attention_scale_0.5 12.07
976
+ ngram_lm_scale_2.1_attention_scale_1.7 12.07
977
+ ngram_lm_scale_2.3_attention_scale_2.0 12.09
978
+ ngram_lm_scale_1.0_attention_scale_0.1 12.11
979
+ ngram_lm_scale_2.5_attention_scale_2.3 12.11
980
+ ngram_lm_scale_2.0_attention_scale_1.5 12.14
981
+ ngram_lm_scale_1.0_attention_scale_0.08 12.19
982
+ ngram_lm_scale_3.0_attention_scale_3.0 12.19
983
+ ngram_lm_scale_1.9_attention_scale_1.3 12.22
984
+ ngram_lm_scale_1.7_attention_scale_1.0 12.23
985
+ ngram_lm_scale_2.3_attention_scale_1.9 12.23
986
+ ngram_lm_scale_2.5_attention_scale_2.2 12.23
987
+ ngram_lm_scale_1.5_attention_scale_0.7 12.27
988
+ ngram_lm_scale_1.2_attention_scale_0.3 12.28
989
+ ngram_lm_scale_2.2_attention_scale_1.7 12.3
990
+ ngram_lm_scale_1.0_attention_scale_0.05 12.32
991
+ ngram_lm_scale_2.5_attention_scale_2.1 12.37
992
+ ngram_lm_scale_2.1_attention_scale_1.5 12.39
993
+ ngram_lm_scale_1.9_attention_scale_1.2 12.41
994
+ ngram_lm_scale_1.7_attention_scale_0.9 12.46
995
+ ngram_lm_scale_1.0_attention_scale_0.01 12.49
996
+ ngram_lm_scale_2.0_attention_scale_1.3 12.5
997
+ ngram_lm_scale_2.5_attention_scale_2.0 12.51
998
+ ngram_lm_scale_2.3_attention_scale_1.7 12.54
999
+ ngram_lm_scale_1.5_attention_scale_0.6 12.55
1000
+ ngram_lm_scale_1.1_attention_scale_0.1 12.58
1001
+ ngram_lm_scale_1.9_attention_scale_1.1 12.62
1002
+ ngram_lm_scale_2.2_attention_scale_1.5 12.64
1003
+ ngram_lm_scale_1.1_attention_scale_0.08 12.67
1004
+ ngram_lm_scale_2.5_attention_scale_1.9 12.67
1005
+ ngram_lm_scale_4.0_attention_scale_4.0 12.67
1006
+ ngram_lm_scale_1.3_attention_scale_0.3 12.71
1007
+ ngram_lm_scale_2.0_attention_scale_1.2 12.71
1008
+ ngram_lm_scale_2.1_attention_scale_1.3 12.78
1009
+ ngram_lm_scale_3.0_attention_scale_2.5 12.8
1010
+ ngram_lm_scale_1.1_attention_scale_0.05 12.81
1011
+ ngram_lm_scale_1.9_attention_scale_1.0 12.85
1012
+ ngram_lm_scale_1.5_attention_scale_0.5 12.86
1013
+ ngram_lm_scale_2.3_attention_scale_1.5 12.91
1014
+ ngram_lm_scale_2.0_attention_scale_1.1 12.92
1015
+ ngram_lm_scale_1.7_attention_scale_0.7 12.99
1016
+ ngram_lm_scale_2.1_attention_scale_1.2 12.99
1017
+ ngram_lm_scale_5.0_attention_scale_5.0 13.01
1018
+ ngram_lm_scale_1.1_attention_scale_0.01 13.02
1019
+ ngram_lm_scale_2.5_attention_scale_1.7 13.02
1020
+ ngram_lm_scale_2.2_attention_scale_1.3 13.05
1021
+ ngram_lm_scale_3.0_attention_scale_2.3 13.09
1022
+ ngram_lm_scale_1.2_attention_scale_0.1 13.1
1023
+ ngram_lm_scale_1.9_attention_scale_0.9 13.11
1024
+ ngram_lm_scale_2.0_attention_scale_1.0 13.17
1025
+ ngram_lm_scale_1.2_attention_scale_0.08 13.2
1026
+ ngram_lm_scale_2.1_attention_scale_1.1 13.22
1027
+ ngram_lm_scale_3.0_attention_scale_2.2 13.24
1028
+ ngram_lm_scale_2.2_attention_scale_1.2 13.28
1029
+ ngram_lm_scale_1.7_attention_scale_0.6 13.33
1030
+ ngram_lm_scale_2.3_attention_scale_1.3 13.34
1031
+ ngram_lm_scale_1.2_attention_scale_0.05 13.36
1032
+ ngram_lm_scale_3.0_attention_scale_2.1 13.42
1033
+ ngram_lm_scale_2.5_attention_scale_1.5 13.43
1034
+ ngram_lm_scale_2.0_attention_scale_0.9 13.48
1035
+ ngram_lm_scale_2.1_attention_scale_1.0 13.51
1036
+ ngram_lm_scale_2.2_attention_scale_1.1 13.56
1037
+ ngram_lm_scale_1.2_attention_scale_0.01 13.6
1038
+ ngram_lm_scale_2.3_attention_scale_1.2 13.6
1039
+ ngram_lm_scale_3.0_attention_scale_2.0 13.62
1040
+ ngram_lm_scale_1.3_attention_scale_0.1 13.65
1041
+ ngram_lm_scale_1.5_attention_scale_0.3 13.68
1042
+ ngram_lm_scale_1.7_attention_scale_0.5 13.72
1043
+ ngram_lm_scale_1.3_attention_scale_0.08 13.76
1044
+ ngram_lm_scale_1.9_attention_scale_0.7 13.78
1045
+ ngram_lm_scale_3.0_attention_scale_1.9 13.81
1046
+ ngram_lm_scale_2.1_attention_scale_0.9 13.82
1047
+ ngram_lm_scale_2.2_attention_scale_1.0 13.85
1048
+ ngram_lm_scale_4.0_attention_scale_3.0 13.85
1049
+ ngram_lm_scale_2.3_attention_scale_1.1 13.89
1050
+ ngram_lm_scale_1.3_attention_scale_0.05 13.94
1051
+ ngram_lm_scale_2.5_attention_scale_1.3 13.94
1052
+ ngram_lm_scale_5.0_attention_scale_4.0 13.97
1053
+ ngram_lm_scale_1.9_attention_scale_0.6 14.15
1054
+ ngram_lm_scale_2.0_attention_scale_0.7 14.16
1055
+ ngram_lm_scale_2.2_attention_scale_0.9 14.17
1056
+ ngram_lm_scale_2.3_attention_scale_1.0 14.19
1057
+ ngram_lm_scale_1.3_attention_scale_0.01 14.2
1058
+ ngram_lm_scale_2.5_attention_scale_1.2 14.2
1059
+ ngram_lm_scale_3.0_attention_scale_1.7 14.26
1060
+ ngram_lm_scale_2.5_attention_scale_1.1 14.48
1061
+ ngram_lm_scale_2.3_attention_scale_0.9 14.5
1062
+ ngram_lm_scale_2.1_attention_scale_0.7 14.53
1063
+ ngram_lm_scale_2.0_attention_scale_0.6 14.54
1064
+ ngram_lm_scale_1.9_attention_scale_0.5 14.57
1065
+ ngram_lm_scale_4.0_attention_scale_2.5 14.63
1066
+ ngram_lm_scale_1.7_attention_scale_0.3 14.64
1067
+ ngram_lm_scale_3.0_attention_scale_1.5 14.71
1068
+ ngram_lm_scale_1.5_attention_scale_0.1 14.75
1069
+ ngram_lm_scale_2.5_attention_scale_1.0 14.79
1070
+ ngram_lm_scale_2.2_attention_scale_0.7 14.86
1071
+ ngram_lm_scale_1.5_attention_scale_0.08 14.87
1072
+ ngram_lm_scale_2.1_attention_scale_0.6 14.91
1073
+ ngram_lm_scale_2.0_attention_scale_0.5 14.95
1074
+ ngram_lm_scale_4.0_attention_scale_2.3 14.98
1075
+ ngram_lm_scale_1.5_attention_scale_0.05 15.05
1076
+ ngram_lm_scale_2.5_attention_scale_0.9 15.12
1077
+ ngram_lm_scale_4.0_attention_scale_2.2 15.17
1078
+ ngram_lm_scale_2.3_attention_scale_0.7 15.21
1079
+ ngram_lm_scale_3.0_attention_scale_1.3 15.22
1080
+ ngram_lm_scale_2.2_attention_scale_0.6 15.27
1081
+ ngram_lm_scale_1.5_attention_scale_0.01 15.3
1082
+ ngram_lm_scale_5.0_attention_scale_3.0 15.32
1083
+ ngram_lm_scale_2.1_attention_scale_0.5 15.33
1084
+ ngram_lm_scale_4.0_attention_scale_2.1 15.37
1085
+ ngram_lm_scale_1.9_attention_scale_0.3 15.5
1086
+ ngram_lm_scale_3.0_attention_scale_1.2 15.51
1087
+ ngram_lm_scale_4.0_attention_scale_2.0 15.57
1088
+ ngram_lm_scale_2.3_attention_scale_0.6 15.61
1089
+ ngram_lm_scale_2.2_attention_scale_0.5 15.68
1090
+ ngram_lm_scale_1.7_attention_scale_0.1 15.72
1091
+ ngram_lm_scale_4.0_attention_scale_1.9 15.79
1092
+ ngram_lm_scale_3.0_attention_scale_1.1 15.82
1093
+ ngram_lm_scale_1.7_attention_scale_0.08 15.83
1094
+ ngram_lm_scale_2.5_attention_scale_0.7 15.85
1095
+ ngram_lm_scale_2.0_attention_scale_0.3 15.87
1096
+ ngram_lm_scale_2.3_attention_scale_0.5 16.0
1097
+ ngram_lm_scale_1.7_attention_scale_0.05 16.01
1098
+ ngram_lm_scale_3.0_attention_scale_1.0 16.11
1099
+ ngram_lm_scale_5.0_attention_scale_2.5 16.12
1100
+ ngram_lm_scale_2.5_attention_scale_0.6 16.19
1101
+ ngram_lm_scale_2.1_attention_scale_0.3 16.2
1102
+ ngram_lm_scale_4.0_attention_scale_1.7 16.22
1103
+ ngram_lm_scale_1.7_attention_scale_0.01 16.23
1104
+ ngram_lm_scale_3.0_attention_scale_0.9 16.4
1105
+ ngram_lm_scale_5.0_attention_scale_2.3 16.44
1106
+ ngram_lm_scale_1.9_attention_scale_0.1 16.5
1107
+ ngram_lm_scale_2.2_attention_scale_0.3 16.53
1108
+ ngram_lm_scale_2.5_attention_scale_0.5 16.54
1109
+ ngram_lm_scale_1.9_attention_scale_0.08 16.6
1110
+ ngram_lm_scale_5.0_attention_scale_2.2 16.6
1111
+ ngram_lm_scale_4.0_attention_scale_1.5 16.63
1112
+ ngram_lm_scale_1.9_attention_scale_0.05 16.74
1113
+ ngram_lm_scale_5.0_attention_scale_2.1 16.77
1114
+ ngram_lm_scale_2.3_attention_scale_0.3 16.81
1115
+ ngram_lm_scale_2.0_attention_scale_0.1 16.83
1116
+ ngram_lm_scale_2.0_attention_scale_0.08 16.92
1117
+ ngram_lm_scale_5.0_attention_scale_2.0 16.94
1118
+ ngram_lm_scale_1.9_attention_scale_0.01 16.95
1119
+ ngram_lm_scale_3.0_attention_scale_0.7 16.96
1120
+ ngram_lm_scale_2.0_attention_scale_0.05 17.05
1121
+ ngram_lm_scale_4.0_attention_scale_1.3 17.05
1122
+ ngram_lm_scale_2.1_attention_scale_0.1 17.11
1123
+ ngram_lm_scale_5.0_attention_scale_1.9 17.11
1124
+ ngram_lm_scale_2.1_attention_scale_0.08 17.21
1125
+ ngram_lm_scale_2.0_attention_scale_0.01 17.24
1126
+ ngram_lm_scale_3.0_attention_scale_0.6 17.26
1127
+ ngram_lm_scale_4.0_attention_scale_1.2 17.27
1128
+ ngram_lm_scale_2.5_attention_scale_0.3 17.28
1129
+ ngram_lm_scale_2.1_attention_scale_0.05 17.34
1130
+ ngram_lm_scale_2.2_attention_scale_0.1 17.38
1131
+ ngram_lm_scale_5.0_attention_scale_1.7 17.44
1132
+ ngram_lm_scale_2.2_attention_scale_0.08 17.46
1133
+ ngram_lm_scale_4.0_attention_scale_1.1 17.5
1134
+ ngram_lm_scale_2.1_attention_scale_0.01 17.52
1135
+ ngram_lm_scale_3.0_attention_scale_0.5 17.57
1136
+ ngram_lm_scale_2.2_attention_scale_0.05 17.59
1137
+ ngram_lm_scale_2.3_attention_scale_0.1 17.62
1138
+ ngram_lm_scale_2.3_attention_scale_0.08 17.7
1139
+ ngram_lm_scale_4.0_attention_scale_1.0 17.72
1140
+ ngram_lm_scale_2.2_attention_scale_0.01 17.76
1141
+ ngram_lm_scale_5.0_attention_scale_1.5 17.8
1142
+ ngram_lm_scale_2.3_attention_scale_0.05 17.82
1143
+ ngram_lm_scale_4.0_attention_scale_0.9 17.94
1144
+ ngram_lm_scale_2.3_attention_scale_0.01 17.98
1145
+ ngram_lm_scale_2.5_attention_scale_0.1 18.03
1146
+ ngram_lm_scale_2.5_attention_scale_0.08 18.1
1147
+ ngram_lm_scale_5.0_attention_scale_1.3 18.12
1148
+ ngram_lm_scale_3.0_attention_scale_0.3 18.17
1149
+ ngram_lm_scale_2.5_attention_scale_0.05 18.2
1150
+ ngram_lm_scale_5.0_attention_scale_1.2 18.29
1151
+ ngram_lm_scale_2.5_attention_scale_0.01 18.33
1152
+ ngram_lm_scale_4.0_attention_scale_0.7 18.36
1153
+ ngram_lm_scale_5.0_attention_scale_1.1 18.48
1154
+ ngram_lm_scale_4.0_attention_scale_0.6 18.58
1155
+ ngram_lm_scale_5.0_attention_scale_1.0 18.65
1156
+ ngram_lm_scale_3.0_attention_scale_0.1 18.75
1157
+ ngram_lm_scale_4.0_attention_scale_0.5 18.79
1158
+ ngram_lm_scale_3.0_attention_scale_0.08 18.81
1159
+ ngram_lm_scale_5.0_attention_scale_0.9 18.81
1160
+ ngram_lm_scale_3.0_attention_scale_0.05 18.89
1161
+ ngram_lm_scale_3.0_attention_scale_0.01 18.99
1162
+ ngram_lm_scale_5.0_attention_scale_0.7 19.11
1163
+ ngram_lm_scale_4.0_attention_scale_0.3 19.18
1164
+ ngram_lm_scale_5.0_attention_scale_0.6 19.25
1165
+ ngram_lm_scale_5.0_attention_scale_0.5 19.41
1166
+ ngram_lm_scale_4.0_attention_scale_0.1 19.57
1167
+ ngram_lm_scale_4.0_attention_scale_0.08 19.61
1168
+ ngram_lm_scale_4.0_attention_scale_0.05 19.67
1169
+ ngram_lm_scale_5.0_attention_scale_0.3 19.71
1170
+ ngram_lm_scale_4.0_attention_scale_0.01 19.73
1171
+ ngram_lm_scale_5.0_attention_scale_0.1 19.99
1172
+ ngram_lm_scale_5.0_attention_scale_0.08 20.01
1173
+ ngram_lm_scale_5.0_attention_scale_0.05 20.05
1174
+ ngram_lm_scale_5.0_attention_scale_0.01 20.11
1175
+
1176
+ 2022-04-09 04:57:33,455 INFO [decode_test.py:730] Done!
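
[Editor's note] The table above is a grid search over two interpolation weights: each n-best path gets a combined score of the form total = am_score + ngram_lm_scale * ngram_lm_score + attention_scale * attention_score, the best path per utterance is picked, and WER is computed for every (ngram_lm_scale, attention_scale) pair; here ngram_lm_scale_0.3_attention_scale_0.7 is best for test at 10.58. A toy illustration of the scoring rule only, with random numbers standing in for real per-path scores:

    import torch

    # Hypothetical scores for one utterance's 100-best paths.
    am_score = torch.randn(100)
    ngram_lm_score = torch.randn(100)
    attention_score = torch.randn(100)

    for lm_scale in (0.3, 0.5):
        for attn_scale in (0.7, 1.3):
            total = am_score + lm_scale * ngram_lm_score + attn_scale * attention_score
            best = int(torch.argmax(total))
            print(f"ngram_lm_scale_{lm_scale}_attention_scale_{attn_scale}: best path {best}")
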