humeur committed
Commit 820c0fc
1 parent: 37e4c66

Delete checkpoint-2000

checkpoint-2000/config.json DELETED
@@ -1,42 +0,0 @@
- {
- "_name_or_path": "./lab2_id2223",
- "activation_dropout": 0.0,
- "activation_function": "gelu",
- "architectures": [
- "WhisperForConditionalGeneration"
- ],
- "attention_dropout": 0.0,
- "begin_suppress_tokens": [
- 220,
- 50257
- ],
- "bos_token_id": 50257,
- "d_model": 768,
- "decoder_attention_heads": 12,
- "decoder_ffn_dim": 3072,
- "decoder_layerdrop": 0.0,
- "decoder_layers": 12,
- "decoder_start_token_id": 50258,
- "dropout": 0.0,
- "encoder_attention_heads": 12,
- "encoder_ffn_dim": 3072,
- "encoder_layerdrop": 0.0,
- "encoder_layers": 12,
- "eos_token_id": 50257,
- "forced_decoder_ids": null,
- "init_std": 0.02,
- "is_encoder_decoder": true,
- "max_length": 448,
- "max_source_positions": 1500,
- "max_target_positions": 448,
- "model_type": "whisper",
- "num_hidden_layers": 12,
- "num_mel_bins": 80,
- "pad_token_id": 50257,
- "scale_embedding": false,
- "suppress_tokens": [],
- "torch_dtype": "float32",
- "transformers_version": "4.26.0.dev0",
- "use_cache": false,
- "vocab_size": 51865
- }
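
The deleted config.json describes a Whisper "small"-sized encoder-decoder (12 encoder and 12 decoder layers, d_model 768, 80 mel bins, vocab size 51865). Purely as an illustration, a minimal sketch of rebuilding an equivalent configuration with the transformers WhisperConfig class; the values are copied from the diff above, and the output directory name is made up, not part of this commit:

from transformers import WhisperConfig

# Values copied from the deleted checkpoint-2000/config.json above.
config = WhisperConfig(
    vocab_size=51865,
    num_mel_bins=80,
    d_model=768,
    encoder_layers=12,
    encoder_attention_heads=12,
    encoder_ffn_dim=3072,
    decoder_layers=12,
    decoder_attention_heads=12,
    decoder_ffn_dim=3072,
    max_source_positions=1500,
    max_target_positions=448,
    bos_token_id=50257,
    eos_token_id=50257,
    pad_token_id=50257,
    decoder_start_token_id=50258,
    use_cache=False,
)
# Hypothetical output directory, for illustration only; writes a config.json.
config.save_pretrained("reconstructed-checkpoint")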
checkpoint-2000/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1280b664e2299772ee558c8b50a070bb2ecb8aeeff2becf9dacd5f8eeaa96fae
- size 1934161093
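
The large binaries removed in this commit (optimizer.pt, pytorch_model.bin, and so on) are tracked with Git LFS, so the repository itself only stores three-line pointer stubs like the one above: a spec version, the SHA-256 of the real object, and its size in bytes. A small sketch of reading such a stub, assuming a local checkout where the pointer file has not been smudged into the real object; parse_lfs_pointer is an illustrative helper, not a git-lfs API:

from pathlib import Path

def parse_lfs_pointer(path):
    """Parse a Git LFS pointer file into a {key: value} dict."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

ptr = parse_lfs_pointer("checkpoint-2000/optimizer.pt")  # the pointer, not the ~1.9 GB object
print(ptr["oid"], int(ptr["size"]))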
checkpoint-2000/preprocessor_config.json DELETED
The diff for this file is too large to render.
 
checkpoint-2000/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:f5bc6e356b2f66f6b694c2bd274a577c6d9586eac3215b17afee61b5217f3579
- size 967102601
checkpoint-2000/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:fc2370b290bbb9b0161d6bb54c04e80e95148a66774a7fc043af0ab9a03d7ecd
- size 14575
checkpoint-2000/scaler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:738953d7bfb3f612f66f32f7e2019c92256bd40f51a66f25241750d74e38d0b1
- size 557
checkpoint-2000/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:0fe7ca1c4fdccf931b9bdbd5399f5fc0f8d8eee4c47f56dd3b27eacf280f86a3
- size 627
checkpoint-2000/trainer_state.json DELETED
@@ -1,514 +0,0 @@
- {
- "best_metric": 19.680970568411592,
- "best_model_checkpoint": "./lab2_id2223/checkpoint-2000",
- "epoch": 2.5873221216041395,
- "global_step": 2000,
- "is_hyper_param_search": false,
- "is_local_process_zero": true,
- "is_world_process_zero": true,
- "log_history": [
- {
- "epoch": 0.03,
- "learning_rate": 4.800000000000001e-07,
- "loss": 0.2225,
- "step": 25
- },
- {
- "epoch": 0.06,
- "learning_rate": 9.800000000000001e-07,
- "loss": 0.2081,
- "step": 50
- },
- {
- "epoch": 0.1,
- "learning_rate": 1.48e-06,
- "loss": 0.1992,
- "step": 75
- },
- {
- "epoch": 0.13,
- "learning_rate": 1.98e-06,
- "loss": 0.2014,
- "step": 100
- },
- {
- "epoch": 0.16,
- "learning_rate": 2.4800000000000004e-06,
- "loss": 0.1914,
- "step": 125
- },
- {
- "epoch": 0.19,
- "learning_rate": 2.9800000000000003e-06,
- "loss": 0.1978,
- "step": 150
- },
- {
- "epoch": 0.23,
- "learning_rate": 3.48e-06,
- "loss": 0.1988,
- "step": 175
- },
- {
- "epoch": 0.26,
- "learning_rate": 3.980000000000001e-06,
- "loss": 0.2153,
- "step": 200
- },
- {
- "epoch": 0.29,
- "learning_rate": 4.48e-06,
- "loss": 0.1907,
- "step": 225
- },
- {
- "epoch": 0.32,
- "learning_rate": 4.980000000000001e-06,
- "loss": 0.1976,
- "step": 250
- },
- {
- "epoch": 0.36,
- "learning_rate": 5.480000000000001e-06,
- "loss": 0.2256,
- "step": 275
- },
- {
- "epoch": 0.39,
- "learning_rate": 5.98e-06,
- "loss": 0.1805,
- "step": 300
- },
- {
- "epoch": 0.42,
- "learning_rate": 6.480000000000001e-06,
- "loss": 0.2144,
- "step": 325
- },
- {
- "epoch": 0.45,
- "learning_rate": 6.98e-06,
- "loss": 0.195,
- "step": 350
- },
- {
- "epoch": 0.49,
- "learning_rate": 7.48e-06,
- "loss": 0.2053,
- "step": 375
- },
- {
- "epoch": 0.52,
- "learning_rate": 7.980000000000002e-06,
- "loss": 0.2157,
- "step": 400
- },
- {
- "epoch": 0.55,
- "learning_rate": 8.48e-06,
- "loss": 0.2062,
- "step": 425
- },
- {
- "epoch": 0.58,
- "learning_rate": 8.96e-06,
- "loss": 0.1979,
- "step": 450
- },
- {
- "epoch": 0.61,
- "learning_rate": 9.460000000000001e-06,
- "loss": 0.1868,
- "step": 475
- },
- {
- "epoch": 0.65,
- "learning_rate": 9.960000000000001e-06,
- "loss": 0.2211,
- "step": 500
- },
- {
- "epoch": 0.68,
- "learning_rate": 9.934285714285715e-06,
- "loss": 0.2049,
- "step": 525
- },
- {
- "epoch": 0.71,
- "learning_rate": 9.862857142857144e-06,
- "loss": 0.1989,
- "step": 550
- },
- {
- "epoch": 0.74,
- "learning_rate": 9.791428571428571e-06,
- "loss": 0.2127,
- "step": 575
- },
- {
- "epoch": 0.78,
- "learning_rate": 9.72e-06,
- "loss": 0.2261,
- "step": 600
- },
- {
- "epoch": 0.81,
- "learning_rate": 9.648571428571429e-06,
- "loss": 0.2081,
- "step": 625
- },
- {
- "epoch": 0.84,
- "learning_rate": 9.577142857142858e-06,
- "loss": 0.2281,
- "step": 650
- },
- {
- "epoch": 0.87,
- "learning_rate": 9.505714285714287e-06,
- "loss": 0.204,
- "step": 675
- },
- {
- "epoch": 0.91,
- "learning_rate": 9.434285714285714e-06,
- "loss": 0.2053,
- "step": 700
- },
- {
- "epoch": 0.94,
- "learning_rate": 9.362857142857143e-06,
- "loss": 0.2208,
- "step": 725
- },
- {
- "epoch": 0.97,
- "learning_rate": 9.291428571428572e-06,
- "loss": 0.2076,
- "step": 750
- },
- {
- "epoch": 1.0,
- "learning_rate": 9.220000000000002e-06,
- "loss": 0.1941,
- "step": 775
- },
- {
- "epoch": 1.03,
- "learning_rate": 9.148571428571429e-06,
- "loss": 0.0949,
- "step": 800
- },
- {
- "epoch": 1.07,
- "learning_rate": 9.077142857142858e-06,
- "loss": 0.1011,
- "step": 825
- },
- {
- "epoch": 1.1,
- "learning_rate": 9.005714285714287e-06,
- "loss": 0.1078,
- "step": 850
- },
- {
- "epoch": 1.13,
- "learning_rate": 8.934285714285716e-06,
- "loss": 0.0956,
- "step": 875
- },
- {
- "epoch": 1.16,
- "learning_rate": 8.862857142857143e-06,
- "loss": 0.1093,
- "step": 900
- },
- {
- "epoch": 1.2,
- "learning_rate": 8.791428571428572e-06,
- "loss": 0.1133,
- "step": 925
- },
- {
- "epoch": 1.23,
- "learning_rate": 8.720000000000001e-06,
- "loss": 0.1128,
- "step": 950
- },
- {
- "epoch": 1.26,
- "learning_rate": 8.64857142857143e-06,
- "loss": 0.1068,
- "step": 975
- },
- {
- "epoch": 1.29,
- "learning_rate": 8.577142857142858e-06,
- "loss": 0.1015,
- "step": 1000
- },
- {
- "epoch": 1.29,
- "eval_loss": 0.2880099415779114,
- "eval_runtime": 2234.5268,
- "eval_samples_per_second": 1.343,
- "eval_steps_per_second": 0.168,
- "eval_wer": 20.41339024938216,
- "step": 1000
- },
- {
- "epoch": 1.33,
- "learning_rate": 8.505714285714287e-06,
- "loss": 0.1154,
- "step": 1025
- },
- {
- "epoch": 1.36,
- "learning_rate": 8.434285714285716e-06,
- "loss": 0.1035,
- "step": 1050
- },
- {
- "epoch": 1.39,
- "learning_rate": 8.362857142857143e-06,
- "loss": 0.103,
- "step": 1075
- },
- {
- "epoch": 1.42,
- "learning_rate": 8.291428571428572e-06,
- "loss": 0.1089,
- "step": 1100
- },
- {
- "epoch": 1.46,
- "learning_rate": 8.220000000000001e-06,
- "loss": 0.0986,
- "step": 1125
- },
- {
- "epoch": 1.49,
- "learning_rate": 8.148571428571428e-06,
- "loss": 0.0999,
- "step": 1150
- },
- {
- "epoch": 1.52,
- "learning_rate": 8.077142857142857e-06,
- "loss": 0.1075,
- "step": 1175
- },
- {
- "epoch": 1.55,
- "learning_rate": 8.005714285714286e-06,
- "loss": 0.1155,
- "step": 1200
- },
- {
- "epoch": 1.58,
- "learning_rate": 7.934285714285715e-06,
- "loss": 0.0978,
- "step": 1225
- },
- {
- "epoch": 1.62,
- "learning_rate": 7.862857142857143e-06,
- "loss": 0.1035,
- "step": 1250
- },
- {
- "epoch": 1.65,
- "learning_rate": 7.791428571428572e-06,
- "loss": 0.1085,
- "step": 1275
- },
- {
- "epoch": 1.68,
- "learning_rate": 7.72e-06,
- "loss": 0.1008,
- "step": 1300
- },
- {
- "epoch": 1.71,
- "learning_rate": 7.64857142857143e-06,
- "loss": 0.1083,
- "step": 1325
- },
- {
- "epoch": 1.75,
- "learning_rate": 7.577142857142857e-06,
- "loss": 0.0918,
- "step": 1350
- },
- {
- "epoch": 1.78,
- "learning_rate": 7.505714285714286e-06,
- "loss": 0.1124,
- "step": 1375
- },
- {
- "epoch": 1.81,
- "learning_rate": 7.434285714285715e-06,
- "loss": 0.0997,
- "step": 1400
- },
- {
- "epoch": 1.84,
- "learning_rate": 7.362857142857144e-06,
- "loss": 0.0954,
- "step": 1425
- },
- {
- "epoch": 1.88,
- "learning_rate": 7.291428571428571e-06,
- "loss": 0.0946,
- "step": 1450
- },
- {
- "epoch": 1.91,
- "learning_rate": 7.22e-06,
- "loss": 0.1,
- "step": 1475
- },
- {
- "epoch": 1.94,
- "learning_rate": 7.148571428571429e-06,
- "loss": 0.0961,
- "step": 1500
- },
- {
- "epoch": 1.97,
- "learning_rate": 7.077142857142858e-06,
- "loss": 0.1074,
- "step": 1525
- },
- {
- "epoch": 2.01,
- "learning_rate": 7.0057142857142865e-06,
- "loss": 0.0821,
- "step": 1550
- },
- {
- "epoch": 2.04,
- "learning_rate": 6.934285714285715e-06,
- "loss": 0.0392,
- "step": 1575
- },
- {
- "epoch": 2.07,
- "learning_rate": 6.862857142857144e-06,
- "loss": 0.0371,
- "step": 1600
- },
- {
- "epoch": 2.1,
- "learning_rate": 6.791428571428572e-06,
- "loss": 0.0441,
- "step": 1625
- },
- {
- "epoch": 2.13,
- "learning_rate": 6.720000000000001e-06,
- "loss": 0.0416,
- "step": 1650
- },
- {
- "epoch": 2.17,
- "learning_rate": 6.648571428571429e-06,
- "loss": 0.0434,
- "step": 1675
- },
- {
- "epoch": 2.2,
- "learning_rate": 6.577142857142857e-06,
- "loss": 0.0457,
- "step": 1700
- },
- {
- "epoch": 2.23,
- "learning_rate": 6.505714285714286e-06,
- "loss": 0.0376,
- "step": 1725
- },
- {
- "epoch": 2.26,
- "learning_rate": 6.434285714285715e-06,
- "loss": 0.039,
- "step": 1750
- },
- {
- "epoch": 2.3,
- "learning_rate": 6.3628571428571426e-06,
- "loss": 0.0405,
- "step": 1775
- },
- {
- "epoch": 2.33,
- "learning_rate": 6.2914285714285716e-06,
- "loss": 0.0352,
- "step": 1800
- },
- {
- "epoch": 2.36,
- "learning_rate": 6.220000000000001e-06,
- "loss": 0.0397,
- "step": 1825
- },
- {
- "epoch": 2.39,
- "learning_rate": 6.14857142857143e-06,
- "loss": 0.0396,
- "step": 1850
- },
- {
- "epoch": 2.43,
- "learning_rate": 6.077142857142858e-06,
- "loss": 0.0388,
- "step": 1875
- },
- {
- "epoch": 2.46,
- "learning_rate": 6.005714285714286e-06,
- "loss": 0.038,
- "step": 1900
- },
- {
- "epoch": 2.49,
- "learning_rate": 5.934285714285715e-06,
- "loss": 0.0391,
- "step": 1925
- },
- {
- "epoch": 2.52,
- "learning_rate": 5.862857142857143e-06,
- "loss": 0.0358,
- "step": 1950
- },
- {
- "epoch": 2.55,
- "learning_rate": 5.791428571428572e-06,
- "loss": 0.0399,
- "step": 1975
- },
- {
- "epoch": 2.59,
- "learning_rate": 5.72e-06,
- "loss": 0.0387,
- "step": 2000
- },
- {
- "epoch": 2.59,
- "eval_loss": 0.29592692852020264,
- "eval_runtime": 2465.3193,
- "eval_samples_per_second": 1.217,
- "eval_steps_per_second": 0.152,
- "eval_wer": 19.680970568411592,
- "step": 2000
- }
- ],
- "max_steps": 4000,
- "num_train_epochs": 6,
- "total_flos": 9.23011544383488e+18,
- "trial_name": null,
- "trial_params": null
- }
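
The deleted trainer_state.json records the Trainer's log_history at this checkpoint: a training-loss entry every 25 steps and an evaluation entry every 1000 steps, with eval WER improving from 20.41 at step 1000 to 19.68 at step 2000 (the best_metric). A minimal sketch of pulling those evaluation rows back out of such a file, assuming a local copy of the JSON shown above:

import json

# Hypothetical local copy of the deleted checkpoint-2000/trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the evaluation entries; they carry an "eval_wer" key.
evals = [e for e in state["log_history"] if "eval_wer" in e]
for e in evals:
    print(f'step {e["step"]}: eval_loss={e["eval_loss"]:.4f} WER={e["eval_wer"]:.2f}')

print("best WER:", state["best_metric"], "at", state["best_model_checkpoint"])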
checkpoint-2000/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e622acaf222233592b6831832007ec4d3cdb12856c8cb7d9c6a33ac3ba0638d9
- size 3579