flatala-research committed on
Commit 990b468
1 Parent(s): d875331

End of training

Files changed (4)
  1. README.md +4 -4
  2. all_results.json +4 -0
  3. test_results.json +4 -0
  4. trainer_state.json +577 -0
README.md CHANGED
@@ -15,11 +15,11 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [MCG-NJU/videomae-large](https://huggingface.co/MCG-NJU/videomae-large) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - eval_loss: 1.7307
+ - eval_loss: 1.7345
  - eval_accuracy: 0.5805
- - eval_runtime: 28.969
- - eval_samples_per_second: 7.077
- - eval_steps_per_second: 0.449
+ - eval_runtime: 19.4619
+ - eval_samples_per_second: 10.533
+ - eval_steps_per_second: 0.668
  - epoch: 10.0342
  - step: 618
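
The model card above only reports metrics, so a brief, hedged usage sketch may help readers of this commit. The repo id in the snippet is an assumption pieced together from the committing user and the checkpoint path recorded in trainer_state.json, and the 16-frame, 224x224 clip is simply the VideoMAE default input shape; neither detail is confirmed by the diff itself.

```python
# Hedged inference sketch for this fine-tune of MCG-NJU/videomae-large.
# The repo id is an assumption inferred from the checkpoint path in trainer_state.json;
# point it at the actual Hub id or a local checkpoint directory instead.
import numpy as np
import torch
from transformers import VideoMAEForVideoClassification, VideoMAEImageProcessor

model_id = "flatala-research/videomae-large-finetuned-right-hand-conflab-v3"  # assumed id
processor = VideoMAEImageProcessor.from_pretrained(model_id)  # or fall back to the base model's preprocessor config
model = VideoMAEForVideoClassification.from_pretrained(model_id)
model.eval()

# VideoMAE consumes fixed-length clips (16 frames by default); a random clip stands in
# for 16 real frames (H x W x C uint8 arrays) sampled from a video.
clip = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(16)]
inputs = processor(clip, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, num_labels)

predicted = logits.argmax(-1).item()
print(model.config.id2label[predicted])
```

With real footage, the random clip would be replaced by 16 frames sampled from a video; the predicted label comes from the id2label mapping stored in the fine-tuned config.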
 
all_results.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "eval_accuracy": 0.5804878048780487,
+ "eval_loss": 1.7345412969589233
+ }
test_results.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "eval_accuracy": 0.5804878048780487,
+ "eval_loss": 1.7345412969589233
+ }
trainer_state.json ADDED
@@ -0,0 +1,577 @@
+ {
+ "best_metric": 0.6310679611650486,
+ "best_model_checkpoint": "videomae-large-finetuned-right-hand-conflab-v3/checkpoint-531",
+ "epoch": 10.034188034188034,
+ "eval_steps": 500,
+ "global_step": 618,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01221001221001221,
+ "grad_norm": 10.883779525756836,
+ "learning_rate": 6.0975609756097564e-06,
+ "loss": 2.1132,
+ "step": 10
+ },
+ {
+ "epoch": 0.02442002442002442,
+ "grad_norm": 9.386421203613281,
+ "learning_rate": 1.2195121951219513e-05,
+ "loss": 2.0109,
+ "step": 20
+ },
+ {
+ "epoch": 0.03663003663003663,
+ "grad_norm": 9.540677070617676,
+ "learning_rate": 1.8292682926829268e-05,
+ "loss": 1.9597,
+ "step": 30
+ },
+ {
+ "epoch": 0.04884004884004884,
+ "grad_norm": 8.591822624206543,
+ "learning_rate": 2.4390243902439026e-05,
+ "loss": 1.9359,
+ "step": 40
+ },
+ {
+ "epoch": 0.06105006105006105,
+ "grad_norm": 8.493247985839844,
+ "learning_rate": 3.048780487804878e-05,
+ "loss": 1.9038,
+ "step": 50
+ },
+ {
+ "epoch": 0.07203907203907203,
+ "eval_accuracy": 0.24757281553398058,
+ "eval_loss": 2.092643976211548,
+ "eval_runtime": 20.1931,
+ "eval_samples_per_second": 10.201,
+ "eval_steps_per_second": 0.644,
+ "step": 59
+ },
+ {
+ "epoch": 1.0012210012210012,
+ "grad_norm": 9.048627853393555,
+ "learning_rate": 3.6585365853658535e-05,
+ "loss": 2.0497,
+ "step": 60
+ },
+ {
+ "epoch": 1.0134310134310134,
+ "grad_norm": 5.44881010055542,
+ "learning_rate": 4.26829268292683e-05,
+ "loss": 2.0479,
+ "step": 70
+ },
+ {
+ "epoch": 1.0256410256410255,
+ "grad_norm": 6.384305477142334,
+ "learning_rate": 4.878048780487805e-05,
+ "loss": 2.09,
+ "step": 80
+ },
+ {
+ "epoch": 1.037851037851038,
+ "grad_norm": 4.374388694763184,
+ "learning_rate": 4.94572591587517e-05,
+ "loss": 2.0209,
+ "step": 90
+ },
+ {
+ "epoch": 1.05006105006105,
+ "grad_norm": 4.627490043640137,
+ "learning_rate": 4.877883310719132e-05,
+ "loss": 2.0525,
+ "step": 100
+ },
+ {
+ "epoch": 1.0622710622710623,
+ "grad_norm": 5.586881160736084,
+ "learning_rate": 4.810040705563094e-05,
+ "loss": 1.8963,
+ "step": 110
+ },
+ {
+ "epoch": 1.072039072039072,
+ "eval_accuracy": 0.24271844660194175,
+ "eval_loss": 1.9697831869125366,
+ "eval_runtime": 21.1947,
+ "eval_samples_per_second": 9.719,
+ "eval_steps_per_second": 0.613,
+ "step": 118
+ },
+ {
+ "epoch": 2.0024420024420024,
+ "grad_norm": 5.62977409362793,
+ "learning_rate": 4.742198100407056e-05,
+ "loss": 1.9983,
+ "step": 120
+ },
+ {
+ "epoch": 2.0146520146520146,
+ "grad_norm": 4.484747886657715,
+ "learning_rate": 4.674355495251018e-05,
+ "loss": 1.9586,
+ "step": 130
+ },
+ {
+ "epoch": 2.0268620268620268,
+ "grad_norm": 4.371191501617432,
+ "learning_rate": 4.60651289009498e-05,
+ "loss": 1.8729,
+ "step": 140
+ },
+ {
+ "epoch": 2.039072039072039,
+ "grad_norm": 4.68417501449585,
+ "learning_rate": 4.5386702849389416e-05,
+ "loss": 2.0021,
+ "step": 150
+ },
+ {
+ "epoch": 2.051282051282051,
+ "grad_norm": 12.167759895324707,
+ "learning_rate": 4.470827679782904e-05,
+ "loss": 1.7877,
+ "step": 160
+ },
+ {
+ "epoch": 2.0634920634920633,
+ "grad_norm": 3.787353515625,
+ "learning_rate": 4.402985074626866e-05,
+ "loss": 1.9154,
+ "step": 170
+ },
+ {
+ "epoch": 2.0720390720390722,
+ "eval_accuracy": 0.36893203883495146,
+ "eval_loss": 1.7126853466033936,
+ "eval_runtime": 18.7207,
+ "eval_samples_per_second": 11.004,
+ "eval_steps_per_second": 0.694,
+ "step": 177
+ },
+ {
+ "epoch": 3.0036630036630036,
+ "grad_norm": 5.718711853027344,
+ "learning_rate": 4.335142469470828e-05,
+ "loss": 1.7051,
+ "step": 180
+ },
+ {
+ "epoch": 3.015873015873016,
+ "grad_norm": 6.827328681945801,
+ "learning_rate": 4.26729986431479e-05,
+ "loss": 1.7233,
+ "step": 190
+ },
+ {
+ "epoch": 3.028083028083028,
+ "grad_norm": 6.945235252380371,
+ "learning_rate": 4.199457259158752e-05,
+ "loss": 1.5634,
+ "step": 200
+ },
+ {
+ "epoch": 3.04029304029304,
+ "grad_norm": 7.2410197257995605,
+ "learning_rate": 4.131614654002714e-05,
+ "loss": 1.5571,
+ "step": 210
+ },
+ {
+ "epoch": 3.0525030525030523,
+ "grad_norm": 5.356701374053955,
+ "learning_rate": 4.063772048846676e-05,
+ "loss": 1.6352,
+ "step": 220
+ },
+ {
+ "epoch": 3.064713064713065,
+ "grad_norm": 6.82385778427124,
+ "learning_rate": 3.995929443690638e-05,
+ "loss": 1.4173,
+ "step": 230
+ },
+ {
+ "epoch": 3.0720390720390722,
+ "eval_accuracy": 0.5242718446601942,
+ "eval_loss": 1.4571937322616577,
+ "eval_runtime": 25.9349,
+ "eval_samples_per_second": 7.943,
+ "eval_steps_per_second": 0.501,
+ "step": 236
+ },
+ {
+ "epoch": 4.004884004884005,
+ "grad_norm": 5.349239349365234,
+ "learning_rate": 3.9280868385345995e-05,
+ "loss": 1.5368,
+ "step": 240
+ },
+ {
+ "epoch": 4.017094017094017,
+ "grad_norm": 7.013088703155518,
+ "learning_rate": 3.860244233378562e-05,
+ "loss": 1.3767,
+ "step": 250
+ },
+ {
+ "epoch": 4.029304029304029,
+ "grad_norm": 8.280290603637695,
+ "learning_rate": 3.792401628222524e-05,
+ "loss": 1.4361,
+ "step": 260
+ },
+ {
+ "epoch": 4.041514041514041,
+ "grad_norm": 8.19866943359375,
+ "learning_rate": 3.724559023066486e-05,
+ "loss": 1.193,
+ "step": 270
+ },
+ {
+ "epoch": 4.0537240537240535,
+ "grad_norm": 7.372025012969971,
+ "learning_rate": 3.656716417910448e-05,
+ "loss": 1.3667,
+ "step": 280
+ },
+ {
+ "epoch": 4.065934065934066,
+ "grad_norm": 5.639531135559082,
+ "learning_rate": 3.58887381275441e-05,
+ "loss": 1.3246,
+ "step": 290
+ },
+ {
+ "epoch": 4.072039072039072,
+ "eval_accuracy": 0.5679611650485437,
+ "eval_loss": 1.223657488822937,
+ "eval_runtime": 20.7954,
+ "eval_samples_per_second": 9.906,
+ "eval_steps_per_second": 0.625,
+ "step": 295
+ },
+ {
+ "epoch": 5.006105006105006,
+ "grad_norm": 7.878034591674805,
+ "learning_rate": 3.521031207598372e-05,
+ "loss": 1.1881,
+ "step": 300
+ },
+ {
+ "epoch": 5.018315018315018,
+ "grad_norm": 7.679462909698486,
+ "learning_rate": 3.453188602442334e-05,
+ "loss": 1.1117,
+ "step": 310
+ },
+ {
+ "epoch": 5.03052503052503,
+ "grad_norm": 10.07403564453125,
+ "learning_rate": 3.385345997286296e-05,
+ "loss": 1.0766,
+ "step": 320
+ },
+ {
+ "epoch": 5.042735042735043,
+ "grad_norm": 7.193727970123291,
+ "learning_rate": 3.3175033921302575e-05,
+ "loss": 1.006,
+ "step": 330
+ },
+ {
+ "epoch": 5.054945054945055,
+ "grad_norm": 5.925375461578369,
+ "learning_rate": 3.24966078697422e-05,
+ "loss": 0.9709,
+ "step": 340
+ },
+ {
+ "epoch": 5.067155067155067,
+ "grad_norm": 13.94832706451416,
+ "learning_rate": 3.181818181818182e-05,
+ "loss": 1.077,
+ "step": 350
+ },
+ {
+ "epoch": 5.072039072039072,
+ "eval_accuracy": 0.49514563106796117,
+ "eval_loss": 1.4524530172348022,
+ "eval_runtime": 22.2248,
+ "eval_samples_per_second": 9.269,
+ "eval_steps_per_second": 0.585,
+ "step": 354
+ },
+ {
+ "epoch": 6.007326007326007,
+ "grad_norm": 8.016749382019043,
+ "learning_rate": 3.113975576662144e-05,
+ "loss": 0.9269,
+ "step": 360
+ },
+ {
+ "epoch": 6.0195360195360195,
+ "grad_norm": 5.849711894989014,
+ "learning_rate": 3.046132971506106e-05,
+ "loss": 0.8371,
+ "step": 370
+ },
+ {
+ "epoch": 6.031746031746032,
+ "grad_norm": 7.203096389770508,
+ "learning_rate": 2.9782903663500678e-05,
+ "loss": 0.7639,
+ "step": 380
+ },
+ {
+ "epoch": 6.043956043956044,
+ "grad_norm": 7.49542236328125,
+ "learning_rate": 2.91044776119403e-05,
+ "loss": 0.8475,
+ "step": 390
+ },
+ {
+ "epoch": 6.056166056166056,
+ "grad_norm": 9.251474380493164,
+ "learning_rate": 2.842605156037992e-05,
+ "loss": 0.8942,
+ "step": 400
+ },
+ {
+ "epoch": 6.068376068376068,
+ "grad_norm": 8.035192489624023,
+ "learning_rate": 2.7747625508819542e-05,
+ "loss": 0.923,
+ "step": 410
+ },
+ {
+ "epoch": 6.072039072039072,
+ "eval_accuracy": 0.587378640776699,
+ "eval_loss": 1.3191975355148315,
+ "eval_runtime": 21.2599,
+ "eval_samples_per_second": 9.69,
+ "eval_steps_per_second": 0.611,
+ "step": 413
+ },
+ {
+ "epoch": 7.0085470085470085,
+ "grad_norm": 9.951005935668945,
+ "learning_rate": 2.7069199457259158e-05,
+ "loss": 0.6643,
+ "step": 420
+ },
+ {
+ "epoch": 7.020757020757021,
+ "grad_norm": 10.067875862121582,
+ "learning_rate": 2.639077340569878e-05,
+ "loss": 0.6866,
+ "step": 430
+ },
+ {
+ "epoch": 7.032967032967033,
+ "grad_norm": 11.081637382507324,
+ "learning_rate": 2.57123473541384e-05,
+ "loss": 0.5887,
+ "step": 440
+ },
+ {
+ "epoch": 7.045177045177045,
+ "grad_norm": 7.662527084350586,
+ "learning_rate": 2.5033921302578023e-05,
+ "loss": 0.6175,
+ "step": 450
+ },
+ {
+ "epoch": 7.057387057387057,
+ "grad_norm": 6.395303726196289,
+ "learning_rate": 2.4355495251017642e-05,
+ "loss": 0.6575,
+ "step": 460
+ },
+ {
+ "epoch": 7.069597069597069,
+ "grad_norm": 12.816076278686523,
+ "learning_rate": 2.367706919945726e-05,
+ "loss": 0.651,
+ "step": 470
+ },
+ {
+ "epoch": 7.072039072039072,
+ "eval_accuracy": 0.5728155339805825,
+ "eval_loss": 1.3817497491836548,
+ "eval_runtime": 22.418,
+ "eval_samples_per_second": 9.189,
+ "eval_steps_per_second": 0.58,
+ "step": 472
+ },
+ {
+ "epoch": 8.00976800976801,
+ "grad_norm": 6.377563953399658,
+ "learning_rate": 2.299864314789688e-05,
+ "loss": 0.4579,
+ "step": 480
+ },
+ {
+ "epoch": 8.021978021978022,
+ "grad_norm": 9.152300834655762,
+ "learning_rate": 2.2320217096336503e-05,
+ "loss": 0.4142,
+ "step": 490
+ },
+ {
+ "epoch": 8.034188034188034,
+ "grad_norm": 6.605992317199707,
+ "learning_rate": 2.164179104477612e-05,
+ "loss": 0.4407,
+ "step": 500
+ },
+ {
+ "epoch": 8.046398046398046,
+ "grad_norm": 14.947940826416016,
+ "learning_rate": 2.0963364993215738e-05,
+ "loss": 0.3638,
+ "step": 510
+ },
+ {
+ "epoch": 8.058608058608058,
+ "grad_norm": 9.7440824508667,
+ "learning_rate": 2.028493894165536e-05,
+ "loss": 0.5602,
+ "step": 520
+ },
+ {
+ "epoch": 8.07081807081807,
+ "grad_norm": 8.176590919494629,
+ "learning_rate": 1.960651289009498e-05,
+ "loss": 0.5092,
+ "step": 530
+ },
+ {
+ "epoch": 8.072039072039072,
+ "eval_accuracy": 0.6310679611650486,
+ "eval_loss": 1.3680421113967896,
+ "eval_runtime": 18.341,
+ "eval_samples_per_second": 11.232,
+ "eval_steps_per_second": 0.709,
+ "step": 531
+ },
+ {
+ "epoch": 9.010989010989011,
+ "grad_norm": 6.507408142089844,
+ "learning_rate": 1.89280868385346e-05,
+ "loss": 0.3515,
+ "step": 540
+ },
+ {
+ "epoch": 9.023199023199023,
+ "grad_norm": 10.037909507751465,
+ "learning_rate": 1.824966078697422e-05,
+ "loss": 0.38,
+ "step": 550
+ },
+ {
+ "epoch": 9.035409035409035,
+ "grad_norm": 8.945121765136719,
+ "learning_rate": 1.757123473541384e-05,
+ "loss": 0.2686,
+ "step": 560
+ },
+ {
+ "epoch": 9.047619047619047,
+ "grad_norm": 7.725048542022705,
+ "learning_rate": 1.689280868385346e-05,
+ "loss": 0.318,
+ "step": 570
+ },
+ {
+ "epoch": 9.05982905982906,
+ "grad_norm": 8.256610870361328,
+ "learning_rate": 1.6214382632293083e-05,
+ "loss": 0.3725,
+ "step": 580
+ },
+ {
+ "epoch": 9.072039072039072,
+ "grad_norm": 11.454293251037598,
+ "learning_rate": 1.55359565807327e-05,
+ "loss": 0.2484,
+ "step": 590
+ },
+ {
+ "epoch": 9.072039072039072,
+ "eval_accuracy": 0.6262135922330098,
+ "eval_loss": 1.4326366186141968,
+ "eval_runtime": 19.1344,
+ "eval_samples_per_second": 10.766,
+ "eval_steps_per_second": 0.679,
+ "step": 590
+ },
+ {
+ "epoch": 10.012210012210012,
+ "grad_norm": 16.326658248901367,
+ "learning_rate": 1.485753052917232e-05,
+ "loss": 0.2977,
+ "step": 600
+ },
+ {
+ "epoch": 10.024420024420024,
+ "grad_norm": 6.485968112945557,
+ "learning_rate": 1.417910447761194e-05,
+ "loss": 0.2218,
+ "step": 610
+ },
+ {
+ "epoch": 10.034188034188034,
+ "eval_accuracy": 0.5804878048780487,
+ "eval_loss": 1.7306667566299438,
+ "eval_runtime": 28.969,
+ "eval_samples_per_second": 7.077,
+ "eval_steps_per_second": 0.449,
+ "step": 618
+ },
+ {
+ "epoch": 10.034188034188034,
+ "eval_accuracy": 0.5804878048780487,
+ "eval_loss": 1.7345412969589233,
+ "eval_runtime": 19.4619,
+ "eval_samples_per_second": 10.533,
+ "eval_steps_per_second": 0.668,
+ "step": 618
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 819,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 9223372036854775807,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": {
+ "early_stopping_patience": 3,
+ "early_stopping_threshold": 0.0
+ },
+ "attributes": {
+ "early_stopping_patience_counter": 0
+ }
+ },
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 4.276946192165241e+19,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
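
For anyone reconstructing the run, the state above pins down a few training choices: logging every 10 steps, max_steps of 819, a train batch size of 16, early stopping with patience 3, and a best checkpoint selected on eval_accuracy (0.631 at step 531); the sentinel num_train_epochs value and the fractional epochs typically appear when training is driven by max_steps over a dataset whose length the Trainer cannot determine. The sketch below is a hedged approximation of a Trainer setup consistent with those values; the dummy dataset, number of labels, learning rate, and warmup ratio are placeholders or inferences, not values read from this commit.

```python
# Hedged sketch of a Trainer setup consistent with the recorded trainer_state.json:
# logging_steps=10, max_steps=819, train batch size 16, early stopping patience 3,
# and the best checkpoint selected on eval_accuracy. The dummy dataset, NUM_LABELS,
# learning rate, and warmup ratio are placeholders, not values taken from this commit.
import numpy as np
import torch
from torch.utils.data import Dataset
from transformers import (
    EarlyStoppingCallback,
    Trainer,
    TrainingArguments,
    VideoMAEForVideoClassification,
)

NUM_LABELS = 9  # placeholder: the real class count is not recorded in this commit


class DummyClips(Dataset):
    """Stand-in dataset yielding random 16-frame clips with integer labels."""

    def __init__(self, n: int = 8):
        self.n = n

    def __len__(self) -> int:
        return self.n

    def __getitem__(self, idx: int) -> dict:
        return {
            "pixel_values": torch.randn(16, 3, 224, 224),  # (frames, channels, H, W)
            "labels": torch.tensor(idx % NUM_LABELS),
        }


def compute_metrics(eval_pred):
    # Reports "accuracy", which the Trainer exposes as eval_accuracy.
    preds = np.argmax(eval_pred.predictions, axis=1)
    return {"accuracy": float((preds == eval_pred.label_ids).mean())}


model = VideoMAEForVideoClassification.from_pretrained(
    "MCG-NJU/videomae-large",
    num_labels=NUM_LABELS,  # a fresh classification head is initialised for fine-tuning
)

args = TrainingArguments(
    output_dir="videomae-large-finetuned-right-hand-conflab-v3",
    per_device_train_batch_size=16,   # matches "train_batch_size": 16 in the state
    per_device_eval_batch_size=16,
    learning_rate=5e-5,               # assumption: the logged peak LR is ~4.95e-5 after warmup
    warmup_ratio=0.1,                 # assumption consistent with the warmup seen in the log
    max_steps=819,                    # matches "max_steps": 819
    logging_steps=10,                 # matches "logging_steps": 10
    eval_strategy="epoch",            # the logged evaluations land at epoch boundaries
    save_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="eval_accuracy",
    report_to="none",
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=DummyClips(),       # replace with the real training split
    eval_dataset=DummyClips(),        # replace with the real validation split
    compute_metrics=compute_metrics,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],  # patience 3, as recorded
)

trainer.train()
```

The state also records eval_steps and save_steps of 500, which are the step-based defaults; since the logged evaluations fall at steps 59, 118, 177, ... 590, per-epoch evaluation and saving are assumed in the sketch.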