selmamalak committed
Commit
60d4103
1 Parent(s): 439bbfd

End of training

Files changed (5)
  1. README.md +4 -4
  2. all_results.json +16 -0
  3. eval_results.json +11 -0
  4. train_results.json +8 -0
  5. trainer_state.json +591 -0
README.md CHANGED
@@ -21,11 +21,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [facebook/deit-base-patch16-224](https://huggingface.co/facebook/deit-base-patch16-224) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.0885
+- Loss: 0.0979
 - Accuracy: 0.9622
-- Precision: 0.9398
-- Recall: 0.9681
-- F1: 0.9526
+- Precision: 0.9531
+- Recall: 0.9562
+- F1: 0.9546
 
 ## Model description
 
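The README numbers above come from the final evaluation of a LoRA fine-tune (see the checkpoint path in trainer_state.json below). A minimal inference sketch, assuming the adapter is published under a repo id like `selmamalak/deit-base-patch16-224-finetuned-lora-medmnistv2` and that the repository stores a PEFT/LoRA adapter rather than full model weights; the label count is a placeholder:

```python
from transformers import AutoImageProcessor, AutoModelForImageClassification
from peft import PeftModel
from PIL import Image
import torch

base_id = "facebook/deit-base-patch16-224"
# Hypothetical adapter repo id inferred from the checkpoint path in trainer_state.json; adjust as needed.
adapter_id = "selmamalak/deit-base-patch16-224-finetuned-lora-medmnistv2"

processor = AutoImageProcessor.from_pretrained(base_id)
base_model = AutoModelForImageClassification.from_pretrained(
    base_id,
    num_labels=8,  # placeholder: set to the class count of the MedMNIST subset actually used
    ignore_mismatched_sizes=True,
)
model = PeftModel.from_pretrained(base_model, adapter_id)  # attach the LoRA adapter to the base model
model.eval()

image = Image.open("example.png").convert("RGB")  # any RGB image from the target domain
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print("predicted class id:", logits.argmax(-1).item())
```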
all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "epoch": 9.88,
+ "eval_accuracy": 0.9621993127147767,
+ "eval_f1": 0.9546086216676121,
+ "eval_loss": 0.09792790561914444,
+ "eval_precision": 0.9530929801998389,
+ "eval_recall": 0.9561616937721433,
+ "eval_runtime": 6.2606,
+ "eval_samples_per_second": 92.963,
+ "eval_steps_per_second": 5.91,
+ "total_flos": 3.1439051980091965e+18,
+ "train_loss": 0.1791202223490155,
+ "train_runtime": 695.1527,
+ "train_samples_per_second": 58.649,
+ "train_steps_per_second": 0.906
+ }
eval_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+ "epoch": 9.88,
+ "eval_accuracy": 0.9621993127147767,
+ "eval_f1": 0.9546086216676121,
+ "eval_loss": 0.09792790561914444,
+ "eval_precision": 0.9530929801998389,
+ "eval_recall": 0.9561616937721433,
+ "eval_runtime": 6.2606,
+ "eval_samples_per_second": 92.963,
+ "eval_steps_per_second": 5.91
+ }
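eval_results.json holds the metrics returned by the Trainer's `compute_metrics` callback, with the `eval_` prefix added automatically. A sketch of a callback that would produce these keys, assuming macro averaging (the averaging mode actually used is not recorded here):

```python
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def compute_metrics(eval_pred):
    """Return the metric names seen in eval_results.json (the Trainer adds the eval_ prefix)."""
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    # "macro" averaging is an assumption; the training script may have used a different setting.
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="macro", zero_division=0
    )
    return {
        "accuracy": accuracy_score(labels, preds),
        "precision": precision,
        "recall": recall,
        "f1": f1,
    }
```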
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 9.88,
+ "total_flos": 3.1439051980091965e+18,
+ "train_loss": 0.1791202223490155,
+ "train_runtime": 695.1527,
+ "train_samples_per_second": 58.649,
+ "train_steps_per_second": 0.906
+ }
trainer_state.json ADDED
@@ -0,0 +1,591 @@
+ {
+ "best_metric": 0.975107296137339,
+ "best_model_checkpoint": "deit-base-patch16-224-finetuned-lora-medmnistv2/checkpoint-446",
+ "epoch": 9.882352941176471,
+ "eval_steps": 500,
+ "global_step": 630,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.16,
+ "grad_norm": 1.1300164461135864,
+ "learning_rate": 0.004920634920634921,
+ "loss": 0.4737,
+ "step": 10
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 0.7213650941848755,
+ "learning_rate": 0.004841269841269842,
+ "loss": 0.6061,
+ "step": 20
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 3.0624804496765137,
+ "learning_rate": 0.0047619047619047615,
+ "loss": 0.2754,
+ "step": 30
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 0.509834885597229,
+ "learning_rate": 0.004682539682539683,
+ "loss": 0.2462,
+ "step": 40
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 1.2371031045913696,
+ "learning_rate": 0.004603174603174603,
+ "loss": 0.205,
+ "step": 50
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 1.2106962203979492,
+ "learning_rate": 0.004523809523809524,
+ "loss": 0.2556,
+ "step": 60
+ },
+ {
+ "epoch": 0.99,
+ "eval_accuracy": 0.9184549356223176,
+ "eval_f1": 0.8822492390220755,
+ "eval_loss": 0.20187422633171082,
+ "eval_precision": 0.9446022031823746,
+ "eval_recall": 0.8469420502475702,
+ "eval_runtime": 11.2928,
+ "eval_samples_per_second": 103.163,
+ "eval_steps_per_second": 6.464,
+ "step": 63
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 2.5079874992370605,
+ "learning_rate": 0.0044444444444444444,
+ "loss": 0.282,
+ "step": 70
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 0.6824792623519897,
+ "learning_rate": 0.004365079365079365,
+ "loss": 0.2291,
+ "step": 80
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 1.294320821762085,
+ "learning_rate": 0.004285714285714286,
+ "loss": 0.208,
+ "step": 90
+ },
+ {
+ "epoch": 1.57,
+ "grad_norm": 2.1044976711273193,
+ "learning_rate": 0.004206349206349207,
+ "loss": 0.2288,
+ "step": 100
+ },
+ {
+ "epoch": 1.73,
+ "grad_norm": 0.8730837106704712,
+ "learning_rate": 0.0041269841269841265,
+ "loss": 0.3071,
+ "step": 110
+ },
+ {
+ "epoch": 1.88,
+ "grad_norm": 1.6219590902328491,
+ "learning_rate": 0.004047619047619048,
+ "loss": 0.2302,
+ "step": 120
+ },
+ {
+ "epoch": 1.99,
+ "eval_accuracy": 0.9613733905579399,
+ "eval_f1": 0.9513881612438695,
+ "eval_loss": 0.10982219129800797,
+ "eval_precision": 0.9396367521367521,
+ "eval_recall": 0.965355461825295,
+ "eval_runtime": 11.2742,
+ "eval_samples_per_second": 103.333,
+ "eval_steps_per_second": 6.475,
+ "step": 127
+ },
+ {
+ "epoch": 2.04,
+ "grad_norm": 1.7205462455749512,
+ "learning_rate": 0.003968253968253968,
+ "loss": 0.214,
+ "step": 130
+ },
+ {
+ "epoch": 2.2,
+ "grad_norm": 0.4880439341068268,
+ "learning_rate": 0.003888888888888889,
+ "loss": 0.2757,
+ "step": 140
+ },
+ {
+ "epoch": 2.35,
+ "grad_norm": 1.5420565605163574,
+ "learning_rate": 0.0038095238095238095,
+ "loss": 0.2314,
+ "step": 150
+ },
+ {
+ "epoch": 2.51,
+ "grad_norm": 0.872795820236206,
+ "learning_rate": 0.0037301587301587303,
+ "loss": 0.2689,
+ "step": 160
+ },
+ {
+ "epoch": 2.67,
+ "grad_norm": 1.3670737743377686,
+ "learning_rate": 0.0036507936507936506,
+ "loss": 0.1787,
+ "step": 170
+ },
+ {
+ "epoch": 2.82,
+ "grad_norm": 0.9697684049606323,
+ "learning_rate": 0.0035714285714285718,
+ "loss": 0.1962,
+ "step": 180
+ },
+ {
+ "epoch": 2.98,
+ "grad_norm": 0.6434329152107239,
+ "learning_rate": 0.003492063492063492,
+ "loss": 0.2258,
+ "step": 190
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.9622317596566523,
+ "eval_f1": 0.9496135003577972,
+ "eval_loss": 0.11506476998329163,
+ "eval_precision": 0.9640835246121029,
+ "eval_recall": 0.9372077296900788,
+ "eval_runtime": 11.448,
+ "eval_samples_per_second": 101.764,
+ "eval_steps_per_second": 6.377,
+ "step": 191
+ },
+ {
+ "epoch": 3.14,
+ "grad_norm": 0.3758108913898468,
+ "learning_rate": 0.003412698412698413,
+ "loss": 0.2048,
+ "step": 200
+ },
+ {
+ "epoch": 3.29,
+ "grad_norm": 0.3552361726760864,
+ "learning_rate": 0.003333333333333333,
+ "loss": 0.1136,
+ "step": 210
+ },
+ {
+ "epoch": 3.45,
+ "grad_norm": 0.3793877959251404,
+ "learning_rate": 0.0032539682539682543,
+ "loss": 0.2048,
+ "step": 220
+ },
+ {
+ "epoch": 3.61,
+ "grad_norm": 0.6903932094573975,
+ "learning_rate": 0.0031746031746031746,
+ "loss": 0.2696,
+ "step": 230
+ },
+ {
+ "epoch": 3.76,
+ "grad_norm": 0.628604531288147,
+ "learning_rate": 0.0030952380952380953,
+ "loss": 0.1585,
+ "step": 240
+ },
+ {
+ "epoch": 3.92,
+ "grad_norm": 0.7250388860702515,
+ "learning_rate": 0.0030158730158730157,
+ "loss": 0.1465,
+ "step": 250
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.9725321888412017,
+ "eval_f1": 0.9643166172765768,
+ "eval_loss": 0.07334637641906738,
+ "eval_precision": 0.9653162002256106,
+ "eval_recall": 0.9633286722904824,
+ "eval_runtime": 11.4298,
+ "eval_samples_per_second": 101.927,
+ "eval_steps_per_second": 6.387,
+ "step": 255
+ },
+ {
+ "epoch": 4.08,
+ "grad_norm": 0.4553186893463135,
+ "learning_rate": 0.002936507936507937,
+ "loss": 0.1609,
+ "step": 260
+ },
+ {
+ "epoch": 4.24,
+ "grad_norm": 0.3900231420993805,
+ "learning_rate": 0.002857142857142857,
+ "loss": 0.1517,
+ "step": 270
+ },
+ {
+ "epoch": 4.39,
+ "grad_norm": 0.45594316720962524,
+ "learning_rate": 0.002777777777777778,
+ "loss": 0.1705,
+ "step": 280
+ },
+ {
+ "epoch": 4.55,
+ "grad_norm": 0.46610358357429504,
+ "learning_rate": 0.002698412698412698,
+ "loss": 0.1451,
+ "step": 290
+ },
+ {
+ "epoch": 4.71,
+ "grad_norm": 0.557108461856842,
+ "learning_rate": 0.0026190476190476194,
+ "loss": 0.1582,
+ "step": 300
+ },
+ {
+ "epoch": 4.86,
+ "grad_norm": 0.4486098885536194,
+ "learning_rate": 0.0025396825396825397,
+ "loss": 0.1763,
+ "step": 310
+ },
+ {
+ "epoch": 4.99,
+ "eval_accuracy": 0.9725321888412017,
+ "eval_f1": 0.9639271421162228,
+ "eval_loss": 0.07630692422389984,
+ "eval_precision": 0.9702764832336926,
+ "eval_recall": 0.9580085885445321,
+ "eval_runtime": 11.1034,
+ "eval_samples_per_second": 104.923,
+ "eval_steps_per_second": 6.575,
+ "step": 318
+ },
+ {
+ "epoch": 5.02,
+ "grad_norm": 0.5927975177764893,
+ "learning_rate": 0.0024603174603174604,
+ "loss": 0.1563,
+ "step": 320
+ },
+ {
+ "epoch": 5.18,
+ "grad_norm": 0.5728869438171387,
+ "learning_rate": 0.0023809523809523807,
+ "loss": 0.1666,
+ "step": 330
+ },
+ {
+ "epoch": 5.33,
+ "grad_norm": 0.34924712777137756,
+ "learning_rate": 0.0023015873015873015,
+ "loss": 0.1425,
+ "step": 340
+ },
+ {
+ "epoch": 5.49,
+ "grad_norm": 0.487810343503952,
+ "learning_rate": 0.0022222222222222222,
+ "loss": 0.1157,
+ "step": 350
+ },
+ {
+ "epoch": 5.65,
+ "grad_norm": 0.5470776557922363,
+ "learning_rate": 0.002142857142857143,
+ "loss": 0.1503,
+ "step": 360
+ },
+ {
+ "epoch": 5.8,
+ "grad_norm": 0.9323746562004089,
+ "learning_rate": 0.0020634920634920633,
+ "loss": 0.1813,
+ "step": 370
+ },
+ {
+ "epoch": 5.96,
+ "grad_norm": 0.4948749542236328,
+ "learning_rate": 0.001984126984126984,
+ "loss": 0.1627,
+ "step": 380
+ },
+ {
+ "epoch": 5.99,
+ "eval_accuracy": 0.9570815450643777,
+ "eval_f1": 0.9465564950602603,
+ "eval_loss": 0.10568992793560028,
+ "eval_precision": 0.9315479562643401,
+ "eval_recall": 0.965643911608289,
+ "eval_runtime": 11.3843,
+ "eval_samples_per_second": 102.334,
+ "eval_steps_per_second": 6.412,
+ "step": 382
+ },
+ {
+ "epoch": 6.12,
+ "grad_norm": 0.5047426819801331,
+ "learning_rate": 0.0019047619047619048,
+ "loss": 0.1335,
+ "step": 390
+ },
+ {
+ "epoch": 6.27,
+ "grad_norm": 0.3176202178001404,
+ "learning_rate": 0.0018253968253968253,
+ "loss": 0.0897,
+ "step": 400
+ },
+ {
+ "epoch": 6.43,
+ "grad_norm": 0.42543891072273254,
+ "learning_rate": 0.001746031746031746,
+ "loss": 0.1356,
+ "step": 410
+ },
+ {
+ "epoch": 6.59,
+ "grad_norm": 0.35515210032463074,
+ "learning_rate": 0.0016666666666666666,
+ "loss": 0.1324,
+ "step": 420
+ },
+ {
+ "epoch": 6.75,
+ "grad_norm": 0.900916337966919,
+ "learning_rate": 0.0015873015873015873,
+ "loss": 0.1435,
+ "step": 430
+ },
+ {
+ "epoch": 6.9,
+ "grad_norm": 0.6653035283088684,
+ "learning_rate": 0.0015079365079365078,
+ "loss": 0.1509,
+ "step": 440
+ },
+ {
+ "epoch": 7.0,
+ "eval_accuracy": 0.975107296137339,
+ "eval_f1": 0.9680345077825117,
+ "eval_loss": 0.07007680833339691,
+ "eval_precision": 0.9637800176993805,
+ "eval_recall": 0.9725189498135582,
+ "eval_runtime": 12.1771,
+ "eval_samples_per_second": 95.672,
+ "eval_steps_per_second": 5.995,
+ "step": 446
+ },
+ {
+ "epoch": 7.06,
+ "grad_norm": 0.31296506524086,
+ "learning_rate": 0.0014285714285714286,
+ "loss": 0.1322,
+ "step": 450
+ },
+ {
+ "epoch": 7.22,
+ "grad_norm": 0.6464645862579346,
+ "learning_rate": 0.001349206349206349,
+ "loss": 0.1457,
+ "step": 460
+ },
+ {
+ "epoch": 7.37,
+ "grad_norm": 1.0385708808898926,
+ "learning_rate": 0.0012698412698412698,
+ "loss": 0.1278,
+ "step": 470
+ },
+ {
+ "epoch": 7.53,
+ "grad_norm": 0.6095046401023865,
+ "learning_rate": 0.0011904761904761904,
+ "loss": 0.1168,
+ "step": 480
+ },
+ {
+ "epoch": 7.69,
+ "grad_norm": 0.7724207639694214,
+ "learning_rate": 0.0011111111111111111,
+ "loss": 0.1182,
+ "step": 490
+ },
+ {
+ "epoch": 7.84,
+ "grad_norm": 0.4225674569606781,
+ "learning_rate": 0.0010317460317460316,
+ "loss": 0.1325,
+ "step": 500
+ },
+ {
+ "epoch": 8.0,
+ "grad_norm": 0.47218218445777893,
+ "learning_rate": 0.0009523809523809524,
+ "loss": 0.1209,
+ "step": 510
+ },
+ {
+ "epoch": 8.0,
+ "eval_accuracy": 0.9570815450643777,
+ "eval_f1": 0.9465564950602603,
+ "eval_loss": 0.10465892404317856,
+ "eval_precision": 0.9315479562643401,
+ "eval_recall": 0.965643911608289,
+ "eval_runtime": 11.3686,
+ "eval_samples_per_second": 102.475,
+ "eval_steps_per_second": 6.421,
+ "step": 510
+ },
+ {
+ "epoch": 8.16,
+ "grad_norm": 0.296578973531723,
+ "learning_rate": 0.000873015873015873,
+ "loss": 0.1006,
+ "step": 520
+ },
+ {
+ "epoch": 8.31,
+ "grad_norm": 0.9204122424125671,
+ "learning_rate": 0.0007936507936507937,
+ "loss": 0.1202,
+ "step": 530
+ },
+ {
+ "epoch": 8.47,
+ "grad_norm": 0.5195232629776001,
+ "learning_rate": 0.0007142857142857143,
+ "loss": 0.1052,
+ "step": 540
+ },
+ {
+ "epoch": 8.63,
+ "grad_norm": 0.29583191871643066,
+ "learning_rate": 0.0006349206349206349,
+ "loss": 0.1285,
+ "step": 550
+ },
+ {
+ "epoch": 8.78,
+ "grad_norm": 0.4851154386997223,
+ "learning_rate": 0.0005555555555555556,
+ "loss": 0.1271,
+ "step": 560
+ },
+ {
+ "epoch": 8.94,
+ "grad_norm": 0.47467851638793945,
+ "learning_rate": 0.0004761904761904762,
+ "loss": 0.0961,
+ "step": 570
+ },
+ {
+ "epoch": 8.99,
+ "eval_accuracy": 0.9733905579399141,
+ "eval_f1": 0.966177521420591,
+ "eval_loss": 0.07205679267644882,
+ "eval_precision": 0.9577373642293522,
+ "eval_recall": 0.975613576624488,
+ "eval_runtime": 11.2412,
+ "eval_samples_per_second": 103.637,
+ "eval_steps_per_second": 6.494,
+ "step": 573
+ },
+ {
+ "epoch": 9.1,
+ "grad_norm": 0.29292458295822144,
+ "learning_rate": 0.0003968253968253968,
+ "loss": 0.096,
+ "step": 580
+ },
+ {
+ "epoch": 9.25,
+ "grad_norm": 0.513671338558197,
+ "learning_rate": 0.00031746031746031746,
+ "loss": 0.0871,
+ "step": 590
+ },
+ {
+ "epoch": 9.41,
+ "grad_norm": 0.8143641948699951,
+ "learning_rate": 0.0002380952380952381,
+ "loss": 0.1297,
+ "step": 600
+ },
+ {
+ "epoch": 9.57,
+ "grad_norm": 0.4731471836566925,
+ "learning_rate": 0.00015873015873015873,
+ "loss": 0.1175,
+ "step": 610
+ },
+ {
+ "epoch": 9.73,
+ "grad_norm": 0.30293089151382446,
+ "learning_rate": 7.936507936507937e-05,
+ "loss": 0.1166,
+ "step": 620
+ },
+ {
+ "epoch": 9.88,
+ "grad_norm": 0.3364422023296356,
+ "learning_rate": 0.0,
+ "loss": 0.1063,
+ "step": 630
+ },
+ {
+ "epoch": 9.88,
+ "eval_accuracy": 0.9622317596566523,
+ "eval_f1": 0.9526072485207102,
+ "eval_loss": 0.08854742348194122,
+ "eval_precision": 0.9398335630995462,
+ "eval_recall": 0.9680642154165903,
+ "eval_runtime": 11.4305,
+ "eval_samples_per_second": 101.92,
+ "eval_steps_per_second": 6.386,
+ "step": 630
+ },
+ {
+ "epoch": 9.88,
+ "step": 630,
+ "total_flos": 3.1439051980091965e+18,
+ "train_loss": 0.1791202223490155,
+ "train_runtime": 695.1527,
+ "train_samples_per_second": 58.649,
+ "train_steps_per_second": 0.906
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 630,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 500,
+ "total_flos": 3.1439051980091965e+18,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
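log_history above mixes per-step training-loss rows with per-epoch evaluation rows. A small sketch, assuming trainer_state.json is read from the working directory, that pulls out the evaluation rows and the best checkpoint recorded by the Trainer:

```python
import json

# Read the trainer state added in this commit and summarize the evaluation history.
with open("trainer_state.json") as f:
    state = json.load(f)

evals = [e for e in state["log_history"] if "eval_accuracy" in e]
for e in evals:
    print(f"epoch {e['epoch']:>5}  step {e['step']:>3}  "
          f"acc {e['eval_accuracy']:.4f}  f1 {e['eval_f1']:.4f}  loss {e['eval_loss']:.4f}")

# The best checkpoint recorded by the Trainer (accuracy 0.9751 at step 446 in this run).
print("best:", state["best_metric"], state["best_model_checkpoint"])
```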