selmamalak committed on
Commit dd053e9
1 Parent(s): 2e62a75

End of training

Files changed (5)
  1. README.md +5 -5
  2. all_results.json +16 -0
  3. eval_results.json +11 -0
  4. train_results.json +8 -0
  5. trainer_state.json +591 -0
README.md CHANGED
@@ -21,11 +21,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on an unknown dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.2739
- - Accuracy: 0.8927
- - Precision: 0.8528
- - Recall: 0.8912
- - F1: 0.8685
+ - Loss: 0.2620
+ - Accuracy: 0.9107
+ - Precision: 0.8923
+ - Recall: 0.8923
+ - F1: 0.8923
 
 ## Model description
 
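The updated metrics above (accuracy, precision, recall, F1) are the kind of values a `compute_metrics` callback returns to the Hugging Face `Trainer` at evaluation time. The sketch below is illustrative rather than this repo's actual training script; the function name, the macro averaging, and the dummy data are assumptions.

```python
# Minimal sketch (not necessarily this repo's exact training code): a
# compute_metrics callback of the kind that produces the accuracy, precision,
# recall and F1 values reported above. Macro averaging is an assumption.
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support


def compute_metrics(eval_pred):
    """Turn Trainer eval predictions into the metrics shown in the model card."""
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="macro", zero_division=0
    )
    return {
        "accuracy": accuracy_score(labels, preds),
        "precision": precision,
        "recall": recall,
        "f1": f1,
    }


if __name__ == "__main__":
    # Standalone check with dummy predictions (3 samples, 2 classes).
    dummy_logits = np.array([[2.0, 0.1], [0.2, 1.5], [1.0, 0.9]])
    dummy_labels = np.array([0, 1, 1])
    print(compute_metrics((dummy_logits, dummy_labels)))
```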
all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "epoch": 9.88,
+ "eval_accuracy": 0.9106529209621993,
+ "eval_f1": 0.8923464378708328,
+ "eval_loss": 0.26196494698524475,
+ "eval_precision": 0.8923464378708328,
+ "eval_recall": 0.8923464378708328,
+ "eval_runtime": 5.9035,
+ "eval_samples_per_second": 98.585,
+ "eval_steps_per_second": 6.267,
+ "total_flos": 3.142570654487126e+18,
+ "train_loss": 0.3948629246817695,
+ "train_runtime": 760.2155,
+ "train_samples_per_second": 53.63,
+ "train_steps_per_second": 0.829
+ }
eval_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+ "epoch": 9.88,
+ "eval_accuracy": 0.9106529209621993,
+ "eval_f1": 0.8923464378708328,
+ "eval_loss": 0.26196494698524475,
+ "eval_precision": 0.8923464378708328,
+ "eval_recall": 0.8923464378708328,
+ "eval_runtime": 5.9035,
+ "eval_samples_per_second": 98.585,
+ "eval_steps_per_second": 6.267
+ }
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 9.88,
+ "total_flos": 3.142570654487126e+18,
+ "train_loss": 0.3948629246817695,
+ "train_runtime": 760.2155,
+ "train_samples_per_second": 53.63,
+ "train_steps_per_second": 0.829
+ }
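As a quick sanity check on the numbers above, `train_steps_per_second * train_runtime` should land close to the 630 optimizer steps recorded in trainer_state.json below. A minimal sketch:

```python
# Quick consistency check on train_results.json (a sketch; the numbers are
# taken directly from this commit).
train_runtime = 760.2155            # seconds
train_steps_per_second = 0.829
approx_steps = train_steps_per_second * train_runtime
print(round(approx_steps))          # ~630, matching global_step/max_steps in trainer_state.json
```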
trainer_state.json ADDED
@@ -0,0 +1,591 @@
+ {
+ "best_metric": 0.9141630901287554,
+ "best_model_checkpoint": "beit-base-patch16-224-pt22k-ft22k-finetuned-lora-medmnistv2/checkpoint-63",
+ "epoch": 9.882352941176471,
+ "eval_steps": 500,
+ "global_step": 630,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.16,
+ "grad_norm": 2.7611124515533447,
+ "learning_rate": 0.004920634920634921,
+ "loss": 0.6696,
+ "step": 10
+ },
+ {
+ "epoch": 0.31,
+ "grad_norm": 3.1516690254211426,
+ "learning_rate": 0.004841269841269842,
+ "loss": 0.5037,
+ "step": 20
+ },
+ {
+ "epoch": 0.47,
+ "grad_norm": 1.6632912158966064,
+ "learning_rate": 0.0047619047619047615,
+ "loss": 0.5343,
+ "step": 30
+ },
+ {
+ "epoch": 0.63,
+ "grad_norm": 15.660941123962402,
+ "learning_rate": 0.004682539682539683,
+ "loss": 0.7623,
+ "step": 40
+ },
+ {
+ "epoch": 0.78,
+ "grad_norm": 1.5720008611679077,
+ "learning_rate": 0.004603174603174603,
+ "loss": 0.5904,
+ "step": 50
+ },
+ {
+ "epoch": 0.94,
+ "grad_norm": 3.288884401321411,
+ "learning_rate": 0.004523809523809524,
+ "loss": 0.4775,
+ "step": 60
+ },
+ {
+ "epoch": 0.99,
+ "eval_accuracy": 0.9141630901287554,
+ "eval_f1": 0.8903401432261271,
+ "eval_loss": 0.22644232213497162,
+ "eval_precision": 0.884993651288677,
+ "eval_recall": 0.8961752704933064,
+ "eval_runtime": 12.2128,
+ "eval_samples_per_second": 95.391,
+ "eval_steps_per_second": 5.977,
+ "step": 63
+ },
+ {
+ "epoch": 1.1,
+ "grad_norm": 2.6683709621429443,
+ "learning_rate": 0.0044603174603174605,
+ "loss": 0.4832,
+ "step": 70
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 1.2820926904678345,
+ "learning_rate": 0.004380952380952381,
+ "loss": 0.5064,
+ "step": 80
+ },
+ {
+ "epoch": 1.41,
+ "grad_norm": 5.157268047332764,
+ "learning_rate": 0.004301587301587302,
+ "loss": 0.4561,
+ "step": 90
+ },
+ {
+ "epoch": 1.57,
+ "grad_norm": 0.9834429025650024,
+ "learning_rate": 0.004222222222222223,
+ "loss": 0.3981,
+ "step": 100
+ },
+ {
+ "epoch": 1.73,
+ "grad_norm": 7.78388786315918,
+ "learning_rate": 0.0041428571428571434,
+ "loss": 0.5016,
+ "step": 110
+ },
+ {
+ "epoch": 1.88,
+ "grad_norm": 12.203431129455566,
+ "learning_rate": 0.004063492063492063,
+ "loss": 0.7117,
+ "step": 120
+ },
+ {
+ "epoch": 1.99,
+ "eval_accuracy": 0.7390557939914163,
+ "eval_f1": 0.4249753208292202,
+ "eval_loss": 0.4007842242717743,
+ "eval_precision": 0.36952789699570815,
+ "eval_recall": 0.5,
+ "eval_runtime": 12.6185,
+ "eval_samples_per_second": 92.325,
+ "eval_steps_per_second": 5.785,
+ "step": 127
+ },
+ {
+ "epoch": 2.04,
+ "grad_norm": 9.85489273071289,
+ "learning_rate": 0.003992063492063492,
+ "loss": 0.4575,
+ "step": 130
+ },
+ {
+ "epoch": 2.2,
+ "grad_norm": 2.926109552383423,
+ "learning_rate": 0.003912698412698413,
+ "loss": 0.4562,
+ "step": 140
+ },
+ {
+ "epoch": 2.35,
+ "grad_norm": 0.5027350187301636,
+ "learning_rate": 0.0038333333333333336,
+ "loss": 0.417,
+ "step": 150
+ },
+ {
+ "epoch": 2.51,
+ "grad_norm": 0.8259086608886719,
+ "learning_rate": 0.003753968253968254,
+ "loss": 0.3953,
+ "step": 160
+ },
+ {
+ "epoch": 2.67,
+ "grad_norm": 2.8119688034057617,
+ "learning_rate": 0.003674603174603175,
+ "loss": 0.4175,
+ "step": 170
+ },
+ {
+ "epoch": 2.82,
+ "grad_norm": 2.306551218032837,
+ "learning_rate": 0.0035952380952380954,
+ "loss": 0.4226,
+ "step": 180
+ },
+ {
+ "epoch": 2.98,
+ "grad_norm": 6.00565242767334,
+ "learning_rate": 0.003515873015873016,
+ "loss": 0.4115,
+ "step": 190
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.8154506437768241,
+ "eval_f1": 0.7957275475643482,
+ "eval_loss": 0.43578293919563293,
+ "eval_precision": 0.7870816708492059,
+ "eval_recall": 0.8645050125313283,
+ "eval_runtime": 12.1326,
+ "eval_samples_per_second": 96.022,
+ "eval_steps_per_second": 6.017,
+ "step": 191
+ },
+ {
+ "epoch": 3.14,
+ "grad_norm": 1.8762693405151367,
+ "learning_rate": 0.0034365079365079364,
+ "loss": 0.3876,
+ "step": 200
+ },
+ {
+ "epoch": 3.29,
+ "grad_norm": 1.1961586475372314,
+ "learning_rate": 0.003357142857142857,
+ "loss": 0.3232,
+ "step": 210
+ },
+ {
+ "epoch": 3.45,
+ "grad_norm": 0.5015142560005188,
+ "learning_rate": 0.003277777777777778,
+ "loss": 0.4061,
+ "step": 220
+ },
+ {
+ "epoch": 3.61,
+ "grad_norm": 2.750140428543091,
+ "learning_rate": 0.003198412698412698,
+ "loss": 0.372,
+ "step": 230
+ },
+ {
+ "epoch": 3.76,
+ "grad_norm": 2.029989242553711,
+ "learning_rate": 0.003119047619047619,
+ "loss": 0.3498,
+ "step": 240
+ },
+ {
+ "epoch": 3.92,
+ "grad_norm": 2.366288661956787,
+ "learning_rate": 0.0030396825396825397,
+ "loss": 0.3631,
+ "step": 250
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.8798283261802575,
+ "eval_f1": 0.8517509925684618,
+ "eval_loss": 0.30910125374794006,
+ "eval_precision": 0.8380756825290672,
+ "eval_recall": 0.8708184332783178,
+ "eval_runtime": 12.051,
+ "eval_samples_per_second": 96.673,
+ "eval_steps_per_second": 6.058,
+ "step": 255
+ },
+ {
+ "epoch": 4.08,
+ "grad_norm": 0.5986310839653015,
+ "learning_rate": 0.0029603174603174604,
+ "loss": 0.3413,
+ "step": 260
+ },
+ {
+ "epoch": 4.24,
+ "grad_norm": 4.78090763092041,
+ "learning_rate": 0.0028809523809523807,
+ "loss": 0.384,
+ "step": 270
+ },
+ {
+ "epoch": 4.39,
+ "grad_norm": 3.392075300216675,
+ "learning_rate": 0.0028015873015873015,
+ "loss": 0.3579,
+ "step": 280
+ },
+ {
+ "epoch": 4.55,
+ "grad_norm": 1.982884407043457,
+ "learning_rate": 0.0027222222222222222,
+ "loss": 0.3397,
+ "step": 290
+ },
+ {
+ "epoch": 4.71,
+ "grad_norm": 4.884500980377197,
+ "learning_rate": 0.002642857142857143,
+ "loss": 0.3609,
+ "step": 300
+ },
+ {
+ "epoch": 4.86,
+ "grad_norm": 0.9579557776451111,
+ "learning_rate": 0.0025634920634920633,
+ "loss": 0.3794,
+ "step": 310
+ },
+ {
+ "epoch": 4.99,
+ "eval_accuracy": 0.8798283261802575,
+ "eval_f1": 0.8494965377744312,
+ "eval_loss": 0.2801721394062042,
+ "eval_precision": 0.8392582443135881,
+ "eval_recall": 0.8623062992847974,
+ "eval_runtime": 12.197,
+ "eval_samples_per_second": 95.516,
+ "eval_steps_per_second": 5.985,
+ "step": 318
+ },
+ {
+ "epoch": 5.02,
+ "grad_norm": 1.1442433595657349,
+ "learning_rate": 0.002484126984126984,
+ "loss": 0.3566,
+ "step": 320
+ },
+ {
+ "epoch": 5.18,
+ "grad_norm": 4.931128978729248,
+ "learning_rate": 0.0024047619047619048,
+ "loss": 0.4874,
+ "step": 330
+ },
+ {
+ "epoch": 5.33,
+ "grad_norm": 2.474273443222046,
+ "learning_rate": 0.0023253968253968255,
+ "loss": 0.375,
+ "step": 340
+ },
+ {
+ "epoch": 5.49,
+ "grad_norm": 2.144062042236328,
+ "learning_rate": 0.0022460317460317463,
+ "loss": 0.368,
+ "step": 350
+ },
+ {
+ "epoch": 5.65,
+ "grad_norm": 0.6386366486549377,
+ "learning_rate": 0.002166666666666667,
+ "loss": 0.3755,
+ "step": 360
+ },
+ {
+ "epoch": 5.8,
+ "grad_norm": 3.0398051738739014,
+ "learning_rate": 0.0020873015873015873,
+ "loss": 0.3248,
+ "step": 370
+ },
+ {
+ "epoch": 5.96,
+ "grad_norm": 2.5777676105499268,
+ "learning_rate": 0.002007936507936508,
+ "loss": 0.3713,
+ "step": 380
+ },
+ {
+ "epoch": 5.99,
+ "eval_accuracy": 0.8772532188841202,
+ "eval_f1": 0.8449126373928156,
+ "eval_loss": 0.2805176377296448,
+ "eval_precision": 0.8370533441922019,
+ "eval_recall": 0.8541800385109115,
+ "eval_runtime": 11.9639,
+ "eval_samples_per_second": 97.376,
+ "eval_steps_per_second": 6.102,
+ "step": 382
+ },
+ {
+ "epoch": 6.12,
+ "grad_norm": 0.9707914590835571,
+ "learning_rate": 0.0019285714285714288,
+ "loss": 0.3227,
+ "step": 390
+ },
+ {
+ "epoch": 6.27,
+ "grad_norm": 1.1869500875473022,
+ "learning_rate": 0.0018492063492063493,
+ "loss": 0.3271,
+ "step": 400
+ },
+ {
+ "epoch": 6.43,
+ "grad_norm": 0.6983945369720459,
+ "learning_rate": 0.00176984126984127,
+ "loss": 0.3709,
+ "step": 410
+ },
+ {
+ "epoch": 6.59,
+ "grad_norm": 1.19561767578125,
+ "learning_rate": 0.0016904761904761906,
+ "loss": 0.3629,
+ "step": 420
+ },
+ {
+ "epoch": 6.75,
+ "grad_norm": 1.7224721908569336,
+ "learning_rate": 0.0016111111111111113,
+ "loss": 0.3182,
+ "step": 430
+ },
+ {
+ "epoch": 6.9,
+ "grad_norm": 2.228806972503662,
+ "learning_rate": 0.0015317460317460319,
+ "loss": 0.3953,
+ "step": 440
+ },
+ {
+ "epoch": 7.0,
+ "eval_accuracy": 0.8583690987124464,
+ "eval_f1": 0.836744521418762,
+ "eval_loss": 0.3396996855735779,
+ "eval_precision": 0.8185304398119043,
+ "eval_recall": 0.8871569166819487,
+ "eval_runtime": 12.0715,
+ "eval_samples_per_second": 96.508,
+ "eval_steps_per_second": 6.047,
+ "step": 446
+ },
+ {
+ "epoch": 7.06,
+ "grad_norm": 3.1751720905303955,
+ "learning_rate": 0.0014523809523809526,
+ "loss": 0.3753,
+ "step": 450
+ },
+ {
+ "epoch": 7.22,
+ "grad_norm": 2.4551353454589844,
+ "learning_rate": 0.0013730158730158731,
+ "loss": 0.3061,
+ "step": 460
+ },
+ {
+ "epoch": 7.37,
+ "grad_norm": 1.707070231437683,
+ "learning_rate": 0.0012936507936507939,
+ "loss": 0.3525,
+ "step": 470
+ },
+ {
+ "epoch": 7.53,
+ "grad_norm": 0.43897438049316406,
+ "learning_rate": 0.0012142857142857144,
+ "loss": 0.3032,
+ "step": 480
+ },
+ {
+ "epoch": 7.69,
+ "grad_norm": 2.149365186691284,
+ "learning_rate": 0.001134920634920635,
+ "loss": 0.3031,
+ "step": 490
+ },
+ {
+ "epoch": 7.84,
+ "grad_norm": 1.5541889667510986,
+ "learning_rate": 0.0010555555555555557,
+ "loss": 0.3199,
+ "step": 500
+ },
+ {
+ "epoch": 8.0,
+ "grad_norm": 0.5194265842437744,
+ "learning_rate": 0.0009761904761904762,
+ "loss": 0.3218,
+ "step": 510
+ },
+ {
+ "epoch": 8.0,
+ "eval_accuracy": 0.8669527896995708,
+ "eval_f1": 0.8448168439144914,
+ "eval_loss": 0.3072466254234314,
+ "eval_precision": 0.8257109927719459,
+ "eval_recall": 0.8897720673635308,
+ "eval_runtime": 12.0816,
+ "eval_samples_per_second": 96.428,
+ "eval_steps_per_second": 6.042,
+ "step": 510
+ },
+ {
+ "epoch": 8.16,
+ "grad_norm": 0.4812917709350586,
+ "learning_rate": 0.0008968253968253968,
+ "loss": 0.3105,
+ "step": 520
+ },
+ {
+ "epoch": 8.31,
+ "grad_norm": 3.871387004852295,
+ "learning_rate": 0.0008174603174603175,
+ "loss": 0.3398,
+ "step": 530
+ },
+ {
+ "epoch": 8.47,
+ "grad_norm": 0.3649653196334839,
+ "learning_rate": 0.000746031746031746,
+ "loss": 0.3169,
+ "step": 540
+ },
+ {
+ "epoch": 8.63,
+ "grad_norm": 0.4373646676540375,
+ "learning_rate": 0.0006666666666666666,
+ "loss": 0.3019,
+ "step": 550
+ },
+ {
+ "epoch": 8.78,
+ "grad_norm": 0.21746236085891724,
+ "learning_rate": 0.0005873015873015873,
+ "loss": 0.3463,
+ "step": 560
+ },
+ {
+ "epoch": 8.94,
+ "grad_norm": 1.6411595344543457,
+ "learning_rate": 0.0005079365079365079,
+ "loss": 0.3219,
+ "step": 570
+ },
+ {
+ "epoch": 8.99,
+ "eval_accuracy": 0.896137339055794,
+ "eval_f1": 0.870789179160911,
+ "eval_loss": 0.263265997171402,
+ "eval_precision": 0.8582184517497349,
+ "eval_recall": 0.887172198789657,
+ "eval_runtime": 12.1817,
+ "eval_samples_per_second": 95.635,
+ "eval_steps_per_second": 5.993,
+ "step": 573
+ },
+ {
+ "epoch": 9.1,
+ "grad_norm": 1.573140025138855,
+ "learning_rate": 0.0004285714285714286,
+ "loss": 0.3078,
+ "step": 580
+ },
+ {
+ "epoch": 9.25,
+ "grad_norm": 0.6549800634384155,
+ "learning_rate": 0.00034920634920634924,
+ "loss": 0.2872,
+ "step": 590
+ },
+ {
+ "epoch": 9.41,
+ "grad_norm": 0.4417751133441925,
+ "learning_rate": 0.0002698412698412699,
+ "loss": 0.3256,
+ "step": 600
+ },
+ {
+ "epoch": 9.57,
+ "grad_norm": 3.714104413986206,
+ "learning_rate": 0.0001904761904761905,
+ "loss": 0.361,
+ "step": 610
+ },
+ {
+ "epoch": 9.73,
+ "grad_norm": 0.5447297096252441,
+ "learning_rate": 0.00011111111111111112,
+ "loss": 0.3773,
+ "step": 620
+ },
+ {
+ "epoch": 9.88,
+ "grad_norm": 1.347410798072815,
+ "learning_rate": 3.1746031746031745e-05,
+ "loss": 0.3049,
+ "step": 630
+ },
+ {
+ "epoch": 9.88,
+ "eval_accuracy": 0.8927038626609443,
+ "eval_f1": 0.8684775000564475,
+ "eval_loss": 0.27391761541366577,
+ "eval_precision": 0.8528169402296,
+ "eval_recall": 0.8912334189131366,
+ "eval_runtime": 12.284,
+ "eval_samples_per_second": 94.839,
+ "eval_steps_per_second": 5.943,
+ "step": 630
+ },
+ {
+ "epoch": 9.88,
+ "step": 630,
+ "total_flos": 3.142570654487126e+18,
+ "train_loss": 0.3948629246817695,
+ "train_runtime": 760.2155,
+ "train_samples_per_second": 53.63,
+ "train_steps_per_second": 0.829
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 630,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 500,
+ "total_flos": 3.142570654487126e+18,
+ "train_batch_size": 16,
+ "trial_name": null,
+ "trial_params": null
+ }
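The `log_history` above interleaves training-loss entries (every 10 steps, per `logging_steps`) with per-epoch evaluation entries, and `best_metric`/`best_model_checkpoint` record the peak `eval_accuracy` (0.9142 at step 63). A minimal sketch, assuming a local copy of this `trainer_state.json`, that recovers the same information:

```python
# Minimal sketch, assuming a local copy of the trainer_state.json added above:
# scan log_history for evaluation entries and recover the best checkpoint info.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

eval_entries = [e for e in state["log_history"] if "eval_accuracy" in e]
best = max(eval_entries, key=lambda e: e["eval_accuracy"])

print(f"best eval_accuracy: {best['eval_accuracy']:.4f} at step {best['step']}")
print(f"recorded best_metric: {state['best_metric']:.4f}")
print(f"recorded checkpoint: {state['best_model_checkpoint']}")
```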