flatala-research committed on
Commit
5efc8d3
1 Parent(s): c3eaf10

End of training

Files changed (3)
  1. all_results.json +8 -0
  2. test_results.json +8 -0
  3. trainer_state.json +885 -0
all_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 8.103513770180436,
+     "eval_accuracy": 0.551219512195122,
+     "eval_loss": 1.7062228918075562,
+     "eval_runtime": 25.2134,
+     "eval_samples_per_second": 8.131,
+     "eval_steps_per_second": 1.031
+ }
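
Both summary files in this commit share the same flat JSON layout, so they can be compared side by side with a few lines of Python. This is a minimal sketch, assuming the two files are read from the root of a local checkout of this repository:

```python
import json

# Compare the final evaluation summaries committed in this change.
# Paths assume a local checkout with the files at the repository root.
for name in ("all_results.json", "test_results.json"):
    with open(name) as f:
        metrics = json.load(f)
    print(f"{name}: eval_accuracy={metrics['eval_accuracy']:.3f}, eval_loss={metrics['eval_loss']:.3f}")
```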
test_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 8.103513770180436,
+     "eval_accuracy": 0.551219512195122,
+     "eval_loss": 1.7062228918075562,
+     "eval_runtime": 25.2134,
+     "eval_samples_per_second": 8.131,
+     "eval_steps_per_second": 1.031
+ }
trainer_state.json ADDED
@@ -0,0 +1,885 @@
+ {
+     "best_metric": 0.6213592233009708,
+     "best_model_checkpoint": "videomae-base-finetuned-kinetics-finetuned-conflab-traj-direction-rh-v2/checkpoint-1053",
+     "epoch": 8.103513770180436,
+     "eval_steps": 500,
+     "global_step": 1053,
+     "is_hyper_param_search": false,
+     "is_local_process_zero": true,
+     "is_world_process_zero": true,
+     "log_history": [
+         {
+             "epoch": 0.00949667616334283,
+             "grad_norm": 8.752718925476074,
+             "learning_rate": 4.716981132075472e-06,
+             "loss": 2.1131,
+             "step": 10
+         },
+         {
+             "epoch": 0.01899335232668566,
+             "grad_norm": 7.10194206237793,
+             "learning_rate": 9.433962264150944e-06,
+             "loss": 2.031,
+             "step": 20
+         },
+         {
+             "epoch": 0.02849002849002849,
+             "grad_norm": 8.048168182373047,
+             "learning_rate": 1.4150943396226415e-05,
+             "loss": 2.0382,
+             "step": 30
+         },
+         {
+             "epoch": 0.03798670465337132,
+             "grad_norm": 8.191485404968262,
+             "learning_rate": 1.8867924528301888e-05,
+             "loss": 1.9895,
+             "step": 40
+         },
+         {
+             "epoch": 0.04748338081671415,
+             "grad_norm": 7.475042819976807,
+             "learning_rate": 2.358490566037736e-05,
+             "loss": 1.9052,
+             "step": 50
+         },
+         {
+             "epoch": 0.05698005698005698,
+             "grad_norm": 12.284913063049316,
+             "learning_rate": 2.830188679245283e-05,
+             "loss": 1.9474,
+             "step": 60
+         },
+         {
+             "epoch": 0.06647673314339982,
+             "grad_norm": 6.110039234161377,
+             "learning_rate": 3.30188679245283e-05,
+             "loss": 1.8688,
+             "step": 70
+         },
+         {
+             "epoch": 0.07597340930674264,
+             "grad_norm": 9.754578590393066,
+             "learning_rate": 3.7735849056603776e-05,
+             "loss": 1.9115,
+             "step": 80
+         },
+         {
+             "epoch": 0.08547008547008547,
+             "grad_norm": 7.101430892944336,
+             "learning_rate": 4.245283018867925e-05,
+             "loss": 1.9359,
+             "step": 90
+         },
+         {
+             "epoch": 0.0949667616334283,
+             "grad_norm": 5.658616065979004,
+             "learning_rate": 4.716981132075472e-05,
+             "loss": 1.9613,
+             "step": 100
+         },
+         {
+             "epoch": 0.10446343779677113,
+             "grad_norm": 4.824208736419678,
+             "learning_rate": 4.978880675818374e-05,
+             "loss": 1.9728,
+             "step": 110
+         },
+         {
+             "epoch": 0.1120607787274454,
+             "eval_accuracy": 0.2912621359223301,
+             "eval_loss": 1.8081567287445068,
+             "eval_runtime": 37.3417,
+             "eval_samples_per_second": 5.517,
+             "eval_steps_per_second": 0.696,
+             "step": 118
+         },
+         {
+             "epoch": 1.0018993352326686,
+             "grad_norm": 5.8929443359375,
+             "learning_rate": 4.9260823653643085e-05,
+             "loss": 1.8685,
+             "step": 120
+         },
+         {
+             "epoch": 1.0113960113960114,
+             "grad_norm": 5.137537956237793,
+             "learning_rate": 4.8732840549102435e-05,
+             "loss": 1.8045,
+             "step": 130
+         },
+         {
+             "epoch": 1.0208926875593543,
+             "grad_norm": 8.457473754882812,
+             "learning_rate": 4.820485744456177e-05,
+             "loss": 1.7704,
+             "step": 140
+         },
+         {
+             "epoch": 1.030389363722697,
+             "grad_norm": 6.545841693878174,
+             "learning_rate": 4.767687434002112e-05,
+             "loss": 1.7769,
+             "step": 150
+         },
+         {
+             "epoch": 1.03988603988604,
+             "grad_norm": 11.467026710510254,
+             "learning_rate": 4.7148891235480466e-05,
+             "loss": 1.664,
+             "step": 160
+         },
+         {
+             "epoch": 1.0493827160493827,
+             "grad_norm": 7.759367942810059,
+             "learning_rate": 4.662090813093981e-05,
+             "loss": 1.6638,
+             "step": 170
+         },
+         {
+             "epoch": 1.0588793922127255,
+             "grad_norm": 4.53931188583374,
+             "learning_rate": 4.609292502639916e-05,
+             "loss": 1.6305,
+             "step": 180
+         },
+         {
+             "epoch": 1.0683760683760684,
+             "grad_norm": 11.055715560913086,
+             "learning_rate": 4.55649419218585e-05,
+             "loss": 1.6884,
+             "step": 190
+         },
+         {
+             "epoch": 1.0778727445394112,
+             "grad_norm": 7.864542007446289,
+             "learning_rate": 4.503695881731785e-05,
+             "loss": 1.5855,
+             "step": 200
+         },
+         {
+             "epoch": 1.087369420702754,
+             "grad_norm": 10.435840606689453,
+             "learning_rate": 4.45089757127772e-05,
+             "loss": 1.4554,
+             "step": 210
+         },
+         {
+             "epoch": 1.0968660968660968,
+             "grad_norm": 9.551925659179688,
+             "learning_rate": 4.398099260823654e-05,
+             "loss": 1.6277,
+             "step": 220
+         },
+         {
+             "epoch": 1.1063627730294396,
+             "grad_norm": 8.910355567932129,
+             "learning_rate": 4.3453009503695884e-05,
+             "loss": 1.5903,
+             "step": 230
+         },
+         {
+             "epoch": 1.1120607787274455,
+             "eval_accuracy": 0.3786407766990291,
+             "eval_loss": 1.803798794746399,
+             "eval_runtime": 30.5748,
+             "eval_samples_per_second": 6.738,
+             "eval_steps_per_second": 0.85,
+             "step": 236
+         },
+         {
+             "epoch": 2.003798670465337,
+             "grad_norm": 5.9250712394714355,
+             "learning_rate": 4.292502639915523e-05,
+             "loss": 1.7366,
+             "step": 240
+         },
+         {
+             "epoch": 2.01329534662868,
+             "grad_norm": 7.3831400871276855,
+             "learning_rate": 4.239704329461457e-05,
+             "loss": 1.4794,
+             "step": 250
+         },
+         {
+             "epoch": 2.022792022792023,
+             "grad_norm": 4.820956230163574,
+             "learning_rate": 4.186906019007392e-05,
+             "loss": 1.2516,
+             "step": 260
+         },
+         {
+             "epoch": 2.0322886989553655,
+             "grad_norm": 10.172005653381348,
+             "learning_rate": 4.1341077085533265e-05,
+             "loss": 1.3548,
+             "step": 270
+         },
+         {
+             "epoch": 2.0417853751187085,
+             "grad_norm": 14.752180099487305,
+             "learning_rate": 4.081309398099261e-05,
+             "loss": 1.2371,
+             "step": 280
+         },
+         {
+             "epoch": 2.051282051282051,
+             "grad_norm": 12.396346092224121,
+             "learning_rate": 4.028511087645195e-05,
+             "loss": 1.4009,
+             "step": 290
+         },
+         {
+             "epoch": 2.060778727445394,
+             "grad_norm": 11.691078186035156,
+             "learning_rate": 3.97571277719113e-05,
+             "loss": 1.2658,
+             "step": 300
+         },
+         {
+             "epoch": 2.0702754036087367,
+             "grad_norm": 7.867719650268555,
+             "learning_rate": 3.9229144667370646e-05,
+             "loss": 1.3029,
+             "step": 310
+         },
+         {
+             "epoch": 2.07977207977208,
+             "grad_norm": 11.82673454284668,
+             "learning_rate": 3.870116156282999e-05,
+             "loss": 1.1971,
+             "step": 320
+         },
+         {
+             "epoch": 2.0892687559354224,
+             "grad_norm": 12.339221954345703,
+             "learning_rate": 3.817317845828934e-05,
+             "loss": 1.3162,
+             "step": 330
+         },
+         {
+             "epoch": 2.0987654320987654,
+             "grad_norm": 14.504555702209473,
+             "learning_rate": 3.764519535374868e-05,
+             "loss": 1.4248,
+             "step": 340
+         },
+         {
+             "epoch": 2.1082621082621085,
+             "grad_norm": 12.908592224121094,
+             "learning_rate": 3.711721224920803e-05,
+             "loss": 1.2371,
+             "step": 350
+         },
+         {
+             "epoch": 2.1120607787274452,
+             "eval_accuracy": 0.4563106796116505,
+             "eval_loss": 1.466732382774353,
+             "eval_runtime": 28.3624,
+             "eval_samples_per_second": 7.263,
+             "eval_steps_per_second": 0.917,
+             "step": 354
+         },
+         {
+             "epoch": 3.005698005698006,
+             "grad_norm": 10.089714050292969,
+             "learning_rate": 3.658922914466738e-05,
+             "loss": 0.9885,
+             "step": 360
+         },
+         {
+             "epoch": 3.0151946818613484,
+             "grad_norm": 6.925798416137695,
+             "learning_rate": 3.6061246040126714e-05,
+             "loss": 0.8912,
+             "step": 370
+         },
+         {
+             "epoch": 3.0246913580246915,
+             "grad_norm": 7.01603889465332,
+             "learning_rate": 3.5533262935586064e-05,
+             "loss": 0.78,
+             "step": 380
+         },
+         {
+             "epoch": 3.034188034188034,
+             "grad_norm": 7.25798225402832,
+             "learning_rate": 3.500527983104541e-05,
+             "loss": 0.6707,
+             "step": 390
+         },
+         {
+             "epoch": 3.043684710351377,
+             "grad_norm": 11.455845832824707,
+             "learning_rate": 3.447729672650475e-05,
+             "loss": 0.8491,
+             "step": 400
+         },
+         {
+             "epoch": 3.0531813865147197,
+             "grad_norm": 13.49953556060791,
+             "learning_rate": 3.3949313621964095e-05,
+             "loss": 0.7744,
+             "step": 410
+         },
+         {
+             "epoch": 3.0626780626780628,
+             "grad_norm": 19.157188415527344,
+             "learning_rate": 3.3421330517423445e-05,
+             "loss": 0.83,
+             "step": 420
+         },
+         {
+             "epoch": 3.0721747388414054,
+             "grad_norm": 15.591106414794922,
+             "learning_rate": 3.289334741288279e-05,
+             "loss": 0.8751,
+             "step": 430
+         },
+         {
+             "epoch": 3.0816714150047484,
+             "grad_norm": 15.52799129486084,
+             "learning_rate": 3.236536430834213e-05,
+             "loss": 0.868,
+             "step": 440
+         },
+         {
+             "epoch": 3.091168091168091,
+             "grad_norm": 15.363070487976074,
+             "learning_rate": 3.183738120380148e-05,
+             "loss": 0.7792,
+             "step": 450
+         },
+         {
+             "epoch": 3.100664767331434,
+             "grad_norm": 9.656035423278809,
+             "learning_rate": 3.130939809926082e-05,
+             "loss": 0.6143,
+             "step": 460
+         },
+         {
+             "epoch": 3.1101614434947766,
+             "grad_norm": 11.785117149353027,
+             "learning_rate": 3.078141499472017e-05,
+             "loss": 0.9635,
+             "step": 470
+         },
+         {
+             "epoch": 3.1120607787274452,
+             "eval_accuracy": 0.5631067961165048,
+             "eval_loss": 1.418686032295227,
+             "eval_runtime": 23.7661,
+             "eval_samples_per_second": 8.668,
+             "eval_steps_per_second": 1.094,
+             "step": 472
+         },
+         {
+             "epoch": 4.007597340930674,
+             "grad_norm": 5.675172805786133,
+             "learning_rate": 3.0253431890179517e-05,
+             "loss": 0.5573,
+             "step": 480
+         },
+         {
+             "epoch": 4.017094017094017,
+             "grad_norm": 5.014197826385498,
+             "learning_rate": 2.972544878563886e-05,
+             "loss": 0.4382,
+             "step": 490
+         },
+         {
+             "epoch": 4.02659069325736,
+             "grad_norm": 8.953272819519043,
+             "learning_rate": 2.9197465681098207e-05,
+             "loss": 0.639,
+             "step": 500
+         },
+         {
+             "epoch": 4.036087369420703,
+             "grad_norm": 10.838029861450195,
+             "learning_rate": 2.8669482576557548e-05,
+             "loss": 0.493,
+             "step": 510
+         },
+         {
+             "epoch": 4.045584045584046,
+             "grad_norm": 8.463229179382324,
+             "learning_rate": 2.8141499472016898e-05,
+             "loss": 0.4703,
+             "step": 520
+         },
+         {
+             "epoch": 4.055080721747388,
+             "grad_norm": 8.104616165161133,
+             "learning_rate": 2.7613516367476245e-05,
+             "loss": 0.3912,
+             "step": 530
+         },
+         {
+             "epoch": 4.064577397910731,
+             "grad_norm": 13.097394943237305,
+             "learning_rate": 2.7085533262935585e-05,
+             "loss": 0.4528,
+             "step": 540
+         },
+         {
+             "epoch": 4.074074074074074,
+             "grad_norm": 11.679549217224121,
+             "learning_rate": 2.6557550158394935e-05,
+             "loss": 0.5598,
+             "step": 550
+         },
+         {
+             "epoch": 4.083570750237417,
+             "grad_norm": 17.88136100769043,
+             "learning_rate": 2.6029567053854276e-05,
+             "loss": 0.4933,
+             "step": 560
+         },
+         {
+             "epoch": 4.09306742640076,
+             "grad_norm": 4.025656700134277,
+             "learning_rate": 2.5501583949313622e-05,
+             "loss": 0.4895,
+             "step": 570
+         },
+         {
+             "epoch": 4.102564102564102,
+             "grad_norm": 18.150617599487305,
+             "learning_rate": 2.497360084477297e-05,
+             "loss": 0.5344,
+             "step": 580
+         },
+         {
+             "epoch": 4.112060778727446,
+             "grad_norm": 10.502995491027832,
+             "learning_rate": 2.4445617740232313e-05,
+             "loss": 0.3984,
+             "step": 590
+         },
+         {
+             "epoch": 4.112060778727446,
+             "eval_accuracy": 0.5631067961165048,
+             "eval_loss": 1.398964285850525,
+             "eval_runtime": 20.6856,
+             "eval_samples_per_second": 9.959,
+             "eval_steps_per_second": 1.257,
+             "step": 590
+         },
+         {
+             "epoch": 5.009496676163343,
+             "grad_norm": 7.37868070602417,
+             "learning_rate": 2.391763463569166e-05,
+             "loss": 0.3346,
+             "step": 600
+         },
+         {
+             "epoch": 5.018993352326686,
+             "grad_norm": 5.786819934844971,
+             "learning_rate": 2.3389651531151003e-05,
+             "loss": 0.2937,
+             "step": 610
+         },
+         {
+             "epoch": 5.028490028490029,
+             "grad_norm": 8.739676475524902,
+             "learning_rate": 2.286166842661035e-05,
+             "loss": 0.2478,
+             "step": 620
+         },
+         {
+             "epoch": 5.037986704653371,
+             "grad_norm": 3.41898512840271,
+             "learning_rate": 2.2333685322069694e-05,
+             "loss": 0.2988,
+             "step": 630
+         },
+         {
+             "epoch": 5.047483380816714,
+             "grad_norm": 13.979087829589844,
+             "learning_rate": 2.180570221752904e-05,
+             "loss": 0.2672,
+             "step": 640
+         },
+         {
+             "epoch": 5.056980056980057,
+             "grad_norm": 9.218252182006836,
+             "learning_rate": 2.1277719112988384e-05,
+             "loss": 0.2932,
+             "step": 650
+         },
+         {
+             "epoch": 5.0664767331434,
+             "grad_norm": 8.248359680175781,
+             "learning_rate": 2.074973600844773e-05,
+             "loss": 0.2845,
+             "step": 660
+         },
+         {
+             "epoch": 5.075973409306743,
+             "grad_norm": 12.172591209411621,
+             "learning_rate": 2.0221752903907075e-05,
+             "loss": 0.2125,
+             "step": 670
+         },
+         {
+             "epoch": 5.085470085470085,
+             "grad_norm": 4.901180267333984,
+             "learning_rate": 1.9693769799366422e-05,
+             "loss": 0.2387,
+             "step": 680
+         },
+         {
+             "epoch": 5.094966761633429,
+             "grad_norm": 3.2035112380981445,
+             "learning_rate": 1.9165786694825765e-05,
+             "loss": 0.1137,
+             "step": 690
+         },
+         {
+             "epoch": 5.104463437796771,
+             "grad_norm": 1.4330121278762817,
+             "learning_rate": 1.863780359028511e-05,
+             "loss": 0.1962,
+             "step": 700
+         },
+         {
+             "epoch": 5.112060778727446,
+             "eval_accuracy": 0.5825242718446602,
+             "eval_loss": 1.4243208169937134,
+             "eval_runtime": 20.9667,
+             "eval_samples_per_second": 9.825,
+             "eval_steps_per_second": 1.24,
+             "step": 708
+         },
+         {
+             "epoch": 6.001899335232668,
+             "grad_norm": 17.324275970458984,
+             "learning_rate": 1.810982048574446e-05,
+             "loss": 0.2794,
+             "step": 710
+         },
+         {
+             "epoch": 6.011396011396012,
+             "grad_norm": 13.693547248840332,
+             "learning_rate": 1.7581837381203803e-05,
+             "loss": 0.2361,
+             "step": 720
+         },
+         {
+             "epoch": 6.020892687559354,
+             "grad_norm": 1.044037938117981,
+             "learning_rate": 1.7053854276663146e-05,
+             "loss": 0.0796,
+             "step": 730
+         },
+         {
+             "epoch": 6.030389363722697,
+             "grad_norm": 1.7120157480239868,
+             "learning_rate": 1.6525871172122493e-05,
+             "loss": 0.1323,
+             "step": 740
+         },
+         {
+             "epoch": 6.0398860398860394,
+             "grad_norm": 16.39497184753418,
+             "learning_rate": 1.5997888067581837e-05,
+             "loss": 0.1033,
+             "step": 750
+         },
+         {
+             "epoch": 6.049382716049383,
+             "grad_norm": 0.9011927247047424,
+             "learning_rate": 1.5469904963041184e-05,
+             "loss": 0.082,
+             "step": 760
+         },
+         {
+             "epoch": 6.0588793922127255,
+             "grad_norm": 19.976755142211914,
+             "learning_rate": 1.4941921858500529e-05,
+             "loss": 0.1568,
+             "step": 770
+         },
+         {
+             "epoch": 6.068376068376068,
+             "grad_norm": 1.4492021799087524,
+             "learning_rate": 1.4413938753959874e-05,
+             "loss": 0.1059,
+             "step": 780
+         },
+         {
+             "epoch": 6.077872744539412,
+             "grad_norm": 10.073105812072754,
+             "learning_rate": 1.388595564941922e-05,
+             "loss": 0.0908,
+             "step": 790
+         },
+         {
+             "epoch": 6.087369420702754,
+             "grad_norm": 12.8700532913208,
+             "learning_rate": 1.3357972544878563e-05,
+             "loss": 0.1215,
+             "step": 800
+         },
+         {
+             "epoch": 6.096866096866097,
+             "grad_norm": 0.7424122095108032,
+             "learning_rate": 1.2829989440337912e-05,
+             "loss": 0.1036,
+             "step": 810
+         },
+         {
+             "epoch": 6.106362773029439,
+             "grad_norm": 4.486748218536377,
+             "learning_rate": 1.2302006335797255e-05,
+             "loss": 0.1952,
+             "step": 820
+         },
+         {
+             "epoch": 6.112060778727446,
+             "eval_accuracy": 0.6019417475728155,
+             "eval_loss": 1.557113528251648,
+             "eval_runtime": 56.6502,
+             "eval_samples_per_second": 3.636,
+             "eval_steps_per_second": 0.459,
+             "step": 826
+         },
+         {
+             "epoch": 7.003798670465337,
+             "grad_norm": 18.093158721923828,
+             "learning_rate": 1.17740232312566e-05,
+             "loss": 0.1356,
+             "step": 830
+         },
+         {
+             "epoch": 7.01329534662868,
+             "grad_norm": 0.3082960247993469,
+             "learning_rate": 1.1246040126715946e-05,
+             "loss": 0.0692,
+             "step": 840
+         },
+         {
+             "epoch": 7.022792022792022,
+             "grad_norm": 0.9313774108886719,
+             "learning_rate": 1.0718057022175291e-05,
+             "loss": 0.0197,
+             "step": 850
+         },
+         {
+             "epoch": 7.032288698955366,
+             "grad_norm": 0.9466642141342163,
+             "learning_rate": 1.0190073917634636e-05,
+             "loss": 0.0202,
+             "step": 860
+         },
+         {
+             "epoch": 7.0417853751187085,
+             "grad_norm": 7.768035888671875,
+             "learning_rate": 9.662090813093982e-06,
+             "loss": 0.0544,
+             "step": 870
+         },
+         {
+             "epoch": 7.051282051282051,
+             "grad_norm": 0.7909216284751892,
+             "learning_rate": 9.134107708553327e-06,
+             "loss": 0.0614,
+             "step": 880
+         },
+         {
+             "epoch": 7.060778727445394,
+             "grad_norm": 0.6131118535995483,
+             "learning_rate": 8.606124604012672e-06,
+             "loss": 0.0458,
+             "step": 890
+         },
+         {
+             "epoch": 7.070275403608737,
+             "grad_norm": 0.46935775876045227,
+             "learning_rate": 8.078141499472017e-06,
+             "loss": 0.0309,
+             "step": 900
+         },
+         {
+             "epoch": 7.07977207977208,
+             "grad_norm": 4.931636333465576,
+             "learning_rate": 7.5501583949313625e-06,
+             "loss": 0.0386,
+             "step": 910
+         },
+         {
+             "epoch": 7.089268755935422,
+             "grad_norm": 0.7869710326194763,
+             "learning_rate": 7.022175290390708e-06,
+             "loss": 0.0599,
+             "step": 920
+         },
+         {
+             "epoch": 7.098765432098766,
+             "grad_norm": 18.208322525024414,
+             "learning_rate": 6.494192185850054e-06,
+             "loss": 0.1256,
+             "step": 930
+         },
+         {
+             "epoch": 7.1082621082621085,
+             "grad_norm": 6.396451473236084,
+             "learning_rate": 5.966209081309398e-06,
+             "loss": 0.0319,
+             "step": 940
+         },
+         {
+             "epoch": 7.112060778727446,
+             "eval_accuracy": 0.587378640776699,
+             "eval_loss": 1.5844324827194214,
+             "eval_runtime": 38.1637,
+             "eval_samples_per_second": 5.398,
+             "eval_steps_per_second": 0.681,
+             "step": 944
+         },
+         {
+             "epoch": 8.005698005698006,
+             "grad_norm": 0.25447767972946167,
+             "learning_rate": 5.438225976768744e-06,
+             "loss": 0.0605,
+             "step": 950
+         },
+         {
+             "epoch": 8.015194681861349,
+             "grad_norm": 3.442044973373413,
+             "learning_rate": 4.910242872228089e-06,
+             "loss": 0.0193,
+             "step": 960
+         },
+         {
+             "epoch": 8.024691358024691,
+             "grad_norm": 0.15950489044189453,
+             "learning_rate": 4.382259767687434e-06,
+             "loss": 0.0443,
+             "step": 970
+         },
+         {
+             "epoch": 8.034188034188034,
+             "grad_norm": 0.21153709292411804,
+             "learning_rate": 3.854276663146779e-06,
+             "loss": 0.0152,
+             "step": 980
+         },
+         {
+             "epoch": 8.043684710351377,
+             "grad_norm": 0.30688220262527466,
+             "learning_rate": 3.326293558606125e-06,
+             "loss": 0.0275,
+             "step": 990
+         },
+         {
+             "epoch": 8.05318138651472,
+             "grad_norm": 0.7708584070205688,
+             "learning_rate": 2.79831045406547e-06,
+             "loss": 0.0195,
+             "step": 1000
+         },
+         {
+             "epoch": 8.062678062678062,
+             "grad_norm": 1.1436554193496704,
+             "learning_rate": 2.2703273495248154e-06,
+             "loss": 0.0098,
+             "step": 1010
+         },
+         {
+             "epoch": 8.072174738841406,
+             "grad_norm": 1.9318220615386963,
+             "learning_rate": 1.7423442449841606e-06,
+             "loss": 0.0597,
+             "step": 1020
+         },
+         {
+             "epoch": 8.081671415004749,
+             "grad_norm": 3.5386385917663574,
+             "learning_rate": 1.2143611404435059e-06,
+             "loss": 0.0213,
+             "step": 1030
+         },
+         {
+             "epoch": 8.091168091168091,
+             "grad_norm": 0.16293883323669434,
+             "learning_rate": 6.863780359028511e-07,
+             "loss": 0.0118,
+             "step": 1040
+         },
+         {
+             "epoch": 8.100664767331434,
+             "grad_norm": 2.2753586769104004,
+             "learning_rate": 1.5839493136219642e-07,
+             "loss": 0.0203,
+             "step": 1050
+         },
+         {
+             "epoch": 8.103513770180436,
+             "eval_accuracy": 0.6213592233009708,
+             "eval_loss": 1.579399824142456,
+             "eval_runtime": 23.3894,
+             "eval_samples_per_second": 8.807,
+             "eval_steps_per_second": 1.112,
+             "step": 1053
+         },
+         {
+             "epoch": 8.103513770180436,
+             "step": 1053,
+             "total_flos": 1.0477471675667448e+19,
+             "train_loss": 0.7528787313023864,
+             "train_runtime": 2130.6056,
+             "train_samples_per_second": 3.954,
+             "train_steps_per_second": 0.494
+         },
+         {
+             "epoch": 8.103513770180436,
+             "eval_accuracy": 0.551219512195122,
+             "eval_loss": 1.7062230110168457,
+             "eval_runtime": 32.2785,
+             "eval_samples_per_second": 6.351,
+             "eval_steps_per_second": 0.805,
+             "step": 1053
+         },
+         {
+             "epoch": 8.103513770180436,
+             "eval_accuracy": 0.551219512195122,
+             "eval_loss": 1.7062228918075562,
+             "eval_runtime": 25.2134,
+             "eval_samples_per_second": 8.131,
+             "eval_steps_per_second": 1.031,
+             "step": 1053
+         }
+     ],
+     "logging_steps": 10,
+     "max_steps": 1053,
+     "num_input_tokens_seen": 0,
+     "num_train_epochs": 9223372036854775807,
+     "save_steps": 500,
+     "stateful_callbacks": {
+         "EarlyStoppingCallback": {
+             "args": {
+                 "early_stopping_patience": 3,
+                 "early_stopping_threshold": 0.0
+             },
+             "attributes": {
+                 "early_stopping_patience_counter": 0
+             }
+         },
+         "TrainerControl": {
+             "args": {
+                 "should_epoch_stop": false,
+                 "should_evaluate": false,
+                 "should_log": false,
+                 "should_save": true,
+                 "should_training_stop": true
+             },
+             "attributes": {}
+         }
+     },
+     "total_flos": 1.0477471675667448e+19,
+     "train_batch_size": 8,
+     "trial_name": null,
+     "trial_params": null
+ }
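
The trainer_state.json added above is plain JSON as well, so the best checkpoint and the per-epoch evaluation curve can be pulled out directly. A minimal sketch, assuming the file is read from a local checkout of this repository:

```python
import json

# Load the trainer state committed above and report the best checkpoint
# plus every evaluation record from log_history.
with open("trainer_state.json") as f:
    state = json.load(f)

print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])

# Evaluation entries are the log_history records that carry eval_accuracy.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"step {entry['step']:>4}  epoch {entry['epoch']:.2f}  "
              f"accuracy {entry['eval_accuracy']:.3f}  loss {entry['eval_loss']:.3f}")
```

Per the state above, the run kept checkpoint-1053 as best_model_checkpoint with a validation eval_accuracy of 0.6214, while the held-out accuracy recorded in test_results.json is 0.5512.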