kanishka committed on
Commit
9c05b7c
1 Parent(s): d09fbed

End of training

Files changed (5)
  1. README.md +16 -4
  2. all_results.json +16 -0
  3. eval_results.json +10 -0
  4. train_results.json +9 -0
  5. trainer_state.json +681 -0
README.md CHANGED
@@ -2,11 +2,23 @@
  library_name: transformers
  tags:
  - generated_from_trainer
+ datasets:
+ - kanishka/babylm2-rewritten-clean-spacy
  metrics:
  - accuracy
  model-index:
  - name: opt-babylm2-rewritten-clean-spacy-32k-earlystop-40epochs_seed-42_1e-3
-   results: []
+   results:
+   - task:
+       name: Causal Language Modeling
+       type: text-generation
+     dataset:
+       name: kanishka/babylm2-rewritten-clean-spacy
+       type: kanishka/babylm2-rewritten-clean-spacy
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.42334742212654364
  ---

  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -14,10 +26,10 @@ should probably proofread and complete it, then remove this comment. -->

  # opt-babylm2-rewritten-clean-spacy-32k-earlystop-40epochs_seed-42_1e-3

- This model was trained from scratch on an unknown dataset.
+ This model was trained from scratch on the kanishka/babylm2-rewritten-clean-spacy dataset.
  It achieves the following results on the evaluation set:
- - Loss: 2.9601
- - Accuracy: 0.4245
+ - Loss: 2.9600
+ - Accuracy: 0.4233

  ## Model description

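For context, the card above describes a standard `transformers` causal language model. Below is a minimal loading sketch; it is not part of this commit, and the Hub repo id is an assumption pieced together from the committer name and the model name.

```python
# Minimal sketch (not part of this commit): load the checkpoint and score a sentence.
# The repo id is assumed from the committer and model name and may not match the real path.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "kanishka/opt-babylm2-rewritten-clean-spacy-32k-earlystop-40epochs_seed-42_1e-3"  # assumed

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)
model.eval()

inputs = tokenizer("The child picked up the ball.", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs, labels=inputs["input_ids"])

# out.loss is the mean token-level cross-entropy, on the same scale as the eval loss above.
print(float(out.loss))
```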
all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+ "epoch": 28.0,
+ "eval_accuracy": 0.42334742212654364,
+ "eval_loss": 2.9599573612213135,
+ "eval_runtime": 111.655,
+ "eval_samples": 52190,
+ "eval_samples_per_second": 467.422,
+ "eval_steps_per_second": 7.308,
+ "perplexity": 19.297148931098217,
+ "total_flos": 1.808986925039616e+18,
+ "train_loss": 2.9159927132606205,
+ "train_runtime": 57317.9169,
+ "train_samples": 494517,
+ "train_samples_per_second": 345.105,
+ "train_steps_per_second": 1.348
+ }
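The `perplexity` field above is simply the exponential of `eval_loss`; a quick sanity check (a sketch, not part of the commit):

```python
import math

eval_loss = 2.9599573612213135
print(math.exp(eval_loss))  # ≈ 19.297148931098217, matching the "perplexity" field
```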
eval_results.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "epoch": 28.0,
+ "eval_accuracy": 0.42334742212654364,
+ "eval_loss": 2.9599573612213135,
+ "eval_runtime": 111.655,
+ "eval_samples": 52190,
+ "eval_samples_per_second": 467.422,
+ "eval_steps_per_second": 7.308,
+ "perplexity": 19.297148931098217
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 28.0,
+ "total_flos": 1.808986925039616e+18,
+ "train_loss": 2.9159927132606205,
+ "train_runtime": 57317.9169,
+ "train_samples": 494517,
+ "train_samples_per_second": 345.105,
+ "train_steps_per_second": 1.348
+ }
trainer_state.json ADDED
@@ -0,0 +1,681 @@
+ {
+ "best_metric": 2.9599573612213135,
+ "best_model_checkpoint": "models/opt-babylm2-rewritten-clean-spacy-32k-earlystop-40epochs_seed-42_1e-3/checkpoint-48293",
+ "epoch": 28.0,
+ "eval_steps": 500,
+ "global_step": 54089,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.5176653293645658,
+ "grad_norm": 0.516268789768219,
+ "learning_rate": 3.125e-05,
+ "loss": 5.9216,
+ "step": 1000
+ },
+ {
+ "epoch": 0.9996117510029766,
+ "eval_accuracy": 0.32528310960329715,
+ "eval_loss": 4.013359069824219,
+ "eval_runtime": 112.6568,
+ "eval_samples_per_second": 463.265,
+ "eval_steps_per_second": 7.243,
+ "step": 1931
+ },
+ {
+ "epoch": 1.0353306587291315,
+ "grad_norm": 0.6370756030082703,
+ "learning_rate": 6.25e-05,
+ "loss": 4.1987,
+ "step": 2000
+ },
+ {
+ "epoch": 1.5529959880936974,
+ "grad_norm": 0.6017232537269592,
+ "learning_rate": 9.375e-05,
+ "loss": 3.7977,
+ "step": 3000
+ },
+ {
+ "epoch": 1.9997411673353178,
+ "eval_accuracy": 0.3639096213308086,
+ "eval_loss": 3.544811725616455,
+ "eval_runtime": 112.8804,
+ "eval_samples_per_second": 462.348,
+ "eval_steps_per_second": 7.229,
+ "step": 3863
+ },
+ {
+ "epoch": 2.070661317458263,
+ "grad_norm": 0.5702515840530396,
+ "learning_rate": 0.000125,
+ "loss": 3.5582,
+ "step": 4000
+ },
+ {
+ "epoch": 2.588326646822829,
+ "grad_norm": 0.48817870020866394,
+ "learning_rate": 0.00015625,
+ "loss": 3.3887,
+ "step": 5000
+ },
+ {
+ "epoch": 2.9998705836676587,
+ "eval_accuracy": 0.3840780857274889,
+ "eval_loss": 3.324249744415283,
+ "eval_runtime": 112.7528,
+ "eval_samples_per_second": 462.871,
+ "eval_steps_per_second": 7.237,
+ "step": 5795
+ },
+ {
+ "epoch": 3.105991976187395,
+ "grad_norm": 0.45357462763786316,
+ "learning_rate": 0.0001875,
+ "loss": 3.2719,
+ "step": 6000
+ },
+ {
+ "epoch": 3.6236573055519608,
+ "grad_norm": 0.42539018392562866,
+ "learning_rate": 0.00021875,
+ "loss": 3.1805,
+ "step": 7000
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.3949032381682315,
+ "eval_loss": 3.2081618309020996,
+ "eval_runtime": 112.6095,
+ "eval_samples_per_second": 463.46,
+ "eval_steps_per_second": 7.246,
+ "step": 7727
+ },
+ {
+ "epoch": 4.141322634916526,
+ "grad_norm": 0.41741499304771423,
+ "learning_rate": 0.00025,
+ "loss": 3.1173,
+ "step": 8000
+ },
+ {
+ "epoch": 4.658987964281092,
+ "grad_norm": 0.3810145854949951,
+ "learning_rate": 0.00028125000000000003,
+ "loss": 3.0632,
+ "step": 9000
+ },
+ {
+ "epoch": 4.999611751002977,
+ "eval_accuracy": 0.401180377880219,
+ "eval_loss": 3.143218517303467,
+ "eval_runtime": 112.4876,
+ "eval_samples_per_second": 463.962,
+ "eval_steps_per_second": 7.254,
+ "step": 9658
+ },
+ {
+ "epoch": 5.176653293645658,
+ "grad_norm": 0.3555419445037842,
+ "learning_rate": 0.0003125,
+ "loss": 3.0212,
+ "step": 10000
+ },
+ {
+ "epoch": 5.694318623010224,
+ "grad_norm": 0.3318658173084259,
+ "learning_rate": 0.00034375,
+ "loss": 2.9865,
+ "step": 11000
+ },
+ {
+ "epoch": 5.999741167335317,
+ "eval_accuracy": 0.4055885546400971,
+ "eval_loss": 3.101013422012329,
+ "eval_runtime": 112.8154,
+ "eval_samples_per_second": 462.614,
+ "eval_steps_per_second": 7.233,
+ "step": 11590
+ },
+ {
+ "epoch": 6.21198395237479,
+ "grad_norm": 0.3243854343891144,
+ "learning_rate": 0.000375,
+ "loss": 2.9568,
+ "step": 12000
+ },
+ {
+ "epoch": 6.729649281739356,
+ "grad_norm": 0.3086845874786377,
+ "learning_rate": 0.00040625000000000004,
+ "loss": 2.9347,
+ "step": 13000
+ },
+ {
+ "epoch": 6.999870583667659,
+ "eval_accuracy": 0.4087078510269791,
+ "eval_loss": 3.071547746658325,
+ "eval_runtime": 113.6377,
+ "eval_samples_per_second": 459.267,
+ "eval_steps_per_second": 7.181,
+ "step": 13522
+ },
+ {
+ "epoch": 7.2473146111039215,
+ "grad_norm": 0.29632025957107544,
+ "learning_rate": 0.0004375,
+ "loss": 2.9084,
+ "step": 14000
+ },
+ {
+ "epoch": 7.764979940468487,
+ "grad_norm": 0.28605663776397705,
+ "learning_rate": 0.0004686875,
+ "loss": 2.8953,
+ "step": 15000
+ },
+ {
+ "epoch": 8.0,
+ "eval_accuracy": 0.4107785654978604,
+ "eval_loss": 3.053938388824463,
+ "eval_runtime": 112.9423,
+ "eval_samples_per_second": 462.094,
+ "eval_steps_per_second": 7.225,
+ "step": 15454
+ },
+ {
+ "epoch": 8.282645269833052,
+ "grad_norm": 0.2786637246608734,
+ "learning_rate": 0.0004999375,
+ "loss": 2.8698,
+ "step": 16000
+ },
+ {
+ "epoch": 8.80031059919762,
+ "grad_norm": 0.2667602002620697,
+ "learning_rate": 0.00053115625,
+ "loss": 2.8689,
+ "step": 17000
+ },
+ {
+ "epoch": 8.999611751002977,
+ "eval_accuracy": 0.4122456033572655,
+ "eval_loss": 3.039193868637085,
+ "eval_runtime": 112.7699,
+ "eval_samples_per_second": 462.801,
+ "eval_steps_per_second": 7.236,
+ "step": 17385
+ },
+ {
+ "epoch": 9.317975928562184,
+ "grad_norm": 0.25813835859298706,
+ "learning_rate": 0.00056240625,
+ "loss": 2.8401,
+ "step": 18000
+ },
+ {
+ "epoch": 9.835641257926751,
+ "grad_norm": 0.2392367571592331,
+ "learning_rate": 0.00059365625,
+ "loss": 2.8456,
+ "step": 19000
+ },
+ {
+ "epoch": 9.999741167335317,
+ "eval_accuracy": 0.4133619617611367,
+ "eval_loss": 3.0309925079345703,
+ "eval_runtime": 112.8237,
+ "eval_samples_per_second": 462.58,
+ "eval_steps_per_second": 7.233,
+ "step": 19317
+ },
+ {
+ "epoch": 10.353306587291316,
+ "grad_norm": 0.2465026080608368,
+ "learning_rate": 0.00062490625,
+ "loss": 2.8163,
+ "step": 20000
+ },
+ {
+ "epoch": 10.870971916655883,
+ "grad_norm": 0.21547040343284607,
+ "learning_rate": 0.000656125,
+ "loss": 2.8298,
+ "step": 21000
+ },
+ {
+ "epoch": 10.99987058366766,
+ "eval_accuracy": 0.41438394403555634,
+ "eval_loss": 3.0251340866088867,
+ "eval_runtime": 112.6781,
+ "eval_samples_per_second": 463.178,
+ "eval_steps_per_second": 7.242,
+ "step": 21249
+ },
+ {
+ "epoch": 11.388637246020448,
+ "grad_norm": 0.23142270743846893,
+ "learning_rate": 0.0006873749999999999,
+ "loss": 2.798,
+ "step": 22000
+ },
+ {
+ "epoch": 11.906302575385013,
+ "grad_norm": 0.2184012234210968,
+ "learning_rate": 0.00071859375,
+ "loss": 2.817,
+ "step": 23000
+ },
+ {
+ "epoch": 12.0,
+ "eval_accuracy": 0.4152235609706615,
+ "eval_loss": 3.0175206661224365,
+ "eval_runtime": 112.4939,
+ "eval_samples_per_second": 463.936,
+ "eval_steps_per_second": 7.254,
+ "step": 23181
+ },
+ {
+ "epoch": 12.42396790474958,
+ "grad_norm": 0.211518794298172,
+ "learning_rate": 0.0007498437500000001,
+ "loss": 2.7828,
+ "step": 24000
+ },
+ {
+ "epoch": 12.941633234114144,
+ "grad_norm": 0.2092377245426178,
+ "learning_rate": 0.00078109375,
+ "loss": 2.8069,
+ "step": 25000
+ },
+ {
+ "epoch": 12.999611751002977,
+ "eval_accuracy": 0.41580334298885296,
+ "eval_loss": 3.0118961334228516,
+ "eval_runtime": 112.5807,
+ "eval_samples_per_second": 463.578,
+ "eval_steps_per_second": 7.248,
+ "step": 25112
+ },
+ {
+ "epoch": 13.459298563478711,
+ "grad_norm": 0.20688970386981964,
+ "learning_rate": 0.00081234375,
+ "loss": 2.7707,
+ "step": 26000
+ },
+ {
+ "epoch": 13.976963892843276,
+ "grad_norm": 0.20183704793453217,
+ "learning_rate": 0.00084353125,
+ "loss": 2.7996,
+ "step": 27000
+ },
+ {
+ "epoch": 13.999741167335317,
+ "eval_accuracy": 0.4162821365373128,
+ "eval_loss": 3.005990743637085,
+ "eval_runtime": 112.5919,
+ "eval_samples_per_second": 463.533,
+ "eval_steps_per_second": 7.247,
+ "step": 27044
+ },
+ {
+ "epoch": 14.494629222207843,
+ "grad_norm": 0.19293886423110962,
+ "learning_rate": 0.00087478125,
+ "loss": 2.7615,
+ "step": 28000
+ },
+ {
+ "epoch": 14.99987058366766,
+ "eval_accuracy": 0.4170801257847458,
+ "eval_loss": 3.0038492679595947,
+ "eval_runtime": 112.6692,
+ "eval_samples_per_second": 463.214,
+ "eval_steps_per_second": 7.242,
+ "step": 28976
+ },
+ {
+ "epoch": 15.012294551572408,
+ "grad_norm": 0.1958475559949875,
+ "learning_rate": 0.0009060312499999999,
+ "loss": 2.7934,
+ "step": 29000
+ },
+ {
+ "epoch": 15.529959880936975,
+ "grad_norm": 0.18981003761291504,
+ "learning_rate": 0.00093728125,
+ "loss": 2.7575,
+ "step": 30000
+ },
+ {
+ "epoch": 16.0,
+ "eval_accuracy": 0.4168810041740398,
+ "eval_loss": 3.0022430419921875,
+ "eval_runtime": 112.632,
+ "eval_samples_per_second": 463.367,
+ "eval_steps_per_second": 7.245,
+ "step": 30908
+ },
+ {
+ "epoch": 16.04762521030154,
+ "grad_norm": 0.21319861710071564,
+ "learning_rate": 0.00096853125,
+ "loss": 2.7826,
+ "step": 31000
+ },
+ {
+ "epoch": 16.565290539666105,
+ "grad_norm": 0.188828244805336,
+ "learning_rate": 0.00099975,
+ "loss": 2.7573,
+ "step": 32000
+ },
+ {
+ "epoch": 16.999611751002977,
+ "eval_accuracy": 0.4178921662552739,
+ "eval_loss": 2.9961841106414795,
+ "eval_runtime": 112.6097,
+ "eval_samples_per_second": 463.459,
+ "eval_steps_per_second": 7.246,
+ "step": 32839
+ },
+ {
+ "epoch": 17.082955869030673,
+ "grad_norm": 0.1903599202632904,
+ "learning_rate": 0.0009780725022104334,
+ "loss": 2.7729,
+ "step": 33000
+ },
+ {
+ "epoch": 17.60062119839524,
+ "grad_norm": 0.18594609200954437,
+ "learning_rate": 0.0009559902740937224,
+ "loss": 2.7451,
+ "step": 34000
+ },
+ {
+ "epoch": 17.99974116733532,
+ "eval_accuracy": 0.41887905804207104,
+ "eval_loss": 2.98665452003479,
+ "eval_runtime": 112.7053,
+ "eval_samples_per_second": 463.066,
+ "eval_steps_per_second": 7.24,
+ "step": 34771
+ },
+ {
+ "epoch": 18.118286527759803,
+ "grad_norm": 0.19103111326694489,
+ "learning_rate": 0.0009338859416445623,
+ "loss": 2.7475,
+ "step": 35000
+ },
+ {
+ "epoch": 18.63595185712437,
+ "grad_norm": 0.17397448420524597,
+ "learning_rate": 0.0009118037135278515,
+ "loss": 2.7275,
+ "step": 36000
+ },
+ {
+ "epoch": 18.999870583667658,
+ "eval_accuracy": 0.4201490782172229,
+ "eval_loss": 2.98036527633667,
+ "eval_runtime": 112.4975,
+ "eval_samples_per_second": 463.921,
+ "eval_steps_per_second": 7.253,
+ "step": 36703
+ },
+ {
+ "epoch": 19.153617186488933,
+ "grad_norm": 0.18536260724067688,
+ "learning_rate": 0.0008896993810786914,
+ "loss": 2.7238,
+ "step": 37000
+ },
+ {
+ "epoch": 19.671282515853502,
+ "grad_norm": 0.17299328744411469,
+ "learning_rate": 0.0008676171529619805,
+ "loss": 2.7099,
+ "step": 38000
+ },
+ {
+ "epoch": 20.0,
+ "eval_accuracy": 0.42075012492063313,
+ "eval_loss": 2.9760243892669678,
+ "eval_runtime": 112.7131,
+ "eval_samples_per_second": 463.034,
+ "eval_steps_per_second": 7.24,
+ "step": 38635
+ },
+ {
+ "epoch": 20.188947845218067,
+ "grad_norm": 0.19630002975463867,
+ "learning_rate": 0.0008455128205128205,
+ "loss": 2.7028,
+ "step": 39000
+ },
+ {
+ "epoch": 20.706613174582632,
+ "grad_norm": 0.18252891302108765,
+ "learning_rate": 0.0008234084880636605,
+ "loss": 2.693,
+ "step": 40000
+ },
+ {
+ "epoch": 20.999611751002977,
+ "eval_accuracy": 0.4216216764536817,
+ "eval_loss": 2.968324899673462,
+ "eval_runtime": 112.7059,
+ "eval_samples_per_second": 463.064,
+ "eval_steps_per_second": 7.24,
+ "step": 40566
+ },
+ {
+ "epoch": 21.224278503947197,
+ "grad_norm": 0.19568035006523132,
+ "learning_rate": 0.0008013262599469496,
+ "loss": 2.6802,
+ "step": 41000
+ },
+ {
+ "epoch": 21.741943833311765,
+ "grad_norm": 0.2092587798833847,
+ "learning_rate": 0.0007792219274977895,
+ "loss": 2.6785,
+ "step": 42000
+ },
+ {
+ "epoch": 21.99974116733532,
+ "eval_accuracy": 0.4221156483286934,
+ "eval_loss": 2.96663761138916,
+ "eval_runtime": 112.5823,
+ "eval_samples_per_second": 463.572,
+ "eval_steps_per_second": 7.248,
+ "step": 42498
+ },
+ {
+ "epoch": 22.25960916267633,
+ "grad_norm": 0.2045181393623352,
+ "learning_rate": 0.0007571175950486296,
+ "loss": 2.6616,
+ "step": 43000
+ },
+ {
+ "epoch": 22.777274492040895,
+ "grad_norm": 0.19560641050338745,
+ "learning_rate": 0.0007350132625994696,
+ "loss": 2.6628,
+ "step": 44000
+ },
+ {
+ "epoch": 22.999870583667658,
+ "eval_accuracy": 0.4226662007972378,
+ "eval_loss": 2.9646129608154297,
+ "eval_runtime": 112.7148,
+ "eval_samples_per_second": 463.027,
+ "eval_steps_per_second": 7.24,
+ "step": 44430
+ },
+ {
+ "epoch": 23.29493982140546,
+ "grad_norm": 0.19825534522533417,
+ "learning_rate": 0.0007129089301503095,
+ "loss": 2.6395,
+ "step": 45000
+ },
+ {
+ "epoch": 23.812605150770025,
+ "grad_norm": 0.1957850456237793,
+ "learning_rate": 0.0006908267020335986,
+ "loss": 2.6501,
+ "step": 46000
+ },
+ {
+ "epoch": 24.0,
+ "eval_accuracy": 0.42281715752022214,
+ "eval_loss": 2.9626119136810303,
+ "eval_runtime": 111.9607,
+ "eval_samples_per_second": 466.146,
+ "eval_steps_per_second": 7.288,
+ "step": 46362
+ },
+ {
+ "epoch": 24.330270480134594,
+ "grad_norm": 0.2085314244031906,
+ "learning_rate": 0.0006687223695844385,
+ "loss": 2.6181,
+ "step": 47000
+ },
+ {
+ "epoch": 24.84793580949916,
+ "grad_norm": 0.21406565606594086,
+ "learning_rate": 0.0006466401414677277,
+ "loss": 2.6343,
+ "step": 48000
+ },
+ {
+ "epoch": 24.999611751002977,
+ "eval_accuracy": 0.42334742212654364,
+ "eval_loss": 2.9599573612213135,
+ "eval_runtime": 111.9994,
+ "eval_samples_per_second": 465.985,
+ "eval_steps_per_second": 7.286,
+ "step": 48293
+ },
+ {
+ "epoch": 25.365601138863724,
+ "grad_norm": 0.20139683783054352,
+ "learning_rate": 0.0006245358090185677,
+ "loss": 2.598,
+ "step": 49000
+ },
+ {
+ "epoch": 25.88326646822829,
+ "grad_norm": 0.20590080320835114,
+ "learning_rate": 0.0006024535809018568,
+ "loss": 2.6198,
+ "step": 50000
+ },
+ {
+ "epoch": 25.99974116733532,
+ "eval_accuracy": 0.42357351907998303,
+ "eval_loss": 2.9637885093688965,
+ "eval_runtime": 112.1406,
+ "eval_samples_per_second": 465.398,
+ "eval_steps_per_second": 7.277,
+ "step": 50225
+ },
+ {
+ "epoch": 26.400931797592857,
+ "grad_norm": 0.2109968364238739,
+ "learning_rate": 0.0005803492484526968,
+ "loss": 2.5789,
+ "step": 51000
+ },
+ {
+ "epoch": 26.918597126957422,
+ "grad_norm": 0.21926765143871307,
+ "learning_rate": 0.0005582670203359858,
+ "loss": 2.604,
+ "step": 52000
+ },
+ {
+ "epoch": 26.999870583667658,
+ "eval_accuracy": 0.4239798023060537,
+ "eval_loss": 2.9604480266571045,
+ "eval_runtime": 112.1419,
+ "eval_samples_per_second": 465.392,
+ "eval_steps_per_second": 7.276,
+ "step": 52157
+ },
+ {
+ "epoch": 27.436262456321987,
+ "grad_norm": 0.2280665636062622,
+ "learning_rate": 0.0005361626878868259,
+ "loss": 2.5576,
+ "step": 53000
+ },
+ {
+ "epoch": 27.953927785686552,
+ "grad_norm": 0.218441903591156,
+ "learning_rate": 0.000514080459770115,
+ "loss": 2.5876,
+ "step": 54000
+ },
+ {
+ "epoch": 28.0,
+ "eval_accuracy": 0.42452238991016983,
+ "eval_loss": 2.960141658782959,
+ "eval_runtime": 111.7102,
+ "eval_samples_per_second": 467.191,
+ "eval_steps_per_second": 7.305,
+ "step": 54089
+ },
+ {
+ "epoch": 28.0,
+ "step": 54089,
+ "total_flos": 1.808986925039616e+18,
+ "train_loss": 2.9159927132606205,
+ "train_runtime": 57317.9169,
+ "train_samples_per_second": 345.105,
+ "train_steps_per_second": 1.348
+ }
+ ],
+ "logging_steps": 1000,
+ "max_steps": 77240,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 40,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": {
+ "early_stopping_patience": 3,
+ "early_stopping_threshold": 0.0
+ },
+ "attributes": {
+ "early_stopping_patience_counter": 3
+ }
+ },
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.808986925039616e+18,
+ "train_batch_size": 32,
+ "trial_name": null,
+ "trial_params": null
+ }
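Training was configured for 40 epochs (77240 steps), but the EarlyStoppingCallback with patience 3 stopped it after epoch 28: the best eval loss (2.9600) was reached at step 48293, and the next three epoch-end evaluations did not improve on it. The sketch below mirrors that decision from the logged losses; it is an illustration of the stopping rule, not the library code.

```python
# Reproduce the early-stopping decision recorded above (patience = 3, threshold = 0.0,
# lower eval_loss is better). Epoch labels refer to the end-of-epoch evaluations.
eval_losses = {
    25: 2.9599573612213135,  # step 48293, becomes best_metric / best_model_checkpoint
    26: 2.9637885093688965,  # step 50225
    27: 2.9604480266571045,  # step 52157
    28: 2.960141658782959,   # step 54089
}

best = float("inf")
patience, counter = 3, 0
for epoch, loss in eval_losses.items():
    if loss < best:          # with threshold 0.0, any strict improvement resets the counter
        best, counter = loss, 0
    else:
        counter += 1
    if counter >= patience:
        print(f"stop after epoch {epoch}: no improvement over {best:.4f} in {patience} evals")
        break
```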