dada22231 committed
Commit d3e551b
1 Parent(s): bc75507

Training in progress, step 25, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_proj",
-    "up_proj",
-    "down_proj",
-    "k_proj",
     "o_proj",
+    "down_proj",
+    "gate_proj",
     "q_proj",
-    "gate_proj"
+    "v_proj",
+    "up_proj",
+    "k_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:162b3c6e7bc433daa4d14fba27d0bb1188a053ff9fe4695614d1c332b516ed7b
+oid sha256:f99bc2c659cbdb0992e1a6091af6809085d19bc2edf14ae50e83db113fa5fcb4
 size 319876032
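adapter_model.safetensors and the remaining checkpoint binaries below are tracked with Git LFS, so each diff only swaps the sha256 oid in the pointer file while the recorded size stays the same. A small sketch of verifying a downloaded blob against the oid in its pointer (the local path is an assumption, not part of the commit):

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file in chunks and return its hex sha256 digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid taken from the new pointer above; the path assumes a local download.
expected = "f99bc2c659cbdb0992e1a6091af6809085d19bc2edf14ae50e83db113fa5fcb4"
assert sha256_of("last-checkpoint/adapter_model.safetensors") == expected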
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2fa1d14dbc34af276200a438a5689d521bdd756c1ec0d76f07e56c1bdb72accd
+oid sha256:8a69589c53894362eff20f58966ceb7c4fe896fd6b42431e73043324550d40fb
 size 640009682
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f54a4529a8e6845ded0eda45072a4e815479439591a260f73506331363d14828
+oid sha256:080926b6fad49ee6b44ff32b6c88dcf1a54560ba5269dd472628f349a0e91734
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1122e23e5053dc11adc18506adea02f4afa189a4981884a78b8091004698e3c9
+oid sha256:97e6f18cc73c49e1b8ffd8c02656630b4c1ab425d84004ddab33c7d1ac32ba3f
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9672115f995075c45b19e844719708bc3519b7f8900dabf0e3b2c0ab349455b1
+oid sha256:a884ce65b2106c5faada4bc542ca8806b383884d40a67a36a96d4d9a59de34ad
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f80273d0fe6205387d305a36fe3d9d7805a1c74392ec5a9b45ad7c4825c5914c
+oid sha256:1601b0f6af9e056067f5465ae47446902063858add4122d3355cea2b50d7a490
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5607f6de446164d9d9adb8b91c44cec55b14aa391e24ba5637c08b834eedda2a
+oid sha256:d271cdb95f63cd655315f063ca2e25c78dc5ae4275523c5d4f80f367586b3351
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,16 +1,16 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.013341894382228596,
+  "epoch": 0.006670947191114298,
   "eval_steps": 25,
-  "global_step": 50,
+  "global_step": 25,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.00026683788764457196,
-      "grad_norm": 21.58336067199707,
+      "grad_norm": 22.89421844482422,
       "learning_rate": 5e-05,
       "loss": 99.4744,
       "step": 1
@@ -18,369 +18,186 @@
     {
       "epoch": 0.00026683788764457196,
       "eval_loss": 3.534377336502075,
-      "eval_runtime": 2.9302,
-      "eval_samples_per_second": 17.063,
-      "eval_steps_per_second": 4.437,
+      "eval_runtime": 2.9106,
+      "eval_samples_per_second": 17.179,
+      "eval_steps_per_second": 4.466,
       "step": 1
     },
     {
       "epoch": 0.0005336757752891439,
-      "grad_norm": 22.419708251953125,
+      "grad_norm": 23.7343692779541,
       "learning_rate": 0.0001,
       "loss": 103.3411,
       "step": 2
     },
     {
       "epoch": 0.0008005136629337158,
-      "grad_norm": 44.11233901977539,
+      "grad_norm": 48.079044342041016,
       "learning_rate": 9.990365154573717e-05,
-      "loss": 101.998,
+      "loss": 101.9827,
       "step": 3
     },
     {
       "epoch": 0.0010673515505782878,
-      "grad_norm": 39.20323181152344,
+      "grad_norm": 38.11738967895508,
       "learning_rate": 9.961501876182148e-05,
-      "loss": 102.6876,
+      "loss": 102.6331,
       "step": 4
     },
     {
       "epoch": 0.0013341894382228596,
-      "grad_norm": 33.18415069580078,
+      "grad_norm": 32.43550109863281,
       "learning_rate": 9.913533761814537e-05,
-      "loss": 98.8706,
+      "loss": 98.8496,
       "step": 5
     },
     {
       "epoch": 0.0016010273258674317,
-      "grad_norm": 35.75772476196289,
+      "grad_norm": 36.24091339111328,
       "learning_rate": 9.846666218300807e-05,
-      "loss": 96.6055,
+      "loss": 96.6713,
       "step": 6
     },
     {
       "epoch": 0.0018678652135120035,
-      "grad_norm": 36.51875686645508,
+      "grad_norm": 37.49607467651367,
       "learning_rate": 9.761185582727977e-05,
-      "loss": 94.8261,
+      "loss": 94.9006,
       "step": 7
     },
     {
       "epoch": 0.0021347031011565757,
-      "grad_norm": 33.68272018432617,
+      "grad_norm": 34.56901931762695,
       "learning_rate": 9.657457896300791e-05,
-      "loss": 93.3615,
+      "loss": 93.4301,
       "step": 8
     },
     {
       "epoch": 0.0024015409888011475,
-      "grad_norm": 30.13109016418457,
+      "grad_norm": 31.850221633911133,
       "learning_rate": 9.535927336897098e-05,
-      "loss": 91.072,
+      "loss": 91.105,
       "step": 9
     },
     {
       "epoch": 0.0026683788764457193,
-      "grad_norm": 32.036537170410156,
+      "grad_norm": 43.924991607666016,
       "learning_rate": 9.397114317029975e-05,
-      "loss": 93.589,
+      "loss": 93.5747,
       "step": 10
     },
     {
       "epoch": 0.002935216764090291,
-      "grad_norm": 24.110986709594727,
+      "grad_norm": 33.07682800292969,
       "learning_rate": 9.241613255361455e-05,
-      "loss": 93.6183,
+      "loss": 93.5264,
       "step": 11
     },
     {
       "epoch": 0.0032020546517348633,
-      "grad_norm": 59.20383071899414,
+      "grad_norm": 52.70429992675781,
       "learning_rate": 9.070090031310558e-05,
-      "loss": 100.723,
+      "loss": 100.4311,
       "step": 12
     },
     {
       "epoch": 0.003468892539379435,
-      "grad_norm": 72.5294418334961,
+      "grad_norm": 91.30162048339844,
       "learning_rate": 8.883279133655399e-05,
-      "loss": 103.4235,
+      "loss": 103.5051,
       "step": 13
     },
     {
       "epoch": 0.003735730427024007,
-      "grad_norm": 49.615962982177734,
+      "grad_norm": 48.74945068359375,
       "learning_rate": 8.681980515339464e-05,
-      "loss": 90.5619,
+      "loss": 90.932,
       "step": 14
     },
     {
       "epoch": 0.004002568314668579,
-      "grad_norm": 36.85390853881836,
+      "grad_norm": 38.04246139526367,
       "learning_rate": 8.467056167950311e-05,
-      "loss": 90.9424,
+      "loss": 91.092,
       "step": 15
     },
     {
       "epoch": 0.004269406202313151,
-      "grad_norm": 28.41231346130371,
+      "grad_norm": 32.99767303466797,
       "learning_rate": 8.239426430539243e-05,
-      "loss": 86.9021,
+      "loss": 86.978,
       "step": 16
     },
     {
       "epoch": 0.004536244089957723,
-      "grad_norm": 19.181047439575195,
+      "grad_norm": 25.959495544433594,
       "learning_rate": 8.000066048588211e-05,
-      "loss": 87.2284,
+      "loss": 87.323,
       "step": 17
     },
     {
       "epoch": 0.004803081977602295,
-      "grad_norm": 16.206340789794922,
+      "grad_norm": 22.552183151245117,
       "learning_rate": 7.75e-05,
-      "loss": 87.68,
+      "loss": 87.7695,
       "step": 18
     },
     {
       "epoch": 0.005069919865246866,
-      "grad_norm": 13.560489654541016,
+      "grad_norm": 17.352420806884766,
       "learning_rate": 7.490299105985507e-05,
-      "loss": 87.0709,
+      "loss": 87.1552,
       "step": 19
     },
     {
       "epoch": 0.0053367577528914386,
-      "grad_norm": 13.36911678314209,
+      "grad_norm": 14.216341018676758,
       "learning_rate": 7.222075445642904e-05,
-      "loss": 87.0837,
+      "loss": 87.1521,
       "step": 20
     },
     {
       "epoch": 0.005603595640536011,
-      "grad_norm": 13.232701301574707,
+      "grad_norm": 13.55358600616455,
       "learning_rate": 6.946477593864228e-05,
-      "loss": 87.4922,
+      "loss": 87.6133,
       "step": 21
     },
     {
       "epoch": 0.005870433528180582,
-      "grad_norm": 19.99480438232422,
+      "grad_norm": 18.19391441345215,
       "learning_rate": 6.664685702961344e-05,
-      "loss": 86.7508,
+      "loss": 86.9043,
       "step": 22
     },
     {
       "epoch": 0.006137271415825154,
-      "grad_norm": 19.568359375,
+      "grad_norm": 20.173397064208984,
       "learning_rate": 6.377906449072578e-05,
-      "loss": 88.3357,
+      "loss": 88.4352,
       "step": 23
     },
     {
       "epoch": 0.006404109303469727,
-      "grad_norm": 30.02350425720215,
+      "grad_norm": 27.15044403076172,
       "learning_rate": 6.087367864990233e-05,
-      "loss": 89.6037,
+      "loss": 89.5455,
       "step": 24
     },
     {
       "epoch": 0.006670947191114298,
-      "grad_norm": 132.76075744628906,
+      "grad_norm": 77.1603012084961,
       "learning_rate": 5.794314081535644e-05,
-      "loss": 106.9513,
+      "loss": 105.756,
       "step": 25
     },
     {
       "epoch": 0.006670947191114298,
-      "eval_loss": 2.699881076812744,
-      "eval_runtime": 2.969,
-      "eval_samples_per_second": 16.841,
-      "eval_steps_per_second": 4.379,
+      "eval_loss": 2.6978037357330322,
+      "eval_runtime": 2.9516,
+      "eval_samples_per_second": 16.94,
+      "eval_steps_per_second": 4.404,
       "step": 25
-    },
-    {
-      "epoch": 0.00693778507875887,
-      "grad_norm": 11.069785118103027,
-      "learning_rate": 5.500000000000001e-05,
-      "loss": 85.2464,
-      "step": 26
-    },
-    {
-      "epoch": 0.0072046229664034424,
-      "grad_norm": 11.894067764282227,
-      "learning_rate": 5.205685918464356e-05,
-      "loss": 86.4133,
-      "step": 27
-    },
-    {
-      "epoch": 0.007471460854048014,
-      "grad_norm": 11.504545211791992,
-      "learning_rate": 4.912632135009769e-05,
-      "loss": 85.9424,
-      "step": 28
-    },
-    {
-      "epoch": 0.007738298741692586,
-      "grad_norm": 9.52103042602539,
-      "learning_rate": 4.6220935509274235e-05,
-      "loss": 85.5899,
-      "step": 29
-    },
-    {
-      "epoch": 0.008005136629337158,
-      "grad_norm": 8.053053855895996,
-      "learning_rate": 4.3353142970386564e-05,
-      "loss": 86.4908,
-      "step": 30
-    },
-    {
-      "epoch": 0.00827197451698173,
-      "grad_norm": 6.596405982971191,
-      "learning_rate": 4.053522406135775e-05,
-      "loss": 84.509,
-      "step": 31
-    },
-    {
-      "epoch": 0.008538812404626303,
-      "grad_norm": 12.154988288879395,
-      "learning_rate": 3.777924554357096e-05,
-      "loss": 85.5146,
-      "step": 32
-    },
-    {
-      "epoch": 0.008805650292270874,
-      "grad_norm": 12.631579399108887,
-      "learning_rate": 3.509700894014496e-05,
-      "loss": 84.0384,
-      "step": 33
-    },
-    {
-      "epoch": 0.009072488179915445,
-      "grad_norm": 14.187871932983398,
-      "learning_rate": 3.250000000000001e-05,
-      "loss": 85.3073,
-      "step": 34
-    },
-    {
-      "epoch": 0.009339326067560017,
-      "grad_norm": 17.819459915161133,
-      "learning_rate": 2.9999339514117912e-05,
-      "loss": 83.8787,
-      "step": 35
-    },
-    {
-      "epoch": 0.00960616395520459,
-      "grad_norm": 20.426162719726562,
-      "learning_rate": 2.760573569460757e-05,
-      "loss": 87.1894,
-      "step": 36
-    },
-    {
-      "epoch": 0.009873001842849161,
-      "grad_norm": 31.33350944519043,
-      "learning_rate": 2.53294383204969e-05,
-      "loss": 89.3437,
-      "step": 37
-    },
-    {
-      "epoch": 0.010139839730493733,
-      "grad_norm": 47.56151580810547,
-      "learning_rate": 2.3180194846605367e-05,
-      "loss": 100.6103,
-      "step": 38
-    },
-    {
-      "epoch": 0.010406677618138306,
-      "grad_norm": 6.464964866638184,
-      "learning_rate": 2.1167208663446025e-05,
-      "loss": 83.3005,
-      "step": 39
-    },
-    {
-      "epoch": 0.010673515505782877,
-      "grad_norm": 6.612872123718262,
-      "learning_rate": 1.9299099686894423e-05,
-      "loss": 84.2439,
-      "step": 40
-    },
-    {
-      "epoch": 0.010940353393427448,
-      "grad_norm": 6.869765758514404,
-      "learning_rate": 1.758386744638546e-05,
-      "loss": 84.894,
-      "step": 41
-    },
-    {
-      "epoch": 0.011207191281072022,
-      "grad_norm": 5.677627086639404,
-      "learning_rate": 1.602885682970026e-05,
-      "loss": 84.2743,
-      "step": 42
-    },
-    {
-      "epoch": 0.011474029168716593,
-      "grad_norm": 6.212136745452881,
-      "learning_rate": 1.464072663102903e-05,
-      "loss": 85.6072,
-      "step": 43
-    },
-    {
-      "epoch": 0.011740867056361164,
-      "grad_norm": 6.752817153930664,
-      "learning_rate": 1.3425421036992098e-05,
-      "loss": 84.6236,
-      "step": 44
-    },
-    {
-      "epoch": 0.012007704944005737,
-      "grad_norm": 7.166438102722168,
-      "learning_rate": 1.2388144172720251e-05,
-      "loss": 83.1906,
-      "step": 45
-    },
-    {
-      "epoch": 0.012274542831650309,
-      "grad_norm": 7.727652549743652,
-      "learning_rate": 1.1533337816991932e-05,
-      "loss": 83.4727,
-      "step": 46
-    },
-    {
-      "epoch": 0.01254138071929488,
-      "grad_norm": 8.032402038574219,
-      "learning_rate": 1.0864662381854632e-05,
-      "loss": 84.3546,
-      "step": 47
-    },
-    {
-      "epoch": 0.012808218606939453,
-      "grad_norm": 10.028108596801758,
-      "learning_rate": 1.0384981238178534e-05,
-      "loss": 84.996,
-      "step": 48
-    },
-    {
-      "epoch": 0.013075056494584025,
-      "grad_norm": 15.79798698425293,
-      "learning_rate": 1.0096348454262845e-05,
-      "loss": 87.5037,
-      "step": 49
-    },
-    {
-      "epoch": 0.013341894382228596,
-      "grad_norm": 68.88801574707031,
-      "learning_rate": 1e-05,
-      "loss": 99.1348,
-      "step": 50
-    },
-    {
-      "epoch": 0.013341894382228596,
-      "eval_loss": 2.6576178073883057,
-      "eval_runtime": 2.9174,
-      "eval_samples_per_second": 17.139,
-      "eval_steps_per_second": 4.456,
-      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -395,12 +212,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": true
+        "should_training_stop": false
       },
       "attributes": {}
     }
   },
-  "total_flos": 5.259156315439104e+17,
+  "total_flos": 2.629578157719552e+17,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5bbcff46d73dbb696188a8406cebbd246e69bc502841896b8b531535d0bbbd1a
+oid sha256:daa6781a2b3f71155ea06bbf181fb9d6e5488ae8ac1598f881bc93f1a056095c
 size 6840