DeepDream2045 committed
Commit 08e755e
1 Parent(s): 86f1990

Training in progress, step 25, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -20,11 +20,11 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
     "fc2",
     "v_proj",
-    "dense",
     "q_proj",
+    "k_proj",
+    "dense",
     "fc1"
   ],
   "task_type": "CAUSAL_LM",
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f8641ace53419270953e50f2e98df854dccef9d59f6a514ad2e681f152df9867
+oid sha256:5344e7a7add3e89e41aeb337ebdae046fd6fa892d2b80e735f340c3fd78d3fdc
 size 237402
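
The binary files in this commit are tracked with Git LFS: the repository stores only a pointer containing the object id (the SHA-256 of the payload) and its size, while the tensor data lives in LFS storage. A quick sketch for checking a downloaded file against its pointer:

# Sketch: verify a downloaded checkpoint file against its Git LFS pointer.
# The "oid sha256:..." value is the SHA-256 of the file contents.
import hashlib

def lfs_sha256(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# e.g. lfs_sha256("last-checkpoint/adapter_model.safetensors") should equal
# "5344e7a7add3e89e41aeb337ebdae046fd6fa892d2b80e735f340c3fd78d3fdc"
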
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a4bcef9a91b58d70af76c8eb6df41efc60c1336232637cea7177b97ba498218d
+oid sha256:6ab9bb1a6b28590e8cdf6d5d97f56111d741105fe7bd97b6899e8ea18bef4415
 size 222294
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:099e3b3ec7346c34835b0aff5f54050462d9647eef25fe8194d29bfcdeca6713
+oid sha256:69df9800e8b1c5e00d6d519768544bf65506150c1bb390d68da7ce38e537e541
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:33a3144b6acaf405a5c988057c3b0f6d64546f6b2967d86f16487ec9cb7de0f3
+oid sha256:8511a77015534d7db2081b11e409298541ddd24cdcdc646e445d3c1a4528b1fe
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:743c0ffc81ebc4cd6e3620423405ad0497e04f340aafdb48f5af9ab1671896e2
+oid sha256:55aeb9f99504e02c5ea1678e1fb16f17a93f84c005df4f32b00f4c3797da054c
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0df6f5a11f5cb4a7d9153c5397414121230ba31550fe0dc8edfc8dbe4c66f782
+oid sha256:a2e36554bfd0557463ab58deaa94a8390a208db7b006d5908d842dc58c2ba8e3
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
+oid sha256:f37b2aa490ccb1598b01e14cda36e9081f7ce646deab4d3c2d03de0d2169a755
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,386 +1,203 @@
 {
-  "best_metric": 6.924656867980957,
-  "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.010696188145949487,
+  "best_metric": 6.9282989501953125,
+  "best_model_checkpoint": "miner_id_24/checkpoint-25",
+  "epoch": 0.005348094072974744,
   "eval_steps": 25,
-  "global_step": 50,
+  "global_step": 25,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.00021392376291898973,
-      "grad_norm": 0.08630125969648361,
+      "grad_norm": 0.084352046251297,
       "learning_rate": 5e-05,
       "loss": 6.9391,
       "step": 1
     },
     {
       "epoch": 0.00021392376291898973,
-      "eval_loss": 6.9391374588012695,
-      "eval_runtime": 20.6105,
-      "eval_samples_per_second": 1527.958,
-      "eval_steps_per_second": 191.019,
+      "eval_loss": 6.939151287078857,
+      "eval_runtime": 22.5661,
+      "eval_samples_per_second": 1395.547,
+      "eval_steps_per_second": 174.465,
       "step": 1
     },
     {
       "epoch": 0.00042784752583797947,
-      "grad_norm": 0.08528615534305573,
+      "grad_norm": 0.08244480937719345,
       "learning_rate": 0.0001,
       "loss": 6.9384,
       "step": 2
     },
     {
       "epoch": 0.0006417712887569693,
-      "grad_norm": 0.09091469645500183,
+      "grad_norm": 0.08688157051801682,
       "learning_rate": 9.989294616193017e-05,
       "loss": 6.9393,
       "step": 3
     },
     {
       "epoch": 0.0008556950516759589,
-      "grad_norm": 0.09644569456577301,
+      "grad_norm": 0.09273950755596161,
       "learning_rate": 9.957224306869053e-05,
-      "loss": 6.9372,
+      "loss": 6.9373,
       "step": 4
     },
     {
       "epoch": 0.0010696188145949488,
-      "grad_norm": 0.09739217162132263,
+      "grad_norm": 0.09501701593399048,
       "learning_rate": 9.903926402016153e-05,
       "loss": 6.9383,
       "step": 5
     },
     {
       "epoch": 0.0012835425775139385,
-      "grad_norm": 0.10253387689590454,
+      "grad_norm": 0.09917747974395752,
       "learning_rate": 9.829629131445342e-05,
       "loss": 6.9381,
       "step": 6
     },
     {
       "epoch": 0.0014974663404329282,
-      "grad_norm": 0.10638941824436188,
+      "grad_norm": 0.10376238822937012,
       "learning_rate": 9.73465064747553e-05,
-      "loss": 6.9363,
+      "loss": 6.9364,
       "step": 7
     },
     {
       "epoch": 0.0017113901033519179,
-      "grad_norm": 0.11278034001588821,
+      "grad_norm": 0.11044597625732422,
       "learning_rate": 9.619397662556435e-05,
       "loss": 6.9352,
       "step": 8
     },
     {
       "epoch": 0.0019253138662709078,
-      "grad_norm": 0.11507147550582886,
+      "grad_norm": 0.11213497817516327,
       "learning_rate": 9.484363707663442e-05,
-      "loss": 6.9354,
+      "loss": 6.9355,
       "step": 9
     },
     {
       "epoch": 0.0021392376291898977,
-      "grad_norm": 0.11515306681394577,
+      "grad_norm": 0.11234603077173233,
       "learning_rate": 9.330127018922194e-05,
-      "loss": 6.9372,
+      "loss": 6.9373,
       "step": 10
     },
     {
       "epoch": 0.0023531613921088873,
-      "grad_norm": 0.12654078006744385,
+      "grad_norm": 0.12427860498428345,
       "learning_rate": 9.157348061512727e-05,
-      "loss": 6.9337,
+      "loss": 6.9339,
       "step": 11
     },
     {
       "epoch": 0.002567085155027877,
-      "grad_norm": 0.14077888429164886,
+      "grad_norm": 0.13528341054916382,
       "learning_rate": 8.966766701456177e-05,
-      "loss": 6.9351,
+      "loss": 6.9354,
       "step": 12
     },
     {
       "epoch": 0.0027810089179468667,
-      "grad_norm": 0.10465764254331589,
+      "grad_norm": 0.10144588351249695,
       "learning_rate": 8.759199037394887e-05,
-      "loss": 6.9323,
+      "loss": 6.9326,
       "step": 13
     },
     {
       "epoch": 0.0029949326808658564,
-      "grad_norm": 0.09043970704078674,
+      "grad_norm": 0.08907917141914368,
       "learning_rate": 8.535533905932738e-05,
-      "loss": 6.9356,
+      "loss": 6.9358,
       "step": 14
     },
     {
       "epoch": 0.003208856443784846,
-      "grad_norm": 0.09768740832805634,
+      "grad_norm": 0.09633015841245651,
       "learning_rate": 8.296729075500344e-05,
-      "loss": 6.9315,
+      "loss": 6.9317,
       "step": 15
     },
     {
       "epoch": 0.0034227802067038357,
-      "grad_norm": 0.09449315816164017,
+      "grad_norm": 0.09162943065166473,
       "learning_rate": 8.043807145043604e-05,
-      "loss": 6.9324,
+      "loss": 6.9326,
       "step": 16
     },
     {
       "epoch": 0.003636703969622826,
-      "grad_norm": 0.10302135348320007,
+      "grad_norm": 0.10051411390304565,
       "learning_rate": 7.777851165098012e-05,
-      "loss": 6.9313,
+      "loss": 6.9315,
       "step": 17
     },
     {
       "epoch": 0.0038506277325418155,
-      "grad_norm": 0.10031016170978546,
+      "grad_norm": 0.09999406337738037,
       "learning_rate": 7.500000000000001e-05,
-      "loss": 6.9308,
+      "loss": 6.9309,
       "step": 18
     },
     {
       "epoch": 0.004064551495460805,
-      "grad_norm": 0.10906001180410385,
+      "grad_norm": 0.10973299294710159,
       "learning_rate": 7.211443451095007e-05,
-      "loss": 6.9313,
+      "loss": 6.9314,
       "step": 19
     },
     {
       "epoch": 0.004278475258379795,
-      "grad_norm": 0.11029664427042007,
+      "grad_norm": 0.10994168370962143,
       "learning_rate": 6.91341716182545e-05,
       "loss": 6.9305,
       "step": 20
     },
     {
       "epoch": 0.004492399021298785,
-      "grad_norm": 0.1134241446852684,
+      "grad_norm": 0.11387638747692108,
       "learning_rate": 6.607197326515808e-05,
       "loss": 6.9333,
       "step": 21
     },
     {
       "epoch": 0.004706322784217775,
-      "grad_norm": 0.12209273874759674,
+      "grad_norm": 0.11845920979976654,
       "learning_rate": 6.294095225512603e-05,
-      "loss": 6.9279,
+      "loss": 6.9282,
       "step": 22
     },
     {
       "epoch": 0.004920246547136764,
-      "grad_norm": 0.12280123680830002,
+      "grad_norm": 0.12084134668111801,
       "learning_rate": 5.9754516100806423e-05,
-      "loss": 6.9276,
+      "loss": 6.9278,
       "step": 23
     },
     {
       "epoch": 0.005134170310055754,
-      "grad_norm": 0.13325512409210205,
+      "grad_norm": 0.13196800649166107,
       "learning_rate": 5.6526309611002594e-05,
-      "loss": 6.9264,
+      "loss": 6.9267,
       "step": 24
     },
     {
       "epoch": 0.005348094072974744,
-      "grad_norm": 0.15282446146011353,
+      "grad_norm": 0.15287362039089203,
       "learning_rate": 5.327015646150716e-05,
       "loss": 6.9285,
       "step": 25
     },
     {
       "epoch": 0.005348094072974744,
-      "eval_loss": 6.928130626678467,
-      "eval_runtime": 20.6429,
-      "eval_samples_per_second": 1525.562,
-      "eval_steps_per_second": 190.719,
+      "eval_loss": 6.9282989501953125,
+      "eval_runtime": 23.0115,
+      "eval_samples_per_second": 1368.531,
+      "eval_steps_per_second": 171.088,
       "step": 25
-    },
-    {
-      "epoch": 0.005562017835893733,
-      "grad_norm": 0.09425940364599228,
-      "learning_rate": 5e-05,
-      "loss": 6.9305,
-      "step": 26
-    },
-    {
-      "epoch": 0.005775941598812723,
-      "grad_norm": 0.09901078045368195,
-      "learning_rate": 4.6729843538492847e-05,
-      "loss": 6.9293,
-      "step": 27
-    },
-    {
-      "epoch": 0.005989865361731713,
-      "grad_norm": 0.10344414412975311,
-      "learning_rate": 4.347369038899744e-05,
-      "loss": 6.9266,
-      "step": 28
-    },
-    {
-      "epoch": 0.0062037891246507024,
-      "grad_norm": 0.10215310752391815,
-      "learning_rate": 4.0245483899193595e-05,
-      "loss": 6.9274,
-      "step": 29
-    },
-    {
-      "epoch": 0.006417712887569692,
-      "grad_norm": 0.10918069630861282,
-      "learning_rate": 3.705904774487396e-05,
-      "loss": 6.9269,
-      "step": 30
-    },
-    {
-      "epoch": 0.006631636650488682,
-      "grad_norm": 0.1103460043668747,
-      "learning_rate": 3.392802673484193e-05,
-      "loss": 6.9258,
-      "step": 31
-    },
-    {
-      "epoch": 0.0068455604134076715,
-      "grad_norm": 0.11610613018274307,
-      "learning_rate": 3.086582838174551e-05,
-      "loss": 6.9251,
-      "step": 32
-    },
-    {
-      "epoch": 0.007059484176326661,
-      "grad_norm": 0.12197224795818329,
-      "learning_rate": 2.7885565489049946e-05,
-      "loss": 6.9268,
-      "step": 33
-    },
-    {
-      "epoch": 0.007273407939245652,
-      "grad_norm": 0.1161201074719429,
-      "learning_rate": 2.500000000000001e-05,
-      "loss": 6.9275,
-      "step": 34
-    },
-    {
-      "epoch": 0.007487331702164641,
-      "grad_norm": 0.13400213420391083,
-      "learning_rate": 2.2221488349019903e-05,
-      "loss": 6.9247,
-      "step": 35
-    },
-    {
-      "epoch": 0.007701255465083631,
-      "grad_norm": 0.13895855844020844,
-      "learning_rate": 1.9561928549563968e-05,
-      "loss": 6.9246,
-      "step": 36
-    },
-    {
-      "epoch": 0.00791517922800262,
-      "grad_norm": 0.1469612568616867,
-      "learning_rate": 1.703270924499656e-05,
-      "loss": 6.922,
-      "step": 37
-    },
-    {
-      "epoch": 0.00812910299092161,
-      "grad_norm": 0.11093045771121979,
-      "learning_rate": 1.4644660940672627e-05,
-      "loss": 6.927,
-      "step": 38
-    },
-    {
-      "epoch": 0.0083430267538406,
-      "grad_norm": 0.10132980346679688,
-      "learning_rate": 1.2408009626051137e-05,
-      "loss": 6.926,
-      "step": 39
-    },
-    {
-      "epoch": 0.00855695051675959,
-      "grad_norm": 0.10548317432403564,
-      "learning_rate": 1.0332332985438248e-05,
-      "loss": 6.9288,
-      "step": 40
-    },
-    {
-      "epoch": 0.00877087427967858,
-      "grad_norm": 0.10971340537071228,
-      "learning_rate": 8.426519384872733e-06,
-      "loss": 6.9246,
-      "step": 41
-    },
-    {
-      "epoch": 0.00898479804259757,
-      "grad_norm": 0.11398052424192429,
-      "learning_rate": 6.698729810778065e-06,
-      "loss": 6.9256,
-      "step": 42
-    },
-    {
-      "epoch": 0.00919872180551656,
-      "grad_norm": 0.11947134137153625,
-      "learning_rate": 5.156362923365588e-06,
-      "loss": 6.9251,
-      "step": 43
-    },
-    {
-      "epoch": 0.00941264556843555,
-      "grad_norm": 0.11277071386575699,
-      "learning_rate": 3.8060233744356633e-06,
-      "loss": 6.9225,
-      "step": 44
-    },
-    {
-      "epoch": 0.009626569331354539,
-      "grad_norm": 0.11898217350244522,
-      "learning_rate": 2.653493525244721e-06,
-      "loss": 6.9247,
-      "step": 45
-    },
-    {
-      "epoch": 0.009840493094273529,
-      "grad_norm": 0.12966850399971008,
-      "learning_rate": 1.70370868554659e-06,
-      "loss": 6.9218,
-      "step": 46
-    },
-    {
-      "epoch": 0.010054416857192518,
-      "grad_norm": 0.11691312491893768,
-      "learning_rate": 9.607359798384785e-07,
-      "loss": 6.9276,
-      "step": 47
-    },
-    {
-      "epoch": 0.010268340620111508,
-      "grad_norm": 0.13687504827976227,
-      "learning_rate": 4.277569313094809e-07,
-      "loss": 6.9221,
-      "step": 48
-    },
-    {
-      "epoch": 0.010482264383030498,
-      "grad_norm": 0.1413353681564331,
-      "learning_rate": 1.0705383806982606e-07,
-      "loss": 6.9211,
-      "step": 49
-    },
-    {
-      "epoch": 0.010696188145949487,
-      "grad_norm": 0.16661731898784637,
-      "learning_rate": 0.0,
-      "loss": 6.9214,
-      "step": 50
-    },
-    {
-      "epoch": 0.010696188145949487,
-      "eval_loss": 6.924656867980957,
-      "eval_runtime": 20.5821,
-      "eval_samples_per_second": 1530.067,
-      "eval_steps_per_second": 191.283,
-      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -404,12 +221,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": true
+        "should_training_stop": false
       },
       "attributes": {}
     }
   },
-  "total_flos": 5705957376000.0,
+  "total_flos": 2852978688000.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c64096d40126f031656961b5b5c97e590ef451fb8f9b5712c3cf492cbc825bc9
+oid sha256:88b5c315744df6dc63beea7a20ef2621b6518425fc9c600da4b1ff2f63186638
 size 6776
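
A note on training_args.bin, which also changed here: it is not a tensor file but a pickled TrainingArguments object, so it can be inspected like this (a sketch; transformers must be importable for unpickling, and the weights_only flag is only needed on recent PyTorch versions):

# Sketch: load the pickled TrainingArguments saved by the Trainer.
import torch

args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
print(args.learning_rate, args.eval_steps)  # hyperparameters used for this run
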