eeeebbb2 committed
Commit dab77f2
1 Parent(s): 10d641a

Training in progress, step 25, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -20,12 +20,12 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "v_proj",
- "k_proj",
- "q_proj",
  "o_proj",
- "down_proj",
+ "k_proj",
  "gate_proj",
+ "down_proj",
+ "q_proj",
+ "v_proj",
  "up_proj"
  ],
  "task_type": "CAUSAL_LM",
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ab3bcbb9ec6038367b1ea709631a09053a575952c1be07e37c59ac223fdb2341
+ oid sha256:16a5569594fb4df5bf1c750785f3494d46feefa182cb2b32b73b96c59ce4dfb9
  size 295765866
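The binary checkpoint files (*.pt, *.pth, *.bin) are stored through Git LFS, so these diffs only touch the pointer text: the oid sha256 changes while the size stays the same. As a hedged illustration, once the LFS object behind optimizer.pt has actually been pulled, it could be verified against the new pointer like this:

# Sketch: verify a pulled LFS object against the pointer values shown above.
import hashlib, os

path = "last-checkpoint/optimizer.pt"
expected_oid = "16a5569594fb4df5bf1c750785f3494d46feefa182cb2b32b73b96c59ce4dfb9"

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == 295765866   # the pointer's size field
assert digest.hexdigest() == expected_oid   # the pointer's oid sha256

The same check applies to the rng_state_*.pth, scheduler.pt, and training_args.bin pointers that follow.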
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0c3616c5e7378189a5a2f56afac857191c5024fbc66151607e9eb81c0373f6d4
+ oid sha256:b6c77415a0ab37356a85f52261edc85994f9e5825b42402102b3aa09ffa95c6b
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4a3c2e972aec7435b0166dc5fb51dbe901b2382daee8f52891692ed1e805ca6a
+ oid sha256:45532071b2a9516c17404aa6df957ffddf7a787491085332ff789662a653ef15
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:09b1983931967f9a0e5ffcf04a402285c81a717924eb4bf834cb0418ace9935c
+ oid sha256:2ec2f2e59e3b448f5fc47059fdf2a045c0b5f491df29ee4fb5cc582d5b59aec0
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:900b6b8c38f7c80a0f05c484c6a322cf8d05460e29582cdeeb16652fd1c095ec
+ oid sha256:bd088128f067cdd7f5671e749c765cea89c3e882f4de1fa4bdfd45b21a841ced
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5607f6de446164d9d9adb8b91c44cec55b14aa391e24ba5637c08b834eedda2a
+ oid sha256:d271cdb95f63cd655315f063ca2e25c78dc5ae4275523c5d4f80f367586b3351
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": NaN,
  "best_model_checkpoint": "miner_id_24/checkpoint-25",
- "epoch": 0.006437025772241935,
+ "epoch": 0.0032185128861209676,
  "eval_steps": 25,
- "global_step": 50,
+ "global_step": 25,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -18,9 +18,9 @@
  {
  "epoch": 0.0001287405154448387,
  "eval_loss": NaN,
- "eval_runtime": 2.4889,
- "eval_samples_per_second": 20.089,
- "eval_steps_per_second": 5.223,
+ "eval_runtime": 2.5013,
+ "eval_samples_per_second": 19.99,
+ "eval_steps_per_second": 5.197,
  "step": 1
  },
  {
@@ -194,193 +194,10 @@
  {
  "epoch": 0.0032185128861209676,
  "eval_loss": NaN,
- "eval_runtime": 1.2848,
- "eval_samples_per_second": 38.917,
- "eval_steps_per_second": 10.118,
+ "eval_runtime": 1.2859,
+ "eval_samples_per_second": 38.883,
+ "eval_steps_per_second": 10.11,
  "step": 25
- },
- {
- "epoch": 0.0033472534015658063,
- "grad_norm": NaN,
- "learning_rate": 5.500000000000001e-05,
- "loss": 0.0,
- "step": 26
- },
- {
- "epoch": 0.003475993917010645,
- "grad_norm": NaN,
- "learning_rate": 5.205685918464356e-05,
- "loss": 0.0,
- "step": 27
- },
- {
- "epoch": 0.003604734432455484,
- "grad_norm": NaN,
- "learning_rate": 4.912632135009769e-05,
- "loss": 0.0,
- "step": 28
- },
- {
- "epoch": 0.0037334749479003226,
- "grad_norm": NaN,
- "learning_rate": 4.6220935509274235e-05,
- "loss": 0.0,
- "step": 29
- },
- {
- "epoch": 0.0038622154633451613,
- "grad_norm": NaN,
- "learning_rate": 4.3353142970386564e-05,
- "loss": 0.0,
- "step": 30
- },
- {
- "epoch": 0.0039909559787900005,
- "grad_norm": NaN,
- "learning_rate": 4.053522406135775e-05,
- "loss": 0.0,
- "step": 31
- },
- {
- "epoch": 0.004119696494234839,
- "grad_norm": NaN,
- "learning_rate": 3.777924554357096e-05,
- "loss": 0.0,
- "step": 32
- },
- {
- "epoch": 0.004248437009679677,
- "grad_norm": NaN,
- "learning_rate": 3.509700894014496e-05,
- "loss": 0.0,
- "step": 33
- },
- {
- "epoch": 0.004377177525124516,
- "grad_norm": NaN,
- "learning_rate": 3.250000000000001e-05,
- "loss": 0.0,
- "step": 34
- },
- {
- "epoch": 0.0045059180405693545,
- "grad_norm": NaN,
- "learning_rate": 2.9999339514117912e-05,
- "loss": 0.0,
- "step": 35
- },
- {
- "epoch": 0.004634658556014194,
- "grad_norm": NaN,
- "learning_rate": 2.760573569460757e-05,
- "loss": 0.0,
- "step": 36
- },
- {
- "epoch": 0.004763399071459032,
- "grad_norm": NaN,
- "learning_rate": 2.53294383204969e-05,
- "loss": 0.0,
- "step": 37
- },
- {
- "epoch": 0.004892139586903871,
- "grad_norm": NaN,
- "learning_rate": 2.3180194846605367e-05,
- "loss": 0.0,
- "step": 38
- },
- {
- "epoch": 0.0050208801023487095,
- "grad_norm": NaN,
- "learning_rate": 2.1167208663446025e-05,
- "loss": 0.0,
- "step": 39
- },
- {
- "epoch": 0.005149620617793549,
- "grad_norm": NaN,
- "learning_rate": 1.9299099686894423e-05,
- "loss": 0.0,
- "step": 40
- },
- {
- "epoch": 0.005278361133238387,
- "grad_norm": NaN,
- "learning_rate": 1.758386744638546e-05,
- "loss": 0.0,
- "step": 41
- },
- {
- "epoch": 0.005407101648683226,
- "grad_norm": NaN,
- "learning_rate": 1.602885682970026e-05,
- "loss": 0.0,
- "step": 42
- },
- {
- "epoch": 0.0055358421641280645,
- "grad_norm": NaN,
- "learning_rate": 1.464072663102903e-05,
- "loss": 0.0,
- "step": 43
- },
- {
- "epoch": 0.005664582679572904,
- "grad_norm": NaN,
- "learning_rate": 1.3425421036992098e-05,
- "loss": 0.0,
- "step": 44
- },
- {
- "epoch": 0.005793323195017742,
- "grad_norm": NaN,
- "learning_rate": 1.2388144172720251e-05,
- "loss": 0.0,
- "step": 45
- },
- {
- "epoch": 0.005922063710462581,
- "grad_norm": NaN,
- "learning_rate": 1.1533337816991932e-05,
- "loss": 0.0,
- "step": 46
- },
- {
- "epoch": 0.006050804225907419,
- "grad_norm": NaN,
- "learning_rate": 1.0864662381854632e-05,
- "loss": 0.0,
- "step": 47
- },
- {
- "epoch": 0.006179544741352259,
- "grad_norm": NaN,
- "learning_rate": 1.0384981238178534e-05,
- "loss": 0.0,
- "step": 48
- },
- {
- "epoch": 0.006308285256797097,
- "grad_norm": NaN,
- "learning_rate": 1.0096348454262845e-05,
- "loss": 0.0,
- "step": 49
- },
- {
- "epoch": 0.006437025772241935,
- "grad_norm": NaN,
- "learning_rate": 1e-05,
- "loss": 0.0,
- "step": 50
- },
- {
- "epoch": 0.006437025772241935,
- "eval_loss": NaN,
- "eval_runtime": 1.2802,
- "eval_samples_per_second": 39.057,
- "eval_steps_per_second": 10.155,
- "step": 50
  }
  ],
  "logging_steps": 1,
@@ -395,7 +212,7 @@
  "early_stopping_threshold": 0.0
  },
  "attributes": {
- "early_stopping_patience_counter": 1
+ "early_stopping_patience_counter": 0
  }
  },
  "TrainerControl": {
@@ -404,12 +221,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": true
+ "should_training_stop": false
  },
  "attributes": {}
  }
  },
- "total_flos": 1.0724494576032154e+17,
+ "total_flos": 5.363902810003866e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0f28581c22b6f891f8144d3f654f19d42faacc9db960d1c5dc464cfce25457f2
+ oid sha256:682975059ac12dde1f3a1dfd27b51d5dd3692080007d0dba9ecbf87ddb588d5d
  size 6840
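training_args.bin is the pickled TrainingArguments object, so its hash changes whenever any run argument does. A cautious sketch for peeking at it (unpickling executes arbitrary pickle code, so only do this for checkpoints you trust; recent torch versions need weights_only=False for non-tensor payloads like this one):

# Sketch: recover the run's TrainingArguments from the checkpoint.
import torch

args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
print(args.per_device_train_batch_size)   # typically lines up with "train_batch_size": 2
print(args.learning_rate)
print(args.eval_steps)                    # 25, matching trainer_state.json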