besimray committed
Commit 0aee58f · verified · 1 Parent(s): 70b4db6

Training in progress, step 50, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:63d4bb6a956fb56cd0088dc28547952d24c08f2eb86d2aec9d4b21483adb9177
+oid sha256:b3ebae6745992a9891531c7f9dc21af61684fec4cad0bdb8ec71e2d7e0190098
 size 90207248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8bc10445b82405fb6d37d22c7fe973fefbd6d5e7a4eae5c80f3b4858a86a96da
+oid sha256:086c8bf4279e90d468163e6119ad5a62eca1c60efa475a439bfc8e41ebe89eaa
 size 46057082
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d872420bc3dc74bb7836dfa2f4957583cebe65bc0ce2ca2cd3f24b22a86bb602
+oid sha256:93b715727f08f3bec1a4243110369e5be33fa530d65244e67bf09606e5d6da48
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f924e37bc06756f5535d9fa2079568e2b7869291abee642e58937943662c2f6f
+oid sha256:cfc2bf0eccc6c4e85c949c664a83bcd160767da77920eebf352a6f7f7c4c9b2e
 size 1064
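
Note: each binary file above (adapter_model.safetensors, optimizer.pt, rng_state.pth, scheduler.pt) is stored as a Git LFS pointer, so this commit only swaps the sha256 oid while the recorded size stays the same. A minimal Python sketch for checking a downloaded blob against its pointer; the file paths are hypothetical and not part of this repo's tooling:

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, data_path: str) -> bool:
    """Compare a local file against the oid and size recorded in a Git LFS pointer."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value

    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    data = Path(data_path).read_bytes()
    actual_oid = hashlib.sha256(data).hexdigest()
    return actual_oid == expected_oid and len(data) == expected_size

# Hypothetical usage: pointer text on one side, the real LFS blob on the other.
# print(verify_lfs_pointer("adapter_model.pointer",
#                          "last-checkpoint/adapter_model.safetensors"))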
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.6503487825393677,
-  "best_model_checkpoint": "miner_id_24/checkpoint-40",
-  "epoch": 0.011295446523120367,
+  "best_metric": 1.6434087753295898,
+  "best_model_checkpoint": "miner_id_24/checkpoint-50",
+  "epoch": 0.014119308153900459,
   "eval_steps": 10,
-  "global_step": 40,
+  "global_step": 50,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -327,6 +327,84 @@
       "eval_samples_per_second": 5.592,
       "eval_steps_per_second": 5.592,
       "step": 40
+    },
+    {
+      "epoch": 0.011577832686198376,
+      "grad_norm": 1.9744967222213745,
+      "learning_rate": 0.00019803133943336874,
+      "loss": 2.3377,
+      "step": 41
+    },
+    {
+      "epoch": 0.011860218849276386,
+      "grad_norm": 1.438886046409607,
+      "learning_rate": 0.0001979027334832293,
+      "loss": 1.4721,
+      "step": 42
+    },
+    {
+      "epoch": 0.012142605012354394,
+      "grad_norm": 3.23305082321167,
+      "learning_rate": 0.00019777010313517518,
+      "loss": 1.7882,
+      "step": 43
+    },
+    {
+      "epoch": 0.012424991175432405,
+      "grad_norm": 5.8521013259887695,
+      "learning_rate": 0.00019763345384112043,
+      "loss": 1.2744,
+      "step": 44
+    },
+    {
+      "epoch": 0.012707377338510413,
+      "grad_norm": 5.29448127746582,
+      "learning_rate": 0.00019749279121818235,
+      "loss": 1.0265,
+      "step": 45
+    },
+    {
+      "epoch": 0.012989763501588421,
+      "grad_norm": 3.448343515396118,
+      "learning_rate": 0.00019734812104845047,
+      "loss": 2.1143,
+      "step": 46
+    },
+    {
+      "epoch": 0.013272149664666432,
+      "grad_norm": 2.209937810897827,
+      "learning_rate": 0.00019719944927874881,
+      "loss": 1.325,
+      "step": 47
+    },
+    {
+      "epoch": 0.01355453582774444,
+      "grad_norm": 1.200800895690918,
+      "learning_rate": 0.0001970467820203915,
+      "loss": 1.8635,
+      "step": 48
+    },
+    {
+      "epoch": 0.01383692199082245,
+      "grad_norm": 1.2242612838745117,
+      "learning_rate": 0.00019689012554893154,
+      "loss": 1.7063,
+      "step": 49
+    },
+    {
+      "epoch": 0.014119308153900459,
+      "grad_norm": 1.140673279762268,
+      "learning_rate": 0.00019672948630390294,
+      "loss": 1.315,
+      "step": 50
+    },
+    {
+      "epoch": 0.014119308153900459,
+      "eval_loss": 1.6434087753295898,
+      "eval_runtime": 133.392,
+      "eval_samples_per_second": 5.593,
+      "eval_steps_per_second": 5.593,
+      "step": 50
     }
   ],
   "logging_steps": 1,
@@ -355,7 +433,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3915214340751360.0,
+  "total_flos": 4894017925939200.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null