oldiday committed
Commit 0cd441a · verified · 1 Parent(s): a92736d

Training in progress, step 600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f3fd992e113586f812afc77a0702f2871849c3b36106c0c2de720d263ffaa124
+oid sha256:e69590fa1ed241eef6a296c732ccf103be67167a09de0f7591d694941abbddee
 size 100697728
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3105b22c5fc7869e6ed63a58ea962520392f460fbb1c31c1abc4d139211b21cf
+oid sha256:fe31a5c368391667178b190ac5af3403ae6dda609fe68154c8fe7fd0e907aca6
 size 51418452
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6207d4c3c45167cc370ab64a1372acf1cee42bfee65685d0672373dc45c12efd
+oid sha256:7ad557efcb90e3e9735af4be2b02a20aac5fd7dd9159e4600e591a43624fd0de
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6bb9c0d62d6b3cf0976c16f73e9bd814b298ebffa1786831bc2a68d8e48809b9
+oid sha256:5ad54995b081fae25638228c5d9c8f38ca277e5c5ad00bc3e49897b543f84405
 size 1064
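
The four checkpoint binaries above are tracked with Git LFS, so the repository stores only a three-line pointer per file: the spec version, the sha256 oid of the blob, and its size in bytes. Below is a minimal sketch that recomputes the sha256 of a locally downloaded blob and compares it, together with the byte size, against such a pointer; the paths are illustrative and assume both files are available on disk.

import hashlib
import os

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Compare a downloaded blob against the oid/size recorded in a Git LFS pointer."""
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value

    expected_oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    sha = hashlib.sha256()
    with open(blob_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):  # stream in 1 MiB chunks
            sha.update(chunk)

    return sha.hexdigest() == expected_oid and os.path.getsize(blob_path) == expected_size

# Illustrative paths (not part of this commit): the pointer text vs. the resolved blob.
# verify_lfs_pointer("pointers/scheduler.pt", "last-checkpoint/scheduler.pt")
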
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.7403023838996887,
-  "best_model_checkpoint": "miner_id_24/checkpoint-550",
-  "epoch": 0.6472491909385113,
+  "best_metric": 0.7395899295806885,
+  "best_model_checkpoint": "miner_id_24/checkpoint-600",
+  "epoch": 0.706090026478376,
   "eval_steps": 50,
-  "global_step": 550,
+  "global_step": 600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -488,6 +488,49 @@
       "eval_samples_per_second": 21.984,
       "eval_steps_per_second": 5.496,
       "step": 550
+    },
+    {
+      "epoch": 0.6590173580464843,
+      "grad_norm": 0.7405086755752563,
+      "learning_rate": 2.259661018213333e-06,
+      "loss": 5.5799,
+      "step": 560
+    },
+    {
+      "epoch": 0.6707855251544572,
+      "grad_norm": 1.1222789287567139,
+      "learning_rate": 1.2731645278655445e-06,
+      "loss": 4.4696,
+      "step": 570
+    },
+    {
+      "epoch": 0.6825536922624301,
+      "grad_norm": 0.7055889964103699,
+      "learning_rate": 5.665199789862907e-07,
+      "loss": 2.6787,
+      "step": 580
+    },
+    {
+      "epoch": 0.694321859370403,
+      "grad_norm": 0.8095390796661377,
+      "learning_rate": 1.4173043232380557e-07,
+      "loss": 1.1647,
+      "step": 590
+    },
+    {
+      "epoch": 0.706090026478376,
+      "grad_norm": 1.5414854288101196,
+      "learning_rate": 0.0,
+      "loss": 1.2265,
+      "step": 600
+    },
+    {
+      "epoch": 0.706090026478376,
+      "eval_loss": 0.7395899295806885,
+      "eval_runtime": 65.1662,
+      "eval_samples_per_second": 21.975,
+      "eval_steps_per_second": 5.494,
+      "step": 600
     }
   ],
   "logging_steps": 10,
@@ -511,12 +554,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 3.706288114835128e+17,
+  "total_flos": 4.0424698151121715e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null