mikhail-panzo committed on
Commit eabb90b
Parent(s): 1596c6a

Training in progress, step 6000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:127785b687ae550620c07c76161ce4fde314a3f786bb3bdecdd030f4cff25442
+oid sha256:fb514424646e7839682dd9dae8b997e057d871af06a5e7e929271aaaa2e7c88d
 size 577789320
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f56a73e3c0f71225db11233195f56bf1f4661b8840ebb310a361c5bbb1b1c79b
+oid sha256:4fb4cccce2cf2fb4228373ebd75bc513ed9c699a9a015d27c99253ddaeab739f
 size 1155772233
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e814e69ac405112ac0e823a174f61a291238359cb5e185b2802f269183d97fb3
+oid sha256:8b78a35397517539ceb5abaec4c078472043c61c90e9313f43ee762be5908798
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1f4ebc94f219234dc117648ea36b52869f5ad7c506a78e36de2ec35324b116e9
+oid sha256:cdc16f96a129b2f8ebddb713f251b55486ac014357ce4aedef1d542306c34e74
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.3729405403137207,
-  "best_model_checkpoint": "mikhail-panzo/zlm_b128_le5_s8000/checkpoint-5500",
-  "epoch": 9.214659685863875,
+  "best_metric": 0.3696598410606384,
+  "best_model_checkpoint": "mikhail-panzo/zlm_b128_le5_s8000/checkpoint-6000",
+  "epoch": 10.052356020942408,
   "eval_steps": 500,
-  "global_step": 5500,
+  "global_step": 6000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -865,6 +865,84 @@
       "eval_samples_per_second": 32.812,
       "eval_steps_per_second": 4.105,
       "step": 5500
+    },
+    {
+      "epoch": 9.298429319371728,
+      "grad_norm": 1.0570714473724365,
+      "learning_rate": 4.088333333333334e-06,
+      "loss": 0.4144,
+      "step": 5550
+    },
+    {
+      "epoch": 9.38219895287958,
+      "grad_norm": 1.1278653144836426,
+      "learning_rate": 4.005000000000001e-06,
+      "loss": 0.4154,
+      "step": 5600
+    },
+    {
+      "epoch": 9.465968586387435,
+      "grad_norm": 5.811945915222168,
+      "learning_rate": 3.921666666666667e-06,
+      "loss": 0.4085,
+      "step": 5650
+    },
+    {
+      "epoch": 9.549738219895287,
+      "grad_norm": 1.6973522901535034,
+      "learning_rate": 3.8383333333333336e-06,
+      "loss": 0.4133,
+      "step": 5700
+    },
+    {
+      "epoch": 9.633507853403142,
+      "grad_norm": 1.209333062171936,
+      "learning_rate": 3.7550000000000005e-06,
+      "loss": 0.4123,
+      "step": 5750
+    },
+    {
+      "epoch": 9.717277486910994,
+      "grad_norm": 3.592991590499878,
+      "learning_rate": 3.6716666666666665e-06,
+      "loss": 0.4126,
+      "step": 5800
+    },
+    {
+      "epoch": 9.801047120418849,
+      "grad_norm": 1.152239203453064,
+      "learning_rate": 3.588333333333334e-06,
+      "loss": 0.4115,
+      "step": 5850
+    },
+    {
+      "epoch": 9.884816753926701,
+      "grad_norm": 1.6118751764297485,
+      "learning_rate": 3.505e-06,
+      "loss": 0.4036,
+      "step": 5900
+    },
+    {
+      "epoch": 9.968586387434556,
+      "grad_norm": 1.4384329319000244,
+      "learning_rate": 3.4216666666666672e-06,
+      "loss": 0.4103,
+      "step": 5950
+    },
+    {
+      "epoch": 10.052356020942408,
+      "grad_norm": 1.447549819946289,
+      "learning_rate": 3.3383333333333333e-06,
+      "loss": 0.4056,
+      "step": 6000
+    },
+    {
+      "epoch": 10.052356020942408,
+      "eval_loss": 0.3696598410606384,
+      "eval_runtime": 256.5857,
+      "eval_samples_per_second": 33.084,
+      "eval_steps_per_second": 4.139,
+      "step": 6000
     }
   ],
   "logging_steps": 50,
@@ -884,7 +962,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 9.854783789898643e+16,
+  "total_flos": 1.0748974547355264e+17,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null
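For reference, the updated progress values in this commit can be read directly from the checkpoint's trainer_state.json. A minimal sketch, assuming the repository has been cloned locally and the relative path below points at the same last-checkpoint directory touched by this commit:

```python
# Minimal sketch (not part of this commit): inspect the trainer state
# saved with the step-6000 checkpoint. Uses only the standard library.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])   # 6000
print(state["best_metric"])   # 0.3696598410606384 (eval_loss at step 6000)
print(state["epoch"])         # 10.052356020942408
```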