mikhail-panzo committed
Commit aaed54d
1 parent: 4b80371

Training in progress, step 6000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6d4028ba97760f329a3677325bcedc4fde68e377bfd1c88c108a2a047b0837f3
+oid sha256:1c90f04ab07ebd466dc2aebcf0b4b67140c7fdc27748d3ec5cb616acf18ad539
 size 577789320
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8625c4613fa8390df21c5d9227e8836dedc5870a492af9f524b26f63997fd9b9
+oid sha256:d4d30a086259676925eccbdc6518ad47f1b47f06760a576e87b560557cffacf8
 size 1155772233
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e814e69ac405112ac0e823a174f61a291238359cb5e185b2802f269183d97fb3
+oid sha256:8b78a35397517539ceb5abaec4c078472043c61c90e9313f43ee762be5908798
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dee3257d3a4af7b415f176d6a1dc5bb5df29afc75892217ee55cd71b710b6a5c
+oid sha256:04ff002b31b6cc0a01a54d1e3c6f626c5449fc0c5290b11a28578f7f1a9d96b1
 size 1064
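
Aside on the pointer format: each of the files above is a Git LFS pointer recording a spec version URL, a SHA-256 oid, and a size in bytes; the actual weights live in LFS storage. The sketch below is not part of this commit (the local path is an assumption) and only illustrates checking a downloaded file against the new model.safetensors oid from the diff above.

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in chunks and return its hex SHA-256 digest.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local checkout; the expected oid is taken from the pointer diff.
expected = "1c90f04ab07ebd466dc2aebcf0b4b67140c7fdc27748d3ec5cb616acf18ad539"
actual = sha256_of("last-checkpoint/model.safetensors")
print("match" if actual == expected else "mismatch")
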
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.3218235671520233,
-  "best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s8000/checkpoint-5500",
-  "epoch": 9.214659685863875,
+  "best_metric": 0.31853485107421875,
+  "best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s8000/checkpoint-6000",
+  "epoch": 10.052356020942408,
   "eval_steps": 500,
-  "global_step": 5500,
+  "global_step": 6000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -865,6 +865,84 @@
       "eval_samples_per_second": 30.365,
       "eval_steps_per_second": 3.799,
       "step": 5500
+    },
+    {
+      "epoch": 9.298429319371728,
+      "grad_norm": 1.0284796953201294,
+      "learning_rate": 6.452e-05,
+      "loss": 0.356,
+      "step": 5550
+    },
+    {
+      "epoch": 9.38219895287958,
+      "grad_norm": 1.8278234004974365,
+      "learning_rate": 6.402e-05,
+      "loss": 0.356,
+      "step": 5600
+    },
+    {
+      "epoch": 9.465968586387435,
+      "grad_norm": 0.9208963513374329,
+      "learning_rate": 6.352e-05,
+      "loss": 0.3504,
+      "step": 5650
+    },
+    {
+      "epoch": 9.549738219895287,
+      "grad_norm": 1.295639991760254,
+      "learning_rate": 6.302e-05,
+      "loss": 0.3551,
+      "step": 5700
+    },
+    {
+      "epoch": 9.633507853403142,
+      "grad_norm": 0.9757601022720337,
+      "learning_rate": 6.252e-05,
+      "loss": 0.3529,
+      "step": 5750
+    },
+    {
+      "epoch": 9.717277486910994,
+      "grad_norm": 1.451418399810791,
+      "learning_rate": 6.202e-05,
+      "loss": 0.3537,
+      "step": 5800
+    },
+    {
+      "epoch": 9.801047120418849,
+      "grad_norm": 2.2001028060913086,
+      "learning_rate": 6.152e-05,
+      "loss": 0.3522,
+      "step": 5850
+    },
+    {
+      "epoch": 9.884816753926701,
+      "grad_norm": 1.1149827241897583,
+      "learning_rate": 6.102e-05,
+      "loss": 0.3472,
+      "step": 5900
+    },
+    {
+      "epoch": 9.968586387434556,
+      "grad_norm": 1.4035720825195312,
+      "learning_rate": 6.0519999999999997e-05,
+      "loss": 0.3525,
+      "step": 5950
+    },
+    {
+      "epoch": 10.052356020942408,
+      "grad_norm": 1.0732487440109253,
+      "learning_rate": 6.002e-05,
+      "loss": 0.3485,
+      "step": 6000
+    },
+    {
+      "epoch": 10.052356020942408,
+      "eval_loss": 0.31853485107421875,
+      "eval_runtime": 271.779,
+      "eval_samples_per_second": 31.235,
+      "eval_steps_per_second": 3.908,
+      "step": 6000
     }
   ],
   "logging_steps": 50,
@@ -884,7 +962,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 9.854783789898643e+16,
+  "total_flos": 1.0748974547355264e+17,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null