mikhail-panzo committed
Commit: 86abd89
Parent: 1bd5879

Training in progress, step 5500, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3fb634d25a31951e441158f54ae0db2c273aaeb251169c41f8467cecc648bf56
+oid sha256:226994f9cc097e8b5364a4a2fa612d3987a6cad23e949b085227c213a4385c8c
 size 577789320
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4cc68ae6c7c867a9ae1fe921933744282f9631aa37051c6ca0a82c81119ae62e
+oid sha256:93f9f2a7999533b71352726a63f81e73d5b01dafaa3480d9c7002eb3b73664dd
 size 1155772233
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8f4e204979057d17355f455adeaa53e6157f86fff91322a4565250d75055d836
+oid sha256:e814e69ac405112ac0e823a174f61a291238359cb5e185b2802f269183d97fb3
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:404094f43137b5b2180fadcde3d1168585bcd71ff155f508576088e11be9d3e1
+oid sha256:59de8a8992a94a360bd3cf9a1be9afda63623261a7fdb31a70c9e1b4bf3d5cb5
 size 1064
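
The four files above are Git LFS pointers: the repository itself stores only each object's SHA-256 and byte size, while the actual weights, optimizer, RNG, and scheduler blobs live in LFS storage. A minimal Python sketch for checking that a locally downloaded file matches its pointer (the path is illustrative and assumes the checkpoint has already been pulled):

    import hashlib
    from pathlib import Path

    # Illustrative local path; adjust to wherever the checkpoint was pulled.
    path = Path("last-checkpoint/model.safetensors")

    # Stream the file so large checkpoints (~578 MB here) are not read into memory at once.
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)

    print("size  :", path.stat().st_size)   # should match the pointer's `size`
    print("sha256:", digest.hexdigest())    # should match the pointer's `oid sha256:`

The printed size and digest should equal the `size` and `oid sha256:` values in the corresponding pointer diff above.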
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.3284249007701874,
-  "best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s8000/checkpoint-5000",
-  "epoch": 8.37696335078534,
+  "best_metric": 0.3231986165046692,
+  "best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s8000/checkpoint-5500",
+  "epoch": 9.214659685863875,
   "eval_steps": 500,
-  "global_step": 5000,
+  "global_step": 5500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -787,6 +787,84 @@
       "eval_samples_per_second": 30.901,
       "eval_steps_per_second": 3.866,
       "step": 5000
+    },
+    {
+      "epoch": 8.460732984293193,
+      "grad_norm": 1.1301287412643433,
+      "learning_rate": 4.92e-05,
+      "loss": 0.3542,
+      "step": 5050
+    },
+    {
+      "epoch": 8.544502617801047,
+      "grad_norm": 1.0920559167861938,
+      "learning_rate": 4.836666666666667e-05,
+      "loss": 0.3534,
+      "step": 5100
+    },
+    {
+      "epoch": 8.6282722513089,
+      "grad_norm": 1.1312081813812256,
+      "learning_rate": 4.7533333333333334e-05,
+      "loss": 0.3598,
+      "step": 5150
+    },
+    {
+      "epoch": 8.712041884816754,
+      "grad_norm": 1.5819182395935059,
+      "learning_rate": 4.6700000000000003e-05,
+      "loss": 0.3537,
+      "step": 5200
+    },
+    {
+      "epoch": 8.795811518324607,
+      "grad_norm": 1.0059967041015625,
+      "learning_rate": 4.5866666666666666e-05,
+      "loss": 0.3553,
+      "step": 5250
+    },
+    {
+      "epoch": 8.879581151832461,
+      "grad_norm": 1.1407588720321655,
+      "learning_rate": 4.5033333333333335e-05,
+      "loss": 0.3557,
+      "step": 5300
+    },
+    {
+      "epoch": 8.963350785340314,
+      "grad_norm": 1.296221137046814,
+      "learning_rate": 4.4200000000000004e-05,
+      "loss": 0.3545,
+      "step": 5350
+    },
+    {
+      "epoch": 9.047120418848168,
+      "grad_norm": 1.4306052923202515,
+      "learning_rate": 4.3366666666666666e-05,
+      "loss": 0.3583,
+      "step": 5400
+    },
+    {
+      "epoch": 9.13089005235602,
+      "grad_norm": 1.1458420753479004,
+      "learning_rate": 4.2533333333333335e-05,
+      "loss": 0.3503,
+      "step": 5450
+    },
+    {
+      "epoch": 9.214659685863875,
+      "grad_norm": 0.9508205056190491,
+      "learning_rate": 4.17e-05,
+      "loss": 0.352,
+      "step": 5500
+    },
+    {
+      "epoch": 9.214659685863875,
+      "eval_loss": 0.3231986165046692,
+      "eval_runtime": 273.3343,
+      "eval_samples_per_second": 31.057,
+      "eval_steps_per_second": 3.885,
+      "step": 5500
     }
   ],
   "logging_steps": 50,
@@ -806,7 +884,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 8.958474220139136e+16,
+  "total_flos": 9.854783789898643e+16,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null