ben81828 committed
Commit e6a7146 · verified · 1 parent: b58b8d8

Training in progress, step 500, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b66f09b5eb3e2b8e820ab8703bfb62caadc5f14fec5d8d4c4a11402660014d23
+oid sha256:8ba83473ace8f2ecdbc048dafa2d00257b4fa3a981f66b8f547625be4d8d6a90
 size 18516456
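
The `CHANGED` and `ADDED` entries in this commit are Git LFS pointer files, not the binary payloads themselves: each pointer records the spec version, the SHA-256 `oid` of the blob, and its `size` in bytes. As a minimal sketch (the local paths are illustrative assumptions, not part of this commit), a downloaded blob can be checked against such a pointer like this:

```python
import hashlib
import os

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Compare a downloaded blob against a Git LFS pointer ('oid sha256:...' and 'size ...')."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value  # expected keys: version, oid, size

    sha = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)

    return (fields.get("oid") == f"sha256:{sha.hexdigest()}"
            and int(fields.get("size", -1)) == os.path.getsize(blob_path))

# Hypothetical paths: the pointer text is what git stores, the blob is what LFS downloads.
print(verify_lfs_pointer("adapter_model.safetensors.pointer",
                         "last-checkpoint/adapter_model.safetensors"))
```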
last-checkpoint/global_step500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:51e077bbd6f9b31a667509fac62a9478c3e301fa4263e38f3bc18902b0d68342
+size 27700976
last-checkpoint/global_step500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eff38edee50cb8cbc762605df12a2bda1ce7bb83f161c166a568ef4787b24709
+size 27700976
last-checkpoint/global_step500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2cce836213e483ff25e319f0422d5b1840f664da1ced4ce68a42f3b5fe9907f6
+size 27700976
last-checkpoint/global_step500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:006a4b5a7735623336a74ba7bef84d475f0f3812ab10b7a115d7ee0c68c72fd2
+size 27700976
last-checkpoint/global_step500/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f30ce533ed2650ad16e410814fe1812e5de7ea417fed7b42efd190da4e6430e1
+size 411571
last-checkpoint/global_step500/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:903cf2f39cadac46161431f156269e06cf06246cbb2efcfe093f150b761674c2
+size 411507
last-checkpoint/global_step500/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b817a826cc293379e470f1ab6ac1e10d78eed46c982fcb6159bc074d58769032
+size 411507
last-checkpoint/global_step500/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b0127b96085ca5ae5e0c9e40f227a525724276331fdd4609f62c5870b21012d
+size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step450
+global_step500
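
`latest` is the tag file DeepSpeed reads to locate the newest shard directory; after this commit it points at `global_step500`, whose per-rank `bf16_zero_pp_rank_*_optim_states.pt` and `zero_pp_rank_*_model_states.pt` partitions (ranks 0–3) were added above. If a single consolidated fp32 state dict were ever needed outside DeepSpeed, a hedged sketch using DeepSpeed's bundled helper might look like the following; it assumes DeepSpeed is installed and that `last-checkpoint/` is laid out exactly as shown in this commit:

```python
from pathlib import Path

# Sketch only; not part of this repo's training code.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

ckpt_dir = Path("last-checkpoint")               # directory committed here
tag = (ckpt_dir / "latest").read_text().strip()  # -> "global_step500"

# Merge the per-rank ZeRO partitions for this tag into one fp32 state dict in memory.
state_dict = get_fp32_state_dict_from_zero_checkpoint(str(ckpt_dir), tag=tag)
print(f"{len(state_dict)} tensors consolidated from {tag}")
```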
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7dbc6521b0b64cb12d818506108fcf257a4089ca8a9b1e453776ed3e032e7176
+oid sha256:bdd1f02cb20d3f4f7e0dd26fea62af57e5e71316163f926a28ed6cf89a9f3777
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2b13e3da1b0679cab1bab94f893e385a9a224d3335b5a6f62602f33c2be88d03
+oid sha256:fc6d54ba2aa85e2f895439a1b787ec947b848a1c34ea5a3a28821572bf2b9fec
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6a24f0e0f117b5a8236e0d12594c0c358f41ef00068d4460002e95ad1cc3cb1c
+oid sha256:8b6927d26551cddd8e35b34b43e79bd58f8b6027b6a481bb6a563a3652addeb4
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e46e4eab6c4a25d84ad36ddf1357401788adeeb6388c03cefa35a63b52ee7610
+oid sha256:f8295b1be8e66b4b30cb905dc48cfc717c027e427937b8142d00ae9de8106c6a
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:192829f095e6c906a86011515c9b1d243ad2d8f891793c9e500d2d433b84fc78
+oid sha256:ab7567aeded1bd7ef9f3ba115e57865dd25bb569f9711f33170eb2a51540c216
 size 1064
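
`rng_state_{0..3}.pth` hold the per-rank random-number-generator states and `scheduler.pt` the learning-rate scheduler state, both refreshed at every save so that a resumed run reproduces the data order and LR schedule. A small sketch for inspecting them locally (paths are the ones committed here; whatever keys get printed come from the files themselves, not from assumptions baked into the code):

```python
import torch

# weights_only=False because these files contain Python/NumPy RNG state, not just tensors.
rng_state = torch.load("last-checkpoint/rng_state_0.pth", weights_only=False)
print(sorted(rng_state.keys()))

scheduler_state = torch.load("last-checkpoint/scheduler.pt", weights_only=False)
print(scheduler_state)
```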
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.8908902406692505,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-300",
-  "epoch": 0.23177955189286634,
+  "epoch": 0.25753283543651817,
   "eval_steps": 50,
-  "global_step": 450,
+  "global_step": 500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -808,11 +808,100 @@
       "eval_steps_per_second": 0.872,
       "num_input_tokens_seen": 5263304,
       "step": 450
+    },
+    {
+      "epoch": 0.23435488024723153,
+      "grad_norm": 0.5906403377099594,
+      "learning_rate": 9.809128215864097e-05,
+      "loss": 0.8942,
+      "num_input_tokens_seen": 5321760,
+      "step": 455
+    },
+    {
+      "epoch": 0.2369302086015967,
+      "grad_norm": 0.5706805631290568,
+      "learning_rate": 9.802417057704931e-05,
+      "loss": 0.9099,
+      "num_input_tokens_seen": 5380224,
+      "step": 460
+    },
+    {
+      "epoch": 0.23950553695596188,
+      "grad_norm": 0.164631948732384,
+      "learning_rate": 9.795592321674045e-05,
+      "loss": 0.8981,
+      "num_input_tokens_seen": 5438704,
+      "step": 465
+    },
+    {
+      "epoch": 0.24208086531032708,
+      "grad_norm": 0.32986780285522194,
+      "learning_rate": 9.788654169177453e-05,
+      "loss": 0.8952,
+      "num_input_tokens_seen": 5497208,
+      "step": 470
+    },
+    {
+      "epoch": 0.24465619366469224,
+      "grad_norm": 0.40551569446674784,
+      "learning_rate": 9.781602764303487e-05,
+      "loss": 0.8959,
+      "num_input_tokens_seen": 5555704,
+      "step": 475
+    },
+    {
+      "epoch": 0.24723152201905743,
+      "grad_norm": 0.20928586231326682,
+      "learning_rate": 9.774438273818911e-05,
+      "loss": 0.901,
+      "num_input_tokens_seen": 5614160,
+      "step": 480
+    },
+    {
+      "epoch": 0.24980685037342262,
+      "grad_norm": 0.34365307116824517,
+      "learning_rate": 9.767160867164979e-05,
+      "loss": 0.9008,
+      "num_input_tokens_seen": 5672640,
+      "step": 485
+    },
+    {
+      "epoch": 0.2523821787277878,
+      "grad_norm": 0.4212274243028996,
+      "learning_rate": 9.759770716453436e-05,
+      "loss": 0.9016,
+      "num_input_tokens_seen": 5731072,
+      "step": 490
+    },
+    {
+      "epoch": 0.254957507082153,
+      "grad_norm": 0.39823625576558597,
+      "learning_rate": 9.752267996462434e-05,
+      "loss": 0.9132,
+      "num_input_tokens_seen": 5789544,
+      "step": 495
+    },
+    {
+      "epoch": 0.25753283543651817,
+      "grad_norm": 0.24856324117583653,
+      "learning_rate": 9.744652884632406e-05,
+      "loss": 0.8962,
+      "num_input_tokens_seen": 5848048,
+      "step": 500
+    },
+    {
+      "epoch": 0.25753283543651817,
+      "eval_loss": 0.8987945914268494,
+      "eval_runtime": 17.1622,
+      "eval_samples_per_second": 3.496,
+      "eval_steps_per_second": 0.874,
+      "num_input_tokens_seen": 5848048,
+      "step": 500
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 5263304,
+  "num_input_tokens_seen": 5848048,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -827,7 +916,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 295512497192960.0,
+  "total_flos": 328347984855040.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null