ben81828 committed
Commit 90db142 · verified · 1 parent: ef9abd3

Training in progress, step 2750, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:87c5131642e0134183eead6ee8a652dcec18d08a73880b46e923d3ba034a1f2c
+ oid sha256:5c53cd6b7a9aaec48a0490b1f83024497fb7a940d74235cfb8f5dc2c2a530336
  size 18516456
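
Every checkpoint artifact in this commit is tracked through Git LFS, so the diffs above show three-line pointer files (`version` / `oid sha256:...` / `size`) rather than the binaries themselves. As an illustration only (not part of this commit), a minimal sketch of reading such a pointer file as committed, before LFS resolves it to the real object:

```python
# Minimal sketch: parse a Git LFS pointer file (version / oid / size).
# Assumes the three-line layout shown in the diff above; operates on the
# pointer text itself, not on the LFS-resolved binary. Illustrative only.
from pathlib import Path


def parse_lfs_pointer(path: str) -> dict:
    """Return the pointer fields, e.g. {'version': ..., 'oid': ..., 'size': ...}."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    fields["oid"] = fields["oid"].split(":", 1)[1]  # drop the "sha256:" prefix
    fields["size"] = int(fields["size"])            # byte size of the real object
    return fields


# Example path from this commit; any of the pointer files above has the same layout.
print(parse_lfs_pointer("last-checkpoint/adapter_model.safetensors"))
```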
last-checkpoint/global_step2749/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4900d28228b39633c0fc7a89a0d6d17045a472ec14c243c08b3a8a18e265bce0
+ size 27700976
last-checkpoint/global_step2749/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc4258fec3a0b749bf4ed1e7249624eefd5a2404f4b71ba2609b9e37f8e36c62
+ size 27700976
last-checkpoint/global_step2749/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8537ae171e8a0fef8885e434157bed6132b70a5054ff0b05a542d05e118ac6ee
+ size 27700976
last-checkpoint/global_step2749/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:354d3767be78a341d093d69b42dd0564e20a7d0775a2c3315e974d23e2fb4416
+ size 27700976
last-checkpoint/global_step2749/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f371e9ce094939d3861a5773cf2cd0dc35662ff54accc1cc07f3b940d7364101
+ size 411571
last-checkpoint/global_step2749/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e34a0091e2f2d1677ed1efb3fb25f547edd42c07fd556d39731391c1dacd129
+ size 411507
last-checkpoint/global_step2749/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abcaf0f06b18f96340c928d314b3bb483810ab95835fc08c188569390f0a71e9
+ size 411507
last-checkpoint/global_step2749/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b70a0800e2d1ff59c82590c16976a86bc1115aadc7292b2d19b08fa9a1c77df
+ size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2699
+ global_step2749
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5d8d3c7739f9787ea797b86ff1b3a51f9e68197835ba3178915a8a77558f67fc
+ oid sha256:49db5a9fd0c84d580c671e52905ebeffc155b36537e76ff966d2e82906708999
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a22a57799bc43e59db67d9a787ed73040020c5f35990602033f4dab1318787d7
+ oid sha256:c8ca224562d8d97aaa131b3516288bb99f68d7dcf62170494326662bda0bb206
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:29a624b936b77a04d6bfb6940acdd65a710bf39452e419e7ddb5c40fb2261072
+ oid sha256:56e86a11c89dba78d60e1b2a1855a651b90a5a22ef131ce65d26af83668c154e
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3a79306817d4440cd621149537e8cf216b60f847fc6f9531a6147426aa02bb07
+ oid sha256:3dd63019c923e9692431619aced46b91aaf3fd22e1c22ec0a64347f2fe635a0e
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5e5dcca4048a125fff8fd284657b0498882f3efcb97d36e331842fc3d6d7b6e6
+ oid sha256:eac20f941d720434897b4bd1a2af1a5643e9ab3c6b5d2f9906a1dfc05b7c03aa
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.6319106221199036,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1600",
- "epoch": 1.3904197785217616,
+ "epoch": 1.4161730620654134,
  "eval_steps": 50,
- "global_step": 2700,
+ "global_step": 2750,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -4813,11 +4813,100 @@
  "eval_steps_per_second": 0.928,
  "num_input_tokens_seen": 31573240,
  "step": 2700
+ },
+ {
+ "epoch": 1.3929951068761266,
+ "grad_norm": 4.579483859059419,
+ "learning_rate": 1.0995208772202897e-05,
+ "loss": 0.2798,
+ "num_input_tokens_seen": 31631688,
+ "step": 2705
+ },
+ {
+ "epoch": 1.3955704352304918,
+ "grad_norm": 6.098482033036635,
+ "learning_rate": 1.0843536780343865e-05,
+ "loss": 0.289,
+ "num_input_tokens_seen": 31690200,
+ "step": 2710
+ },
+ {
+ "epoch": 1.398145763584857,
+ "grad_norm": 9.834029857293697,
+ "learning_rate": 1.069279084461513e-05,
+ "loss": 0.2844,
+ "num_input_tokens_seen": 31748664,
+ "step": 2715
+ },
+ {
+ "epoch": 1.4007210919392223,
+ "grad_norm": 9.387518267357049,
+ "learning_rate": 1.0542974530180327e-05,
+ "loss": 0.3254,
+ "num_input_tokens_seen": 31807176,
+ "step": 2720
+ },
+ {
+ "epoch": 1.4032964202935874,
+ "grad_norm": 5.648695214602192,
+ "learning_rate": 1.0394091380217352e-05,
+ "loss": 0.3683,
+ "num_input_tokens_seen": 31865696,
+ "step": 2725
+ },
+ {
+ "epoch": 1.4058717486479526,
+ "grad_norm": 5.202858729177478,
+ "learning_rate": 1.0246144915834683e-05,
+ "loss": 0.2968,
+ "num_input_tokens_seen": 31924200,
+ "step": 2730
+ },
+ {
+ "epoch": 1.4084470770023179,
+ "grad_norm": 4.808429946385537,
+ "learning_rate": 1.0099138635988026e-05,
+ "loss": 0.2943,
+ "num_input_tokens_seen": 31982712,
+ "step": 2735
+ },
+ {
+ "epoch": 1.4110224053566829,
+ "grad_norm": 5.094039780174813,
+ "learning_rate": 9.953076017397578e-06,
+ "loss": 0.3037,
+ "num_input_tokens_seen": 32041176,
+ "step": 2740
+ },
+ {
+ "epoch": 1.4135977337110481,
+ "grad_norm": 5.807237736394797,
+ "learning_rate": 9.807960514465792e-06,
+ "loss": 0.3019,
+ "num_input_tokens_seen": 32099656,
+ "step": 2745
+ },
+ {
+ "epoch": 1.4161730620654134,
+ "grad_norm": 6.27488451409393,
+ "learning_rate": 9.663795559195733e-06,
+ "loss": 0.164,
+ "num_input_tokens_seen": 32158144,
+ "step": 2750
+ },
+ {
+ "epoch": 1.4161730620654134,
+ "eval_loss": 0.7807286381721497,
+ "eval_runtime": 16.139,
+ "eval_samples_per_second": 3.718,
+ "eval_steps_per_second": 0.929,
+ "num_input_tokens_seen": 32158144,
+ "step": 2750
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 31573240,
+ "num_input_tokens_seen": 32158144,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -4832,7 +4921,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1772988198813696.0,
+ "total_flos": 1805832461746176.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null