ben81828 committed
Commit 94d8aff · verified · 1 Parent(s): df91dbd

Training in progress, step 2200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ff2cc1f16a485d3e450e8a2a866e3d76b5149212ac068f60cacb581a8329cec2
+ oid sha256:76b20ab8c2a7403c32454801b8a1cf7e477efa58783a51bc7e3abf420b274c08
  size 18516456
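
Each entry above is a Git LFS pointer, so the updated adapter weights can be checked against the recorded digest after download. A minimal sketch, assuming the blob has already been pulled to last-checkpoint/adapter_model.safetensors; the path and the verify_lfs_pointer helper are illustrative, not part of this repository:

# Hedged sketch: verify a downloaded blob against its Git LFS pointer.
import hashlib
import os

def verify_lfs_pointer(local_path, expected_oid, expected_size):
    """Return True if the file's byte size and SHA-256 match the pointer."""
    if os.path.getsize(local_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

print(verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "76b20ab8c2a7403c32454801b8a1cf7e477efa58783a51bc7e3abf420b274c08",
    18516456,
))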
last-checkpoint/global_step2199/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b0613c004ef61ba46c469e7988afd6177f0d8ff04ff8ee6444bdd1d1fc9108c
+ size 27700976
last-checkpoint/global_step2199/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b3dba5ff896b70830e8073bb54f1ef435a0fa45b55ce17454c205623c4e48fb
+ size 27700976
last-checkpoint/global_step2199/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d68c67d4836968cea589a6fddd1c8dbd6f70c5a0af74ae71adda19649d8531b
+ size 27700976
last-checkpoint/global_step2199/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:173cf728f5217ccf79a2d5628b083113384f213ca6fb71100b406e577b906625
+ size 27700976
last-checkpoint/global_step2199/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c88274e48eccf85cd6d66b4ac47fffddeff427b70680bfa735d8d1c8a4351cd
+ size 411571
last-checkpoint/global_step2199/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17b85ef590965c58493d34cf18fbce75161a74838c86d524eb7fa17e17fd05ac
+ size 411507
last-checkpoint/global_step2199/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4779fdbd87e10a9b125e08f201524dc4bce730d2f09d576123faf2f33a8445e
+ size 411507
last-checkpoint/global_step2199/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2980e5a9feb469339dd5196c24deea3e2fd2c3442acf2f2a882e7f13b04409e8
+ size 411507
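
The global_step2199/*.pt files are DeepSpeed ZeRO shards for four data-parallel ranks (per-rank bf16 optimizer states plus per-rank model states). A hedged sketch of merging them into a single fp32 state dict with DeepSpeed's zero_to_fp32 helper; the directory and tag arguments are inferred from the filenames in this commit, not documented by it:

# Hedged sketch: consolidate the ZeRO-partitioned shards saved under
# last-checkpoint/global_step2199 into one fp32 state dict.
# Requires the deepspeed package.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint",        # directory holding `latest` and global_step2199/
    tag="global_step2199",    # the shard directory added in this commit
)
print(sum(t.numel() for t in state_dict.values()), "parameters consolidated")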
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2149
+ global_step2199
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1c2f72d01585273766959f0cc9805fab753b53f20e581399855a293176ace988
+ oid sha256:3187a61ccc2722c440dc24ae4a6eefe6b9e5daccf9e92473bbb4483c7751ea77
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3fd1ecda2bb159be37a2a23800e098324f5b0334e7189df47c343ca6cb7605a2
+ oid sha256:b0f2a0df922fb3337cf2562745ebe8d5adf433ca45cb4e3da33a21b48183c000
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cf71c84ea2995fbc545b918d03f7f94c92293ca2e33343f177e6fd04531b7b19
+ oid sha256:f4d84b5276f687f44c9af60b1e41cd7b93a6d1659e36831a7bc021b5635d663b
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:72c53116f0f4c80841c24cd681d5fbd5a5992b259583a4cfb493f8f3e4544d82
+ oid sha256:7d69159433c88b97106cf21b92eb5a3f66f0c826aa268d82a47b3faed1ac86cd
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ca5770eed90150126cfd6d4c180a03d91fe366663610f38ec72b635b0b8cfd11
+ oid sha256:6225488c9a450b7edfa6b28ac40ecd217bccdb84073c98a64aefcefa7ee337d2
  size 1064
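
The rng_state_*.pth files snapshot each rank's RNG state and scheduler.pt the learning-rate scheduler, which is what lets a resumed run continue deterministically. A small, assumption-laden sketch for peeking at them with torch.load; the key names follow the usual Hugging Face Trainer conventions and are not guaranteed by this commit:

# Hedged sketch: inspect the per-rank RNG snapshot and LR scheduler state.
# weights_only=False because these files contain generic Python objects;
# only load checkpoints you trust.
import torch

rng = torch.load("last-checkpoint/rng_state_0.pth", map_location="cpu", weights_only=False)
sched = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)

print("rng keys:", sorted(rng))          # typically python, numpy, cpu, cuda
print("scheduler keys:", sorted(sched))  # e.g. last_epoch, _last_lr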
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.6319106221199036,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1600",
- "epoch": 1.1071336595415915,
+ "epoch": 1.1328869430852433,
  "eval_steps": 50,
- "global_step": 2150,
+ "global_step": 2200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -3834,11 +3834,100 @@
  "eval_steps_per_second": 0.937,
  "num_input_tokens_seen": 25140752,
  "step": 2150
+ },
+ {
+ "epoch": 1.1097089878959567,
+ "grad_norm": 3.943124296473041,
+ "learning_rate": 3.2392238785630386e-05,
+ "loss": 0.3154,
+ "num_input_tokens_seen": 25199208,
+ "step": 2155
+ },
+ {
+ "epoch": 1.112284316250322,
+ "grad_norm": 8.398532132538953,
+ "learning_rate": 3.216486683637146e-05,
+ "loss": 0.3915,
+ "num_input_tokens_seen": 25257680,
+ "step": 2160
+ },
+ {
+ "epoch": 1.114859644604687,
+ "grad_norm": 4.081633194377614,
+ "learning_rate": 3.1937916690642356e-05,
+ "loss": 0.3675,
+ "num_input_tokens_seen": 25316200,
+ "step": 2165
+ },
+ {
+ "epoch": 1.1174349729590523,
+ "grad_norm": 6.920842495491902,
+ "learning_rate": 3.1711393715847476e-05,
+ "loss": 0.4047,
+ "num_input_tokens_seen": 25374656,
+ "step": 2170
+ },
+ {
+ "epoch": 1.1200103013134175,
+ "grad_norm": 8.460113153700512,
+ "learning_rate": 3.14853032692886e-05,
+ "loss": 0.4155,
+ "num_input_tokens_seen": 25433168,
+ "step": 2175
+ },
+ {
+ "epoch": 1.1225856296677827,
+ "grad_norm": 9.825074199159944,
+ "learning_rate": 3.125965069803811e-05,
+ "loss": 0.3966,
+ "num_input_tokens_seen": 25491664,
+ "step": 2180
+ },
+ {
+ "epoch": 1.1251609580221478,
+ "grad_norm": 5.732206927543506,
+ "learning_rate": 3.103444133881261e-05,
+ "loss": 0.3068,
+ "num_input_tokens_seen": 25550128,
+ "step": 2185
+ },
+ {
+ "epoch": 1.127736286376513,
+ "grad_norm": 6.135036052058211,
+ "learning_rate": 3.080968051784666e-05,
+ "loss": 0.386,
+ "num_input_tokens_seen": 25608624,
+ "step": 2190
+ },
+ {
+ "epoch": 1.1303116147308783,
+ "grad_norm": 3.31420885852192,
+ "learning_rate": 3.058537355076683e-05,
+ "loss": 0.3898,
+ "num_input_tokens_seen": 25667128,
+ "step": 2195
+ },
+ {
+ "epoch": 1.1328869430852433,
+ "grad_norm": 8.182546413863832,
+ "learning_rate": 3.0361525742465973e-05,
+ "loss": 0.4016,
+ "num_input_tokens_seen": 25725560,
+ "step": 2200
+ },
+ {
+ "epoch": 1.1328869430852433,
+ "eval_loss": 0.7534744143486023,
+ "eval_runtime": 15.969,
+ "eval_samples_per_second": 3.757,
+ "eval_steps_per_second": 0.939,
+ "num_input_tokens_seen": 25725560,
+ "step": 2200
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 25140752,
+ "num_input_tokens_seen": 25725560,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -3853,7 +3942,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1411768699584512.0,
+ "total_flos": 1444609144651776.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null