Training in progress, step 1800, checkpoint
- last-checkpoint/adapter_model.safetensors +1 -1
- last-checkpoint/global_step1800/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step1800/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step1800/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step1800/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step1800/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step1800/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step1800/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step1800/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/latest +1 -1
- last-checkpoint/rng_state_0.pth +1 -1
- last-checkpoint/rng_state_1.pth +1 -1
- last-checkpoint/rng_state_2.pth +1 -1
- last-checkpoint/rng_state_3.pth +1 -1
- last-checkpoint/scheduler.pt +1 -1
- last-checkpoint/trainer_state.json +93 -4
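The new files under global_step1800/ are DeepSpeed ZeRO partitions: one bf16 optimizer-state shard and one model-states shard per data-parallel rank (4 ranks here). If a single consolidated fp32 state dict is ever needed outside DeepSpeed, it can typically be rebuilt from these shards. A minimal sketch, assuming the same deepspeed package that wrote the checkpoint is installed (not something recorded in this commit):

# Sketch only: consolidate the per-rank ZeRO shards under
# last-checkpoint/global_step1800 into a single fp32 state dict on CPU.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint",      # directory that contains the "latest" tag file
    tag="global_step1800",  # which shard set to consolidate
)
print(sum(t.numel() for t in state_dict.values()), "consolidated parameters")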
last-checkpoint/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:80957d4d61fd32f325d1e1389afae72c016da18ced07ab4e480f80b2f80d1f1d
 size 18516456
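adapter_model.safetensors is the LoRA adapter updated at this step (about 18.5 MB). A minimal sketch of attaching it to a base model with peft; the base checkpoint ID below is an assumption for illustration, not something recorded in this diff:

# Sketch only: base model ID is an assumed placeholder; the adapter path is
# the checkpoint folder from this commit (expects adapter_config.json there).
from transformers import Qwen2VLForConditionalGeneration
from peft import PeftModel

base = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")  # assumption
model = PeftModel.from_pretrained(base, "last-checkpoint")  # loads adapter_model.safetensors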
last-checkpoint/global_step1800/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b3b41ba55200d920320eff5fa1315f1f113d1b1c434e47e8ab87efd7abca436
+size 27700976
last-checkpoint/global_step1800/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64993108326120b5b609c448a2563715bb5e3c6d99c0642a59f06352820dcb46
+size 27700976
last-checkpoint/global_step1800/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a9be39eb1fc36958fe6bb6cd68f427727561b7b8b0f6172648c8f4c61d000ea3
+size 27700976
last-checkpoint/global_step1800/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07f13221716bd22ef08f503210147b437b204d8a18b1daa271ff3f264f68c1ca
+size 27700976
last-checkpoint/global_step1800/zero_pp_rank_0_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c90eea3ce34f15daaff9c9681a7d74ccecf6838cb52d2b24db102d117308bce
+size 411571
last-checkpoint/global_step1800/zero_pp_rank_1_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce46a2ea3eceb9f00da73dd7f2d7d284f5cdb22b8af5b88634ee12736d91d06b
+size 411507
last-checkpoint/global_step1800/zero_pp_rank_2_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb904050e7bc7ad93f63e761b3f5399ec807ef4590b8af8d961b124317a9d65f
+size 411507
last-checkpoint/global_step1800/zero_pp_rank_3_mp_rank_00_model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43d272e6fb20b6249063feaf7246d0e06ce38ced0dd4de671f629b8555a80690
+size 411507
last-checkpoint/latest
CHANGED
@@ -1 +1 @@
-
+global_step1800
last-checkpoint/rng_state_0.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4d76f093328c54f2b94a10b8b50dc92fc99ceff9e3949d050a70042526f1d0eb
 size 15024
last-checkpoint/rng_state_1.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:54b172dd00cba9f005761d85fb0804f751caf6e1cc8294d1873354a890cb9909
 size 15024
last-checkpoint/rng_state_2.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ec6adef733bd7630aa48eff1a6edaabc275d67293dcb0b7a64d71451405d489d
 size 15024
last-checkpoint/rng_state_3.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:6ce1c4ba1932c0f698bef90e0e4e74ded1e0db5fc35282a0815899b8be759e67
 size 15024
last-checkpoint/scheduler.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:586e8022fc0be60f50aebd58453b59ef73708ef9c9ee879c0bef0b407e787025
 size 1064
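scheduler.pt and the per-rank rng_state_*.pth files are what let the run resume with the same learning-rate schedule and random state. With the Hugging Face Trainer that produced this state, resumption is usually a single call; a sketch, assuming a trainer object constructed with the same arguments as the original run (its setup is not part of this diff):

# Sketch: resume from this checkpoint folder. Trainer restores optimizer and
# scheduler state, per-rank RNG states, and the global step from
# trainer_state.json; DeepSpeed picks the shard set named in the "latest" file.
trainer.train(resume_from_checkpoint="last-checkpoint")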
last-checkpoint/trainer_state.json
CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.6319106221199036,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1600",
-  "epoch": 0.
+  "epoch": 0.9271182075714653,
   "eval_steps": 50,
-  "global_step":
+  "global_step": 1800,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3122,11 +3122,100 @@
       "eval_steps_per_second": 0.936,
       "num_input_tokens_seen": 20468200,
       "step": 1750
+    },
+    {
+      "epoch": 0.9039402523821787,
+      "grad_norm": 4.616122198129487,
+      "learning_rate": 5.145873693974188e-05,
+      "loss": 0.5248,
+      "num_input_tokens_seen": 20526696,
+      "step": 1755
+    },
+    {
+      "epoch": 0.9065155807365439,
+      "grad_norm": 5.322590172525407,
+      "learning_rate": 5.12156668275638e-05,
+      "loss": 0.4756,
+      "num_input_tokens_seen": 20585160,
+      "step": 1760
+    },
+    {
+      "epoch": 0.9090909090909091,
+      "grad_norm": 4.002252878507737,
+      "learning_rate": 5.097256796468598e-05,
+      "loss": 0.4405,
+      "num_input_tokens_seen": 20643672,
+      "step": 1765
+    },
+    {
+      "epoch": 0.9116662374452743,
+      "grad_norm": 5.58017966349683,
+      "learning_rate": 5.072944610043232e-05,
+      "loss": 0.5201,
+      "num_input_tokens_seen": 20702152,
+      "step": 1770
+    },
+    {
+      "epoch": 0.9142415657996394,
+      "grad_norm": 4.688576373892097,
+      "learning_rate": 5.048630698467081e-05,
+      "loss": 0.4662,
+      "num_input_tokens_seen": 20760664,
+      "step": 1775
+    },
+    {
+      "epoch": 0.9168168941540046,
+      "grad_norm": 4.984086874604376,
+      "learning_rate": 5.024315636767738e-05,
+      "loss": 0.5376,
+      "num_input_tokens_seen": 20819144,
+      "step": 1780
+    },
+    {
+      "epoch": 0.9193922225083698,
+      "grad_norm": 4.470690620190923,
+      "learning_rate": 5e-05,
+      "loss": 0.5174,
+      "num_input_tokens_seen": 20877624,
+      "step": 1785
+    },
+    {
+      "epoch": 0.921967550862735,
+      "grad_norm": 4.1127649145734795,
+      "learning_rate": 4.9756843632322626e-05,
+      "loss": 0.4273,
+      "num_input_tokens_seen": 20936112,
+      "step": 1790
+    },
+    {
+      "epoch": 0.9245428792171002,
+      "grad_norm": 5.1892527739805185,
+      "learning_rate": 4.9513693015329197e-05,
+      "loss": 0.4646,
+      "num_input_tokens_seen": 20994608,
+      "step": 1795
+    },
+    {
+      "epoch": 0.9271182075714653,
+      "grad_norm": 6.8574703914708985,
+      "learning_rate": 4.9270553899567686e-05,
+      "loss": 0.412,
+      "num_input_tokens_seen": 21053080,
+      "step": 1800
+    },
+    {
+      "epoch": 0.9271182075714653,
+      "eval_loss": 0.6768696904182434,
+      "eval_runtime": 15.9758,
+      "eval_samples_per_second": 3.756,
+      "eval_steps_per_second": 0.939,
+      "num_input_tokens_seen": 21053080,
+      "step": 1800
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen":
+  "num_input_tokens_seen": 21053080,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -3141,7 +3230,7 @@
       "attributes": {}
     }
   },
-  "total_flos":
+  "total_flos": 1182185020981248.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
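trainer_state.json now carries the training log through step 1800: train loss between roughly 0.41 and 0.54 over steps 1755-1800, and eval_loss 0.677 at step 1800 versus the best_metric of 0.632 recorded at checkpoint-1600. A minimal sketch for pulling those curves back out of log_history with the standard library:

# Sketch: read the logged metrics back out of trainer_state.json.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

train_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_curve = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print("last train points:", train_curve[-3:])
print("last eval point:", eval_curve[-1])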