ben81828 committed (verified)
Commit 7c431c8 · 1 Parent(s): be581ca

Training in progress, step 650, checkpoint
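The repository layout below (a single last-checkpoint/ folder rewritten every 50 steps, with the auto-generated "Training in progress" commit message) matches what the Hugging Face Trainer produces when pushing checkpoints to the Hub. A minimal sketch of training arguments consistent with the values recorded in trainer_state.json further down (save/eval every 50 steps, logging every 5, 2 epochs, per-device batch size 1); the hub_model_id and DeepSpeed config path are placeholders, not taken from this repo:

```python
from transformers import TrainingArguments

# Sketch only -- not the author's actual configuration.
args = TrainingArguments(
    output_dir="saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft",  # from trainer_state.json
    per_device_train_batch_size=1,       # "train_batch_size": 1
    num_train_epochs=2,                  # "num_train_epochs": 2
    logging_steps=5,                     # "logging_steps": 5
    evaluation_strategy="steps",
    eval_steps=50,                       # "eval_steps": 50
    save_strategy="steps",
    save_steps=50,                       # "save_steps": 50
    bf16=True,                           # consistent with the bf16 ZeRO shards below
    deepspeed="ds_config.json",          # placeholder path; ZeRO sharding is implied by global_step650/
    push_to_hub=True,
    hub_strategy="checkpoint",           # pushes the newest checkpoint under last-checkpoint/
    hub_model_id="ben81828/<repo-name>",  # placeholder; the target repo id is not shown in this diff
)
```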

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:69777f5aa25eff25d556b3b21e25927428c8db95972e0d7a65589f133ec91630
+oid sha256:ecbf7b990542d94deda7efbddabf6a00086c684dd76c82c04c38e95dfe58bb1c
 size 29034840
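Each pointer above is a Git LFS stub rather than the weights themselves: a spec version, the SHA-256 of the stored object, and its size in bytes. A small sketch for checking a downloaded file against the new pointer (the local path is assumed):

```python
import hashlib
import os

def verify_lfs_pointer(local_path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the oid/size recorded in its LFS pointer."""
    if os.path.getsize(local_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# oid/size taken from the new pointer above; the local path is hypothetical.
print(verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "ecbf7b990542d94deda7efbddabf6a00086c684dd76c82c04c38e95dfe58bb1c",
    29034840,
))
```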
last-checkpoint/global_step650/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39f40406c716e9f48cdbc8bcad8392e96933e35472630a456d462ad0578739ae
+size 43429616
last-checkpoint/global_step650/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d378a27cd84649fd79dc7860b70f7326c0e673b75939a66bed1b0a3d892c1736
+size 43429616
last-checkpoint/global_step650/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:414299a9e9f35fb97164d31510731beabc2ed7f4d100a93dfc2dece42aac6d6c
+size 43429616
last-checkpoint/global_step650/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b8caf1bba5174ffaead850fcafb28d3c77b4bc1c28f74c4f87468e8afbd616b
+size 43429616
last-checkpoint/global_step650/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f9450a0ced61d6c33a66cdf1b2511022f0ec24416b17e1f16ed49dc98ff62dd
+size 637299
last-checkpoint/global_step650/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d01b9093c4ff061c6453535b14beda5a9d96a531472a58effac685152d63de95
+size 637171
last-checkpoint/global_step650/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a3bfb0ef98b44969e473f6d5c11a92b5a607c48dc2a278b5d72591611de4079
+size 637171
last-checkpoint/global_step650/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a41c42f45b9260afbaae285e2a90de1413db2108ea931952bc69942ea6f19e1e
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step600
+global_step650
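The global_step650/ files added above are DeepSpeed ZeRO partitions, one optimizer-state and one model-state shard per data-parallel rank (ranks 0-3), and latest simply records which step directory to resume from. When only consolidated fp32 weights are needed rather than a resumable engine state, DeepSpeed's own helper can merge the shards; a sketch, assuming the checkpoint folder has been downloaded locally:

```python
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# Merge the per-rank ZeRO shards into a single fp32 state dict. With the default
# tag=None, the helper reads the "latest" file, which now points at global_step650.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")
print(sum(t.numel() for t in state_dict.values()), "parameters consolidated")
```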
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a81e3916b1392c4c49afb171dee5415c15f5a5a5af8749b28195fcfa0596699c
+oid sha256:8044e4c53158c210a17648ba8f2dc2d25a25bbfc55f686015542618eb652a33e
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9a781038dd714b87b8adb1aac8dbc8217ceb607428a992133954ad522365236e
+oid sha256:4cd85d7fa425e7888c973f1c2985ac15ca21b5e6171fe140a401c2bc75ca46ff
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9446c3db15f382a5546f13622787fc99392a5e0bc8a9ca2da1838de7ab621a37
+oid sha256:d7915667371a58f1598639e0d1c20a0c59c783c14580cd040a6631eb4ea2311e
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1f11e7a6b3faa884fc23044e3772ff9dd72c257f02e121665061e2a03d518bd9
+oid sha256:35dd78929ad7f0fbf37fdb1284e8edf0424350f6e6ce1cd5a3ee78979af3d3cb
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f84ae20af7f03e6aebf5ce5f2f22e99f298d925ddedf6c1ee5005301cfd997b7
+oid sha256:dc96af5ecadce63a7794ae26671a9f92a73593e4cf6d844f16daae8f01c69890
 size 1064
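The rng_state_*.pth and scheduler.pt updates are what make the checkpoint resumable mid-run: one RNG snapshot per rank keeps shuffling and dropout reproducible, while scheduler.pt restores the learning-rate schedule at step 650. A hedged sketch for inspecting these small files locally (resuming the run itself would go through trainer.train(resume_from_checkpoint=...)):

```python
import torch

# Peek at the small state files; printing the keys avoids guessing their exact layout.
# weights_only=False because these are small pickled dicts, not tensor-only files.
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state_rank0 = torch.load("last-checkpoint/rng_state_0.pth", map_location="cpu", weights_only=False)
print(scheduler_state)  # LR scheduler state dict
print(list(rng_state_rank0) if isinstance(rng_state_rank0, dict) else type(rng_state_rank0))
```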
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.5883122682571411,
   "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-600",
-  "epoch": 0.17725258493353027,
+  "epoch": 0.19202363367799113,
   "eval_steps": 50,
-  "global_step": 600,
+  "global_step": 650,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1075,11 +1075,100 @@
       "eval_steps_per_second": 0.789,
       "num_input_tokens_seen": 6222736,
       "step": 600
+    },
+    {
+      "epoch": 0.17872968980797638,
+      "grad_norm": 8.081060384434974,
+      "learning_rate": 9.957846384806636e-05,
+      "loss": 0.5678,
+      "num_input_tokens_seen": 6274328,
+      "step": 605
+    },
+    {
+      "epoch": 0.18020679468242246,
+      "grad_norm": 10.520198943062466,
+      "learning_rate": 9.956249105263121e-05,
+      "loss": 0.5609,
+      "num_input_tokens_seen": 6327088,
+      "step": 610
+    },
+    {
+      "epoch": 0.18168389955686853,
+      "grad_norm": 5.336067400981417,
+      "learning_rate": 9.95462225678715e-05,
+      "loss": 0.5177,
+      "num_input_tokens_seen": 6378824,
+      "step": 615
+    },
+    {
+      "epoch": 0.1831610044313146,
+      "grad_norm": 5.263245734989025,
+      "learning_rate": 9.952965849084483e-05,
+      "loss": 0.5839,
+      "num_input_tokens_seen": 6431024,
+      "step": 620
+    },
+    {
+      "epoch": 0.18463810930576072,
+      "grad_norm": 5.175847441048381,
+      "learning_rate": 9.951279892037233e-05,
+      "loss": 0.5069,
+      "num_input_tokens_seen": 6483072,
+      "step": 625
+    },
+    {
+      "epoch": 0.1861152141802068,
+      "grad_norm": 12.247546396996816,
+      "learning_rate": 9.949564395703803e-05,
+      "loss": 0.495,
+      "num_input_tokens_seen": 6534768,
+      "step": 630
+    },
+    {
+      "epoch": 0.18759231905465287,
+      "grad_norm": 8.126956720775665,
+      "learning_rate": 9.947819370318825e-05,
+      "loss": 0.6435,
+      "num_input_tokens_seen": 6586416,
+      "step": 635
+    },
+    {
+      "epoch": 0.18906942392909898,
+      "grad_norm": 9.112136009018696,
+      "learning_rate": 9.946044826293106e-05,
+      "loss": 0.5014,
+      "num_input_tokens_seen": 6638592,
+      "step": 640
+    },
+    {
+      "epoch": 0.19054652880354506,
+      "grad_norm": 7.086235271485555,
+      "learning_rate": 9.944240774213556e-05,
+      "loss": 0.529,
+      "num_input_tokens_seen": 6689920,
+      "step": 645
+    },
+    {
+      "epoch": 0.19202363367799113,
+      "grad_norm": 21.538813510868643,
+      "learning_rate": 9.942407224843132e-05,
+      "loss": 0.5483,
+      "num_input_tokens_seen": 6743120,
+      "step": 650
+    },
+    {
+      "epoch": 0.19202363367799113,
+      "eval_loss": 0.6100574135780334,
+      "eval_runtime": 18.9585,
+      "eval_samples_per_second": 3.165,
+      "eval_steps_per_second": 0.791,
+      "num_input_tokens_seen": 6743120,
+      "step": 650
     }
   ],
   "logging_steps": 5,
   "max_steps": 6770,
-  "num_input_tokens_seen": 6222736,
+  "num_input_tokens_seen": 6743120,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1094,7 +1183,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 410420689764352.0,
+  "total_flos": 444771194634240.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null