mikhail-panzo committed
Commit 3ffba78
1 Parent(s): d1dc24e

Training in progress, step 8000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d78442b4e54c305874c81e5bbdf0ba83af286871119ec98549317a5f4613ce1e
+oid sha256:14347b694edbac48f71cf8408ac46afe84e4750316570c1ed940bf72283d84fe
 size 577789320
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0ddfa344f1d95e08566566e266a49ffbeb8411a7a9f2ace99274a0aaeec28e86
+oid sha256:424e6a391353585b51b6aa2f3d9a51faefe01f643f861e61d4f4255d4be74b1e
 size 1155772233
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7aca6dd3493fbba1f0e04ce463672e9c6637c7fdcee4f2b30e09ee0d653c8045
+oid sha256:e65d5e866573e78443e1465c9b9c0413095bd120b3384ac5fdd2285abe38197f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ba9826396d1061aee6fdf7293fd789e47f8a5ac9db78d8396f43ecdcae9b12a2
+oid sha256:f11095f13a6bb8e73004d74d741982d584b499e9ac31a59ea1de9965f3fec76b
 size 1064
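Each of the pointer files above follows the Git LFS pointer format: a spec-version line, the SHA-256 object ID of the actual payload, and its size in bytes; only the oid changes in this commit, the sizes stay identical. A minimal sketch of checking a downloaded blob against its pointer (the file paths in the usage comment are hypothetical placeholders, not files shipped in this commit):

```python
import hashlib
import os

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Compare a downloaded file against the oid/size recorded in its LFS pointer."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value

    expected_oid = fields["oid"].split(":", 1)[1]  # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    # Hash the blob in 1 MiB chunks to avoid loading ~578 MB into memory at once.
    sha = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)

    return sha.hexdigest() == expected_oid and os.path.getsize(blob_path) == expected_size

# Hypothetical usage with placeholder paths:
# print(verify_lfs_pointer("model.safetensors.pointer", "model.safetensors"))
```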
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.3917270302772522,
   "best_model_checkpoint": "mikhail-panzo/ceb_b32_le5_s8000/checkpoint-6500",
-  "epoch": 148.5148514851485,
+  "epoch": 158.41584158415841,
   "eval_steps": 500,
-  "global_step": 7500,
+  "global_step": 8000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1177,6 +1177,84 @@
       "eval_samples_per_second": 20.184,
       "eval_steps_per_second": 2.579,
       "step": 7500
+    },
+    {
+      "epoch": 149.5049504950495,
+      "grad_norm": 1.1068042516708374,
+      "learning_rate": 7.566666666666667e-07,
+      "loss": 0.4029,
+      "step": 7550
+    },
+    {
+      "epoch": 150.4950495049505,
+      "grad_norm": 1.2785730361938477,
+      "learning_rate": 6.733333333333334e-07,
+      "loss": 0.4001,
+      "step": 7600
+    },
+    {
+      "epoch": 151.4851485148515,
+      "grad_norm": 1.0063281059265137,
+      "learning_rate": 5.900000000000001e-07,
+      "loss": 0.3977,
+      "step": 7650
+    },
+    {
+      "epoch": 152.47524752475246,
+      "grad_norm": 1.1545789241790771,
+      "learning_rate": 5.066666666666667e-07,
+      "loss": 0.4015,
+      "step": 7700
+    },
+    {
+      "epoch": 153.46534653465346,
+      "grad_norm": 1.0624966621398926,
+      "learning_rate": 4.233333333333334e-07,
+      "loss": 0.4044,
+      "step": 7750
+    },
+    {
+      "epoch": 154.45544554455446,
+      "grad_norm": 1.3898247480392456,
+      "learning_rate": 3.4000000000000003e-07,
+      "loss": 0.4005,
+      "step": 7800
+    },
+    {
+      "epoch": 155.44554455445544,
+      "grad_norm": 1.2618342638015747,
+      "learning_rate": 2.566666666666667e-07,
+      "loss": 0.4025,
+      "step": 7850
+    },
+    {
+      "epoch": 156.43564356435644,
+      "grad_norm": 1.0798155069351196,
+      "learning_rate": 1.7333333333333335e-07,
+      "loss": 0.4015,
+      "step": 7900
+    },
+    {
+      "epoch": 157.4257425742574,
+      "grad_norm": 1.0550168752670288,
+      "learning_rate": 9e-08,
+      "loss": 0.3952,
+      "step": 7950
+    },
+    {
+      "epoch": 158.41584158415841,
+      "grad_norm": 1.0714497566223145,
+      "learning_rate": 6.666666666666667e-09,
+      "loss": 0.3989,
+      "step": 8000
+    },
+    {
+      "epoch": 158.41584158415841,
+      "eval_loss": 0.39282524585723877,
+      "eval_runtime": 8.2483,
+      "eval_samples_per_second": 21.823,
+      "eval_steps_per_second": 2.788,
+      "step": 8000
     }
   ],
   "logging_steps": 50,
@@ -1191,12 +1269,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 4.051916995267073e+16,
+  "total_flos": 4.322114324975938e+16,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null