mikhail-panzo committed
Commit fbaedba (1 parent: c392a26)

Training in progress, step 8000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:403cfbffc7ee4007065e4824a75bc209469efb1d3eaf4b78bdcdc4b8e425ae79
+oid sha256:3bc7aeae14173576a0aebd29e684e90ad24f0513c8e659f4277017c800274d58
 size 577789320
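
Both versions are Git LFS pointer files: only the `oid sha256:` line changes between checkpoints, because the 577,789,320-byte weight file itself lives in LFS storage and the oid is simply the SHA-256 of its contents. A minimal sketch for checking that a pulled file matches its pointer (assumes the repository has been cloned and `git lfs pull` has fetched the real payload; the path is illustrative):

```python
# Sketch: check that a pulled LFS payload matches the oid in its pointer file.
import hashlib

path = "last-checkpoint/model.safetensors"  # illustrative local path
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        digest.update(chunk)

# Should print the oid recorded in the pointer, e.g. 3bc7aeae1417...
print(digest.hexdigest())
```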
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7f82a7c0b927ee4293785e8b97633d22c4746dbad1b0c92376ee3f5578e79116
+oid sha256:e262c67abf09f51a6988d2779ae3bb916ffc4796bf41409d1a4cf45d6e527474
 size 1155772233
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:795fe76cfd6ac22612e53f90e3708fe6447c8d1c969f78a57978bb17493086b1
3
  size 14244
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1a0bdb5e52391cb330bbf26b7de42f02e891fbc6b02cd43250006d07b1a1b82
3
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2ef84ad524da6dbaaadb8576fa258a66ac5d4b080583a172fefc45887ff84f46
+oid sha256:053e3e9faa016c771f1e612410ff8471cd7862add0ff2bcd59dd7177245c9618
 size 1064
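
The four files above hold the resumable state of the run: model weights (`model.safetensors`), optimizer moments (`optimizer.pt`), RNG state (`rng_state.pth`), and LR-scheduler state (`scheduler.pt`). Training is normally resumed by pointing `Trainer.train(resume_from_checkpoint="last-checkpoint")` at this directory; to just peek at the updated weights without rebuilding the training pipeline, a minimal sketch (assuming the `safetensors` and `torch` packages are installed and the checkpoint has been pulled locally):

```python
# Sketch: inspect the tensors stored in the new model.safetensors.
from safetensors.torch import load_file

state_dict = load_file("last-checkpoint/model.safetensors")
print(f"{len(state_dict)} tensors")
for name, tensor in list(state_dict.items())[:5]:
    print(name, tuple(tensor.shape), tensor.dtype)
```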
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.41427454352378845,
   "best_model_checkpoint": "mikhail_panzo/ceb_b128_le3_s8000/checkpoint-500",
-  "epoch": 588.2352941176471,
+  "epoch": 627.4509803921569,
   "eval_steps": 500,
-  "global_step": 7500,
+  "global_step": 8000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1177,6 +1177,84 @@
       "eval_samples_per_second": 26.734,
       "eval_steps_per_second": 3.416,
       "step": 7500
+    },
+    {
+      "epoch": 592.156862745098,
+      "grad_norm": 0.027007540687918663,
+      "learning_rate": 7.533333333333334e-05,
+      "loss": 1.4173,
+      "step": 7550
+    },
+    {
+      "epoch": 596.0784313725491,
+      "grad_norm": 0.04154467582702637,
+      "learning_rate": 6.7e-05,
+      "loss": 1.4186,
+      "step": 7600
+    },
+    {
+      "epoch": 600.0,
+      "grad_norm": 0.10418181121349335,
+      "learning_rate": 5.8666666666666665e-05,
+      "loss": 1.4187,
+      "step": 7650
+    },
+    {
+      "epoch": 603.9215686274509,
+      "grad_norm": 0.041870325803756714,
+      "learning_rate": 5.0333333333333335e-05,
+      "loss": 1.4188,
+      "step": 7700
+    },
+    {
+      "epoch": 607.843137254902,
+      "grad_norm": 0.08349625766277313,
+      "learning_rate": 4.2000000000000004e-05,
+      "loss": 1.4191,
+      "step": 7750
+    },
+    {
+      "epoch": 611.7647058823529,
+      "grad_norm": 0.08984719216823578,
+      "learning_rate": 3.366666666666667e-05,
+      "loss": 1.4174,
+      "step": 7800
+    },
+    {
+      "epoch": 615.6862745098039,
+      "grad_norm": 0.028009561821818352,
+      "learning_rate": 2.5333333333333334e-05,
+      "loss": 1.4203,
+      "step": 7850
+    },
+    {
+      "epoch": 619.6078431372549,
+      "grad_norm": 0.025170741602778435,
+      "learning_rate": 1.7000000000000003e-05,
+      "loss": 1.4183,
+      "step": 7900
+    },
+    {
+      "epoch": 623.5294117647059,
+      "grad_norm": 0.08974526822566986,
+      "learning_rate": 8.666666666666666e-06,
+      "loss": 1.4177,
+      "step": 7950
+    },
+    {
+      "epoch": 627.4509803921569,
+      "grad_norm": 0.023904943838715553,
+      "learning_rate": 3.3333333333333335e-07,
+      "loss": 1.4184,
+      "step": 8000
+    },
+    {
+      "epoch": 627.4509803921569,
+      "eval_loss": 1.4349356889724731,
+      "eval_runtime": 6.7417,
+      "eval_samples_per_second": 26.7,
+      "eval_steps_per_second": 3.412,
+      "step": 8000
     }
   ],
   "logging_steps": 50,
@@ -1191,12 +1269,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.631701918768069e+17,
+  "total_flos": 1.7404667537759085e+17,
   "train_batch_size": 32,
   "trial_name": null,
   "trial_params": null