mikhail-panzo committed
Commit: 374aed1
Parent: 43cd754

Training in progress, step 8000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:99876fe6bd351f75a02b3a9808186d1b448bd2491ee23ee0a8aeb43bc170fbd6
+oid sha256:1bd35714ae6834e3a058bdc15b7498341f12549cbde8d04ed3efab8547ebadad
 size 577789320
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a96075133b42f651d7fd0d56298216a77015a7ef39f1a5b980259ccf9be9b410
+oid sha256:6abb4c00a59e74b942e5f9da52770fc7e6fb415772ab74e2af1e568a7cc45b67
 size 1155772233
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7aca6dd3493fbba1f0e04ce463672e9c6637c7fdcee4f2b30e09ee0d653c8045
+oid sha256:e65d5e866573e78443e1465c9b9c0413095bd120b3384ac5fdd2285abe38197f
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b5cdfe5ed4f14bdadfee62402701e9c3c91a7e1b8246c7c7f0be536b67574fb3
+oid sha256:797161cb36b8434a2e9b424b1551b1d663e1ac4112ca3f634c3c132aead6996b
 size 1064
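
Note: the four files above are Git LFS pointers; only the sha256/size metadata is versioned here, and the actual checkpoint blobs live in LFS storage. A minimal sketch for fetching one of them through the Hub API is shown below; the repo_id is an assumption inferred from the best_model_checkpoint path in trainer_state.json, not confirmed by this commit.

# Minimal sketch: resolve an LFS pointer to the real file via huggingface_hub.
# Assumption: the repository is "mikhail-panzo/ceb_b32_le4_s8000" (inferred from
# the best_model_checkpoint path in trainer_state.json below).
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="mikhail-panzo/ceb_b32_le4_s8000",
    filename="last-checkpoint/model.safetensors",
)
print(local_path)  # cached local copy of the 577,789,320-byte weights file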
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.39791539311408997,
   "best_model_checkpoint": "mikhail-panzo/ceb_b32_le4_s8000/checkpoint-4500",
-  "epoch": 148.5148514851485,
+  "epoch": 158.41584158415841,
   "eval_steps": 500,
-  "global_step": 7500,
+  "global_step": 8000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1177,6 +1177,84 @@
   "eval_samples_per_second": 22.161,
   "eval_steps_per_second": 2.832,
   "step": 7500
+  },
+  {
+    "epoch": 149.5049504950495,
+    "grad_norm": 0.6580781936645508,
+    "learning_rate": 7.55e-06,
+    "loss": 0.345,
+    "step": 7550
+  },
+  {
+    "epoch": 150.4950495049505,
+    "grad_norm": 1.1213836669921875,
+    "learning_rate": 6.716666666666667e-06,
+    "loss": 0.3411,
+    "step": 7600
+  },
+  {
+    "epoch": 151.4851485148515,
+    "grad_norm": 0.6676567792892456,
+    "learning_rate": 5.8833333333333335e-06,
+    "loss": 0.3387,
+    "step": 7650
+  },
+  {
+    "epoch": 152.47524752475246,
+    "grad_norm": 0.6813247799873352,
+    "learning_rate": 5.050000000000001e-06,
+    "loss": 0.3416,
+    "step": 7700
+  },
+  {
+    "epoch": 153.46534653465346,
+    "grad_norm": 1.187516212463379,
+    "learning_rate": 4.216666666666666e-06,
+    "loss": 0.3448,
+    "step": 7750
+  },
+  {
+    "epoch": 154.45544554455446,
+    "grad_norm": 0.8873435854911804,
+    "learning_rate": 3.3833333333333337e-06,
+    "loss": 0.3413,
+    "step": 7800
+  },
+  {
+    "epoch": 155.44554455445544,
+    "grad_norm": 0.7723808288574219,
+    "learning_rate": 2.55e-06,
+    "loss": 0.3431,
+    "step": 7850
+  },
+  {
+    "epoch": 156.43564356435644,
+    "grad_norm": 1.762637734413147,
+    "learning_rate": 1.7166666666666668e-06,
+    "loss": 0.3424,
+    "step": 7900
+  },
+  {
+    "epoch": 157.4257425742574,
+    "grad_norm": 0.5451408624649048,
+    "learning_rate": 8.833333333333334e-07,
+    "loss": 0.3369,
+    "step": 7950
+  },
+  {
+    "epoch": 158.41584158415841,
+    "grad_norm": 0.7317779660224915,
+    "learning_rate": 5.0000000000000004e-08,
+    "loss": 0.34,
+    "step": 8000
+  },
+  {
+    "epoch": 158.41584158415841,
+    "eval_loss": 0.3990664780139923,
+    "eval_runtime": 7.7498,
+    "eval_samples_per_second": 23.226,
+    "eval_steps_per_second": 2.968,
+    "step": 8000
   }
 ],
 "logging_steps": 50,
@@ -1191,12 +1269,12 @@
   "should_evaluate": false,
   "should_log": false,
   "should_save": true,
-  "should_training_stop": false
+  "should_training_stop": true
   },
   "attributes": {}
   }
 },
-  "total_flos": 4.051916995267073e+16,
+  "total_flos": 4.322114324975938e+16,
 "train_batch_size": 16,
 "trial_name": null,
 "trial_params": null