mikhail-panzo committed
Commit f7880a8
1 Parent(s): 13afe9e

Training in progress, step 8000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:61b08b427524e889a0b8d382f9115eba3798ab208df96f84d67a1110d9c104b2
+oid sha256:ca27b7323d4f7628932290ac099ef40f94c68ff7c1e019417d6f64ef02ed8123
 size 577789320
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1d219537847fbd7111b0d0cd3e7da18b22435bb3ca07d6d17301098b460ef335
+oid sha256:744dbfefab11895284826e4e6fa4e87d7e7514a9e41fc6d8d3dec1633b5f79e7
 size 1155772233
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a7fdad45a86febbfbf7416cb2067a77c42d668b5b524ad36010e05bf35427b6e
+oid sha256:ece47d351ec8541aeb5c36472c488dc667a04cd8f53d50bdc3cc307262d6739d
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ba9826396d1061aee6fdf7293fd789e47f8a5ac9db78d8396f43ecdcae9b12a2
+oid sha256:f11095f13a6bb8e73004d74d741982d584b499e9ac31a59ea1de9965f3fec76b
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.40983906388282776,
   "best_model_checkpoint": "mikhail-panzo/fil_b64_le5_s8000/checkpoint-5500",
-  "epoch": 326.0869565217391,
+  "epoch": 347.82608695652175,
   "eval_steps": 500,
-  "global_step": 7500,
+  "global_step": 8000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1177,6 +1177,84 @@
       "eval_samples_per_second": 23.069,
       "eval_steps_per_second": 2.902,
       "step": 7500
+    },
+    {
+      "epoch": 328.2608695652174,
+      "grad_norm": 0.8036932945251465,
+      "learning_rate": 7.566666666666667e-07,
+      "loss": 0.4167,
+      "step": 7550
+    },
+    {
+      "epoch": 330.4347826086956,
+      "grad_norm": 0.9011595845222473,
+      "learning_rate": 6.733333333333334e-07,
+      "loss": 0.4146,
+      "step": 7600
+    },
+    {
+      "epoch": 332.60869565217394,
+      "grad_norm": 1.1051392555236816,
+      "learning_rate": 5.900000000000001e-07,
+      "loss": 0.414,
+      "step": 7650
+    },
+    {
+      "epoch": 334.7826086956522,
+      "grad_norm": 0.8294230103492737,
+      "learning_rate": 5.066666666666667e-07,
+      "loss": 0.4136,
+      "step": 7700
+    },
+    {
+      "epoch": 336.95652173913044,
+      "grad_norm": 0.8385105729103088,
+      "learning_rate": 4.233333333333334e-07,
+      "loss": 0.4031,
+      "step": 7750
+    },
+    {
+      "epoch": 339.1304347826087,
+      "grad_norm": 1.015714406967163,
+      "learning_rate": 3.4000000000000003e-07,
+      "loss": 0.4013,
+      "step": 7800
+    },
+    {
+      "epoch": 341.30434782608694,
+      "grad_norm": 0.8815127611160278,
+      "learning_rate": 2.566666666666667e-07,
+      "loss": 0.4067,
+      "step": 7850
+    },
+    {
+      "epoch": 343.4782608695652,
+      "grad_norm": 1.0422344207763672,
+      "learning_rate": 1.7333333333333335e-07,
+      "loss": 0.4095,
+      "step": 7900
+    },
+    {
+      "epoch": 345.6521739130435,
+      "grad_norm": 1.6998138427734375,
+      "learning_rate": 9e-08,
+      "loss": 0.4089,
+      "step": 7950
+    },
+    {
+      "epoch": 347.82608695652175,
+      "grad_norm": 0.9665780067443848,
+      "learning_rate": 6.666666666666667e-09,
+      "loss": 0.4097,
+      "step": 8000
+    },
+    {
+      "epoch": 347.82608695652175,
+      "eval_loss": 0.41188350319862366,
+      "eval_runtime": 6.8472,
+      "eval_samples_per_second": 23.221,
+      "eval_steps_per_second": 2.921,
+      "step": 8000
     }
   ],
   "logging_steps": 50,
@@ -1191,12 +1269,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.1135871544625309e+17,
+  "total_flos": 1.187693654068754e+17,
   "train_batch_size": 64,
   "trial_name": null,
   "trial_params": null