mikhail-panzo committed
Commit c073d2a · verified · 1 Parent(s): 945f02b

Training in progress, step 9000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e0f2409f7977c0c11070e7847ab6d58e94975afadfbf2a12bd0548fcd7859a55
+oid sha256:9107c0ff4e2763513ec5bf8850c4b4e538db65052ee12ccd65d557cafb1e34b8
 size 577789320
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4133e095e6d740409db22a76e80e3b6a7fa538d8c6ba6fe7edb1a0ede80eaf60
+oid sha256:c1e28ceda6028390e61736b0495cead414f93a42f97c1b979bdc56131341a08c
 size 1155772233
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:517c8b809d6792d05ae4c9199141efb435941aafdeb5561eaa907592337ad507
+oid sha256:12b7a8f09422f4c6b082af7bb144b5b0532b4a256c4619f625ac9a304c7ea0aa
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5efdf488bd79f4078dcc22ed21cb113945810568eae9749dc0cb12dc690695b7
+oid sha256:4f334224cb0aa3586560a9532b53aefd38c1d84e10d9b30ca047caee043ff116
 size 1064
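
The four files above are Git LFS pointers: each records only the new object's sha256 and byte size, not the tensors themselves. As a minimal sketch (not part of this repository; the local path is an assumption), the snippet below checks a downloaded last-checkpoint/model.safetensors against the pointer values from this commit.

```python
# Sketch: verify a locally downloaded checkpoint file against its Git LFS pointer.
# The pointer stores only the object's sha256 and size, so a match on both
# confirms the file is the object this commit references.
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Values copied from the model.safetensors pointer in this commit.
expected_oid = "9107c0ff4e2763513ec5bf8850c4b4e538db65052ee12ccd65d557cafb1e34b8"
expected_size = 577789320

local = Path("last-checkpoint/model.safetensors")  # hypothetical local download path
assert local.stat().st_size == expected_size, "size mismatch"
assert sha256_of(local) == expected_oid, "sha256 mismatch"
print("model.safetensors matches the LFS pointer in this commit")
```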
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.310507208108902,
-  "best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s8000/checkpoint-8500",
-  "epoch": 14.24083769633508,
+  "best_metric": 0.31039854884147644,
+  "best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s8000/checkpoint-9000",
+  "epoch": 15.078534031413612,
   "eval_steps": 500,
-  "global_step": 8500,
+  "global_step": 9000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1333,6 +1333,84 @@
       "eval_samples_per_second": 30.365,
       "eval_steps_per_second": 3.799,
       "step": 8500
+    },
+    {
+      "epoch": 14.324607329842932,
+      "grad_norm": 2.537041425704956,
+      "learning_rate": 3.453e-05,
+      "loss": 0.3418,
+      "step": 8550
+    },
+    {
+      "epoch": 14.408376963350785,
+      "grad_norm": 1.3357998132705688,
+      "learning_rate": 3.403e-05,
+      "loss": 0.3408,
+      "step": 8600
+    },
+    {
+      "epoch": 14.492146596858639,
+      "grad_norm": 0.8550173044204712,
+      "learning_rate": 3.353e-05,
+      "loss": 0.3408,
+      "step": 8650
+    },
+    {
+      "epoch": 14.575916230366492,
+      "grad_norm": 1.4455218315124512,
+      "learning_rate": 3.303e-05,
+      "loss": 0.3407,
+      "step": 8700
+    },
+    {
+      "epoch": 14.659685863874346,
+      "grad_norm": 1.0547473430633545,
+      "learning_rate": 3.253e-05,
+      "loss": 0.3382,
+      "step": 8750
+    },
+    {
+      "epoch": 14.743455497382199,
+      "grad_norm": 1.5398694276809692,
+      "learning_rate": 3.2029999999999997e-05,
+      "loss": 0.3402,
+      "step": 8800
+    },
+    {
+      "epoch": 14.827225130890053,
+      "grad_norm": 1.008465051651001,
+      "learning_rate": 3.1530000000000005e-05,
+      "loss": 0.3433,
+      "step": 8850
+    },
+    {
+      "epoch": 14.910994764397905,
+      "grad_norm": 1.8319462537765503,
+      "learning_rate": 3.1030000000000006e-05,
+      "loss": 0.341,
+      "step": 8900
+    },
+    {
+      "epoch": 14.99476439790576,
+      "grad_norm": 1.1432167291641235,
+      "learning_rate": 3.053e-05,
+      "loss": 0.3369,
+      "step": 8950
+    },
+    {
+      "epoch": 15.078534031413612,
+      "grad_norm": 1.098186731338501,
+      "learning_rate": 3.0030000000000002e-05,
+      "loss": 0.3396,
+      "step": 9000
+    },
+    {
+      "epoch": 15.078534031413612,
+      "eval_loss": 0.31039854884147644,
+      "eval_runtime": 280.3967,
+      "eval_samples_per_second": 30.275,
+      "eval_steps_per_second": 3.787,
+      "step": 9000
     }
   ],
   "logging_steps": 50,
@@ -1352,7 +1430,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.522865163316464e+17,
+  "total_flos": 1.6124734632740026e+17,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null