mikhail-panzo committed
Commit 10d381d
1 Parent(s): 9dfdfcd

Training in progress, step 2000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bb0a458b340d24f70fc214681d5c5119f47646e9547550f4a52c22623cc545e0
+ oid sha256:543ba4376065b1459d1d1e92a4d4912f7658d805e431dda1e8d7076b636b8dec
  size 577789320
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3e145b91418be3b833420a93109497d7460e2e0c60c30711829d5ecce85f1097
+ oid sha256:c5e89b6713085fb5d6ebc9f3653b88f811d1b5ec815e6e181c34a162bcbe7c59
  size 1155772233
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dc65651f7b378ef8fe4da6f30ced75150f8b4133aff002554882ce4b7f48b94e
+ oid sha256:2f01983801a45234478c13db780514a852cdeaff2aa79f279442e47dc68cb11d
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f32a07717606d664792d4ebfd434223710fe948a637ff5f34234da98aa96ac43
+ oid sha256:b911007def71d4e0760e095fb38b9695588c94c6dfc0cb1ee70d933b50c5ece7
  size 1064
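
The four files above are stored with Git LFS, so each diff only touches the three-line pointer (spec version, sha256 oid, byte size); the binary blobs themselves live in LFS storage. As a minimal sketch (the local paths below are hypothetical and not part of this commit), a downloaded blob can be checked against the oid and size recorded in its pointer:

```python
import hashlib
import os

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded blob against the oid/size recorded in its Git LFS pointer file."""
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value

    expected_oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    # Stream the blob through sha256 so large checkpoints don't need to fit in memory.
    sha256 = hashlib.sha256()
    with open(blob_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            sha256.update(chunk)

    return (sha256.hexdigest() == expected_oid
            and os.path.getsize(blob_path) == expected_size)

# Hypothetical paths: the checked-out pointer file vs. the resolved download.
print(verify_lfs_pointer("last-checkpoint/model.safetensors",
                         "downloads/model.safetensors"))
```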
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.44668668508529663,
- "best_model_checkpoint": "mikhail-panzo/zlm_b128_le5_s8000/checkpoint-1500",
- "epoch": 2.513089005235602,
+ "best_metric": 0.4236186444759369,
+ "best_model_checkpoint": "mikhail-panzo/zlm_b128_le5_s8000/checkpoint-2000",
+ "epoch": 3.350785340314136,
  "eval_steps": 500,
- "global_step": 1500,
+ "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -241,6 +241,84 @@
  "eval_samples_per_second": 33.638,
  "eval_steps_per_second": 4.208,
  "step": 1500
+ },
+ {
+ "epoch": 2.5968586387434556,
+ "grad_norm": 1.6376454830169678,
+ "learning_rate": 7.745e-06,
+ "loss": 0.503,
+ "step": 1550
+ },
+ {
+ "epoch": 2.680628272251309,
+ "grad_norm": 1.7104960680007935,
+ "learning_rate": 7.995e-06,
+ "loss": 0.4988,
+ "step": 1600
+ },
+ {
+ "epoch": 2.7643979057591626,
+ "grad_norm": 2.3372185230255127,
+ "learning_rate": 8.245000000000002e-06,
+ "loss": 0.4954,
+ "step": 1650
+ },
+ {
+ "epoch": 2.8481675392670156,
+ "grad_norm": 2.170474052429199,
+ "learning_rate": 8.495e-06,
+ "loss": 0.4971,
+ "step": 1700
+ },
+ {
+ "epoch": 2.931937172774869,
+ "grad_norm": 2.3041465282440186,
+ "learning_rate": 8.745000000000002e-06,
+ "loss": 0.4948,
+ "step": 1750
+ },
+ {
+ "epoch": 3.0157068062827226,
+ "grad_norm": 2.1798529624938965,
+ "learning_rate": 8.995000000000001e-06,
+ "loss": 0.4849,
+ "step": 1800
+ },
+ {
+ "epoch": 3.099476439790576,
+ "grad_norm": 1.8813133239746094,
+ "learning_rate": 9.245e-06,
+ "loss": 0.4839,
+ "step": 1850
+ },
+ {
+ "epoch": 3.183246073298429,
+ "grad_norm": 1.8749632835388184,
+ "learning_rate": 9.49e-06,
+ "loss": 0.4871,
+ "step": 1900
+ },
+ {
+ "epoch": 3.2670157068062826,
+ "grad_norm": 1.917149305343628,
+ "learning_rate": 9.74e-06,
+ "loss": 0.4724,
+ "step": 1950
+ },
+ {
+ "epoch": 3.350785340314136,
+ "grad_norm": 1.9401865005493164,
+ "learning_rate": 9.990000000000001e-06,
+ "loss": 0.4776,
+ "step": 2000
+ },
+ {
+ "epoch": 3.350785340314136,
+ "eval_loss": 0.4236186444759369,
+ "eval_runtime": 261.4044,
+ "eval_samples_per_second": 32.475,
+ "eval_steps_per_second": 4.063,
+ "step": 2000
  }
  ],
  "logging_steps": 50,
@@ -260,7 +338,7 @@
  "attributes": {}
  }
  },
- "total_flos": 2.686672014814656e+16,
+ "total_flos": 3.5804068604023104e+16,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null