mikhail-panzo committed
Commit: fdbb4a2
Parent: efd9faf

Training in progress, step 4000, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:12ebe9f8ae824e04ec42c21281b6ee77ecf6ec560c2067b986f9a3a7439557b1
+oid sha256:09f46755ffeadc3d999dd23fd7022daf35bf2fbdd0db7cbfb92677a532f0a38f
 size 577789320
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0ff28dd97c1be0347d1839efcbc7062ce46057ca02707999acb5822aa7d44926
+oid sha256:4766a7bbe36c3cafb785ca7ab63a31dbb580385e53a5efb78b1082ff7c77f718
 size 1155772233
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aac92226f0d21eb319c78dbae49877455a40d317910aac9583ab04de32de7d1c
+oid sha256:56cc8e2d77d54e9e58cdbed4bf8357f729e20efe110f0bdfcd24e4976aebb3b0
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1a4b47ac4e852aed8ea089749b4911c7dbd61b1aa03f00c4803df03fc43f36e5
+oid sha256:6f3323bc562f08c84a55f57ffe42cad4e899b9bed240a410d83f006b05ef914d
 size 1064
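
The four files above are Git LFS pointer stubs: each stores only the LFS spec version, the sha256 of the real binary, and its byte size, so the diff shows nothing but a changed oid. As a minimal sketch (not part of this repository), a locally downloaded checkpoint file can be checked against the oid shown above; the path and the expected hash are taken from the new model.safetensors pointer:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file through sha256 so large checkpoints are not loaded into RAM."""
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid copied from the new model.safetensors pointer in this commit.
expected = "09f46755ffeadc3d999dd23fd7022daf35bf2fbdd0db7cbfb92677a532f0a38f"
actual = sha256_of("last-checkpoint/model.safetensors")
print("match" if actual == expected else f"mismatch: {actual}")
```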
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.3431868255138397,
-  "best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s8000/checkpoint-3500",
-  "epoch": 5.863874345549738,
+  "best_metric": 0.33282962441444397,
+  "best_model_checkpoint": "mikhail_panzo/zlm_b128_le4_s8000/checkpoint-4000",
+  "epoch": 6.701570680628272,
   "eval_steps": 500,
-  "global_step": 3500,
+  "global_step": 4000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -553,6 +553,84 @@
       "eval_samples_per_second": 31.102,
       "eval_steps_per_second": 3.891,
       "step": 3500
+    },
+    {
+      "epoch": 5.947643979057592,
+      "grad_norm": 1.914294719696045,
+      "learning_rate": 7.42e-05,
+      "loss": 0.3673,
+      "step": 3550
+    },
+    {
+      "epoch": 6.031413612565445,
+      "grad_norm": 1.3186005353927612,
+      "learning_rate": 7.336666666666667e-05,
+      "loss": 0.3665,
+      "step": 3600
+    },
+    {
+      "epoch": 6.115183246073299,
+      "grad_norm": 2.019273519515991,
+      "learning_rate": 7.253333333333334e-05,
+      "loss": 0.3697,
+      "step": 3650
+    },
+    {
+      "epoch": 6.198952879581152,
+      "grad_norm": 1.0517597198486328,
+      "learning_rate": 7.17e-05,
+      "loss": 0.3674,
+      "step": 3700
+    },
+    {
+      "epoch": 6.282722513089006,
+      "grad_norm": 1.0202686786651611,
+      "learning_rate": 7.086666666666666e-05,
+      "loss": 0.3706,
+      "step": 3750
+    },
+    {
+      "epoch": 6.366492146596858,
+      "grad_norm": 1.4179818630218506,
+      "learning_rate": 7.003333333333335e-05,
+      "loss": 0.3681,
+      "step": 3800
+    },
+    {
+      "epoch": 6.450261780104712,
+      "grad_norm": 1.3820505142211914,
+      "learning_rate": 6.92e-05,
+      "loss": 0.3671,
+      "step": 3850
+    },
+    {
+      "epoch": 6.534031413612565,
+      "grad_norm": 1.3857202529907227,
+      "learning_rate": 6.836666666666667e-05,
+      "loss": 0.3641,
+      "step": 3900
+    },
+    {
+      "epoch": 6.617801047120419,
+      "grad_norm": 1.0996108055114746,
+      "learning_rate": 6.753333333333334e-05,
+      "loss": 0.3685,
+      "step": 3950
+    },
+    {
+      "epoch": 6.701570680628272,
+      "grad_norm": 0.9405946731567383,
+      "learning_rate": 6.670000000000001e-05,
+      "loss": 0.366,
+      "step": 4000
+    },
+    {
+      "epoch": 6.701570680628272,
+      "eval_loss": 0.33282962441444397,
+      "eval_runtime": 271.6112,
+      "eval_samples_per_second": 31.254,
+      "eval_steps_per_second": 3.91,
+      "step": 4000
     }
   ],
   "logging_steps": 50,
@@ -572,7 +650,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 6.269975786347891e+16,
+  "total_flos": 7.164514367145274e+16,
   "train_batch_size": 16,
   "trial_name": null,
   "trial_params": null