joelniklaus committed
Commit 70c1a9d
1 Parent(s): 598c023

Training in progress, step 800000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:017878d7c611ba78f49d36fac2d29744f60586a2f3748e728a8daa9a71cdc581
+ oid sha256:7a59e7f6df51e0bac72889672011d5abed4955a7286c0ba1bfc6cbbc6d06b648
  size 3480942553
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:39eb0b770ba80ef91093b86ebb60b6ce4aa8879c4b6c52b6fa67d411db4e4956
+ oid sha256:bf81887de6dde40582dab3038c8fbf1a72819d49641a2f1dd373aaea0579ec65
  size 1740493675
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e432fc8945094b64cfb44d4ef20ccce8569657d41d36365d65d61ece0bc81dc
+ oid sha256:2fc69467f39f910ee617c785262f1f30d892abb42f18e232a2331dd23a585486
  size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e432fc8945094b64cfb44d4ef20ccce8569657d41d36365d65d61ece0bc81dc
+ oid sha256:2fc69467f39f910ee617c785262f1f30d892abb42f18e232a2331dd23a585486
  size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e432fc8945094b64cfb44d4ef20ccce8569657d41d36365d65d61ece0bc81dc
+ oid sha256:2fc69467f39f910ee617c785262f1f30d892abb42f18e232a2331dd23a585486
  size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e432fc8945094b64cfb44d4ef20ccce8569657d41d36365d65d61ece0bc81dc
+ oid sha256:2fc69467f39f910ee617c785262f1f30d892abb42f18e232a2331dd23a585486
  size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e432fc8945094b64cfb44d4ef20ccce8569657d41d36365d65d61ece0bc81dc
+ oid sha256:2fc69467f39f910ee617c785262f1f30d892abb42f18e232a2331dd23a585486
  size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e432fc8945094b64cfb44d4ef20ccce8569657d41d36365d65d61ece0bc81dc
+ oid sha256:2fc69467f39f910ee617c785262f1f30d892abb42f18e232a2331dd23a585486
  size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e432fc8945094b64cfb44d4ef20ccce8569657d41d36365d65d61ece0bc81dc
+ oid sha256:2fc69467f39f910ee617c785262f1f30d892abb42f18e232a2331dd23a585486
  size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9e432fc8945094b64cfb44d4ef20ccce8569657d41d36365d65d61ece0bc81dc
+ oid sha256:2fc69467f39f910ee617c785262f1f30d892abb42f18e232a2331dd23a585486
  size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:411052d7656a2fbf4baa154bd61bcb86c1d4e17113e6919b82f37e9aff99019f
+ oid sha256:50e51b9224ded3ddffee57f26ec45414409de0232579ddafb7f3e083076fa4c5
  size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.75,
- "global_step": 750000,
+ "epoch": 0.8,
+ "global_step": 800000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -4626,11 +4626,319 @@
  "eval_samples_per_second": 27.492,
  "eval_steps_per_second": 0.434,
  "step": 750000
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.6014457078461353e-05,
+ "loss": 0.753,
+ "step": 751000
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.5893364398662176e-05,
+ "loss": 0.7844,
+ "step": 752000
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.5772644703565565e-05,
+ "loss": 0.7481,
+ "step": 753000
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 1.5652299313342773e-05,
+ "loss": 0.7615,
+ "step": 754000
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.553232954407171e-05,
+ "loss": 0.7467,
+ "step": 755000
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.5412736707722537e-05,
+ "loss": 0.7427,
+ "step": 756000
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.5293522112143373e-05,
+ "loss": 0.7631,
+ "step": 757000
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.517468706104589e-05,
+ "loss": 0.7668,
+ "step": 758000
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.5056232853991209e-05,
+ "loss": 0.7717,
+ "step": 759000
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4938160786375572e-05,
+ "loss": 0.7677,
+ "step": 760000
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4820472149416154e-05,
+ "loss": 0.7653,
+ "step": 761000
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.470316823013707e-05,
+ "loss": 0.748,
+ "step": 762000
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4586250311355132e-05,
+ "loss": 0.7836,
+ "step": 763000
+ },
+ {
+ "epoch": 0.76,
+ "learning_rate": 1.4469719671666043e-05,
+ "loss": 0.7645,
+ "step": 764000
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.435357758543015e-05,
+ "loss": 0.7676,
+ "step": 765000
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4237825322758736e-05,
+ "loss": 0.7529,
+ "step": 766000
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.412246414949997e-05,
+ "loss": 0.7637,
+ "step": 767000
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.4007495327225162e-05,
+ "loss": 0.7537,
+ "step": 768000
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.389292011321498e-05,
+ "loss": 0.7647,
+ "step": 769000
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.3778739760445552e-05,
+ "loss": 0.7705,
+ "step": 770000
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.3664955517574968e-05,
+ "loss": 0.762,
+ "step": 771000
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.3551568628929434e-05,
+ "loss": 0.7448,
+ "step": 772000
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.343858033448982e-05,
+ "loss": 0.7486,
+ "step": 773000
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 1.3325991869878013e-05,
+ "loss": 0.7718,
+ "step": 774000
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3213804466343421e-05,
+ "loss": 0.773,
+ "step": 775000
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.3102019350749528e-05,
+ "loss": 0.7734,
+ "step": 776000
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.299063774556042e-05,
+ "loss": 0.7602,
+ "step": 777000
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.2879660868827508e-05,
+ "loss": 0.7623,
+ "step": 778000
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.2769089934176126e-05,
+ "loss": 0.7619,
+ "step": 779000
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.2658926150792322e-05,
+ "loss": 0.7544,
+ "step": 780000
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.2549170723409549e-05,
+ "loss": 0.758,
+ "step": 781000
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.243982485229559e-05,
+ "loss": 0.7331,
+ "step": 782000
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.233088973323937e-05,
+ "loss": 0.7447,
+ "step": 783000
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 1.2222366557537911e-05,
+ "loss": 0.7417,
+ "step": 784000
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.2114256511983274e-05,
+ "loss": 0.7313,
+ "step": 785000
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.2006560778849578e-05,
+ "loss": 0.7442,
+ "step": 786000
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.1899280535880119e-05,
+ "loss": 0.7454,
+ "step": 787000
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.1792416956274444e-05,
+ "loss": 0.7448,
+ "step": 788000
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.1685971208675539e-05,
+ "loss": 0.7349,
+ "step": 789000
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.157994445715706e-05,
+ "loss": 0.7434,
+ "step": 790000
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.1474337861210543e-05,
+ "loss": 0.7769,
+ "step": 791000
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.1369152575732822e-05,
+ "loss": 0.7681,
+ "step": 792000
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.1264389751013326e-05,
+ "loss": 0.7697,
+ "step": 793000
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 1.1160050532721528e-05,
+ "loss": 0.7367,
+ "step": 794000
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.1056136061894384e-05,
+ "loss": 0.7663,
+ "step": 795000
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.095264747492391e-05,
+ "loss": 0.7311,
+ "step": 796000
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.0849585903544706e-05,
+ "loss": 0.7607,
+ "step": 797000
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.0746952474821614e-05,
+ "loss": 0.7645,
+ "step": 798000
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.0644748311137376e-05,
+ "loss": 0.765,
+ "step": 799000
+ },
+ {
+ "epoch": 0.8,
+ "learning_rate": 1.0542974530180327e-05,
+ "loss": 0.7529,
+ "step": 800000
+ },
+ {
+ "epoch": 0.8,
+ "eval_loss": 0.384126216173172,
+ "eval_runtime": 188.1503,
+ "eval_samples_per_second": 26.574,
+ "eval_steps_per_second": 0.42,
+ "step": 800000
  }
  ],
  "max_steps": 1000000,
  "num_train_epochs": 9223372036854775807,
- "total_flos": 4.4751579512832e+19,
+ "total_flos": 4.77350181470208e+19,
  "trial_name": null,
  "trial_params": null
  }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:39eb0b770ba80ef91093b86ebb60b6ce4aa8879c4b6c52b6fa67d411db4e4956
+ oid sha256:bf81887de6dde40582dab3038c8fbf1a72819d49641a2f1dd373aaea0579ec65
  size 1740493675
runs/Mar22_03-02-10_t1v-n-ae339136-w-0/events.out.tfevents.1679454966.t1v-n-ae339136-w-0.10622.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3c2540c671b3739602ebca9c18ce48494cf726814b5298e2ec76a5f6a5a74070
- size 20365
+ oid sha256:02247ce8505148a1eebdf34cd44f6b9840860d3a9395b89c1b99be180a942fb9
+ size 28641
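
Each changed file in this commit is a Git LFS pointer, so the diff only records the new object hash and size; the tensors themselves live in LFS storage. Once the repository has been cloned locally with git-lfs, the updated trainer state can be inspected to confirm where training stands. A minimal sketch, assuming the checkpoint has been fetched as last-checkpoint/:

```python
import json

# Read the Trainer state that this commit advanced from step 750000 to 800000.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])       # 800000
print(state["epoch"])             # 0.8
print(state["max_steps"])         # 1000000
print(state["log_history"][-1])   # latest entry: the eval at step 800000 (eval_loss ~0.384)
```

In a standard transformers Trainer workflow, passing this directory via trainer.train(resume_from_checkpoint="last-checkpoint") would restore optimizer.pt, scheduler.pt, the rng_state_*.pth files, and trainer_state.json, continuing training from step 800000 toward the configured max_steps of 1000000.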