joelniklaus committed on
Commit 38f12ec
1 Parent(s): 4872d96

Training in progress, step 650000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:69b6701bd22ffdc878411bf1dc4db5ba25bd2d3977ba5bfb9c755b5ccc760666
+ oid sha256:7b936634e1dfc46ccbf5d05442acdf50be4c41a28a88b6765a1eef45e08b8630
  size 885325017
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cd75095c4597acd6406bda79e55f3e47837f8f338b561712587309fcdb721a0b
+ oid sha256:ab1ec67e0f440b016179e8869a41e8d020330d2dd0454d4d4861673b4f892817
  size 442675755
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
+ oid sha256:746e89f09f32dffb623a925841ddd5ad900b2452171cd343a4f6909ee43e4b5c
  size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
+ oid sha256:746e89f09f32dffb623a925841ddd5ad900b2452171cd343a4f6909ee43e4b5c
  size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
+ oid sha256:746e89f09f32dffb623a925841ddd5ad900b2452171cd343a4f6909ee43e4b5c
  size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
+ oid sha256:746e89f09f32dffb623a925841ddd5ad900b2452171cd343a4f6909ee43e4b5c
  size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
+ oid sha256:746e89f09f32dffb623a925841ddd5ad900b2452171cd343a4f6909ee43e4b5c
  size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
+ oid sha256:746e89f09f32dffb623a925841ddd5ad900b2452171cd343a4f6909ee43e4b5c
  size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
+ oid sha256:746e89f09f32dffb623a925841ddd5ad900b2452171cd343a4f6909ee43e4b5c
  size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cc2331d91bd1c63f909214b2a204ed4a77d034f703611a4b0d2b658b9d565d4a
+ oid sha256:746e89f09f32dffb623a925841ddd5ad900b2452171cd343a4f6909ee43e4b5c
  size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f595742cd0d96240559aaf1ff72fa8686f62da9f07c5878ab2af30ab1e4f0a07
+ oid sha256:9d7fa20411577666fac76fe76348b4f9231439cc2e524d6e3185910c258591e9
  size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 4.015832,
- "global_step": 600000,
+ "epoch": 4.065832,
+ "global_step": 650000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -3702,11 +3702,319 @@
  "eval_samples_per_second": 457.799,
  "eval_steps_per_second": 3.662,
  "step": 600000
+ },
+ {
+ "epoch": 4.02,
+ "learning_rate": 3.756550564175727e-05,
+ "loss": 0.6136,
+ "step": 601000
+ },
+ {
+ "epoch": 4.02,
+ "learning_rate": 3.74054216221926e-05,
+ "loss": 0.6637,
+ "step": 602000
+ },
+ {
+ "epoch": 4.02,
+ "learning_rate": 3.7245475334919246e-05,
+ "loss": 0.7611,
+ "step": 603000
+ },
+ {
+ "epoch": 4.02,
+ "learning_rate": 3.7085668529084184e-05,
+ "loss": 0.7121,
+ "step": 604000
+ },
+ {
+ "epoch": 4.02,
+ "learning_rate": 3.6926002952309016e-05,
+ "loss": 0.6331,
+ "step": 605000
+ },
+ {
+ "epoch": 4.02,
+ "learning_rate": 3.676648035067093e-05,
+ "loss": 0.595,
+ "step": 606000
+ },
+ {
+ "epoch": 4.02,
+ "learning_rate": 3.6607102468683526e-05,
+ "loss": 0.6658,
+ "step": 607000
+ },
+ {
+ "epoch": 4.02,
+ "learning_rate": 3.6447871049277796e-05,
+ "loss": 0.7491,
+ "step": 608000
+ },
+ {
+ "epoch": 4.02,
+ "learning_rate": 3.628878783378302e-05,
+ "loss": 0.7413,
+ "step": 609000
+ },
+ {
+ "epoch": 4.03,
+ "learning_rate": 3.612985456190778e-05,
+ "loss": 0.6191,
+ "step": 610000
+ },
+ {
+ "epoch": 4.03,
+ "learning_rate": 3.597107297172084e-05,
+ "loss": 0.5868,
+ "step": 611000
+ },
+ {
+ "epoch": 4.03,
+ "learning_rate": 3.581244479963225e-05,
+ "loss": 0.6674,
+ "step": 612000
+ },
+ {
+ "epoch": 4.03,
+ "learning_rate": 3.5653971780374295e-05,
+ "loss": 0.7556,
+ "step": 613000
+ },
+ {
+ "epoch": 4.03,
+ "learning_rate": 3.5495655646982505e-05,
+ "loss": 0.7313,
+ "step": 614000
+ },
+ {
+ "epoch": 4.03,
+ "learning_rate": 3.533749813077677e-05,
+ "loss": 0.6026,
+ "step": 615000
+ },
+ {
+ "epoch": 4.03,
+ "learning_rate": 3.517950096134232e-05,
+ "loss": 0.6135,
+ "step": 616000
+ },
+ {
+ "epoch": 4.03,
+ "learning_rate": 3.5021665866510925e-05,
+ "loss": 0.6554,
+ "step": 617000
+ },
+ {
+ "epoch": 4.03,
+ "learning_rate": 3.4863994572341843e-05,
+ "loss": 0.7581,
+ "step": 618000
+ },
+ {
+ "epoch": 4.03,
+ "learning_rate": 3.470648880310313e-05,
+ "loss": 0.7068,
+ "step": 619000
+ },
+ {
+ "epoch": 4.04,
+ "learning_rate": 3.4549150281252636e-05,
+ "loss": 0.6393,
+ "step": 620000
+ },
+ {
+ "epoch": 4.04,
+ "learning_rate": 3.439198072741921e-05,
+ "loss": 0.5937,
+ "step": 621000
+ },
+ {
+ "epoch": 4.04,
+ "learning_rate": 3.423498186038393e-05,
+ "loss": 0.6572,
+ "step": 622000
+ },
+ {
+ "epoch": 4.04,
+ "learning_rate": 3.407815539706124e-05,
+ "loss": 0.7421,
+ "step": 623000
+ },
+ {
+ "epoch": 4.04,
+ "learning_rate": 3.392150305248024e-05,
+ "loss": 0.7376,
+ "step": 624000
+ },
+ {
+ "epoch": 4.04,
+ "learning_rate": 3.3765026539765834e-05,
+ "loss": 0.6221,
+ "step": 625000
+ },
+ {
+ "epoch": 4.04,
+ "learning_rate": 3.360872757012011e-05,
+ "loss": 0.5877,
+ "step": 626000
+ },
+ {
+ "epoch": 4.04,
+ "learning_rate": 3.3452607852803584e-05,
+ "loss": 0.6545,
+ "step": 627000
+ },
+ {
+ "epoch": 4.04,
+ "learning_rate": 3.329666909511645e-05,
+ "loss": 0.7516,
+ "step": 628000
+ },
+ {
+ "epoch": 4.04,
+ "learning_rate": 3.3140913002379995e-05,
+ "loss": 0.7279,
+ "step": 629000
+ },
+ {
+ "epoch": 4.05,
+ "learning_rate": 3.298534127791785e-05,
+ "loss": 0.6074,
+ "step": 630000
+ },
+ {
+ "epoch": 4.05,
+ "learning_rate": 3.282995562303754e-05,
+ "loss": 0.6122,
+ "step": 631000
+ },
+ {
+ "epoch": 4.05,
+ "learning_rate": 3.267475773701161e-05,
+ "loss": 0.6485,
+ "step": 632000
+ },
+ {
+ "epoch": 4.05,
+ "learning_rate": 3.251974931705933e-05,
+ "loss": 0.754,
+ "step": 633000
+ },
+ {
+ "epoch": 4.05,
+ "learning_rate": 3.236493205832795e-05,
+ "loss": 0.7052,
+ "step": 634000
+ },
+ {
+ "epoch": 4.05,
+ "learning_rate": 3.221030765387417e-05,
+ "loss": 0.6425,
+ "step": 635000
+ },
+ {
+ "epoch": 4.05,
+ "learning_rate": 3.205587779464576e-05,
+ "loss": 0.5927,
+ "step": 636000
+ },
+ {
+ "epoch": 4.05,
+ "learning_rate": 3.190164416946285e-05,
+ "loss": 0.6501,
+ "step": 637000
+ },
+ {
+ "epoch": 4.05,
+ "learning_rate": 3.1747608464999725e-05,
+ "loss": 0.736,
+ "step": 638000
+ },
+ {
+ "epoch": 4.05,
+ "learning_rate": 3.1593772365766105e-05,
+ "loss": 0.7347,
+ "step": 639000
+ },
+ {
+ "epoch": 4.06,
+ "learning_rate": 3.144013755408895e-05,
+ "loss": 0.6254,
+ "step": 640000
+ },
+ {
+ "epoch": 4.06,
+ "learning_rate": 3.128670571009399e-05,
+ "loss": 0.5889,
+ "step": 641000
+ },
+ {
+ "epoch": 4.06,
+ "learning_rate": 3.113347851168721e-05,
+ "loss": 0.6433,
+ "step": 642000
+ },
+ {
+ "epoch": 4.06,
+ "learning_rate": 3.098045763453678e-05,
+ "loss": 0.7507,
+ "step": 643000
+ },
+ {
+ "epoch": 4.06,
+ "learning_rate": 3.082764475205442e-05,
+ "loss": 0.7253,
+ "step": 644000
+ },
+ {
+ "epoch": 4.06,
+ "learning_rate": 3.0675041535377405e-05,
+ "loss": 0.6144,
+ "step": 645000
+ },
+ {
+ "epoch": 4.06,
+ "learning_rate": 3.052264965335e-05,
+ "loss": 0.6076,
+ "step": 646000
+ },
+ {
+ "epoch": 4.06,
+ "learning_rate": 3.0370470772505433e-05,
+ "loss": 0.6384,
+ "step": 647000
+ },
+ {
+ "epoch": 4.06,
+ "learning_rate": 3.0218506557047598e-05,
+ "loss": 0.7499,
+ "step": 648000
+ },
+ {
+ "epoch": 4.06,
+ "learning_rate": 3.006675866883275e-05,
+ "loss": 0.7044,
+ "step": 649000
+ },
+ {
+ "epoch": 4.07,
+ "learning_rate": 2.991522876735154e-05,
+ "loss": 0.6471,
+ "step": 650000
+ },
+ {
+ "epoch": 4.07,
+ "eval_loss": 0.49757879972457886,
+ "eval_runtime": 10.8784,
+ "eval_samples_per_second": 459.624,
+ "eval_steps_per_second": 3.677,
+ "step": 650000
  }
  ],
  "max_steps": 1000000,
  "num_train_epochs": 9223372036854775807,
- "total_flos": 1.0107289699690217e+19,
+ "total_flos": 1.0949559629981417e+19,
  "trial_name": null,
  "trial_params": null
  }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cd75095c4597acd6406bda79e55f3e47837f8f338b561712587309fcdb721a0b
+ oid sha256:ab1ec67e0f440b016179e8869a41e8d020330d2dd0454d4d4861673b4f892817
  size 442675755
runs/Dec27_23-39-04_t1v-n-6071ee6f-w-0/events.out.tfevents.1672184365.t1v-n-6071ee6f-w-0.108001.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e161f68f5d425e1485115cd1d86b943be71aa2833b81b38e52c45658c4731eec
- size 103045
+ oid sha256:6cdcb04bf875594c9cd8a7bfd71509bee6f455a645199ff52ec612a57de2563b
+ size 111321