mohammadmahdinouri committed (verified)
Commit 18009c5 · 1 Parent(s): 52be375

Training in progress, step 14000, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:17cf6035c18a35eb3572679a23069ccc79cd23efcfc0078a5c1cf86297170924
+oid sha256:4a8af821523f7f828a05693158f90c4eb3a0034faa0c2293ad1124f5f93f2750
 size 487156538
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:84c1b4bf21482d963b8011b76d195f7c49050d5defd6af5443b08c2b90ecee0f
+oid sha256:91553bbc60905f9dec7e2b273a7b78b1aaa7bfa9c652a40341722951f2285fa2
 size 1059459406
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:05a052e7e6348bc7494c72619c890f2e3476b9f839cef0b49ecc276679526559
+oid sha256:4523bbe94ce68cf422359680d501e02156c5a468572eaddf29b6fc30a80a5c85
 size 14960
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6f122efb87b91e3eff0b25823045fc0c5f85d24a4aa94dab33368420c47c5e9f
+oid sha256:4775592d06656304b14fa76806a517bea34547605af51b0919af58d9e3ad34f6
 size 14960
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:df3fca4bdf56d6d2a9a1b743e4ddc9d4603d0d924fe05d13b2631bce9a0604bb
+oid sha256:5cc0d600480a1d336ef5ed5d595520ccc7fd9075dda439dbae6adbb69ff279e7
 size 14960
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0ff801e058bb223ddd809f4daef47a9ac6db228fc7d5bc2b359b625eb178bcf1
+oid sha256:c2f1d1a83fbbd54a97f127d07293b97435077087576259edb01b1d629c65d3ad
 size 14960
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f088542ee72a30c872186e89a32b77ade9deb1bc587be27292b788df16567de0
+oid sha256:bf353f402a8187c44395ca6064b65c6f690bca29a45070f40a2616e51dfc5dd0
 size 1064
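
Each pointer change above follows the Git LFS pointer format (version, oid sha256, size): only the hashes change between the step 13000 and step 14000 checkpoints, while the file sizes stay the same. A minimal sketch, assuming the actual checkpoint files (not just the pointers) have been pulled locally, of how a downloaded file could be checked against the new oid recorded in the diff; the path and expected hash below are taken from the optimizer.pt hunk, everything else is illustrative:

# Sketch: verify a Git LFS-tracked checkpoint file against the oid in its pointer.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks so large checkpoints do not need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "4a8af821523f7f828a05693158f90c4eb3a0034faa0c2293ad1124f5f93f2750"  # new oid for optimizer.pt
assert sha256_of("last-checkpoint/optimizer.pt") == expected
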
last-checkpoint/trainer_state.json CHANGED
@@ -2,9 +2,9 @@
   "best_global_step": null,
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.01925709105345176,
+  "epoch": 0.020738405749871125,
   "eval_steps": 500,
-  "global_step": 13000,
+  "global_step": 14000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -4558,6 +4558,356 @@
       "learning_rate": 0.0004969134125670214,
       "loss": 18.7422,
       "step": 13000
+    },
+    {
+      "epoch": 0.019286717347380147,
+      "grad_norm": 7.46875,
+      "learning_rate": 0.0004969084736320138,
+      "loss": 18.6768,
+      "step": 13020
+    },
+    {
+      "epoch": 0.019316343641308534,
+      "grad_norm": 9.125,
+      "learning_rate": 0.0004969035346970062,
+      "loss": 18.7947,
+      "step": 13040
+    },
+    {
+      "epoch": 0.01934596993523692,
+      "grad_norm": 7.15625,
+      "learning_rate": 0.0004968985957619986,
+      "loss": 18.8366,
+      "step": 13060
+    },
+    {
+      "epoch": 0.01937559622916531,
+      "grad_norm": 7.15625,
+      "learning_rate": 0.0004968936568269911,
+      "loss": 18.7926,
+      "step": 13080
+    },
+    {
+      "epoch": 0.019405222523093697,
+      "grad_norm": 7.84375,
+      "learning_rate": 0.0004968887178919835,
+      "loss": 18.8219,
+      "step": 13100
+    },
+    {
+      "epoch": 0.019434848817022083,
+      "grad_norm": 8.0625,
+      "learning_rate": 0.0004968837789569759,
+      "loss": 18.7769,
+      "step": 13120
+    },
+    {
+      "epoch": 0.01946447511095047,
+      "grad_norm": 8.375,
+      "learning_rate": 0.0004968788400219684,
+      "loss": 18.7364,
+      "step": 13140
+    },
+    {
+      "epoch": 0.01949410140487886,
+      "grad_norm": 7.625,
+      "learning_rate": 0.0004968739010869608,
+      "loss": 18.7277,
+      "step": 13160
+    },
+    {
+      "epoch": 0.019523727698807246,
+      "grad_norm": 8.0625,
+      "learning_rate": 0.0004968689621519533,
+      "loss": 18.7254,
+      "step": 13180
+    },
+    {
+      "epoch": 0.019553353992735632,
+      "grad_norm": 7.78125,
+      "learning_rate": 0.0004968640232169457,
+      "loss": 18.7818,
+      "step": 13200
+    },
+    {
+      "epoch": 0.01958298028666402,
+      "grad_norm": 8.125,
+      "learning_rate": 0.0004968590842819381,
+      "loss": 18.7875,
+      "step": 13220
+    },
+    {
+      "epoch": 0.01961260658059241,
+      "grad_norm": 8.9375,
+      "learning_rate": 0.0004968541453469306,
+      "loss": 18.738,
+      "step": 13240
+    },
+    {
+      "epoch": 0.019642232874520795,
+      "grad_norm": 7.4375,
+      "learning_rate": 0.000496849206411923,
+      "loss": 18.7297,
+      "step": 13260
+    },
+    {
+      "epoch": 0.01967185916844918,
+      "grad_norm": 9.1875,
+      "learning_rate": 0.0004968442674769154,
+      "loss": 18.7477,
+      "step": 13280
+    },
+    {
+      "epoch": 0.019701485462377568,
+      "grad_norm": 8.4375,
+      "learning_rate": 0.0004968393285419079,
+      "loss": 18.7385,
+      "step": 13300
+    },
+    {
+      "epoch": 0.019731111756305958,
+      "grad_norm": 7.84375,
+      "learning_rate": 0.0004968343896069003,
+      "loss": 18.7936,
+      "step": 13320
+    },
+    {
+      "epoch": 0.019760738050234344,
+      "grad_norm": 9.6875,
+      "learning_rate": 0.0004968294506718928,
+      "loss": 18.7235,
+      "step": 13340
+    },
+    {
+      "epoch": 0.01979036434416273,
+      "grad_norm": 9.5,
+      "learning_rate": 0.0004968245117368852,
+      "loss": 18.8098,
+      "step": 13360
+    },
+    {
+      "epoch": 0.019819990638091117,
+      "grad_norm": 9.5625,
+      "learning_rate": 0.0004968195728018777,
+      "loss": 18.762,
+      "step": 13380
+    },
+    {
+      "epoch": 0.019849616932019507,
+      "grad_norm": 8.4375,
+      "learning_rate": 0.0004968146338668701,
+      "loss": 18.8215,
+      "step": 13400
+    },
+    {
+      "epoch": 0.019879243225947894,
+      "grad_norm": 12.6875,
+      "learning_rate": 0.0004968096949318625,
+      "loss": 18.6931,
+      "step": 13420
+    },
+    {
+      "epoch": 0.01990886951987628,
+      "grad_norm": 7.96875,
+      "learning_rate": 0.0004968047559968548,
+      "loss": 18.7158,
+      "step": 13440
+    },
+    {
+      "epoch": 0.019938495813804667,
+      "grad_norm": 8.4375,
+      "learning_rate": 0.0004967998170618474,
+      "loss": 18.8109,
+      "step": 13460
+    },
+    {
+      "epoch": 0.019968122107733056,
+      "grad_norm": 7.1875,
+      "learning_rate": 0.0004967948781268397,
+      "loss": 18.791,
+      "step": 13480
+    },
+    {
+      "epoch": 0.019997748401661443,
+      "grad_norm": 7.15625,
+      "learning_rate": 0.0004967899391918321,
+      "loss": 18.7768,
+      "step": 13500
+    },
+    {
+      "epoch": 0.02002737469558983,
+      "grad_norm": 7.8125,
+      "learning_rate": 0.0004967850002568246,
+      "loss": 18.7521,
+      "step": 13520
+    },
+    {
+      "epoch": 0.020057000989518216,
+      "grad_norm": 8.25,
+      "learning_rate": 0.000496780061321817,
+      "loss": 18.8413,
+      "step": 13540
+    },
+    {
+      "epoch": 0.020086627283446606,
+      "grad_norm": 24.75,
+      "learning_rate": 0.0004967751223868095,
+      "loss": 18.7564,
+      "step": 13560
+    },
+    {
+      "epoch": 0.020116253577374992,
+      "grad_norm": 9.375,
+      "learning_rate": 0.0004967701834518019,
+      "loss": 18.7654,
+      "step": 13580
+    },
+    {
+      "epoch": 0.02014587987130338,
+      "grad_norm": 23.625,
+      "learning_rate": 0.0004967652445167944,
+      "loss": 18.7145,
+      "step": 13600
+    },
+    {
+      "epoch": 0.020175506165231765,
+      "grad_norm": 6.875,
+      "learning_rate": 0.0004967603055817868,
+      "loss": 18.7534,
+      "step": 13620
+    },
+    {
+      "epoch": 0.020205132459160155,
+      "grad_norm": 8.6875,
+      "learning_rate": 0.0004967553666467792,
+      "loss": 18.7354,
+      "step": 13640
+    },
+    {
+      "epoch": 0.02023475875308854,
+      "grad_norm": 7.21875,
+      "learning_rate": 0.0004967504277117716,
+      "loss": 18.6682,
+      "step": 13660
+    },
+    {
+      "epoch": 0.020264385047016928,
+      "grad_norm": 8.625,
+      "learning_rate": 0.0004967454887767641,
+      "loss": 18.7693,
+      "step": 13680
+    },
+    {
+      "epoch": 0.020294011340945314,
+      "grad_norm": 9.4375,
+      "learning_rate": 0.0004967405498417565,
+      "loss": 18.6937,
+      "step": 13700
+    },
+    {
+      "epoch": 0.020323637634873704,
+      "grad_norm": 8.125,
+      "learning_rate": 0.000496735610906749,
+      "loss": 18.7447,
+      "step": 13720
+    },
+    {
+      "epoch": 0.02035326392880209,
+      "grad_norm": 8.3125,
+      "learning_rate": 0.0004967306719717414,
+      "loss": 18.6705,
+      "step": 13740
+    },
+    {
+      "epoch": 0.020382890222730477,
+      "grad_norm": 8.375,
+      "learning_rate": 0.0004967257330367339,
+      "loss": 18.6713,
+      "step": 13760
+    },
+    {
+      "epoch": 0.020412516516658864,
+      "grad_norm": 9.0,
+      "learning_rate": 0.0004967207941017263,
+      "loss": 18.7607,
+      "step": 13780
+    },
+    {
+      "epoch": 0.020442142810587254,
+      "grad_norm": 9.0625,
+      "learning_rate": 0.0004967158551667187,
+      "loss": 18.6956,
+      "step": 13800
+    },
+    {
+      "epoch": 0.02047176910451564,
+      "grad_norm": 7.90625,
+      "learning_rate": 0.0004967109162317111,
+      "loss": 18.7564,
+      "step": 13820
+    },
+    {
+      "epoch": 0.020501395398444026,
+      "grad_norm": 7.71875,
+      "learning_rate": 0.0004967059772967036,
+      "loss": 18.6956,
+      "step": 13840
+    },
+    {
+      "epoch": 0.020531021692372413,
+      "grad_norm": 8.125,
+      "learning_rate": 0.000496701038361696,
+      "loss": 18.6879,
+      "step": 13860
+    },
+    {
+      "epoch": 0.020560647986300803,
+      "grad_norm": 9.1875,
+      "learning_rate": 0.0004966960994266884,
+      "loss": 18.7128,
+      "step": 13880
+    },
+    {
+      "epoch": 0.02059027428022919,
+      "grad_norm": 9.375,
+      "learning_rate": 0.0004966911604916809,
+      "loss": 18.6706,
+      "step": 13900
+    },
+    {
+      "epoch": 0.020619900574157576,
+      "grad_norm": 8.0,
+      "learning_rate": 0.0004966862215566733,
+      "loss": 18.6614,
+      "step": 13920
+    },
+    {
+      "epoch": 0.020649526868085962,
+      "grad_norm": 7.625,
+      "learning_rate": 0.0004966812826216658,
+      "loss": 18.7468,
+      "step": 13940
+    },
+    {
+      "epoch": 0.020679153162014352,
+      "grad_norm": 7.40625,
+      "learning_rate": 0.0004966763436866582,
+      "loss": 18.7248,
+      "step": 13960
+    },
+    {
+      "epoch": 0.02070877945594274,
+      "grad_norm": 7.4375,
+      "learning_rate": 0.0004966714047516507,
+      "loss": 18.7211,
+      "step": 13980
+    },
+    {
+      "epoch": 0.020738405749871125,
+      "grad_norm": 7.21875,
+      "learning_rate": 0.0004966664658166431,
+      "loss": 18.6651,
+      "step": 14000
     }
   ],
   "logging_steps": 20,
@@ -4577,7 +4927,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.8932354441343926e+19,
+  "total_flos": 3.1157911970490876e+19,
   "train_batch_size": 48,
   "trial_name": null,
   "trial_params": null
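
The trainer_state.json diff above advances global_step from 13000 to 14000 and epoch from about 0.01926 to about 0.02074, appends the fifty log entries for steps 13020 through 14000, and updates total_flos accordingly. A minimal sketch for inspecting the saved state before resuming training; it assumes the appended entries live under the standard log_history key of trainer_state.json, and the path mirrors the checkpoint directory shown in this commit:

# Sketch: read the checkpoint's trainer state and report the resume point.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["epoch"])    # expected per this diff: 14000, ~0.020738
last = state["log_history"][-1]                # most recently logged entry (step 14000 here)
print(last["step"], last["loss"], last["learning_rate"])
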