joelniklaus committed
Commit b0f5e50
1 Parent(s): 70ada5c

Training in progress, step 1000000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:521bbc228d242fb6c3c57a94fc98b11d5c23043a69577ddaf269db054962d98d
+oid sha256:89f5957cfcccc0bb0743e01a59ba21707366323123e6172dd29713c3b92b209b
 size 885325017
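
All files in this commit are tracked with Git LFS, so the diffs show only the three-line pointer text (version, oid, size) rather than the binary payloads. As a minimal sketch, assuming the repository was checked out without fetching the LFS objects (e.g. with GIT_LFS_SKIP_SMUDGE=1) so the working-tree files still contain this pointer text, the fields can be read like so:

```python
# Sketch: parse a Git LFS pointer file of the exact three-line form shown above
# ("version ...", "oid sha256:<hex>", "size <bytes>"). The path is an example only.
def read_lfs_pointer(path: str) -> dict[str, str]:
    fields = {}
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

pointer = read_lfs_pointer("last-checkpoint/optimizer.pt")
print(pointer["oid"])   # e.g. "sha256:89f5957cfcccc0bb..."
print(pointer["size"])  # e.g. "885325017"
```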
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2e10d7aca263c950dcf92ba3d37d7fa925cd226547cbf86afb8720743f8755fe
+oid sha256:18847c8b3cd7596f94fde3815a850b993021139f09a4f8f641432555005d213d
 size 442675755
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67d3e3c76aa8dedd7b6bb27b884bf93747ec50761277b4d0c926cdb38dc0dfa2
+oid sha256:feba4caf27a18b22dc31e5bc356e70e86b39bdc2c82625b1cce13dd9b45b2f68
 size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9220991b7745c472d8510ec3043328cdad90c291984be5d25f26cd327245c86e
+oid sha256:73bf9980261b27620b7c6a9413d0d74be1612ef0357cc0374016e4ff47dbfa05
 size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9220991b7745c472d8510ec3043328cdad90c291984be5d25f26cd327245c86e
+oid sha256:73bf9980261b27620b7c6a9413d0d74be1612ef0357cc0374016e4ff47dbfa05
 size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9220991b7745c472d8510ec3043328cdad90c291984be5d25f26cd327245c86e
+oid sha256:73bf9980261b27620b7c6a9413d0d74be1612ef0357cc0374016e4ff47dbfa05
 size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67d3e3c76aa8dedd7b6bb27b884bf93747ec50761277b4d0c926cdb38dc0dfa2
+oid sha256:feba4caf27a18b22dc31e5bc356e70e86b39bdc2c82625b1cce13dd9b45b2f68
 size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67d3e3c76aa8dedd7b6bb27b884bf93747ec50761277b4d0c926cdb38dc0dfa2
+oid sha256:feba4caf27a18b22dc31e5bc356e70e86b39bdc2c82625b1cce13dd9b45b2f68
 size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9220991b7745c472d8510ec3043328cdad90c291984be5d25f26cd327245c86e
+oid sha256:73bf9980261b27620b7c6a9413d0d74be1612ef0357cc0374016e4ff47dbfa05
 size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9220991b7745c472d8510ec3043328cdad90c291984be5d25f26cd327245c86e
+oid sha256:73bf9980261b27620b7c6a9413d0d74be1612ef0357cc0374016e4ff47dbfa05
 size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bc2c2c8416f63b11e9c82d6dac05baa6ad73177ac658621e099b23ff71f2f801
+oid sha256:906bc3ed48818cc1785b6a98c1e064532a322520b99cdf458cfd827674d9b7ec
 size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 12.069497,
-  "global_step": 950000,
+  "epoch": 13.044672,
+  "global_step": 1000000,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -5858,11 +5858,319 @@
       "eval_samples_per_second": 407.643,
       "eval_steps_per_second": 3.261,
       "step": 950000
+    },
+    {
+      "epoch": 12.07,
+      "learning_rate": 6.549893279788277e-07,
+      "loss": 0.4494,
+      "step": 951000
+    },
+    {
+      "epoch": 12.07,
+      "learning_rate": 6.285834552247128e-07,
+      "loss": 0.4465,
+      "step": 952000
+    },
+    {
+      "epoch": 12.07,
+      "learning_rate": 6.027175003719354e-07,
+      "loss": 0.4663,
+      "step": 953000
+    },
+    {
+      "epoch": 12.07,
+      "learning_rate": 5.773917462864264e-07,
+      "loss": 0.5254,
+      "step": 954000
+    },
+    {
+      "epoch": 12.07,
+      "learning_rate": 5.526064699265753e-07,
+      "loss": 0.4647,
+      "step": 955000
+    },
+    {
+      "epoch": 13.0,
+      "learning_rate": 5.283619423401998e-07,
+      "loss": 0.4716,
+      "step": 956000
+    },
+    {
+      "epoch": 13.0,
+      "learning_rate": 5.046584286615697e-07,
+      "loss": 0.4556,
+      "step": 957000
+    },
+    {
+      "epoch": 13.0,
+      "learning_rate": 4.814961881085045e-07,
+      "loss": 0.4911,
+      "step": 958000
+    },
+    {
+      "epoch": 13.0,
+      "learning_rate": 4.5887547397955864e-07,
+      "loss": 0.5024,
+      "step": 959000
+    },
+    {
+      "epoch": 13.0,
+      "learning_rate": 4.367965336512403e-07,
+      "loss": 0.4838,
+      "step": 960000
+    },
+    {
+      "epoch": 13.01,
+      "learning_rate": 4.1525960857530243e-07,
+      "loss": 0.4784,
+      "step": 961000
+    },
+    {
+      "epoch": 13.01,
+      "learning_rate": 3.9426493427611177e-07,
+      "loss": 0.5217,
+      "step": 962000
+    },
+    {
+      "epoch": 13.01,
+      "learning_rate": 3.738127403480507e-07,
+      "loss": 0.5145,
+      "step": 963000
+    },
+    {
+      "epoch": 13.01,
+      "learning_rate": 3.5390325045304706e-07,
+      "loss": 0.493,
+      "step": 964000
+    },
+    {
+      "epoch": 13.01,
+      "learning_rate": 3.3453668231809286e-07,
+      "loss": 0.4808,
+      "step": 965000
+    },
+    {
+      "epoch": 13.01,
+      "learning_rate": 3.157132477328628e-07,
+      "loss": 0.523,
+      "step": 966000
+    },
+    {
+      "epoch": 13.01,
+      "learning_rate": 2.9743315254743833e-07,
+      "loss": 0.5228,
+      "step": 967000
+    },
+    {
+      "epoch": 13.01,
+      "learning_rate": 2.796965966699927e-07,
+      "loss": 0.4934,
+      "step": 968000
+    },
+    {
+      "epoch": 13.01,
+      "learning_rate": 2.625037740646763e-07,
+      "loss": 0.478,
+      "step": 969000
+    },
+    {
+      "epoch": 13.01,
+      "learning_rate": 2.458548727494292e-07,
+      "loss": 0.5233,
+      "step": 970000
+    },
+    {
+      "epoch": 13.02,
+      "learning_rate": 2.2975007479397738e-07,
+      "loss": 0.5158,
+      "step": 971000
+    },
+    {
+      "epoch": 13.02,
+      "learning_rate": 2.1418955631781202e-07,
+      "loss": 0.486,
+      "step": 972000
+    },
+    {
+      "epoch": 13.02,
+      "learning_rate": 1.9917348748826335e-07,
+      "loss": 0.4902,
+      "step": 973000
+    },
+    {
+      "epoch": 13.02,
+      "learning_rate": 1.847020325186577e-07,
+      "loss": 0.4818,
+      "step": 974000
+    },
+    {
+      "epoch": 13.02,
+      "learning_rate": 1.7077534966650766e-07,
+      "loss": 0.4967,
+      "step": 975000
+    },
+    {
+      "epoch": 13.02,
+      "learning_rate": 1.5739359123178587e-07,
+      "loss": 0.5156,
+      "step": 976000
+    },
+    {
+      "epoch": 13.02,
+      "learning_rate": 1.4455690355525964e-07,
+      "loss": 0.5312,
+      "step": 977000
+    },
+    {
+      "epoch": 13.02,
+      "learning_rate": 1.3226542701689215e-07,
+      "loss": 0.4816,
+      "step": 978000
+    },
+    {
+      "epoch": 13.02,
+      "learning_rate": 1.2051929603428825e-07,
+      "loss": 0.4891,
+      "step": 979000
+    },
+    {
+      "epoch": 13.02,
+      "learning_rate": 1.0931863906127327e-07,
+      "loss": 0.5107,
+      "step": 980000
+    },
+    {
+      "epoch": 13.03,
+      "learning_rate": 9.866357858642205e-08,
+      "loss": 0.5282,
+      "step": 981000
+    },
+    {
+      "epoch": 13.03,
+      "learning_rate": 8.855423113177664e-08,
+      "loss": 0.483,
+      "step": 982000
+    },
+    {
+      "epoch": 13.03,
+      "learning_rate": 7.899070725153613e-08,
+      "loss": 0.5034,
+      "step": 983000
+    },
+    {
+      "epoch": 13.03,
+      "learning_rate": 6.997311153086883e-08,
+      "loss": 0.5341,
+      "step": 984000
+    },
+    {
+      "epoch": 13.03,
+      "learning_rate": 6.150154258476315e-08,
+      "loss": 0.5145,
+      "step": 985000
+    },
+    {
+      "epoch": 13.03,
+      "learning_rate": 5.3576093056922906e-08,
+      "loss": 0.4675,
+      "step": 986000
+    },
+    {
+      "epoch": 13.03,
+      "learning_rate": 4.619684961881254e-08,
+      "loss": 0.4735,
+      "step": 987000
+    },
+    {
+      "epoch": 13.03,
+      "learning_rate": 3.936389296864129e-08,
+      "loss": 0.522,
+      "step": 988000
+    },
+    {
+      "epoch": 13.03,
+      "learning_rate": 3.3077297830541584e-08,
+      "loss": 0.5232,
+      "step": 989000
+    },
+    {
+      "epoch": 13.03,
+      "learning_rate": 2.7337132953697554e-08,
+      "loss": 0.473,
+      "step": 990000
+    },
+    {
+      "epoch": 13.04,
+      "learning_rate": 2.214346111164556e-08,
+      "loss": 0.4686,
+      "step": 991000
+    },
+    {
+      "epoch": 13.04,
+      "learning_rate": 1.749633910153592e-08,
+      "loss": 0.5288,
+      "step": 992000
+    },
+    {
+      "epoch": 13.04,
+      "learning_rate": 1.3395817743561134e-08,
+      "loss": 0.5264,
+      "step": 993000
+    },
+    {
+      "epoch": 13.04,
+      "learning_rate": 9.841941880361916e-09,
+      "loss": 0.4746,
+      "step": 994000
+    },
+    {
+      "epoch": 13.04,
+      "learning_rate": 6.834750376549792e-09,
+      "loss": 0.4896,
+      "step": 995000
+    },
+    {
+      "epoch": 13.04,
+      "learning_rate": 4.3742761183018784e-09,
+      "loss": 0.5191,
+      "step": 996000
+    },
+    {
+      "epoch": 13.04,
+      "learning_rate": 2.4605460129556445e-09,
+      "loss": 0.5255,
+      "step": 997000
+    },
+    {
+      "epoch": 13.04,
+      "learning_rate": 1.0935809887702154e-09,
+      "loss": 0.4732,
+      "step": 998000
+    },
+    {
+      "epoch": 13.04,
+      "learning_rate": 2.7339599464326627e-10,
+      "loss": 0.4697,
+      "step": 999000
+    },
+    {
+      "epoch": 13.04,
+      "learning_rate": 0.0,
+      "loss": 0.4974,
+      "step": 1000000
+    },
+    {
+      "epoch": 13.04,
+      "eval_loss": 0.4091758728027344,
+      "eval_runtime": 12.5131,
+      "eval_samples_per_second": 399.581,
+      "eval_steps_per_second": 3.197,
+      "step": 1000000
     }
   ],
   "max_steps": 1000000,
   "num_train_epochs": 9223372036854775807,
-  "total_flos": 1.6003225536574783e+19,
+  "total_flos": 1.6845503889565286e+19,
   "trial_name": null,
   "trial_params": null
 }
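
The updated last-checkpoint/trainer_state.json above ends at global_step 1000000 (epoch 13.044672) with an eval_loss of 0.4091758728027344. A minimal sketch, assuming the checkpoint directory has been downloaded locally as last-checkpoint/, for reading those values back out of the JSON:

```python
import json

# Sketch: inspect the trainer state shown in the diff above.
# The local path is an assumption; point it at wherever the checkpoint lives.
with open("last-checkpoint/trainer_state.json", encoding="utf-8") as fh:
    state = json.load(fh)

print(state["global_step"])  # 1000000
print(state["epoch"])        # 13.044672

# The final log_history entry is the last evaluation record.
final_eval = state["log_history"][-1]
print(final_eval["eval_loss"])  # 0.4091758728027344
print(final_eval["step"])       # 1000000
```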
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2e10d7aca263c950dcf92ba3d37d7fa925cd226547cbf86afb8720743f8755fe
+oid sha256:18847c8b3cd7596f94fde3815a850b993021139f09a4f8f641432555005d213d
 size 442675755
runs/Apr11_08-45-40_t1v-n-fb892c44-w-0/events.out.tfevents.1681202915.t1v-n-fb892c44-w-0.3296147.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:38149782ea6b7e5332a795edfc7bab650ea60c34562bf46495f8d7a7f4f5d82d
-size 136265
+oid sha256:d176d553263a5eea2b964b389f095abd9f9b296227c6fedc8de21354e77e880e
+size 144541