joelniklaus committed
Commit 618a276
Parent: 9c4da38

Training in progress, step 1000000

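The files changed below are the Git LFS pointers and checkpoint metadata pushed automatically at step 1,000,000. As a hedged, minimal sketch of fetching a file pinned to exactly this commit with huggingface_hub (the repo_id is a hypothetical placeholder; the commit page does not name the repository):

```python
# Minimal sketch: download one file pinned to this commit (618a276).
# NOTE: repo_id is a hypothetical placeholder, not stated on this page.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="joelniklaus/<model-repo>",              # placeholder
    filename="last-checkpoint/trainer_state.json",   # one of the files below
    revision="618a276",                              # commit hash shown above
)
print(path)
```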
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:72cad57ae599372b1775daab8ed6df0397ced72996d9fa2edd093808cfe400a4
+ oid sha256:3e6c5c58329ef888378799c1553da2721cd6143a7d1581dad62eb4357f2a22d6
  size 1475917081
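Each CHANGED block here is a Git LFS pointer: the actual blob is addressed by the sha256 oid and the size recorded in the pointer. A minimal sketch (file paths are illustrative assumptions) for checking that a downloaded blob matches its pointer:

```python
import hashlib

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Return True if the blob's sha256 and size match the LFS pointer file."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]  # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Example call (paths are placeholders):
# verify_lfs_pointer("optimizer.pt.pointer", "last-checkpoint/optimizer.pt")
```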
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2fb7acac33e4f38fc79dc957e435bff0e83f26677ce0d27b3d2e63cd987cb20a
+ oid sha256:86a118c032ac69034e4977ea7a4bb263056a16d7eacb04c82f234aabf5a2d0e7
  size 737971755
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d3f988a9f99a0b5464f47b3f2e652699d46e76f7aa98dea6ae21be564d50c084
+ oid sha256:c521200981c625df088d1c9ecf00d1ed99b766912221c97b61ef045b8afc8180
  size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d3f988a9f99a0b5464f47b3f2e652699d46e76f7aa98dea6ae21be564d50c084
+ oid sha256:c521200981c625df088d1c9ecf00d1ed99b766912221c97b61ef045b8afc8180
  size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d3f988a9f99a0b5464f47b3f2e652699d46e76f7aa98dea6ae21be564d50c084
+ oid sha256:c521200981c625df088d1c9ecf00d1ed99b766912221c97b61ef045b8afc8180
  size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d3f988a9f99a0b5464f47b3f2e652699d46e76f7aa98dea6ae21be564d50c084
+ oid sha256:c521200981c625df088d1c9ecf00d1ed99b766912221c97b61ef045b8afc8180
  size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d3f988a9f99a0b5464f47b3f2e652699d46e76f7aa98dea6ae21be564d50c084
+ oid sha256:c521200981c625df088d1c9ecf00d1ed99b766912221c97b61ef045b8afc8180
  size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d3f988a9f99a0b5464f47b3f2e652699d46e76f7aa98dea6ae21be564d50c084
+ oid sha256:c521200981c625df088d1c9ecf00d1ed99b766912221c97b61ef045b8afc8180
  size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d3f988a9f99a0b5464f47b3f2e652699d46e76f7aa98dea6ae21be564d50c084
+ oid sha256:c521200981c625df088d1c9ecf00d1ed99b766912221c97b61ef045b8afc8180
  size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d3f988a9f99a0b5464f47b3f2e652699d46e76f7aa98dea6ae21be564d50c084
+ oid sha256:c521200981c625df088d1c9ecf00d1ed99b766912221c97b61ef045b8afc8180
  size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bc2c2c8416f63b11e9c82d6dac05baa6ad73177ac658621e099b23ff71f2f801
+ oid sha256:906bc3ed48818cc1785b6a98c1e064532a322520b99cdf458cfd827674d9b7ec
  size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.95,
- "global_step": 950000,
+ "epoch": 1.0,
+ "global_step": 1000000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -5858,11 +5858,319 @@
  "eval_samples_per_second": 75.881,
  "eval_steps_per_second": 0.607,
  "step": 950000
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 6.549893279788277e-07,
+ "loss": 0.8154,
+ "step": 951000
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 6.285834552247128e-07,
+ "loss": 0.8177,
+ "step": 952000
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 6.027175003719354e-07,
+ "loss": 0.8213,
+ "step": 953000
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 5.773917462864264e-07,
+ "loss": 0.8164,
+ "step": 954000
+ },
+ {
+ "epoch": 0.95,
+ "learning_rate": 5.526064699265753e-07,
+ "loss": 0.8177,
+ "step": 955000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 5.283619423401998e-07,
+ "loss": 0.8101,
+ "step": 956000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 5.046584286615697e-07,
+ "loss": 0.7825,
+ "step": 957000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 4.814961881085045e-07,
+ "loss": 0.7899,
+ "step": 958000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 4.5887547397955864e-07,
+ "loss": 0.7768,
+ "step": 959000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 4.367965336512403e-07,
+ "loss": 0.7668,
+ "step": 960000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 4.1525960857530243e-07,
+ "loss": 0.7703,
+ "step": 961000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.9426493427611177e-07,
+ "loss": 0.7643,
+ "step": 962000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.738127403480507e-07,
+ "loss": 0.7798,
+ "step": 963000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.5390325045304706e-07,
+ "loss": 0.7798,
+ "step": 964000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.3453668231809286e-07,
+ "loss": 0.7513,
+ "step": 965000
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.157132477328628e-07,
+ "loss": 0.7495,
+ "step": 966000
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 2.9743315254743833e-07,
+ "loss": 0.7556,
+ "step": 967000
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 2.796965966699927e-07,
+ "loss": 0.7669,
+ "step": 968000
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 2.625037740646763e-07,
+ "loss": 0.7642,
+ "step": 969000
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 2.458548727494292e-07,
+ "loss": 0.7617,
+ "step": 970000
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 2.2975007479397738e-07,
+ "loss": 0.7558,
+ "step": 971000
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 2.1418955631781202e-07,
+ "loss": 0.7666,
+ "step": 972000
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.9917348748826335e-07,
+ "loss": 0.7648,
+ "step": 973000
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.847020325186577e-07,
+ "loss": 0.7933,
+ "step": 974000
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 1.7077534966650766e-07,
+ "loss": 0.7956,
+ "step": 975000
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.5739359123178587e-07,
+ "loss": 0.7901,
+ "step": 976000
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.4455690355525964e-07,
+ "loss": 0.7815,
+ "step": 977000
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.3226542701689215e-07,
+ "loss": 0.8116,
+ "step": 978000
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.2051929603428825e-07,
+ "loss": 0.7959,
+ "step": 979000
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 1.0931863906127327e-07,
+ "loss": 0.7908,
+ "step": 980000
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 9.866357858642205e-08,
+ "loss": 0.8115,
+ "step": 981000
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 8.855423113177664e-08,
+ "loss": 0.8025,
+ "step": 982000
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 7.899070725153613e-08,
+ "loss": 0.8166,
+ "step": 983000
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 6.997311153086883e-08,
+ "loss": 0.7991,
+ "step": 984000
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 6.150154258476315e-08,
+ "loss": 0.8127,
+ "step": 985000
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 5.3576093056922906e-08,
+ "loss": 0.8191,
+ "step": 986000
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 4.619684961881254e-08,
+ "loss": 0.7956,
+ "step": 987000
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 3.936389296864129e-08,
+ "loss": 0.7759,
+ "step": 988000
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 3.3077297830541584e-08,
+ "loss": 0.7988,
+ "step": 989000
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 2.7337132953697554e-08,
+ "loss": 0.801,
+ "step": 990000
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 2.214346111164556e-08,
+ "loss": 0.7963,
+ "step": 991000
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.749633910153592e-08,
+ "loss": 0.7973,
+ "step": 992000
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 1.3395817743561134e-08,
+ "loss": 0.8083,
+ "step": 993000
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 9.841941880361916e-09,
+ "loss": 0.8207,
+ "step": 994000
+ },
+ {
+ "epoch": 0.99,
+ "learning_rate": 6.834750376549792e-09,
+ "loss": 0.8163,
+ "step": 995000
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 4.3742761183018784e-09,
+ "loss": 0.7993,
+ "step": 996000
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 2.4605460129556445e-09,
+ "loss": 0.8185,
+ "step": 997000
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 1.0935809887702154e-09,
+ "loss": 0.8205,
+ "step": 998000
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 2.7339599464326627e-10,
+ "loss": 0.8165,
+ "step": 999000
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 0.0,
+ "loss": 0.8279,
+ "step": 1000000
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 0.548393726348877,
+ "eval_runtime": 58.8399,
+ "eval_samples_per_second": 84.976,
+ "eval_steps_per_second": 0.68,
+ "step": 1000000
  }
  ],
  "max_steps": 1000000,
  "num_train_epochs": 9223372036854775807,
- "total_flos": 1.60210593251328e+19,
+ "total_flos": 1.6864272973824e+19,
  "trial_name": null,
  "trial_params": null
  }
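The appended log_history entries cover steps 951,000 through 1,000,000; the learning rate decays to 0.0 at the final step and the closing evaluation reports an eval_loss of 0.548. A minimal sketch for inspecting the checkpoint's trainer_state.json once downloaded (the local path is an assumption):

```python
import json

# Path is an assumption; point it at the downloaded checkpoint directory.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["epoch"], state["global_step"], state["max_steps"])  # 1.0 1000000 1000000

# Last training entry and the final evaluation record appended by this commit.
for entry in state["log_history"][-2:]:
    print(entry)
```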
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2fb7acac33e4f38fc79dc957e435bff0e83f26677ce0d27b3d2e63cd987cb20a
+ oid sha256:86a118c032ac69034e4977ea7a4bb263056a16d7eacb04c82f234aabf5a2d0e7
  size 737971755
runs/Feb08_20-05-26_t1v-n-9f780742-w-0/events.out.tfevents.1675886954.t1v-n-9f780742-w-0.24792.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f3e300bea4f3fc32a2a718e59c89d410b4e3139b01588e348827e6289e3f862a
- size 28590
+ oid sha256:686edc9c7a9eeaf8cb634b3e399883280a24a26f0c571915b7450f63d469aeb0
+ size 36866
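The TensorBoard event file grew from 28,590 to 36,866 bytes as the scalars for the last 50k steps were appended. A hedged sketch of reading it offline with TensorBoard's event accumulator (the scalar tag is an assumption; actual tag names depend on the Trainer's logging configuration):

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Event file path as shown in the diff above.
acc = EventAccumulator(
    "runs/Feb08_20-05-26_t1v-n-9f780742-w-0/"
    "events.out.tfevents.1675886954.t1v-n-9f780742-w-0.24792.0"
)
acc.Reload()
print(acc.Tags()["scalars"])  # list the scalar tags actually logged

# "train/loss" is an assumed tag name; pick one from the printed list.
for event in acc.Scalars("train/loss"):
    print(event.step, event.value)
```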