joelniklaus committed
Commit 7e2fe2f · 1 Parent(s): 153bf55

Training in progress, step 850000

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d3c518c542da6ca07a2d571b90738f73da990906513e6a5aaea2556298a95e09
+ oid sha256:6ad441ea99d1cf6fe2d1e72819c0f5db00a2a82c3dde945949e50f8f2ffe61e3
  size 885325017
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3742551ffb857f2a82abee664e6f7b2e4edc6956074c64f93b167503507fb01f
+ oid sha256:276a7965140304f9b691ebf5dbaef28e68a0d61c7432397f5d9a4edba8ae4065
  size 442675755
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:955f098dfc9b991d6514f58c6cd390ca9bbd2f63a85293e44ed6f81ba87e71e0
+ oid sha256:8a2c3c50439af9a540b521038344b9830557724b4d1f3808af26f2999c2f1ea7
  size 13611
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:955f098dfc9b991d6514f58c6cd390ca9bbd2f63a85293e44ed6f81ba87e71e0
+ oid sha256:8a2c3c50439af9a540b521038344b9830557724b4d1f3808af26f2999c2f1ea7
  size 13611
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:955f098dfc9b991d6514f58c6cd390ca9bbd2f63a85293e44ed6f81ba87e71e0
+ oid sha256:8a2c3c50439af9a540b521038344b9830557724b4d1f3808af26f2999c2f1ea7
  size 13611
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:955f098dfc9b991d6514f58c6cd390ca9bbd2f63a85293e44ed6f81ba87e71e0
+ oid sha256:8a2c3c50439af9a540b521038344b9830557724b4d1f3808af26f2999c2f1ea7
  size 13611
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:955f098dfc9b991d6514f58c6cd390ca9bbd2f63a85293e44ed6f81ba87e71e0
+ oid sha256:8a2c3c50439af9a540b521038344b9830557724b4d1f3808af26f2999c2f1ea7
  size 13611
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:955f098dfc9b991d6514f58c6cd390ca9bbd2f63a85293e44ed6f81ba87e71e0
+ oid sha256:8a2c3c50439af9a540b521038344b9830557724b4d1f3808af26f2999c2f1ea7
  size 13611
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:955f098dfc9b991d6514f58c6cd390ca9bbd2f63a85293e44ed6f81ba87e71e0
+ oid sha256:8a2c3c50439af9a540b521038344b9830557724b4d1f3808af26f2999c2f1ea7
  size 13611
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:955f098dfc9b991d6514f58c6cd390ca9bbd2f63a85293e44ed6f81ba87e71e0
+ oid sha256:8a2c3c50439af9a540b521038344b9830557724b4d1f3808af26f2999c2f1ea7
  size 13611
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:50e51b9224ded3ddffee57f26ec45414409de0232579ddafb7f3e083076fa4c5
+ oid sha256:adedebe0cc7e07de957a9e2967d6e9c3934a9fdca3245f46a29d125e5e36192e
  size 623
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 5.078545,
- "global_step": 800000,
+ "epoch": 5.128545,
+ "global_step": 850000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -4934,11 +4934,319 @@
  "eval_samples_per_second": 503.264,
  "eval_steps_per_second": 4.026,
  "step": 800000
+ },
+ {
+ "epoch": 5.08,
+ "learning_rate": 1.0441632244932237e-05,
+ "loss": 0.5538,
+ "step": 801000
+ },
+ {
+ "epoch": 5.08,
+ "learning_rate": 1.0340722563656107e-05,
+ "loss": 0.5625,
+ "step": 802000
+ },
+ {
+ "epoch": 5.08,
+ "learning_rate": 1.0240246589884044e-05,
+ "loss": 0.5641,
+ "step": 803000
+ },
+ {
+ "epoch": 5.08,
+ "learning_rate": 1.0140205422405214e-05,
+ "loss": 0.5654,
+ "step": 804000
+ },
+ {
+ "epoch": 5.08,
+ "learning_rate": 1.0040600155253765e-05,
+ "loss": 0.5659,
+ "step": 805000
+ },
+ {
+ "epoch": 5.08,
+ "learning_rate": 9.941431877696955e-06,
+ "loss": 0.572,
+ "step": 806000
+ },
+ {
+ "epoch": 5.09,
+ "learning_rate": 9.842701674223187e-06,
+ "loss": 0.5642,
+ "step": 807000
+ },
+ {
+ "epoch": 5.09,
+ "learning_rate": 9.744410624530148e-06,
+ "loss": 0.5555,
+ "step": 808000
+ },
+ {
+ "epoch": 5.09,
+ "learning_rate": 9.646559803512994e-06,
+ "loss": 0.5548,
+ "step": 809000
+ },
+ {
+ "epoch": 5.09,
+ "learning_rate": 9.549150281252633e-06,
+ "loss": 0.5529,
+ "step": 810000
+ },
+ {
+ "epoch": 5.09,
+ "learning_rate": 9.452183123004e-06,
+ "loss": 0.5515,
+ "step": 811000
+ },
+ {
+ "epoch": 5.09,
+ "learning_rate": 9.355659389184396e-06,
+ "loss": 0.5623,
+ "step": 812000
+ },
+ {
+ "epoch": 5.09,
+ "learning_rate": 9.259580135361929e-06,
+ "loss": 0.5646,
+ "step": 813000
+ },
+ {
+ "epoch": 5.09,
+ "learning_rate": 9.163946412243896e-06,
+ "loss": 0.5617,
+ "step": 814000
+ },
+ {
+ "epoch": 5.09,
+ "learning_rate": 9.068759265665384e-06,
+ "loss": 0.563,
+ "step": 815000
+ },
+ {
+ "epoch": 5.09,
+ "learning_rate": 8.974019736577777e-06,
+ "loss": 0.5691,
+ "step": 816000
+ },
+ {
+ "epoch": 5.1,
+ "learning_rate": 8.879728861037384e-06,
+ "loss": 0.5643,
+ "step": 817000
+ },
+ {
+ "epoch": 5.1,
+ "learning_rate": 8.785887670194138e-06,
+ "loss": 0.5557,
+ "step": 818000
+ },
+ {
+ "epoch": 5.1,
+ "learning_rate": 8.692497190280224e-06,
+ "loss": 0.5515,
+ "step": 819000
+ },
+ {
+ "epoch": 5.1,
+ "learning_rate": 8.599558442598998e-06,
+ "loss": 0.5521,
+ "step": 820000
+ },
+ {
+ "epoch": 5.1,
+ "learning_rate": 8.507072443513702e-06,
+ "loss": 0.5509,
+ "step": 821000
+ },
+ {
+ "epoch": 5.1,
+ "learning_rate": 8.415040204436426e-06,
+ "loss": 0.5594,
+ "step": 822000
+ },
+ {
+ "epoch": 5.1,
+ "learning_rate": 8.323462731816961e-06,
+ "loss": 0.5628,
+ "step": 823000
+ },
+ {
+ "epoch": 5.1,
+ "learning_rate": 8.232341027131885e-06,
+ "loss": 0.5631,
+ "step": 824000
+ },
+ {
+ "epoch": 5.1,
+ "learning_rate": 8.141676086873572e-06,
+ "loss": 0.5645,
+ "step": 825000
+ },
+ {
+ "epoch": 5.1,
+ "learning_rate": 8.051468902539272e-06,
+ "loss": 0.5673,
+ "step": 826000
+ },
+ {
+ "epoch": 5.11,
+ "learning_rate": 7.96172046062032e-06,
+ "loss": 0.5657,
+ "step": 827000
+ },
+ {
+ "epoch": 5.11,
+ "learning_rate": 7.872431742591268e-06,
+ "loss": 0.5529,
+ "step": 828000
+ },
+ {
+ "epoch": 5.11,
+ "learning_rate": 7.783603724899257e-06,
+ "loss": 0.5545,
+ "step": 829000
+ },
+ {
+ "epoch": 5.11,
+ "learning_rate": 7.695237378953223e-06,
+ "loss": 0.5506,
+ "step": 830000
+ },
+ {
+ "epoch": 5.11,
+ "learning_rate": 7.607333671113409e-06,
+ "loss": 0.551,
+ "step": 831000
+ },
+ {
+ "epoch": 5.11,
+ "learning_rate": 7.519893562680663e-06,
+ "loss": 0.5624,
+ "step": 832000
+ },
+ {
+ "epoch": 5.11,
+ "learning_rate": 7.432918009885997e-06,
+ "loss": 0.5624,
+ "step": 833000
+ },
+ {
+ "epoch": 5.11,
+ "learning_rate": 7.3464079638801365e-06,
+ "loss": 0.562,
+ "step": 834000
+ },
+ {
+ "epoch": 5.11,
+ "learning_rate": 7.260364370723044e-06,
+ "loss": 0.5636,
+ "step": 835000
+ },
+ {
+ "epoch": 5.11,
+ "learning_rate": 7.174788171373731e-06,
+ "loss": 0.5694,
+ "step": 836000
+ },
+ {
+ "epoch": 5.12,
+ "learning_rate": 7.089680301679752e-06,
+ "loss": 0.5614,
+ "step": 837000
+ },
+ {
+ "epoch": 5.12,
+ "learning_rate": 7.005041692367154e-06,
+ "loss": 0.5513,
+ "step": 838000
+ },
+ {
+ "epoch": 5.12,
+ "learning_rate": 6.92087326903022e-06,
+ "loss": 0.5499,
+ "step": 839000
+ },
+ {
+ "epoch": 5.12,
+ "learning_rate": 6.837175952121306e-06,
+ "loss": 0.5498,
+ "step": 840000
+ },
+ {
+ "epoch": 5.12,
+ "learning_rate": 6.753950656940905e-06,
+ "loss": 0.5501,
+ "step": 841000
+ },
+ {
+ "epoch": 5.12,
+ "learning_rate": 6.671198293627479e-06,
+ "loss": 0.5592,
+ "step": 842000
+ },
+ {
+ "epoch": 5.12,
+ "learning_rate": 6.588919767147639e-06,
+ "loss": 0.5606,
+ "step": 843000
+ },
+ {
+ "epoch": 5.12,
+ "learning_rate": 6.5071159772861436e-06,
+ "loss": 0.5597,
+ "step": 844000
+ },
+ {
+ "epoch": 5.12,
+ "learning_rate": 6.425787818636131e-06,
+ "loss": 0.5616,
+ "step": 845000
+ },
+ {
+ "epoch": 5.12,
+ "learning_rate": 6.344936180589351e-06,
+ "loss": 0.5676,
+ "step": 846000
+ },
+ {
+ "epoch": 5.13,
+ "learning_rate": 6.264561947326331e-06,
+ "loss": 0.5613,
+ "step": 847000
+ },
+ {
+ "epoch": 5.13,
+ "learning_rate": 6.184665997806832e-06,
+ "loss": 0.5507,
+ "step": 848000
+ },
+ {
+ "epoch": 5.13,
+ "learning_rate": 6.1052492057601275e-06,
+ "loss": 0.5523,
+ "step": 849000
+ },
+ {
+ "epoch": 5.13,
+ "learning_rate": 6.026312439675552e-06,
+ "loss": 0.5479,
+ "step": 850000
+ },
+ {
+ "epoch": 5.13,
+ "eval_loss": 0.3478800654411316,
+ "eval_runtime": 11.8923,
+ "eval_samples_per_second": 420.44,
+ "eval_steps_per_second": 3.364,
+ "step": 850000
  }
  ],
  "max_steps": 1000000,
  "num_train_epochs": 9223372036854775807,
- "total_flos": 1.3476339941407457e+19,
+ "total_flos": 1.4318609871698657e+19,
  "trial_name": null,
  "trial_params": null
  }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3742551ffb857f2a82abee664e6f7b2e4edc6956074c64f93b167503507fb01f
+ oid sha256:276a7965140304f9b691ebf5dbaef28e68a0d61c7432397f5d9a4edba8ae4065
  size 442675755
runs/Dec30_08-05-16_t1v-n-4a21561c-w-0/events.out.tfevents.1672387541.t1v-n-4a21561c-w-0.14765.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ee543c073ebe6fb978e78e13a8d156dcd2b1dd696388f28438886856bf72948c
- size 136149
+ oid sha256:625a070d1a811cbf7e5bc01d5ab0a9bfa65a1a6143d426b99dfb4c69daba6d48
+ size 144425