sgarrett committed on
Commit 279a9c8
1 Parent(s): 7741c8c

Upload 14 files

added_tokens.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "<PAD>": 50263,
+ "<endoftext>": 50262,
+ "<startoftext>": 50261,
+ "LABEL:": 50258,
+ "NEGATIVE": 50260,
+ "POSITIVE": 50259,
+ "SEQUENCE:": 50257
+ }
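
added_tokens.json maps the seven newly introduced tokens to IDs 50257-50263 on top of the base ProtGPT2 vocabulary. A minimal sanity-check sketch, assuming the files from this commit have been downloaded into a hypothetical local directory ./checkpoint:

```python
from transformers import AutoTokenizer

# Load the tokenizer from the files in this commit (local path is an assumption).
tokenizer = AutoTokenizer.from_pretrained("./checkpoint")

# Each added token should resolve to the ID recorded in added_tokens.json.
for token in ["SEQUENCE:", "LABEL:", "POSITIVE", "NEGATIVE",
              "<startoftext>", "<endoftext>", "<PAD>"]:
    print(token, tokenizer.convert_tokens_to_ids(token))
# Expected IDs: 50257, 50258, 50259, 50260, 50261, 50262, 50263
```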
config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "_name_or_path": "nferruz/ProtGPT2",
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.1,
+ "bos_token_id": 0,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 0,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_ctx": 1024,
+ "n_embd": 1280,
+ "n_head": 20,
+ "n_inner": null,
+ "n_layer": 36,
+ "n_positions": 1024,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "task_specific_params": {
+ "text-generation": {
+ "do_sample": true,
+ "max_length": 50
+ }
+ },
+ "torch_dtype": "float32",
+ "transformers_version": "4.42.0.dev0",
+ "use_cache": true,
+ "vocab_size": 50264
+ }
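
config.json describes a 36-layer, 1280-hidden GPT-2 variant (GPT2LMHeadModel) initialized from nferruz/ProtGPT2, with vocab_size grown to 50264 for the added tokens and float32 weights. A hedged loading sketch, reusing the assumed ./checkpoint directory; the prompt below is a placeholder, not a documented usage pattern:

```python
import torch
from transformers import AutoTokenizer, GPT2LMHeadModel

tokenizer = AutoTokenizer.from_pretrained("./checkpoint")
model = GPT2LMHeadModel.from_pretrained("./checkpoint")  # float32, ~3 GB of weights

# Embedding rows should match vocab_size in config.json.
assert model.get_input_embeddings().weight.shape[0] == 50264

# task_specific_params suggests sampling with max_length=50.
inputs = tokenizer("<startoftext>", return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, do_sample=True, max_length=50,
                         pad_token_id=tokenizer.pad_token_id)
print(tokenizer.decode(out[0]))
```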
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 0,
+ "eos_token_id": 0,
+ "transformers_version": "4.42.0.dev0"
+ }
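
generation_config.json carries bos_token_id and eos_token_id of 0 straight from the base model config (_from_model_config). ID 0 is <|endoftext|> in tokenizer_config.json below, not the fine-tuned <endoftext> (50262), so generation would stop on the original end marker unless overridden. A short inspection sketch under the same ./checkpoint assumption:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("./checkpoint")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 0 0, as recorded in this file

# To stop at the fine-tuned <endoftext> (ID 50262) instead, override per call:
# model.generate(..., eos_token_id=50262)
```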
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9b1f30d882f1b6f2ec3212b26242a411b4919a1b76cbb72655820b45e195de8
+ size 6192685221
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:58cee2ec7e0bf86f755d18fc68ea1bf9684f63a9a4805984dd506c6f8cc837e4
+ size 3096298525
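
The large binaries in this commit (optimizer.pt, pytorch_model.bin, and the other .pt/.bin files below) are stored via Git LFS, so the diff shows only pointer files with the object's SHA-256 and byte size. As a sketch, a downloaded copy of pytorch_model.bin can be checked against this pointer (the local path is an assumption):

```python
import hashlib
from pathlib import Path

path = Path("./checkpoint/pytorch_model.bin")
print(path.stat().st_size)  # expected: 3096298525 bytes (~3.1 GB)

# Hash the file in 1 MiB chunks and compare with the oid in the pointer file.
digest = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)
print(digest.hexdigest())
# expected: 58cee2ec7e0bf86f755d18fc68ea1bf9684f63a9a4805984dd506c6f8cc837e4
```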
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2120c31a262838b9d300e27c163e00cab2168499d96f1cfc1b07ca1542680fc6
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7b70922c9e159e0cd7c345fb3a4e08e2c9e68d6f2bad6d74739c0f031720a67a
+ size 627
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<startoftext>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<endoftext>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<PAD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
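
special_tokens_map.json assigns <startoftext>/<endoftext> as BOS/EOS, <PAD> for padding, and keeps the base model's <|endoftext|> as the unknown token. Together with the added SEQUENCE:/LABEL:/POSITIVE/NEGATIVE markers this suggests a labeled-sequence prompt layout; the exact training format is not part of this commit, so the example string below is an assumption:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./checkpoint")

# Hypothetical prompt layout; the amino-acid sequence is a placeholder.
example = "<startoftext>SEQUENCE: MKTAYIAKQR LABEL: POSITIVE<endoftext>"

enc = tokenizer(example, padding="max_length", max_length=64)
print(enc["input_ids"][-1])  # 50263: right-padding with <PAD> fills the tail
```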
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,76 @@
+ {
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "50257": {
+ "content": "SEQUENCE:",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50258": {
+ "content": "LABEL:",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50259": {
+ "content": "POSITIVE",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50260": {
+ "content": "NEGATIVE",
+ "lstrip": false,
+ "normalized": true,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "50261": {
+ "content": "<startoftext>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "50262": {
+ "content": "<endoftext>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "50263": {
+ "content": "<PAD>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<startoftext>",
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "<endoftext>",
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "<PAD>",
+ "tokenizer_class": "GPT2Tokenizer",
+ "unk_token": "<|endoftext|>"
+ }
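
The added_tokens_decoder section shows that the base ProtGPT2 BPE vocabulary occupies IDs 0-50256 (50,257 entries) and the seven new tokens occupy 50257-50263, which is exactly the vocab_size of 50264 recorded in config.json. A small cross-check sketch under the same ./checkpoint assumption:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./checkpoint")

# 50257 base entries + 7 added tokens = 50264, matching config.json.
print(len(tokenizer))                # expected: 50264
print(tokenizer.all_special_tokens)  # includes <startoftext>, <endoftext>, <PAD>, <|endoftext|>
```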
trainer_state.json ADDED
@@ -0,0 +1,1251 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 151.8324607329843,
+ "eval_steps": 500,
+ "global_step": 87000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
11
+ {
12
+ "epoch": 0.8726003490401396,
13
+ "grad_norm": 0.931673526763916,
14
+ "learning_rate": 0.0009956369982547994,
15
+ "loss": 2.6507,
16
+ "step": 500
17
+ },
18
+ {
19
+ "epoch": 1.7452006980802792,
20
+ "grad_norm": 1.8975260257720947,
21
+ "learning_rate": 0.0009912739965095986,
22
+ "loss": 1.6969,
23
+ "step": 1000
24
+ },
25
+ {
26
+ "epoch": 2.6178010471204187,
27
+ "grad_norm": 0.5923702120780945,
28
+ "learning_rate": 0.000986910994764398,
29
+ "loss": 1.287,
30
+ "step": 1500
31
+ },
32
+ {
33
+ "epoch": 3.4904013961605584,
34
+ "grad_norm": 0.79058438539505,
35
+ "learning_rate": 0.000982547993019197,
36
+ "loss": 0.9398,
37
+ "step": 2000
38
+ },
39
+ {
40
+ "epoch": 4.363001745200698,
41
+ "grad_norm": 0.7330807447433472,
42
+ "learning_rate": 0.0009781849912739965,
43
+ "loss": 0.6968,
44
+ "step": 2500
45
+ },
46
+ {
47
+ "epoch": 5.2356020942408374,
48
+ "grad_norm": 0.6996338963508606,
49
+ "learning_rate": 0.0009738219895287959,
50
+ "loss": 0.4816,
51
+ "step": 3000
52
+ },
53
+ {
54
+ "epoch": 6.108202443280978,
55
+ "grad_norm": 0.5383062362670898,
56
+ "learning_rate": 0.0009694589877835951,
57
+ "loss": 0.3189,
58
+ "step": 3500
59
+ },
60
+ {
61
+ "epoch": 6.980802792321117,
62
+ "grad_norm": 0.6486771106719971,
63
+ "learning_rate": 0.0009650959860383944,
64
+ "loss": 0.2218,
65
+ "step": 4000
66
+ },
67
+ {
68
+ "epoch": 7.853403141361256,
69
+ "grad_norm": 0.507366955280304,
70
+ "learning_rate": 0.0009607329842931938,
71
+ "loss": 0.1542,
72
+ "step": 4500
73
+ },
74
+ {
75
+ "epoch": 8.726003490401396,
76
+ "grad_norm": 0.8452386260032654,
77
+ "learning_rate": 0.000956369982547993,
78
+ "loss": 0.1285,
79
+ "step": 5000
80
+ },
81
+ {
82
+ "epoch": 9.598603839441536,
83
+ "grad_norm": 0.43142977356910706,
84
+ "learning_rate": 0.0009520069808027923,
85
+ "loss": 0.1175,
86
+ "step": 5500
87
+ },
88
+ {
89
+ "epoch": 10.471204188481675,
90
+ "grad_norm": 0.2331060916185379,
91
+ "learning_rate": 0.0009476439790575916,
92
+ "loss": 0.1053,
93
+ "step": 6000
94
+ },
95
+ {
96
+ "epoch": 11.343804537521814,
97
+ "grad_norm": 0.4272315800189972,
98
+ "learning_rate": 0.000943280977312391,
99
+ "loss": 0.092,
100
+ "step": 6500
101
+ },
102
+ {
103
+ "epoch": 12.216404886561955,
104
+ "grad_norm": 0.2999955117702484,
105
+ "learning_rate": 0.0009389179755671902,
106
+ "loss": 0.0878,
107
+ "step": 7000
108
+ },
109
+ {
110
+ "epoch": 13.089005235602095,
111
+ "grad_norm": 2.55757474899292,
112
+ "learning_rate": 0.0009345549738219895,
113
+ "loss": 0.0884,
114
+ "step": 7500
115
+ },
116
+ {
117
+ "epoch": 13.961605584642234,
118
+ "grad_norm": 0.33913654088974,
119
+ "learning_rate": 0.0009301919720767889,
120
+ "loss": 0.0805,
121
+ "step": 8000
122
+ },
123
+ {
124
+ "epoch": 14.834205933682373,
125
+ "grad_norm": 0.3642922341823578,
126
+ "learning_rate": 0.0009258289703315882,
127
+ "loss": 0.0721,
128
+ "step": 8500
129
+ },
130
+ {
131
+ "epoch": 15.706806282722512,
132
+ "grad_norm": 0.7718423008918762,
133
+ "learning_rate": 0.0009214659685863874,
134
+ "loss": 0.0687,
135
+ "step": 9000
136
+ },
137
+ {
138
+ "epoch": 16.57940663176265,
139
+ "grad_norm": 0.5820666551589966,
140
+ "learning_rate": 0.0009171029668411868,
141
+ "loss": 0.0644,
142
+ "step": 9500
143
+ },
144
+ {
145
+ "epoch": 17.452006980802793,
146
+ "grad_norm": 0.2773011028766632,
147
+ "learning_rate": 0.000912739965095986,
148
+ "loss": 0.0598,
149
+ "step": 10000
150
+ },
151
+ {
152
+ "epoch": 18.324607329842934,
153
+ "grad_norm": 0.25250518321990967,
154
+ "learning_rate": 0.0009083769633507853,
155
+ "loss": 0.0653,
156
+ "step": 10500
157
+ },
158
+ {
159
+ "epoch": 19.19720767888307,
160
+ "grad_norm": 0.24120768904685974,
161
+ "learning_rate": 0.0009040139616055847,
162
+ "loss": 0.0632,
163
+ "step": 11000
164
+ },
165
+ {
166
+ "epoch": 20.069808027923212,
167
+ "grad_norm": 0.2897244989871979,
168
+ "learning_rate": 0.0008996509598603839,
169
+ "loss": 0.0594,
170
+ "step": 11500
171
+ },
172
+ {
173
+ "epoch": 20.94240837696335,
174
+ "grad_norm": 0.25145065784454346,
175
+ "learning_rate": 0.0008952879581151833,
176
+ "loss": 0.0556,
177
+ "step": 12000
178
+ },
179
+ {
180
+ "epoch": 21.81500872600349,
181
+ "grad_norm": 0.27175939083099365,
182
+ "learning_rate": 0.0008909249563699826,
183
+ "loss": 0.0481,
184
+ "step": 12500
185
+ },
186
+ {
187
+ "epoch": 22.68760907504363,
188
+ "grad_norm": 0.8626015782356262,
189
+ "learning_rate": 0.0008865619546247818,
190
+ "loss": 0.0466,
191
+ "step": 13000
192
+ },
193
+ {
194
+ "epoch": 23.56020942408377,
195
+ "grad_norm": 0.18672889471054077,
196
+ "learning_rate": 0.0008821989528795812,
197
+ "loss": 0.0512,
198
+ "step": 13500
199
+ },
200
+ {
201
+ "epoch": 24.43280977312391,
202
+ "grad_norm": 0.2387542873620987,
203
+ "learning_rate": 0.0008778359511343804,
204
+ "loss": 0.0512,
205
+ "step": 14000
206
+ },
207
+ {
208
+ "epoch": 25.305410122164048,
209
+ "grad_norm": 0.2665075957775116,
210
+ "learning_rate": 0.0008734729493891797,
211
+ "loss": 0.0483,
212
+ "step": 14500
213
+ },
214
+ {
215
+ "epoch": 26.17801047120419,
216
+ "grad_norm": 0.16715960204601288,
217
+ "learning_rate": 0.0008691099476439791,
218
+ "loss": 0.0441,
219
+ "step": 15000
220
+ },
221
+ {
222
+ "epoch": 27.050610820244327,
223
+ "grad_norm": 0.1875993311405182,
224
+ "learning_rate": 0.0008647469458987784,
225
+ "loss": 0.0397,
226
+ "step": 15500
227
+ },
228
+ {
229
+ "epoch": 27.923211169284468,
230
+ "grad_norm": 0.25451162457466125,
231
+ "learning_rate": 0.0008603839441535776,
232
+ "loss": 0.039,
233
+ "step": 16000
234
+ },
235
+ {
236
+ "epoch": 28.79581151832461,
237
+ "grad_norm": 0.307054728269577,
238
+ "learning_rate": 0.000856020942408377,
239
+ "loss": 0.0443,
240
+ "step": 16500
241
+ },
242
+ {
243
+ "epoch": 29.668411867364746,
244
+ "grad_norm": 0.2467741221189499,
245
+ "learning_rate": 0.0008516579406631763,
246
+ "loss": 0.038,
247
+ "step": 17000
248
+ },
249
+ {
250
+ "epoch": 30.541012216404887,
251
+ "grad_norm": 0.23178012669086456,
252
+ "learning_rate": 0.0008472949389179755,
253
+ "loss": 0.0351,
254
+ "step": 17500
255
+ },
256
+ {
257
+ "epoch": 31.413612565445025,
258
+ "grad_norm": 0.35061487555503845,
259
+ "learning_rate": 0.0008429319371727748,
260
+ "loss": 0.0323,
261
+ "step": 18000
262
+ },
263
+ {
264
+ "epoch": 32.28621291448517,
265
+ "grad_norm": 0.3363337218761444,
266
+ "learning_rate": 0.0008385689354275742,
267
+ "loss": 0.038,
268
+ "step": 18500
269
+ },
270
+ {
271
+ "epoch": 33.1588132635253,
272
+ "grad_norm": 0.24015972018241882,
273
+ "learning_rate": 0.0008342059336823735,
274
+ "loss": 0.0361,
275
+ "step": 19000
276
+ },
277
+ {
278
+ "epoch": 34.031413612565444,
279
+ "grad_norm": 0.5147364735603333,
280
+ "learning_rate": 0.0008298429319371727,
281
+ "loss": 0.0318,
282
+ "step": 19500
283
+ },
284
+ {
285
+ "epoch": 34.904013961605585,
286
+ "grad_norm": 0.17477251589298248,
287
+ "learning_rate": 0.0008254799301919721,
288
+ "loss": 0.0307,
289
+ "step": 20000
290
+ },
291
+ {
292
+ "epoch": 35.776614310645726,
293
+ "grad_norm": 0.2511900067329407,
294
+ "learning_rate": 0.0008211169284467714,
295
+ "loss": 0.0348,
296
+ "step": 20500
297
+ },
298
+ {
299
+ "epoch": 36.64921465968587,
300
+ "grad_norm": 0.2036200314760208,
301
+ "learning_rate": 0.0008167539267015707,
302
+ "loss": 0.0298,
303
+ "step": 21000
304
+ },
305
+ {
306
+ "epoch": 37.521815008726,
307
+ "grad_norm": 0.16981545090675354,
308
+ "learning_rate": 0.00081239092495637,
309
+ "loss": 0.0319,
310
+ "step": 21500
311
+ },
312
+ {
313
+ "epoch": 38.39441535776614,
314
+ "grad_norm": 0.22329097986221313,
315
+ "learning_rate": 0.0008080279232111692,
316
+ "loss": 0.0359,
317
+ "step": 22000
318
+ },
319
+ {
320
+ "epoch": 39.26701570680628,
321
+ "grad_norm": 0.13258185982704163,
322
+ "learning_rate": 0.0008036649214659686,
323
+ "loss": 0.0266,
324
+ "step": 22500
325
+ },
326
+ {
327
+ "epoch": 40.139616055846425,
328
+ "grad_norm": 0.12790647149085999,
329
+ "learning_rate": 0.000799301919720768,
330
+ "loss": 0.026,
331
+ "step": 23000
332
+ },
333
+ {
334
+ "epoch": 41.01221640488656,
335
+ "grad_norm": 0.23635344207286835,
336
+ "learning_rate": 0.0007949389179755671,
337
+ "loss": 0.0279,
338
+ "step": 23500
339
+ },
340
+ {
341
+ "epoch": 41.8848167539267,
342
+ "grad_norm": 0.11364254355430603,
343
+ "learning_rate": 0.0007905759162303665,
344
+ "loss": 0.0257,
345
+ "step": 24000
346
+ },
347
+ {
348
+ "epoch": 42.75741710296684,
349
+ "grad_norm": 0.2781168520450592,
350
+ "learning_rate": 0.0007862129144851659,
351
+ "loss": 0.0295,
352
+ "step": 24500
353
+ },
354
+ {
355
+ "epoch": 43.63001745200698,
356
+ "grad_norm": 0.106789730489254,
357
+ "learning_rate": 0.0007818499127399651,
358
+ "loss": 0.0308,
359
+ "step": 25000
360
+ },
361
+ {
362
+ "epoch": 44.50261780104712,
363
+ "grad_norm": 0.16404911875724792,
364
+ "learning_rate": 0.0007774869109947644,
365
+ "loss": 0.0222,
366
+ "step": 25500
367
+ },
368
+ {
369
+ "epoch": 45.37521815008726,
370
+ "grad_norm": 0.14249293506145477,
371
+ "learning_rate": 0.0007731239092495637,
372
+ "loss": 0.0225,
373
+ "step": 26000
374
+ },
375
+ {
376
+ "epoch": 46.2478184991274,
377
+ "grad_norm": 0.1853444129228592,
378
+ "learning_rate": 0.0007687609075043631,
379
+ "loss": 0.0261,
380
+ "step": 26500
381
+ },
382
+ {
383
+ "epoch": 47.12041884816754,
384
+ "grad_norm": 0.1456003040075302,
385
+ "learning_rate": 0.0007643979057591623,
386
+ "loss": 0.0252,
387
+ "step": 27000
388
+ },
389
+ {
390
+ "epoch": 47.99301919720768,
391
+ "grad_norm": 0.16386698186397552,
392
+ "learning_rate": 0.0007600349040139616,
393
+ "loss": 0.0259,
394
+ "step": 27500
395
+ },
396
+ {
397
+ "epoch": 48.86561954624782,
398
+ "grad_norm": 0.12221992015838623,
399
+ "learning_rate": 0.000755671902268761,
400
+ "loss": 0.0253,
401
+ "step": 28000
402
+ },
403
+ {
404
+ "epoch": 49.738219895287955,
405
+ "grad_norm": 0.14093224704265594,
406
+ "learning_rate": 0.0007513089005235602,
407
+ "loss": 0.0203,
408
+ "step": 28500
409
+ },
410
+ {
411
+ "epoch": 50.610820244328096,
412
+ "grad_norm": 0.1189383938908577,
413
+ "learning_rate": 0.0007469458987783595,
414
+ "loss": 0.0207,
415
+ "step": 29000
416
+ },
417
+ {
418
+ "epoch": 51.48342059336824,
419
+ "grad_norm": 0.1471104919910431,
420
+ "learning_rate": 0.0007425828970331589,
421
+ "loss": 0.0209,
422
+ "step": 29500
423
+ },
424
+ {
425
+ "epoch": 52.35602094240838,
426
+ "grad_norm": 0.08947575837373734,
427
+ "learning_rate": 0.0007382198952879581,
428
+ "loss": 0.0234,
429
+ "step": 30000
430
+ },
431
+ {
432
+ "epoch": 53.22862129144852,
433
+ "grad_norm": 0.18746259808540344,
434
+ "learning_rate": 0.0007338568935427574,
435
+ "loss": 0.0245,
436
+ "step": 30500
437
+ },
438
+ {
439
+ "epoch": 54.10122164048865,
440
+ "grad_norm": 0.1539311408996582,
441
+ "learning_rate": 0.0007294938917975568,
442
+ "loss": 0.0214,
443
+ "step": 31000
444
+ },
445
+ {
446
+ "epoch": 54.973821989528794,
447
+ "grad_norm": 0.11201947182416916,
448
+ "learning_rate": 0.000725130890052356,
449
+ "loss": 0.0194,
450
+ "step": 31500
451
+ },
452
+ {
453
+ "epoch": 55.846422338568935,
454
+ "grad_norm": 0.16618479788303375,
455
+ "learning_rate": 0.0007207678883071554,
456
+ "loss": 0.0185,
457
+ "step": 32000
458
+ },
459
+ {
460
+ "epoch": 56.719022687609076,
461
+ "grad_norm": 0.1569599211215973,
462
+ "learning_rate": 0.0007164048865619547,
463
+ "loss": 0.0234,
464
+ "step": 32500
465
+ },
466
+ {
467
+ "epoch": 57.59162303664922,
468
+ "grad_norm": 0.11062045395374298,
469
+ "learning_rate": 0.0007120418848167539,
470
+ "loss": 0.0187,
471
+ "step": 33000
472
+ },
473
+ {
474
+ "epoch": 58.46422338568935,
475
+ "grad_norm": 0.1617700457572937,
476
+ "learning_rate": 0.0007076788830715533,
477
+ "loss": 0.0176,
478
+ "step": 33500
479
+ },
480
+ {
481
+ "epoch": 59.33682373472949,
482
+ "grad_norm": 0.11750755459070206,
483
+ "learning_rate": 0.0007033158813263525,
484
+ "loss": 0.0188,
485
+ "step": 34000
486
+ },
487
+ {
488
+ "epoch": 60.20942408376963,
489
+ "grad_norm": 0.24937282502651215,
490
+ "learning_rate": 0.0006989528795811518,
491
+ "loss": 0.0225,
492
+ "step": 34500
493
+ },
494
+ {
495
+ "epoch": 61.082024432809774,
496
+ "grad_norm": 0.22410957515239716,
497
+ "learning_rate": 0.0006945898778359512,
498
+ "loss": 0.0192,
499
+ "step": 35000
500
+ },
501
+ {
502
+ "epoch": 61.954624781849915,
503
+ "grad_norm": 0.18029357492923737,
504
+ "learning_rate": 0.0006902268760907505,
505
+ "loss": 0.0169,
506
+ "step": 35500
507
+ },
508
+ {
509
+ "epoch": 62.82722513089005,
510
+ "grad_norm": 0.14077898859977722,
511
+ "learning_rate": 0.0006858638743455497,
512
+ "loss": 0.0167,
513
+ "step": 36000
514
+ },
515
+ {
516
+ "epoch": 63.69982547993019,
517
+ "grad_norm": 0.12804169952869415,
518
+ "learning_rate": 0.0006815008726003491,
519
+ "loss": 0.0167,
520
+ "step": 36500
521
+ },
522
+ {
523
+ "epoch": 64.57242582897034,
524
+ "grad_norm": 0.11356078833341599,
525
+ "learning_rate": 0.0006771378708551484,
526
+ "loss": 0.0183,
527
+ "step": 37000
528
+ },
529
+ {
530
+ "epoch": 65.44502617801047,
531
+ "grad_norm": 0.07214757055044174,
532
+ "learning_rate": 0.0006727748691099476,
533
+ "loss": 0.0167,
534
+ "step": 37500
535
+ },
536
+ {
537
+ "epoch": 66.3176265270506,
538
+ "grad_norm": 0.20497408509254456,
539
+ "learning_rate": 0.0006684118673647469,
540
+ "loss": 0.0186,
541
+ "step": 38000
542
+ },
543
+ {
544
+ "epoch": 67.19022687609075,
545
+ "grad_norm": 0.09408937394618988,
546
+ "learning_rate": 0.0006640488656195463,
547
+ "loss": 0.0169,
548
+ "step": 38500
549
+ },
550
+ {
551
+ "epoch": 68.06282722513089,
552
+ "grad_norm": 0.12423662841320038,
553
+ "learning_rate": 0.0006596858638743456,
554
+ "loss": 0.0179,
555
+ "step": 39000
556
+ },
557
+ {
558
+ "epoch": 68.93542757417103,
559
+ "grad_norm": 0.4406953752040863,
560
+ "learning_rate": 0.0006553228621291448,
561
+ "loss": 0.019,
562
+ "step": 39500
563
+ },
564
+ {
565
+ "epoch": 69.80802792321117,
566
+ "grad_norm": 0.11233725398778915,
567
+ "learning_rate": 0.0006509598603839442,
568
+ "loss": 0.0151,
569
+ "step": 40000
570
+ },
571
+ {
572
+ "epoch": 70.68062827225131,
573
+ "grad_norm": 0.08892803639173508,
574
+ "learning_rate": 0.0006465968586387435,
575
+ "loss": 0.0137,
576
+ "step": 40500
577
+ },
578
+ {
579
+ "epoch": 71.55322862129145,
580
+ "grad_norm": 0.11712398380041122,
581
+ "learning_rate": 0.0006422338568935428,
582
+ "loss": 0.0145,
583
+ "step": 41000
584
+ },
585
+ {
586
+ "epoch": 72.4258289703316,
587
+ "grad_norm": 0.11560557782649994,
588
+ "learning_rate": 0.0006378708551483421,
589
+ "loss": 0.0181,
590
+ "step": 41500
591
+ },
592
+ {
593
+ "epoch": 73.29842931937172,
594
+ "grad_norm": 0.10780952870845795,
595
+ "learning_rate": 0.0006335078534031413,
596
+ "loss": 0.0162,
597
+ "step": 42000
598
+ },
599
+ {
600
+ "epoch": 74.17102966841186,
601
+ "grad_norm": 0.06540343165397644,
602
+ "learning_rate": 0.0006291448516579407,
603
+ "loss": 0.0157,
604
+ "step": 42500
605
+ },
606
+ {
607
+ "epoch": 75.043630017452,
608
+ "grad_norm": 0.08087161183357239,
609
+ "learning_rate": 0.00062478184991274,
610
+ "loss": 0.0181,
611
+ "step": 43000
612
+ },
613
+ {
614
+ "epoch": 75.91623036649214,
615
+ "grad_norm": 0.08909798413515091,
616
+ "learning_rate": 0.0006204188481675392,
617
+ "loss": 0.0136,
618
+ "step": 43500
619
+ },
620
+ {
621
+ "epoch": 76.78883071553228,
622
+ "grad_norm": 0.09045905619859695,
623
+ "learning_rate": 0.0006160558464223386,
624
+ "loss": 0.0124,
625
+ "step": 44000
626
+ },
627
+ {
628
+ "epoch": 77.66143106457243,
629
+ "grad_norm": 0.1375885307788849,
630
+ "learning_rate": 0.000611692844677138,
631
+ "loss": 0.013,
632
+ "step": 44500
633
+ },
634
+ {
635
+ "epoch": 78.53403141361257,
636
+ "grad_norm": 0.06584478169679642,
637
+ "learning_rate": 0.0006073298429319371,
638
+ "loss": 0.0149,
639
+ "step": 45000
640
+ },
641
+ {
642
+ "epoch": 79.40663176265271,
643
+ "grad_norm": 0.10484465211629868,
644
+ "learning_rate": 0.0006029668411867365,
645
+ "loss": 0.0141,
646
+ "step": 45500
647
+ },
648
+ {
649
+ "epoch": 80.27923211169285,
650
+ "grad_norm": 0.11171326786279678,
651
+ "learning_rate": 0.0005986038394415358,
652
+ "loss": 0.0142,
653
+ "step": 46000
654
+ },
655
+ {
656
+ "epoch": 81.15183246073299,
657
+ "grad_norm": 0.12636296451091766,
658
+ "learning_rate": 0.000594240837696335,
659
+ "loss": 0.0154,
660
+ "step": 46500
661
+ },
662
+ {
663
+ "epoch": 82.02443280977312,
664
+ "grad_norm": 0.06967689841985703,
665
+ "learning_rate": 0.0005898778359511344,
666
+ "loss": 0.0144,
667
+ "step": 47000
668
+ },
669
+ {
670
+ "epoch": 82.89703315881326,
671
+ "grad_norm": 0.09147424250841141,
672
+ "learning_rate": 0.0005855148342059337,
673
+ "loss": 0.0127,
674
+ "step": 47500
675
+ },
676
+ {
677
+ "epoch": 83.7696335078534,
678
+ "grad_norm": 0.6871227025985718,
679
+ "learning_rate": 0.000581151832460733,
680
+ "loss": 0.0113,
681
+ "step": 48000
682
+ },
683
+ {
684
+ "epoch": 84.64223385689354,
685
+ "grad_norm": 0.07160250097513199,
686
+ "learning_rate": 0.0005767888307155323,
687
+ "loss": 0.014,
688
+ "step": 48500
689
+ },
690
+ {
691
+ "epoch": 85.51483420593368,
692
+ "grad_norm": 0.06839723885059357,
693
+ "learning_rate": 0.0005724258289703316,
694
+ "loss": 0.0121,
695
+ "step": 49000
696
+ },
697
+ {
698
+ "epoch": 86.38743455497382,
699
+ "grad_norm": 0.05615156516432762,
700
+ "learning_rate": 0.0005680628272251309,
701
+ "loss": 0.0136,
702
+ "step": 49500
703
+ },
704
+ {
705
+ "epoch": 87.26003490401396,
706
+ "grad_norm": 0.10079669952392578,
707
+ "learning_rate": 0.0005636998254799302,
708
+ "loss": 0.0116,
709
+ "step": 50000
710
+ },
711
+ {
712
+ "epoch": 88.1326352530541,
713
+ "grad_norm": 0.09019599854946136,
714
+ "learning_rate": 0.0005593368237347295,
715
+ "loss": 0.0122,
716
+ "step": 50500
717
+ },
718
+ {
719
+ "epoch": 89.00523560209425,
720
+ "grad_norm": 0.05459260195493698,
721
+ "learning_rate": 0.0005549738219895288,
722
+ "loss": 0.0113,
723
+ "step": 51000
724
+ },
725
+ {
726
+ "epoch": 89.87783595113439,
727
+ "grad_norm": 0.07002388685941696,
728
+ "learning_rate": 0.0005506108202443281,
729
+ "loss": 0.0112,
730
+ "step": 51500
731
+ },
732
+ {
733
+ "epoch": 90.75043630017451,
734
+ "grad_norm": 0.12079566717147827,
735
+ "learning_rate": 0.0005462478184991274,
736
+ "loss": 0.0115,
737
+ "step": 52000
738
+ },
739
+ {
740
+ "epoch": 91.62303664921465,
741
+ "grad_norm": 0.08160880208015442,
742
+ "learning_rate": 0.0005418848167539267,
743
+ "loss": 0.0135,
744
+ "step": 52500
745
+ },
746
+ {
747
+ "epoch": 92.4956369982548,
748
+ "grad_norm": 0.053758975118398666,
749
+ "learning_rate": 0.000537521815008726,
750
+ "loss": 0.0111,
751
+ "step": 53000
752
+ },
753
+ {
754
+ "epoch": 93.36823734729494,
755
+ "grad_norm": 0.08977492153644562,
756
+ "learning_rate": 0.0005331588132635254,
757
+ "loss": 0.0118,
758
+ "step": 53500
759
+ },
760
+ {
761
+ "epoch": 94.24083769633508,
762
+ "grad_norm": 0.14162831008434296,
763
+ "learning_rate": 0.0005287958115183245,
764
+ "loss": 0.0118,
765
+ "step": 54000
766
+ },
767
+ {
768
+ "epoch": 95.11343804537522,
769
+ "grad_norm": 0.09927529096603394,
770
+ "learning_rate": 0.0005244328097731239,
771
+ "loss": 0.011,
772
+ "step": 54500
773
+ },
774
+ {
775
+ "epoch": 95.98603839441536,
776
+ "grad_norm": 0.08714314550161362,
777
+ "learning_rate": 0.0005200698080279233,
778
+ "loss": 0.0101,
779
+ "step": 55000
780
+ },
781
+ {
782
+ "epoch": 96.8586387434555,
783
+ "grad_norm": 0.09934234619140625,
784
+ "learning_rate": 0.0005157068062827224,
785
+ "loss": 0.0109,
786
+ "step": 55500
787
+ },
788
+ {
789
+ "epoch": 97.73123909249564,
790
+ "grad_norm": 0.11947502195835114,
791
+ "learning_rate": 0.0005113438045375218,
792
+ "loss": 0.0114,
793
+ "step": 56000
794
+ },
795
+ {
796
+ "epoch": 98.60383944153578,
797
+ "grad_norm": 0.0667385384440422,
798
+ "learning_rate": 0.0005069808027923212,
799
+ "loss": 0.0099,
800
+ "step": 56500
801
+ },
802
+ {
803
+ "epoch": 99.47643979057591,
804
+ "grad_norm": 0.11594051122665405,
805
+ "learning_rate": 0.0005026178010471204,
806
+ "loss": 0.0117,
807
+ "step": 57000
808
+ },
809
+ {
810
+ "epoch": 100.34904013961605,
811
+ "grad_norm": 0.06558683514595032,
812
+ "learning_rate": 0.0004982547993019197,
813
+ "loss": 0.0104,
814
+ "step": 57500
815
+ },
816
+ {
817
+ "epoch": 101.22164048865619,
818
+ "grad_norm": 0.07644475996494293,
819
+ "learning_rate": 0.000493891797556719,
820
+ "loss": 0.0098,
821
+ "step": 58000
822
+ },
823
+ {
824
+ "epoch": 102.09424083769633,
825
+ "grad_norm": 0.06626095622777939,
826
+ "learning_rate": 0.0004895287958115183,
827
+ "loss": 0.0095,
828
+ "step": 58500
829
+ },
830
+ {
831
+ "epoch": 102.96684118673647,
832
+ "grad_norm": 0.10808754712343216,
833
+ "learning_rate": 0.0004851657940663176,
834
+ "loss": 0.0108,
835
+ "step": 59000
836
+ },
837
+ {
838
+ "epoch": 103.83944153577661,
839
+ "grad_norm": 0.05127561092376709,
840
+ "learning_rate": 0.000480802792321117,
841
+ "loss": 0.01,
842
+ "step": 59500
843
+ },
844
+ {
845
+ "epoch": 104.71204188481676,
846
+ "grad_norm": 0.12128196656703949,
847
+ "learning_rate": 0.00047643979057591625,
848
+ "loss": 0.0092,
849
+ "step": 60000
850
+ },
851
+ {
852
+ "epoch": 105.5846422338569,
853
+ "grad_norm": 0.06882330775260925,
854
+ "learning_rate": 0.0004720767888307155,
855
+ "loss": 0.0111,
856
+ "step": 60500
857
+ },
858
+ {
859
+ "epoch": 106.45724258289704,
860
+ "grad_norm": 0.05268734693527222,
861
+ "learning_rate": 0.00046771378708551485,
862
+ "loss": 0.0094,
863
+ "step": 61000
864
+ },
865
+ {
866
+ "epoch": 107.32984293193718,
867
+ "grad_norm": 0.046268824487924576,
868
+ "learning_rate": 0.00046335078534031417,
869
+ "loss": 0.0082,
870
+ "step": 61500
871
+ },
872
+ {
873
+ "epoch": 108.2024432809773,
874
+ "grad_norm": 0.04883289709687233,
875
+ "learning_rate": 0.00045898778359511344,
876
+ "loss": 0.0085,
877
+ "step": 62000
878
+ },
879
+ {
880
+ "epoch": 109.07504363001745,
881
+ "grad_norm": 0.0723048597574234,
882
+ "learning_rate": 0.00045462478184991276,
883
+ "loss": 0.0093,
884
+ "step": 62500
885
+ },
886
+ {
887
+ "epoch": 109.94764397905759,
888
+ "grad_norm": 0.06026541814208031,
889
+ "learning_rate": 0.00045026178010471203,
890
+ "loss": 0.0095,
891
+ "step": 63000
892
+ },
893
+ {
894
+ "epoch": 110.82024432809773,
895
+ "grad_norm": 0.058908674865961075,
896
+ "learning_rate": 0.00044589877835951136,
897
+ "loss": 0.0085,
898
+ "step": 63500
899
+ },
900
+ {
901
+ "epoch": 111.69284467713787,
902
+ "grad_norm": 0.05758107081055641,
903
+ "learning_rate": 0.0004415357766143107,
904
+ "loss": 0.01,
905
+ "step": 64000
906
+ },
907
+ {
908
+ "epoch": 112.56544502617801,
909
+ "grad_norm": 0.06559444963932037,
910
+ "learning_rate": 0.00043717277486910995,
911
+ "loss": 0.0088,
912
+ "step": 64500
913
+ },
914
+ {
915
+ "epoch": 113.43804537521815,
916
+ "grad_norm": 0.05080035701394081,
917
+ "learning_rate": 0.0004328097731239092,
918
+ "loss": 0.0083,
919
+ "step": 65000
920
+ },
921
+ {
922
+ "epoch": 114.3106457242583,
923
+ "grad_norm": 0.0523524135351181,
924
+ "learning_rate": 0.0004284467713787086,
925
+ "loss": 0.0078,
926
+ "step": 65500
927
+ },
928
+ {
929
+ "epoch": 115.18324607329843,
930
+ "grad_norm": 0.14169646799564362,
931
+ "learning_rate": 0.00042408376963350787,
932
+ "loss": 0.0085,
933
+ "step": 66000
934
+ },
935
+ {
936
+ "epoch": 116.05584642233858,
937
+ "grad_norm": 0.05305915325880051,
938
+ "learning_rate": 0.00041972076788830714,
939
+ "loss": 0.0091,
940
+ "step": 66500
941
+ },
942
+ {
943
+ "epoch": 116.9284467713787,
944
+ "grad_norm": 0.05915080010890961,
945
+ "learning_rate": 0.00041535776614310646,
946
+ "loss": 0.0085,
947
+ "step": 67000
948
+ },
949
+ {
950
+ "epoch": 117.80104712041884,
951
+ "grad_norm": 0.07950141280889511,
952
+ "learning_rate": 0.0004109947643979058,
953
+ "loss": 0.0075,
954
+ "step": 67500
955
+ },
956
+ {
957
+ "epoch": 118.67364746945898,
958
+ "grad_norm": 0.083484947681427,
959
+ "learning_rate": 0.00040663176265270506,
960
+ "loss": 0.0086,
961
+ "step": 68000
962
+ },
963
+ {
964
+ "epoch": 119.54624781849913,
965
+ "grad_norm": 0.1149265244603157,
966
+ "learning_rate": 0.0004022687609075044,
967
+ "loss": 0.0087,
968
+ "step": 68500
969
+ },
970
+ {
971
+ "epoch": 120.41884816753927,
972
+ "grad_norm": 0.10079418867826462,
973
+ "learning_rate": 0.00039790575916230365,
974
+ "loss": 0.0087,
975
+ "step": 69000
976
+ },
977
+ {
978
+ "epoch": 121.29144851657941,
979
+ "grad_norm": 0.04444717988371849,
980
+ "learning_rate": 0.000393542757417103,
981
+ "loss": 0.0083,
982
+ "step": 69500
983
+ },
984
+ {
985
+ "epoch": 122.16404886561955,
986
+ "grad_norm": 0.015783872455358505,
987
+ "learning_rate": 0.0003891797556719023,
988
+ "loss": 0.0074,
989
+ "step": 70000
990
+ },
991
+ {
992
+ "epoch": 123.03664921465969,
993
+ "grad_norm": 0.07311473041772842,
994
+ "learning_rate": 0.00038481675392670157,
995
+ "loss": 0.007,
996
+ "step": 70500
997
+ },
998
+ {
999
+ "epoch": 123.90924956369983,
1000
+ "grad_norm": 0.03907659277319908,
1001
+ "learning_rate": 0.00038045375218150084,
1002
+ "loss": 0.0071,
1003
+ "step": 71000
1004
+ },
1005
+ {
1006
+ "epoch": 124.78184991273997,
1007
+ "grad_norm": 0.031013870611786842,
1008
+ "learning_rate": 0.0003760907504363002,
1009
+ "loss": 0.0071,
1010
+ "step": 71500
1011
+ },
1012
+ {
1013
+ "epoch": 125.6544502617801,
1014
+ "grad_norm": 0.05435263365507126,
1015
+ "learning_rate": 0.0003717277486910995,
1016
+ "loss": 0.0077,
1017
+ "step": 72000
1018
+ },
1019
+ {
1020
+ "epoch": 126.52705061082024,
1021
+ "grad_norm": 0.015180529095232487,
1022
+ "learning_rate": 0.00036736474694589876,
1023
+ "loss": 0.0072,
1024
+ "step": 72500
1025
+ },
1026
+ {
1027
+ "epoch": 127.39965095986038,
1028
+ "grad_norm": 0.02177223190665245,
1029
+ "learning_rate": 0.0003630017452006981,
1030
+ "loss": 0.0074,
1031
+ "step": 73000
1032
+ },
1033
+ {
1034
+ "epoch": 128.27225130890054,
1035
+ "grad_norm": 0.04897777736186981,
1036
+ "learning_rate": 0.0003586387434554974,
1037
+ "loss": 0.0076,
1038
+ "step": 73500
1039
+ },
1040
+ {
1041
+ "epoch": 129.14485165794068,
1042
+ "grad_norm": 0.02429993264377117,
1043
+ "learning_rate": 0.0003542757417102967,
1044
+ "loss": 0.0067,
1045
+ "step": 74000
1046
+ },
1047
+ {
1048
+ "epoch": 130.0174520069808,
1049
+ "grad_norm": 0.019401997327804565,
1050
+ "learning_rate": 0.000349912739965096,
1051
+ "loss": 0.0071,
1052
+ "step": 74500
1053
+ },
1054
+ {
1055
+ "epoch": 130.89005235602093,
1056
+ "grad_norm": 0.05895571410655975,
1057
+ "learning_rate": 0.00034554973821989527,
1058
+ "loss": 0.0067,
1059
+ "step": 75000
1060
+ },
1061
+ {
1062
+ "epoch": 131.76265270506107,
1063
+ "grad_norm": 0.08564560860395432,
1064
+ "learning_rate": 0.0003411867364746946,
1065
+ "loss": 0.0066,
1066
+ "step": 75500
1067
+ },
1068
+ {
1069
+ "epoch": 132.6352530541012,
1070
+ "grad_norm": 0.0574815534055233,
1071
+ "learning_rate": 0.0003368237347294939,
1072
+ "loss": 0.0063,
1073
+ "step": 76000
1074
+ },
1075
+ {
1076
+ "epoch": 133.50785340314135,
1077
+ "grad_norm": 0.08703868091106415,
1078
+ "learning_rate": 0.0003324607329842932,
1079
+ "loss": 0.0064,
1080
+ "step": 76500
1081
+ },
1082
+ {
1083
+ "epoch": 134.3804537521815,
1084
+ "grad_norm": 0.1667858362197876,
1085
+ "learning_rate": 0.00032809773123909246,
1086
+ "loss": 0.0074,
1087
+ "step": 77000
1088
+ },
1089
+ {
1090
+ "epoch": 135.25305410122164,
1091
+ "grad_norm": 0.07352814823389053,
1092
+ "learning_rate": 0.00032373472949389184,
1093
+ "loss": 0.0067,
1094
+ "step": 77500
1095
+ },
1096
+ {
1097
+ "epoch": 136.12565445026178,
1098
+ "grad_norm": 0.10321661829948425,
1099
+ "learning_rate": 0.0003193717277486911,
1100
+ "loss": 0.0065,
1101
+ "step": 78000
1102
+ },
1103
+ {
1104
+ "epoch": 136.99825479930192,
1105
+ "grad_norm": 0.046309106051921844,
1106
+ "learning_rate": 0.0003150087260034904,
1107
+ "loss": 0.0065,
1108
+ "step": 78500
1109
+ },
1110
+ {
1111
+ "epoch": 137.87085514834206,
1112
+ "grad_norm": 0.014806479215621948,
1113
+ "learning_rate": 0.0003106457242582897,
1114
+ "loss": 0.0061,
1115
+ "step": 79000
1116
+ },
1117
+ {
1118
+ "epoch": 138.7434554973822,
1119
+ "grad_norm": 0.041382819414138794,
1120
+ "learning_rate": 0.000306282722513089,
1121
+ "loss": 0.0059,
1122
+ "step": 79500
1123
+ },
1124
+ {
1125
+ "epoch": 139.61605584642234,
1126
+ "grad_norm": 0.07493896782398224,
1127
+ "learning_rate": 0.0003019197207678883,
1128
+ "loss": 0.0061,
1129
+ "step": 80000
1130
+ },
1131
+ {
1132
+ "epoch": 140.48865619546248,
1133
+ "grad_norm": 0.052245959639549255,
1134
+ "learning_rate": 0.0002975567190226876,
1135
+ "loss": 0.006,
1136
+ "step": 80500
1137
+ },
1138
+ {
1139
+ "epoch": 141.36125654450262,
1140
+ "grad_norm": 0.10472971946001053,
1141
+ "learning_rate": 0.0002931937172774869,
1142
+ "loss": 0.0061,
1143
+ "step": 81000
1144
+ },
1145
+ {
1146
+ "epoch": 142.23385689354276,
1147
+ "grad_norm": 0.033921029418706894,
1148
+ "learning_rate": 0.0002888307155322862,
1149
+ "loss": 0.0059,
1150
+ "step": 81500
1151
+ },
1152
+ {
1153
+ "epoch": 143.1064572425829,
1154
+ "grad_norm": 0.06276967376470566,
1155
+ "learning_rate": 0.00028446771378708553,
1156
+ "loss": 0.0059,
1157
+ "step": 82000
1158
+ },
1159
+ {
1160
+ "epoch": 143.97905759162305,
1161
+ "grad_norm": 0.022356705740094185,
1162
+ "learning_rate": 0.0002801047120418848,
1163
+ "loss": 0.0056,
1164
+ "step": 82500
1165
+ },
1166
+ {
1167
+ "epoch": 144.8516579406632,
1168
+ "grad_norm": 0.008057367987930775,
1169
+ "learning_rate": 0.0002757417102966841,
1170
+ "loss": 0.0054,
1171
+ "step": 83000
1172
+ },
1173
+ {
1174
+ "epoch": 145.72425828970333,
1175
+ "grad_norm": 0.00805575679987669,
1176
+ "learning_rate": 0.00027137870855148345,
1177
+ "loss": 0.0054,
1178
+ "step": 83500
1179
+ },
1180
+ {
1181
+ "epoch": 146.59685863874344,
1182
+ "grad_norm": 0.07681386172771454,
1183
+ "learning_rate": 0.0002670157068062827,
1184
+ "loss": 0.0053,
1185
+ "step": 84000
1186
+ },
1187
+ {
1188
+ "epoch": 147.46945898778358,
1189
+ "grad_norm": 0.041651804000139236,
1190
+ "learning_rate": 0.00026265270506108205,
1191
+ "loss": 0.0054,
1192
+ "step": 84500
1193
+ },
1194
+ {
1195
+ "epoch": 148.34205933682372,
1196
+ "grad_norm": 0.09435189515352249,
1197
+ "learning_rate": 0.0002582897033158813,
1198
+ "loss": 0.0057,
1199
+ "step": 85000
1200
+ },
1201
+ {
1202
+ "epoch": 149.21465968586386,
1203
+ "grad_norm": 0.02968364767730236,
1204
+ "learning_rate": 0.00025392670157068064,
1205
+ "loss": 0.0053,
1206
+ "step": 85500
1207
+ },
1208
+ {
1209
+ "epoch": 150.087260034904,
1210
+ "grad_norm": 0.04307426139712334,
1211
+ "learning_rate": 0.00024956369982547996,
1212
+ "loss": 0.0053,
1213
+ "step": 86000
1214
+ },
1215
+ {
1216
+ "epoch": 150.95986038394415,
1217
+ "grad_norm": 0.08466316014528275,
1218
+ "learning_rate": 0.00024520069808027923,
1219
+ "loss": 0.0051,
1220
+ "step": 86500
1221
+ },
1222
+ {
1223
+ "epoch": 151.8324607329843,
1224
+ "grad_norm": 0.14644889533519745,
1225
+ "learning_rate": 0.00024083769633507853,
1226
+ "loss": 0.005,
1227
+ "step": 87000
1228
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 114600,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 200,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.5133036050382848e+18,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
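
trainer_state.json logs the training loss every 500 optimizer steps, falling from 2.6507 at step 500 to 0.005 at step 87000 (epoch ≈ 151.8). The numbers are internally consistent: max_steps 114600 over num_train_epochs 200 gives 573 steps per epoch, and 87000 / 573 ≈ 151.83; with train_batch_size 4 that is roughly 2,292 sequences per epoch, assuming no gradient accumulation (not recorded here). A sketch that extracts the loss curve from the file (path is an assumption):

```python
import json

with open("./checkpoint/trainer_state.json") as f:
    state = json.load(f)

steps = [e["step"] for e in state["log_history"] if "loss" in e]
losses = [e["loss"] for e in state["log_history"] if "loss" in e]
print(steps[0], losses[0])    # 500 2.6507
print(steps[-1], losses[-1])  # 87000 0.005
print(state["max_steps"] / state["num_train_epochs"])  # 573.0 steps per epoch
```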
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b0cb1e3e955b96e463c55b5ae0d2670a2f71e733d8db03784a94b235a8359b0
+ size 4603
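
training_args.bin is the pickled TrainingArguments object saved by the transformers Trainer and can be inspected directly; on recent PyTorch releases, loading a pickled object requires weights_only=False. A minimal sketch under the same path assumption:

```python
import torch

# training_args.bin holds a pickled transformers.TrainingArguments instance.
args = torch.load("./checkpoint/training_args.bin", weights_only=False)
print(args.num_train_epochs)             # expected: 200, per trainer_state.json
print(args.per_device_train_batch_size)  # expected: 4 on a single-device run
```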
vocab.json ADDED
The diff for this file is too large to render. See raw diff