kiddothe2b committed
Commit 830e0fb
Parent: 7415c03

Training in progress, step 6400

.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "_name_or_path": "data/models/longformer-predicted-pos-encodings",
+   "architectures": [
+     "LongformerForMaskedLM"
+   ],
+   "attention_mode": "longformer",
+   "attention_probs_dropout_prob": 0.1,
+   "attention_window": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "cls_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "ignore_attention_mask": false,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 1026,
+   "model_max_length": 1024,
+   "model_type": "longformer",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "sep_token_id": 2,
+   "torch_dtype": "float32",
+   "transformers_version": "4.20.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
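Reviewer note: the new config can be sanity-checked by loading it through the transformers Auto classes. A minimal sketch, assuming a local clone in the current directory and transformers >= 4.20.0 (the version pinned in the config); the `.` path is illustrative:

```python
# Minimal sketch: load the committed config and rebuild the model skeleton.
# Assumes the repo is cloned locally and transformers >= 4.20.0 is installed.
from transformers import AutoConfig, AutoModelForMaskedLM

config = AutoConfig.from_pretrained(".")           # parses config.json
model = AutoModelForMaskedLM.from_pretrained(".")  # loads pytorch_model.bin

print(config.num_hidden_layers)        # 6
print(config.attention_window)         # [512, 512, 512, 512, 512, 512]
print(config.max_position_embeddings)  # 1026
```

The config describes a 6-layer Longformer with a 512-token local attention window per layer; max_position_embeddings is 1026 rather than 1024 because RoBERTa-style models reserve two extra position slots for the padding offset.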
last-checkpoint/config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "_name_or_path": "data/models/longformer-predicted-pos-encodings",
+   "architectures": [
+     "LongformerForMaskedLM"
+   ],
+   "attention_mode": "longformer",
+   "attention_probs_dropout_prob": 0.1,
+   "attention_window": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "cls_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "ignore_attention_mask": false,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 1026,
+   "model_max_length": 1024,
+   "model_type": "longformer",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "sep_token_id": 2,
+   "torch_dtype": "float32",
+   "transformers_version": "4.20.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
last-checkpoint/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
last-checkpoint/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5d52832157d0f5b80b6a377d00b6a204e38b38fba5786f88358c0819d171ee4
+ size 6318359
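The three-line `version`/`oid`/`size` stanzas here and below are Git LFS pointers: the actual binaries live in LFS storage and are fetched by `git lfs pull`. The sha256 `oid` is the hash of the file's full contents, so a download can be verified locally. A minimal sketch (the path and expected hash are copied from the pointer above):

```python
# Minimal sketch: verify a Git LFS-tracked file against its pointer.
# The "oid sha256:..." field is the SHA-256 of the file's raw bytes.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "a5d52832157d0f5b80b6a377d00b6a204e38b38fba5786f88358c0819d171ee4"
assert sha256_of("last-checkpoint/optimizer.pt") == expected
```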
last-checkpoint/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7c7e586c98c80af7b8b14023b28831609a0aa6b2bd8e695f6d4f000731d7e55
+ size 372832803
last-checkpoint/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff7dc9aba89d2b981ce2c9aa897aebeb0dab9301700e5fac047b6fa6ef1a780f
+ size 15523
last-checkpoint/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:841a158b0d212253f125ebf1f87bda4797e00292f1d39571b4724f0ab5ed90ad
+ size 623
last-checkpoint/special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
last-checkpoint/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
last-checkpoint/tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "errors": "replace",
+   "mask_token": "<mask>",
+   "max_length": 1024,
+   "model_max_length": 512,
+   "name_or_path": "data/models/longformer-predicted-pos-encodings",
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "special_tokens_map_file": null,
+   "tokenizer_class": "RobertaTokenizer",
+   "trim_offsets": true,
+   "unk_token": "<unk>"
+ }
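One thing worth flagging in this tokenizer config: `model_max_length` is 512 here, while config.json advertises 1024 usable positions (`model_max_length: 1024`), so callers relying on the tokenizer default will truncate at half the model's capacity. A minimal sketch of loading the tokenizer and overriding the limit explicitly:

```python
# Minimal sketch: load the checkpoint's tokenizer and override its
# 512-token default, since the model itself accepts 1024 positions.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("last-checkpoint")
print(tok.model_max_length)  # 512, from tokenizer_config.json

enc = tok("a long document ...", truncation=True, max_length=1024)
```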
last-checkpoint/trainer_state.json ADDED
@@ -0,0 +1,409 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.1,
+   "global_step": 6400,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0,
+       "learning_rate": 1.5625e-05,
+       "loss": 5.407,
+       "step": 100
+     },
+     {
+       "epoch": 0.0,
+       "learning_rate": 3.125e-05,
+       "loss": 5.1931,
+       "step": 200
+     },
+     {
+       "epoch": 0.0,
+       "learning_rate": 4.6875e-05,
+       "loss": 4.7863,
+       "step": 300
+     },
+     {
+       "epoch": 0.01,
+       "learning_rate": 6.25e-05,
+       "loss": 4.2329,
+       "step": 400
+     },
+     {
+       "epoch": 0.01,
+       "learning_rate": 7.8125e-05,
+       "loss": 3.5639,
+       "step": 500
+     },
+     {
+       "epoch": 0.01,
+       "learning_rate": 9.375e-05,
+       "loss": 3.092,
+       "step": 600
+     },
+     {
+       "epoch": 0.01,
+       "learning_rate": 0.000109375,
+       "loss": 2.9092,
+       "step": 700
+     },
+     {
+       "epoch": 0.01,
+       "learning_rate": 0.000125,
+       "loss": 2.7752,
+       "step": 800
+     },
+     {
+       "epoch": 0.01,
+       "learning_rate": 0.00014062500000000002,
+       "loss": 2.7062,
+       "step": 900
+     },
+     {
+       "epoch": 0.02,
+       "learning_rate": 0.00015625,
+       "loss": 2.6179,
+       "step": 1000
+     },
+     {
+       "epoch": 0.02,
+       "learning_rate": 0.000171875,
+       "loss": 2.6036,
+       "step": 1100
+     },
+     {
+       "epoch": 0.02,
+       "learning_rate": 0.0001875,
+       "loss": 2.5718,
+       "step": 1200
+     },
+     {
+       "epoch": 0.02,
+       "learning_rate": 0.00020312500000000002,
+       "loss": 2.5639,
+       "step": 1300
+     },
+     {
+       "epoch": 0.02,
+       "learning_rate": 0.00021875,
+       "loss": 2.5482,
+       "step": 1400
+     },
+     {
+       "epoch": 0.02,
+       "learning_rate": 0.000234375,
+       "loss": 2.5276,
+       "step": 1500
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 0.00025,
+       "loss": 2.4814,
+       "step": 1600
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 0.000265625,
+       "loss": 2.4904,
+       "step": 1700
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 0.00028125000000000003,
+       "loss": 2.5022,
+       "step": 1800
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 0.000296875,
+       "loss": 2.4521,
+       "step": 1900
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 0.0003125,
+       "loss": 2.45,
+       "step": 2000
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 0.000328125,
+       "loss": 2.4725,
+       "step": 2100
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 0.00034375,
+       "loss": 2.4415,
+       "step": 2200
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 0.000359375,
+       "loss": 2.443,
+       "step": 2300
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 0.000375,
+       "loss": 2.4593,
+       "step": 2400
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 0.000390625,
+       "loss": 2.4487,
+       "step": 2500
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 0.00040625000000000004,
+       "loss": 2.4259,
+       "step": 2600
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 0.000421875,
+       "loss": 2.4278,
+       "step": 2700
+     },
+     {
+       "epoch": 0.04,
+       "learning_rate": 0.0004375,
+       "loss": 2.4052,
+       "step": 2800
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 0.000453125,
+       "loss": 2.4142,
+       "step": 2900
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 0.00046875,
+       "loss": 2.4338,
+       "step": 3000
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 0.000484375,
+       "loss": 2.443,
+       "step": 3100
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 0.0005,
+       "loss": 2.4501,
+       "step": 3200
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 0.000515625,
+       "loss": 2.4415,
+       "step": 3300
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 0.00053125,
+       "loss": 2.4484,
+       "step": 3400
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 0.000546875,
+       "loss": 2.4132,
+       "step": 3500
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 0.0005625000000000001,
+       "loss": 2.4372,
+       "step": 3600
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 0.000578125,
+       "loss": 2.3958,
+       "step": 3700
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 0.00059375,
+       "loss": 2.407,
+       "step": 3800
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 0.000609375,
+       "loss": 2.4142,
+       "step": 3900
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 0.000625,
+       "loss": 2.4185,
+       "step": 4000
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 0.000640625,
+       "loss": 2.4373,
+       "step": 4100
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 0.00065625,
+       "loss": 2.4092,
+       "step": 4200
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 0.0006718750000000001,
+       "loss": 2.4009,
+       "step": 4300
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 0.0006875,
+       "loss": 2.4403,
+       "step": 4400
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 0.000703125,
+       "loss": 2.4071,
+       "step": 4500
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 0.00071875,
+       "loss": 2.4034,
+       "step": 4600
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 0.000734375,
+       "loss": 2.4111,
+       "step": 4700
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 0.00075,
+       "loss": 2.4132,
+       "step": 4800
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 0.000765625,
+       "loss": 2.4142,
+       "step": 4900
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 0.00078125,
+       "loss": 2.4,
+       "step": 5000
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 0.0007968750000000001,
+       "loss": 2.4329,
+       "step": 5100
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 0.0008125000000000001,
+       "loss": 2.4136,
+       "step": 5200
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 0.000828125,
+       "loss": 2.4137,
+       "step": 5300
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 0.00084375,
+       "loss": 2.4154,
+       "step": 5400
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 0.000859375,
+       "loss": 2.4205,
+       "step": 5500
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 0.000875,
+       "loss": 2.4154,
+       "step": 5600
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 0.000890625,
+       "loss": 2.4248,
+       "step": 5700
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 0.00090625,
+       "loss": 2.4149,
+       "step": 5800
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 0.0009218750000000001,
+       "loss": 2.4124,
+       "step": 5900
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 0.0009375,
+       "loss": 2.4105,
+       "step": 6000
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 0.000953125,
+       "loss": 2.4132,
+       "step": 6100
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 0.00096875,
+       "loss": 2.4079,
+       "step": 6200
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 0.000984375,
+       "loss": 2.4307,
+       "step": 6300
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 0.001,
+       "loss": 2.4311,
+       "step": 6400
+     },
+     {
+       "epoch": 0.1,
+       "eval_accuracy": 0.5866917775886576,
+       "eval_loss": 2.239835500717163,
+       "eval_runtime": 7535.276,
+       "eval_samples_per_second": 43.51,
+       "eval_steps_per_second": 2.719,
+       "step": 6400
+     }
+   ],
+   "max_steps": 64000,
+   "num_train_epochs": 9223372036854775807,
+   "total_flos": 3.38491764375552e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
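The trainer state shows the run is 10% through its schedule (step 6,400 of 64,000), with training loss down from 5.407 to roughly 2.43, eval accuracy 0.587 at this checkpoint, and the learning rate having just completed a linear warmup to 1e-3. The huge `num_train_epochs` value is the int64 sentinel: the run is bounded by `max_steps`, not epochs. The recorded `log_history` is plain JSON and easy to inspect; a minimal sketch:

```python
# Minimal sketch: inspect the training curve recorded in trainer_state.json.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]
print(f"progress: {state['global_step']}/{state['max_steps']} steps")
print("first/last train loss:", train_logs[0]["loss"], train_logs[-1]["loss"])
```

To continue the run, `Trainer.train(resume_from_checkpoint="last-checkpoint")` restores the optimizer, scheduler, and RNG state saved alongside this file.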
last-checkpoint/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:172d3bdde37eb599ca806460800494c112d2931eb57b1a43eac6a609f86eb993
+ size 3375
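training_args.bin is the pickled TrainingArguments object that the Trainer writes next to every checkpoint, so the run's exact hyperparameters can be recovered from it. A minimal sketch, assuming transformers is importable (it is needed to unpickle the object):

```python
# Minimal sketch: recover the hyperparameters of this run.
# training_args.bin is a pickled transformers.TrainingArguments;
# weights_only=False is required on PyTorch >= 2.6 (and accepted since 1.13).
import torch

args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
print(args.learning_rate, args.max_steps, args.warmup_steps)
```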
last-checkpoint/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7c7e586c98c80af7b8b14023b28831609a0aa6b2bd8e695f6d4f000731d7e55
+ size 372832803
special_tokens_map.json ADDED
@@ -0,0 +1,15 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "errors": "replace",
+   "mask_token": "<mask>",
+   "max_length": 1024,
+   "model_max_length": 512,
+   "name_or_path": "data/models/longformer-predicted-pos-encodings",
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "special_tokens_map_file": null,
+   "tokenizer_class": "RobertaTokenizer",
+   "trim_offsets": true,
+   "unk_token": "<unk>"
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:172d3bdde37eb599ca806460800494c112d2931eb57b1a43eac6a609f86eb993
+ size 3375
vocab.json ADDED
The diff for this file is too large to render. See raw diff