joelniklaus committed
Commit f5b3eb6 (1 parent: 38c8420)

Model save

last-checkpoint/config.json DELETED
@@ -1,47 +0,0 @@
- {
-   "_name_or_path": "data/plms/legal-xlm-longformer-base",
-   "architectures": [
-     "LongformerForMaskedLM"
-   ],
-   "attention_mode": "longformer",
-   "attention_probs_dropout_prob": 0.1,
-   "attention_window": [
-     128,
-     128,
-     128,
-     128,
-     128,
-     128,
-     128,
-     128,
-     128,
-     128,
-     128,
-     128
-   ],
-   "bos_token_id": 1,
-   "classifier_dropout": null,
-   "cls_token_id": 1,
-   "eos_token_id": 2,
-   "gradient_checkpointing": false,
-   "hidden_act": "gelu",
-   "hidden_dropout_prob": 0.1,
-   "hidden_size": 768,
-   "ignore_attention_mask": false,
-   "initializer_range": 0.02,
-   "intermediate_size": 3072,
-   "layer_norm_eps": 1e-05,
-   "max_position_embeddings": 4098,
-   "model_max_length": 4096,
-   "model_type": "longformer",
-   "num_attention_heads": 12,
-   "num_hidden_layers": 12,
-   "onnx_export": false,
-   "pad_token_id": 0,
-   "position_embedding_type": "absolute",
-   "sep_token_id": 2,
-   "torch_dtype": "float32",
-   "transformers_version": "4.28.1",
-   "type_vocab_size": 1,
-   "vocab_size": 128000
- }
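Note: the config removed above describes a 12-layer Longformer for masked-LM training with a 4,096-token context, per-layer local attention windows of 128, and a 128,000-entry vocabulary. As a rough sketch only (not part of this commit), a config of the same shape can be rebuilt in transformers; all values below are copied from the deleted config.json, and the instantiated model is randomly initialised, not the deleted checkpoint.

# Sketch: rebuild the deleted config and a model of the same shape.
# Assumes transformers >= 4.28 (the version recorded in the config).
from transformers import LongformerConfig, LongformerForMaskedLM

config = LongformerConfig(
    vocab_size=128000,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    max_position_embeddings=4098,  # 4096 usable positions plus offset slots
    attention_window=[128] * 12,   # one local window size per layer
    type_vocab_size=1,
    pad_token_id=0,
    bos_token_id=1,
    eos_token_id=2,
)

model = LongformerForMaskedLM(config)
# float32 weights of this shape come to roughly the 834 MB reported for
# pytorch_model.bin below.
print(f"{model.num_parameters() / 1e6:.1f}M parameters")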
last-checkpoint/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:52e4c82d133e24d979fa50dd1f5da3cf993a9afba45f61351030b87bca8dbce1
- size 1668076741
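Note: optimizer.pt and the other binary checkpoint files are tracked with Git LFS, so the deletions above and below show only the three-line pointer stub (spec version, sha256 oid, byte size), not the payload itself. A small parser for that pointer format, shown purely for illustration (hypothetical helper, not part of this repository):

# Illustration only: parse a git-lfs spec v1 pointer file.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:52e4c82d133e24d979fa50dd1f5da3cf993a9afba45f61351030b87bca8dbce1
size 1668076741"""

info = parse_lfs_pointer(pointer)
# ~1.67 GB of optimizer state, roughly twice the model size.
print(info["size_bytes"] / 1e9)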
last-checkpoint/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9fd8f7f94f77022b5215a867cb7b7f69f0fd389a147841d8656911b44dad2903
- size 834053717
last-checkpoint/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:23dc9ace1d0befcffec0a9d9e2805d913d33cd0dc2a3259a8ebde02165bcc5ae
- size 17641
last-checkpoint/scaler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:0d4c16f0bde844db19274bd1d26b71a9ec83b8f990d97e0bf43faadfb838da90
- size 557
last-checkpoint/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:25d6cb48c005e9d751ad0f279e5f0e2c753346576d9fd073fad547f015a9c4a4
- size 627
last-checkpoint/special_tokens_map.json DELETED
@@ -1,9 +0,0 @@
- {
-   "bos_token": "<s>",
-   "cls_token": "<s>",
-   "eos_token": "</s>",
-   "mask_token": "<mask>",
-   "pad_token": "<pad>",
-   "sep_token": "</s>",
-   "unk_token": "<unk>"
- }
last-checkpoint/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
last-checkpoint/tokenizer_config.json DELETED
@@ -1,12 +0,0 @@
- {
-   "bos_token": "<s>",
-   "clean_up_tokenization_spaces": true,
-   "cls_token": "<s>",
-   "eos_token": "</s>",
-   "mask_token": "<mask>",
-   "model_max_length": 4096,
-   "pad_token": "<pad>",
-   "sep_token": "</s>",
-   "tokenizer_class": "PreTrainedTokenizerFast",
-   "unk_token": "<unk>"
- }
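Note: of the three tokenizer files deleted here, tokenizer.json carries the trained vocabulary itself (hence "too large to render"), while special_tokens_map.json and tokenizer_config.json hold the settings shown above (PreTrainedTokenizerFast, RoBERTa-style special tokens, model_max_length 4096). A minimal usage sketch, assuming the tokenizer files are available at some local or Hub path; "path/to/legal-xlm-longformer-base" below is a placeholder, not a confirmed repo id:

# Sketch: load the tokenizer described by the deleted config files.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/legal-xlm-longformer-base")
print(tokenizer.model_max_length)    # expected: 4096, per tokenizer_config.json
print(tokenizer.special_tokens_map)  # expected: <s>, </s>, <unk>, <pad>, <mask>
ids = tokenizer("A short legal sentence.").input_ids
print(ids[0], ids[-1])               # bos (<s>) and eos (</s>) ids, 1 and 2 per config.json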
last-checkpoint/trainer_state.json DELETED
@@ -1,361 +0,0 @@
- {
-   "best_metric": null,
-   "best_model_checkpoint": null,
-   "epoch": 1.0,
-   "global_step": 50000,
-   "is_hyper_param_search": false,
-   "is_local_process_zero": true,
-   "is_world_process_zero": true,
-   "log_history": [
-     {
-       "epoch": 0.02,
-       "learning_rate": 1.1964e-05,
-       "loss": 1.6251,
-       "step": 1000
-     },
-     {
-       "epoch": 0.04,
-       "learning_rate": 2.3952e-05,
-       "loss": 1.0682,
-       "step": 2000
-     },
-     {
-       "epoch": 0.06,
-       "learning_rate": 2.9991929541758342e-05,
-       "loss": 0.8531,
-       "step": 3000
-     },
-     {
-       "epoch": 0.08,
-       "learning_rate": 2.9926636060938516e-05,
-       "loss": 0.8384,
-       "step": 4000
-     },
-     {
-       "epoch": 0.1,
-       "learning_rate": 2.9796072198520905e-05,
-       "loss": 0.8226,
-       "step": 5000
-     },
-     {
-       "epoch": 0.12,
-       "learning_rate": 2.96010361969696e-05,
-       "loss": 0.8162,
-       "step": 6000
-     },
-     {
-       "epoch": 0.14,
-       "learning_rate": 2.9342281162131742e-05,
-       "loss": 0.802,
-       "step": 7000
-     },
-     {
-       "epoch": 0.16,
-       "learning_rate": 2.902058372494256e-05,
-       "loss": 0.8099,
-       "step": 8000
-     },
-     {
-       "epoch": 0.18,
-       "learning_rate": 2.8637577842487164e-05,
-       "loss": 0.8511,
-       "step": 9000
-     },
-     {
-       "epoch": 0.2,
-       "learning_rate": 2.8194938302038455e-05,
-       "loss": 0.8355,
-       "step": 10000
-     },
-     {
-       "epoch": 0.2,
-       "eval_accuracy": 0.8438622275145564,
-       "eval_loss": 0.6963315010070801,
-       "eval_runtime": 212.1584,
-       "eval_samples_per_second": 23.567,
-       "eval_steps_per_second": 0.589,
-       "step": 10000
-     },
-     {
-       "epoch": 0.22,
-       "learning_rate": 2.7695129103900878e-05,
-       "loss": 0.8541,
-       "step": 11000
-     },
-     {
-       "epoch": 0.24,
-       "learning_rate": 2.7139335532657592e-05,
-       "loss": 0.8387,
-       "step": 12000
-     },
-     {
-       "epoch": 0.26,
-       "learning_rate": 2.6530459735832775e-05,
-       "loss": 0.8294,
-       "step": 13000
-     },
-     {
-       "epoch": 0.28,
-       "learning_rate": 2.5871164172365078e-05,
-       "loss": 0.8318,
-       "step": 14000
-     },
-     {
-       "epoch": 0.3,
-       "learning_rate": 2.5164331774003996e-05,
-       "loss": 0.8036,
-       "step": 15000
-     },
-     {
-       "epoch": 0.32,
-       "learning_rate": 2.441459810041615e-05,
-       "loss": 0.8184,
-       "step": 16000
-     },
-     {
-       "epoch": 0.34,
-       "learning_rate": 2.3622237699269646e-05,
-       "loss": 0.8164,
-       "step": 17000
-     },
-     {
-       "epoch": 0.36,
-       "learning_rate": 2.2792174446485267e-05,
-       "loss": 0.8178,
-       "step": 18000
-     },
-     {
-       "epoch": 0.38,
-       "learning_rate": 2.192803799746817e-05,
-       "loss": 0.8153,
-       "step": 19000
-     },
-     {
-       "epoch": 0.4,
-       "learning_rate": 2.1033607001041156e-05,
-       "loss": 0.8333,
-       "step": 20000
-     },
-     {
-       "epoch": 0.4,
-       "eval_accuracy": 0.8461335925242551,
-       "eval_loss": 0.6882692575454712,
-       "eval_runtime": 222.9746,
-       "eval_samples_per_second": 22.424,
-       "eval_steps_per_second": 0.561,
-       "step": 20000
-     },
-     {
-       "epoch": 0.42,
-       "learning_rate": 2.011372523790927e-05,
-       "loss": 0.8297,
-       "step": 21000
-     },
-     {
-       "epoch": 0.44,
-       "learning_rate": 1.9171527138850477e-05,
-       "loss": 0.8453,
-       "step": 22000
-     },
-     {
-       "epoch": 0.46,
-       "learning_rate": 1.8210155373841292e-05,
-       "loss": 0.8384,
-       "step": 23000
-     },
-     {
-       "epoch": 0.48,
-       "learning_rate": 1.7234746416166368e-05,
-       "loss": 0.828,
-       "step": 24000
-     },
-     {
-       "epoch": 0.5,
-       "learning_rate": 1.624956548101695e-05,
-       "loss": 0.8226,
-       "step": 25000
-     },
-     {
-       "epoch": 0.52,
-       "learning_rate": 1.5259912447470205e-05,
-       "loss": 0.8188,
-       "step": 26000
-     },
-     {
-       "epoch": 0.54,
-       "learning_rate": 1.4268134252092541e-05,
-       "loss": 0.807,
-       "step": 27000
-     },
-     {
-       "epoch": 0.56,
-       "learning_rate": 1.3279556319416353e-05,
-       "loss": 0.8235,
-       "step": 28000
-     },
-     {
-       "epoch": 0.58,
-       "learning_rate": 1.2298501449209877e-05,
-       "loss": 0.802,
-       "step": 29000
-     },
-     {
-       "epoch": 0.6,
-       "learning_rate": 1.1330221470267496e-05,
-       "loss": 0.8327,
-       "step": 30000
-     },
-     {
-       "epoch": 0.6,
-       "eval_accuracy": 0.8488932773912237,
-       "eval_loss": 0.6712204217910767,
-       "eval_runtime": 182.5232,
-       "eval_samples_per_second": 27.394,
-       "eval_steps_per_second": 0.685,
-       "step": 30000
-     },
-     {
-       "epoch": 0.62,
-       "learning_rate": 1.0377012633509545e-05,
-       "loss": 0.8178,
-       "step": 31000
-     },
-     {
-       "epoch": 0.64,
-       "learning_rate": 9.444018944243678e-06,
-       "loss": 0.8298,
-       "step": 32000
-     },
-     {
-       "epoch": 0.66,
-       "learning_rate": 8.535320146484432e-06,
-       "loss": 0.8438,
-       "step": 33000
-     },
-     {
-       "epoch": 0.68,
-       "learning_rate": 7.655754766677537e-06,
-       "loss": 0.8532,
-       "step": 34000
-     },
-     {
-       "epoch": 0.7,
-       "learning_rate": 6.807408671366549e-06,
-       "loss": 0.8595,
-       "step": 35000
-     },
-     {
-       "epoch": 0.72,
-       "learning_rate": 5.995680124886882e-06,
-       "loss": 0.8587,
-       "step": 36000
-     },
-     {
-       "epoch": 0.74,
-       "learning_rate": 5.222494120486821e-06,
-       "loss": 0.8702,
-       "step": 37000
-     },
-     {
-       "epoch": 0.76,
-       "learning_rate": 4.492770657745851e-06,
-       "loss": 0.8598,
-       "step": 38000
-     },
-     {
-       "epoch": 0.78,
-       "learning_rate": 3.808240256977668e-06,
-       "loss": 0.8558,
-       "step": 39000
-     },
-     {
-       "epoch": 0.8,
-       "learning_rate": 3.1726485739618023e-06,
-       "loss": 0.8659,
-       "step": 40000
-     },
-     {
-       "epoch": 0.8,
-       "eval_accuracy": 0.8498886317841439,
-       "eval_loss": 0.6668724417686462,
-       "eval_runtime": 209.1326,
-       "eval_samples_per_second": 23.908,
-       "eval_steps_per_second": 0.598,
-       "step": 40000
-     },
-     {
-       "epoch": 0.82,
-       "learning_rate": 2.5893320607754046e-06,
-       "loss": 0.8543,
-       "step": 41000
-     },
-     {
-       "epoch": 0.84,
-       "learning_rate": 2.0596740374389e-06,
-       "loss": 0.8245,
-       "step": 42000
-     },
-     {
-       "epoch": 0.86,
-       "learning_rate": 1.587044866629908e-06,
-       "loss": 0.8451,
-       "step": 43000
-     },
-     {
-       "epoch": 0.88,
-       "learning_rate": 1.1725653761999294e-06,
-       "loss": 0.8217,
-       "step": 44000
-     },
-     {
-       "epoch": 0.9,
-       "learning_rate": 8.185497391612495e-07,
-       "loss": 0.7959,
-       "step": 45000
-     },
-     {
-       "epoch": 0.92,
-       "learning_rate": 5.265459758197716e-07,
-       "loss": 0.8161,
-       "step": 46000
-     },
-     {
-       "epoch": 0.94,
-       "learning_rate": 2.980276904853785e-07,
-       "loss": 0.811,
-       "step": 47000
-     },
-     {
-       "epoch": 0.96,
-       "learning_rate": 1.3353680835990033e-07,
-       "loss": 0.8219,
-       "step": 48000
-     },
-     {
-       "epoch": 0.98,
-       "learning_rate": 3.412003109892648e-08,
-       "loss": 0.8384,
-       "step": 49000
-     },
-     {
-       "epoch": 1.0,
-       "learning_rate": 1.3123017789107294e-11,
-       "loss": 0.8297,
-       "step": 50000
-     },
-     {
-       "epoch": 1.0,
-       "eval_accuracy": 0.8507072999639782,
-       "eval_loss": 0.6616092920303345,
-       "eval_runtime": 184.1184,
-       "eval_samples_per_second": 27.156,
-       "eval_steps_per_second": 0.679,
-       "step": 50000
-     }
-   ],
-   "max_steps": 50000,
-   "num_train_epochs": 9223372036854775807,
-   "total_flos": 1.578331275264e+19,
-   "trial_name": null,
-   "trial_params": null
- }
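Note: trainer_state.json mixes per-1,000-step training entries (loss, learning_rate) with evaluation entries (eval_* keys) logged every 10,000 steps; over the single 50,000-step epoch, eval_loss drops from about 0.696 to 0.662 and masked-LM eval_accuracy rises from about 0.844 to 0.851. A small sketch (not part of the commit) for summarising the deleted file:

# Sketch: pull the evaluation points out of a trainer_state.json log_history.
import json

with open("last-checkpoint/trainer_state.json") as f:  # path as used in this checkpoint
    state = json.load(f)

evals = [entry for entry in state["log_history"] if "eval_loss" in entry]
for entry in evals:
    print(f'step {entry["step"]:>6}: '
          f'eval_loss={entry["eval_loss"]:.4f}, '
          f'eval_accuracy={entry["eval_accuracy"]:.4f}')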
last-checkpoint/training_args.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9d6713f867320f7860774658b8877a69298e4930ee45a074eaf1cb0103baaca9
- size 3707
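Note: training_args.bin is the pickled TrainingArguments object the Trainer writes next to each checkpoint, so the exact hyper-parameters behind the schedule above can in principle be recovered from it. A sketch, assuming a transformers install compatible with the one that wrote the file:

# Sketch: inspect the training arguments saved with the checkpoint.
import torch

# Newer torch versions may need weights_only=False to unpickle non-tensor objects.
args = torch.load("last-checkpoint/training_args.bin")
print(args.learning_rate, args.per_device_train_batch_size, args.max_steps)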