Doowon96 committed on
Commit cd309b7
1 parent: afebd21

Training in progress, step 2000

model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fc4412e58adfe81e43df51040e7a48246d29f912feddd84fc48c5a9fc38b7c32
+ oid sha256:ed213df476af660691639ba54337804321e550a1b8a9f149ef61bd710ffb112a
  size 442518124
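The safetensors, optimizer and scheduler entries in this commit are Git LFS pointer files: the three lines shown (version, oid, size) stand in for the binary payload, which is fetched by its sha256 hash. A minimal Python sketch for reading such a pointer out of a checkout where `git lfs pull` has not been run yet (the path is just an example):

```python
# Parse a Git LFS pointer file into its key/value fields (version, oid, size).
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = read_lfs_pointer("model.safetensors")
print(pointer["oid"], pointer["size"])  # e.g. sha256:ed213df4... 442518124
```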
run-3/checkpoint-1500/config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "_name_or_path": "klue/roberta-base",
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4",
+     "5": "LABEL_5",
+     "6": "LABEL_6"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4,
+     "LABEL_5": 5,
+     "LABEL_6": 6
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "tokenizer_class": "BertTokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.37.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 32000
+ }
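config.json identifies the checkpoint as klue/roberta-base fine-tuned with a RobertaForSequenceClassification head over 7 generic labels (LABEL_0 ... LABEL_6). A minimal sketch of inspecting and loading it with transformers, assuming the checkpoint directory has been downloaded locally:

```python
from transformers import AutoConfig, AutoModelForSequenceClassification

# Assumed local path to a downloaded copy of this checkpoint directory.
ckpt_dir = "run-3/checkpoint-1500"

config = AutoConfig.from_pretrained(ckpt_dir)
print(config.model_type, config.num_labels)  # roberta, 7
print(config.id2label)                       # {0: 'LABEL_0', ..., 6: 'LABEL_6'}

# The weights come from model.safetensors in the same directory.
model = AutoModelForSequenceClassification.from_pretrained(ckpt_dir)
```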
run-3/checkpoint-1500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc4412e58adfe81e43df51040e7a48246d29f912feddd84fc48c5a9fc38b7c32
+ size 442518124
run-3/checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29cb25c88d15e4250930a0c558a5811a12de5db35a084704519e8dd136b120fb
+ size 885156090
run-3/checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39e9d21a327b35258062c6aa735e94fd30ebde84bdf48012fb95adc39351b7f6
+ size 14244
run-3/checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b546e77315880c5070aacecb74ccc7c25b93ad2dff35e95169569f7475db7e9
+ size 1064
run-3/checkpoint-1500/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
run-3/checkpoint-1500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-3/checkpoint-1500/tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "[CLS]",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
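As in klue/roberta-base itself, the checkpoint ships a BERT-style WordPiece tokenizer ("tokenizer_class": "BertTokenizer", model_max_length 512) with [CLS]/[SEP]/[PAD]/[UNK]/[MASK] specials rather than RoBERTa's <s>/</s>. A small usage sketch, again assuming a local copy of the checkpoint directory:

```python
from transformers import AutoTokenizer

# Assumed local checkpoint path; the tokenizer files above ship with each checkpoint.
tokenizer = AutoTokenizer.from_pretrained("run-3/checkpoint-1500")

enc = tokenizer("클루 토픽 분류 예시 문장", return_tensors="pt")
print(tokenizer.cls_token, tokenizer.sep_token, tokenizer.pad_token)  # [CLS] [SEP] [PAD]
print(enc["input_ids"])
```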
run-3/checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,476 @@
+ {
+   "best_metric": 0.8346071667451987,
+   "best_model_checkpoint": "test-klue/ynat/run-3/checkpoint-1500",
+   "epoch": 2.626970227670753,
+   "eval_steps": 50,
+   "global_step": 1500,
+   "is_hyper_param_search": true,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.09, "learning_rate": 3.851823303957499e-06, "loss": 1.934, "step": 50},
+     {"epoch": 0.09, "eval_f1": 0.01705140497086806, "eval_loss": 2.002474546432495, "eval_runtime": 12.8723, "eval_samples_per_second": 707.486, "eval_steps_per_second": 1.398, "step": 50},
+     {"epoch": 0.18, "learning_rate": 3.783163352015298e-06, "loss": 1.9174, "step": 100},
+     {"epoch": 0.18, "eval_f1": 0.16773144497786774, "eval_loss": 1.9946870803833008, "eval_runtime": 13.4529, "eval_samples_per_second": 676.953, "eval_steps_per_second": 1.338, "step": 100},
+     {"epoch": 0.26, "learning_rate": 3.7145034000730966e-06, "loss": 1.6984, "step": 150},
+     {"epoch": 0.26, "eval_f1": 0.579749057690188, "eval_loss": 1.6373496055603027, "eval_runtime": 13.0114, "eval_samples_per_second": 699.924, "eval_steps_per_second": 1.383, "step": 150},
+     {"epoch": 0.35, "learning_rate": 3.645843448130895e-06, "loss": 1.2585, "step": 200},
+     {"epoch": 0.35, "eval_f1": 0.6734776928120875, "eval_loss": 1.2733124494552612, "eval_runtime": 12.6224, "eval_samples_per_second": 721.493, "eval_steps_per_second": 1.426, "step": 200},
+     {"epoch": 0.44, "learning_rate": 3.5771834961886936e-06, "loss": 0.9487, "step": 250},
+     {"epoch": 0.44, "eval_f1": 0.6814953813845488, "eval_loss": 1.1104496717453003, "eval_runtime": 12.492, "eval_samples_per_second": 729.025, "eval_steps_per_second": 1.441, "step": 250},
+     {"epoch": 0.53, "learning_rate": 3.5085235442464923e-06, "loss": 0.8072, "step": 300},
+     {"epoch": 0.53, "eval_f1": 0.6717305786202503, "eval_loss": 1.0609983205795288, "eval_runtime": 12.6873, "eval_samples_per_second": 717.806, "eval_steps_per_second": 1.419, "step": 300},
+     {"epoch": 0.61, "learning_rate": 3.439863592304291e-06, "loss": 0.7549, "step": 350},
+     {"epoch": 0.61, "eval_f1": 0.7228793656517608, "eval_loss": 0.9494264721870422, "eval_runtime": 12.8373, "eval_samples_per_second": 709.416, "eval_steps_per_second": 1.402, "step": 350},
+     {"epoch": 0.7, "learning_rate": 3.3712036403620893e-06, "loss": 0.6644, "step": 400},
+     {"epoch": 0.7, "eval_f1": 0.7719549711033966, "eval_loss": 0.8139678239822388, "eval_runtime": 12.9124, "eval_samples_per_second": 705.29, "eval_steps_per_second": 1.394, "step": 400},
+     {"epoch": 0.79, "learning_rate": 3.302543688419888e-06, "loss": 0.6306, "step": 450},
+     {"epoch": 0.79, "eval_f1": 0.7738311952589042, "eval_loss": 0.7844408750534058, "eval_runtime": 12.8236, "eval_samples_per_second": 710.174, "eval_steps_per_second": 1.404, "step": 450},
+     {"epoch": 0.88, "learning_rate": 3.2338837364776867e-06, "loss": 0.6093, "step": 500},
+     {"epoch": 0.88, "eval_f1": 0.7850534531405503, "eval_loss": 0.7450836300849915, "eval_runtime": 12.7168, "eval_samples_per_second": 716.14, "eval_steps_per_second": 1.415, "step": 500},
+     {"epoch": 0.96, "learning_rate": 3.1652237845354854e-06, "loss": 0.599, "step": 550},
+     {"epoch": 0.96, "eval_f1": 0.7932118091019561, "eval_loss": 0.7213732004165649, "eval_runtime": 12.9294, "eval_samples_per_second": 704.366, "eval_steps_per_second": 1.392, "step": 550},
+     {"epoch": 1.05, "learning_rate": 3.0965638325932837e-06, "loss": 0.5234, "step": 600},
+     {"epoch": 1.05, "eval_f1": 0.8216123467232329, "eval_loss": 0.6262369751930237, "eval_runtime": 13.578, "eval_samples_per_second": 670.72, "eval_steps_per_second": 1.326, "step": 600},
+     {"epoch": 1.14, "learning_rate": 3.0279038806510824e-06, "loss": 0.4465, "step": 650},
+     {"epoch": 1.14, "eval_f1": 0.7867065499958213, "eval_loss": 0.7198110818862915, "eval_runtime": 13.2261, "eval_samples_per_second": 688.561, "eval_steps_per_second": 1.361, "step": 650},
+     {"epoch": 1.23, "learning_rate": 2.959243928708881e-06, "loss": 0.4521, "step": 700},
+     {"epoch": 1.23, "eval_f1": 0.8063113996725187, "eval_loss": 0.6468276381492615, "eval_runtime": 12.8551, "eval_samples_per_second": 708.435, "eval_steps_per_second": 1.4, "step": 700},
+     {"epoch": 1.31, "learning_rate": 2.89058397676668e-06, "loss": 0.4339, "step": 750},
+     {"epoch": 1.31, "eval_f1": 0.8166775515876931, "eval_loss": 0.6203234195709229, "eval_runtime": 12.669, "eval_samples_per_second": 718.842, "eval_steps_per_second": 1.421, "step": 750},
+     {"epoch": 1.4, "learning_rate": 2.821924024824478e-06, "loss": 0.4757, "step": 800},
+     {"epoch": 1.4, "eval_f1": 0.8161608316711845, "eval_loss": 0.6249090433120728, "eval_runtime": 12.9976, "eval_samples_per_second": 700.668, "eval_steps_per_second": 1.385, "step": 800},
+     {"epoch": 1.49, "learning_rate": 2.753264072882277e-06, "loss": 0.4594, "step": 850},
+     {"epoch": 1.49, "eval_f1": 0.8290833266193817, "eval_loss": 0.5703582167625427, "eval_runtime": 12.8799, "eval_samples_per_second": 707.072, "eval_steps_per_second": 1.398, "step": 850},
+     {"epoch": 1.58, "learning_rate": 2.6846041209400755e-06, "loss": 0.4032, "step": 900},
+     {"epoch": 1.58, "eval_f1": 0.8215012797906703, "eval_loss": 0.5891036987304688, "eval_runtime": 12.8777, "eval_samples_per_second": 707.193, "eval_steps_per_second": 1.398, "step": 900},
+     {"epoch": 1.66, "learning_rate": 2.615944168997874e-06, "loss": 0.4204, "step": 950},
+     {"epoch": 1.66, "eval_f1": 0.8327085312817779, "eval_loss": 0.5532290935516357, "eval_runtime": 12.7986, "eval_samples_per_second": 711.563, "eval_steps_per_second": 1.406, "step": 950},
+     {"epoch": 1.75, "learning_rate": 2.5472842170556725e-06, "loss": 0.4386, "step": 1000},
+     {"epoch": 1.75, "eval_f1": 0.8286389311967884, "eval_loss": 0.5586702227592468, "eval_runtime": 12.7254, "eval_samples_per_second": 715.653, "eval_steps_per_second": 1.414, "step": 1000},
+     {"epoch": 1.84, "learning_rate": 2.478624265113471e-06, "loss": 0.4258, "step": 1050},
+     {"epoch": 1.84, "eval_f1": 0.8194650098039734, "eval_loss": 0.5945160388946533, "eval_runtime": 12.8508, "eval_samples_per_second": 708.674, "eval_steps_per_second": 1.401, "step": 1050},
+     {"epoch": 1.93, "learning_rate": 2.4099643131712695e-06, "loss": 0.4544, "step": 1100},
+     {"epoch": 1.93, "eval_f1": 0.8274559023611527, "eval_loss": 0.5760587453842163, "eval_runtime": 13.4335, "eval_samples_per_second": 677.933, "eval_steps_per_second": 1.34, "step": 1100},
+     {"epoch": 2.01, "learning_rate": 2.341304361229068e-06, "loss": 0.418, "step": 1150},
+     {"epoch": 2.01, "eval_f1": 0.8304623515332191, "eval_loss": 0.5600340962409973, "eval_runtime": 13.2414, "eval_samples_per_second": 687.766, "eval_steps_per_second": 1.359, "step": 1150},
+     {"epoch": 2.1, "learning_rate": 2.2726444092868665e-06, "loss": 0.3918, "step": 1200},
+     {"epoch": 2.1, "eval_f1": 0.8230072950822092, "eval_loss": 0.5960156321525574, "eval_runtime": 12.9264, "eval_samples_per_second": 704.529, "eval_steps_per_second": 1.393, "step": 1200},
+     {"epoch": 2.19, "learning_rate": 2.203984457344665e-06, "loss": 0.4001, "step": 1250},
+     {"epoch": 2.19, "eval_f1": 0.8370136201480073, "eval_loss": 0.5412853360176086, "eval_runtime": 12.8464, "eval_samples_per_second": 708.917, "eval_steps_per_second": 1.401, "step": 1250},
+     {"epoch": 2.28, "learning_rate": 2.135324505402464e-06, "loss": 0.3369, "step": 1300},
+     {"epoch": 2.28, "eval_f1": 0.8144549401231569, "eval_loss": 0.6154327392578125, "eval_runtime": 12.8518, "eval_samples_per_second": 708.616, "eval_steps_per_second": 1.401, "step": 1300},
+     {"epoch": 2.36, "learning_rate": 2.0666645534602626e-06, "loss": 0.3639, "step": 1350},
+     {"epoch": 2.36, "eval_f1": 0.8218463781690494, "eval_loss": 0.6043773889541626, "eval_runtime": 12.8354, "eval_samples_per_second": 709.522, "eval_steps_per_second": 1.402, "step": 1350},
+     {"epoch": 2.45, "learning_rate": 1.998004601518061e-06, "loss": 0.305, "step": 1400},
+     {"epoch": 2.45, "eval_f1": 0.8330006702059005, "eval_loss": 0.5544472336769104, "eval_runtime": 12.869, "eval_samples_per_second": 707.672, "eval_steps_per_second": 1.399, "step": 1400},
+     {"epoch": 2.54, "learning_rate": 1.9293446495758596e-06, "loss": 0.4444, "step": 1450},
+     {"epoch": 2.54, "eval_f1": 0.8346019595913722, "eval_loss": 0.559039294719696, "eval_runtime": 12.8371, "eval_samples_per_second": 709.431, "eval_steps_per_second": 1.402, "step": 1450},
+     {"epoch": 2.63, "learning_rate": 1.8606846976336585e-06, "loss": 0.3428, "step": 1500},
+     {"epoch": 2.63, "eval_f1": 0.8346071667451987, "eval_loss": 0.5472803115844727, "eval_runtime": 12.6738, "eval_samples_per_second": 718.567, "eval_steps_per_second": 1.42, "step": 1500}
+   ],
+   "logging_steps": 50,
+   "max_steps": 2855,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 5,
+   "save_steps": 500,
+   "total_flos": 122183570161200.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": {"learning_rate": 3.851823303957499e-06, "num_train_epochs": 5, "per_device_train_batch_size": 8, "seed": 30}
+ }
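trainer_state.json keeps the full logging history for this trial: a training-loss entry and an evaluation entry (eval_f1, eval_loss, timing) every 50 steps, plus the best score seen so far. A short sketch for pulling the F1 curve back out of the file, assuming it has been fetched locally:

```python
import json

# Assumed local path to the file shown above.
with open("run-3/checkpoint-1500/trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

print(state["best_metric"], state["best_model_checkpoint"])

# Keep only the evaluation entries from log_history.
evals = [e for e in state["log_history"] if "eval_f1" in e]
for e in evals[-3:]:
    print(e["step"], round(e["eval_f1"], 4), round(e["eval_loss"], 4))
```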
run-3/checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9a16d730bb2e3e7b3ca811a9c52609a11c500a75019c1d976993f937b24f628
+ size 4728
run-3/checkpoint-1500/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
run-3/checkpoint-2000/config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "_name_or_path": "klue/roberta-base",
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4",
+     "5": "LABEL_5",
+     "6": "LABEL_6"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4,
+     "LABEL_5": 5,
+     "LABEL_6": 6
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "tokenizer_class": "BertTokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.37.0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 32000
+ }
run-3/checkpoint-2000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed213df476af660691639ba54337804321e550a1b8a9f149ef61bd710ffb112a
+ size 442518124
run-3/checkpoint-2000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60b0aef3093247c6137181ada5d4ee4adbd529e41e10596b25dff796945ccca0
+ size 885156090
run-3/checkpoint-2000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9831ba1f87fe265bec496d24aca2f4b8e77dd5c6245fd938e51cb838360d7bfd
+ size 14244
run-3/checkpoint-2000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76c71abc30a8a5940655551c4b9f8a44f5d89d4d06facc393e0cac05c57146fc
+ size 1064
run-3/checkpoint-2000/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
run-3/checkpoint-2000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-3/checkpoint-2000/tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "[CLS]",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_basic_tokenize": true,
+   "do_lower_case": false,
+   "eos_token": "[SEP]",
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "never_split": null,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
run-3/checkpoint-2000/trainer_state.json ADDED
@@ -0,0 +1,626 @@
+ {
+   "best_metric": 0.8412270942316189,
+   "best_model_checkpoint": "test-klue/ynat/run-3/checkpoint-2000",
+   "epoch": 3.502626970227671,
+   "eval_steps": 50,
+   "global_step": 2000,
+   "is_hyper_param_search": true,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.09, "learning_rate": 3.851823303957499e-06, "loss": 1.934, "step": 50},
+     {"epoch": 0.09, "eval_f1": 0.01705140497086806, "eval_loss": 2.002474546432495, "eval_runtime": 12.8723, "eval_samples_per_second": 707.486, "eval_steps_per_second": 1.398, "step": 50},
+     {"epoch": 0.18, "learning_rate": 3.783163352015298e-06, "loss": 1.9174, "step": 100},
+     {"epoch": 0.18, "eval_f1": 0.16773144497786774, "eval_loss": 1.9946870803833008, "eval_runtime": 13.4529, "eval_samples_per_second": 676.953, "eval_steps_per_second": 1.338, "step": 100},
+     {"epoch": 0.26, "learning_rate": 3.7145034000730966e-06, "loss": 1.6984, "step": 150},
+     {"epoch": 0.26, "eval_f1": 0.579749057690188, "eval_loss": 1.6373496055603027, "eval_runtime": 13.0114, "eval_samples_per_second": 699.924, "eval_steps_per_second": 1.383, "step": 150},
+     {"epoch": 0.35, "learning_rate": 3.645843448130895e-06, "loss": 1.2585, "step": 200},
+     {"epoch": 0.35, "eval_f1": 0.6734776928120875, "eval_loss": 1.2733124494552612, "eval_runtime": 12.6224, "eval_samples_per_second": 721.493, "eval_steps_per_second": 1.426, "step": 200},
+     {"epoch": 0.44, "learning_rate": 3.5771834961886936e-06, "loss": 0.9487, "step": 250},
+     {"epoch": 0.44, "eval_f1": 0.6814953813845488, "eval_loss": 1.1104496717453003, "eval_runtime": 12.492, "eval_samples_per_second": 729.025, "eval_steps_per_second": 1.441, "step": 250},
+     {"epoch": 0.53, "learning_rate": 3.5085235442464923e-06, "loss": 0.8072, "step": 300},
+     {"epoch": 0.53, "eval_f1": 0.6717305786202503, "eval_loss": 1.0609983205795288, "eval_runtime": 12.6873, "eval_samples_per_second": 717.806, "eval_steps_per_second": 1.419, "step": 300},
+     {"epoch": 0.61, "learning_rate": 3.439863592304291e-06, "loss": 0.7549, "step": 350},
+     {"epoch": 0.61, "eval_f1": 0.7228793656517608, "eval_loss": 0.9494264721870422, "eval_runtime": 12.8373, "eval_samples_per_second": 709.416, "eval_steps_per_second": 1.402, "step": 350},
+     {"epoch": 0.7, "learning_rate": 3.3712036403620893e-06, "loss": 0.6644, "step": 400},
+     {"epoch": 0.7, "eval_f1": 0.7719549711033966, "eval_loss": 0.8139678239822388, "eval_runtime": 12.9124, "eval_samples_per_second": 705.29, "eval_steps_per_second": 1.394, "step": 400},
+     {"epoch": 0.79, "learning_rate": 3.302543688419888e-06, "loss": 0.6306, "step": 450},
+     {"epoch": 0.79, "eval_f1": 0.7738311952589042, "eval_loss": 0.7844408750534058, "eval_runtime": 12.8236, "eval_samples_per_second": 710.174, "eval_steps_per_second": 1.404, "step": 450},
+     {"epoch": 0.88, "learning_rate": 3.2338837364776867e-06, "loss": 0.6093, "step": 500},
+     {"epoch": 0.88, "eval_f1": 0.7850534531405503, "eval_loss": 0.7450836300849915, "eval_runtime": 12.7168, "eval_samples_per_second": 716.14, "eval_steps_per_second": 1.415, "step": 500},
+     {"epoch": 0.96, "learning_rate": 3.1652237845354854e-06, "loss": 0.599, "step": 550},
+     {"epoch": 0.96, "eval_f1": 0.7932118091019561, "eval_loss": 0.7213732004165649, "eval_runtime": 12.9294, "eval_samples_per_second": 704.366, "eval_steps_per_second": 1.392, "step": 550},
+     {"epoch": 1.05, "learning_rate": 3.0965638325932837e-06, "loss": 0.5234, "step": 600},
+     {"epoch": 1.05, "eval_f1": 0.8216123467232329, "eval_loss": 0.6262369751930237, "eval_runtime": 13.578, "eval_samples_per_second": 670.72, "eval_steps_per_second": 1.326, "step": 600},
+     {"epoch": 1.14, "learning_rate": 3.0279038806510824e-06, "loss": 0.4465, "step": 650},
+     {"epoch": 1.14, "eval_f1": 0.7867065499958213, "eval_loss": 0.7198110818862915, "eval_runtime": 13.2261, "eval_samples_per_second": 688.561, "eval_steps_per_second": 1.361, "step": 650},
+     {"epoch": 1.23, "learning_rate": 2.959243928708881e-06, "loss": 0.4521, "step": 700},
+     {"epoch": 1.23, "eval_f1": 0.8063113996725187, "eval_loss": 0.6468276381492615, "eval_runtime": 12.8551, "eval_samples_per_second": 708.435, "eval_steps_per_second": 1.4, "step": 700},
+     {"epoch": 1.31, "learning_rate": 2.89058397676668e-06, "loss": 0.4339, "step": 750},
+     {"epoch": 1.31, "eval_f1": 0.8166775515876931, "eval_loss": 0.6203234195709229, "eval_runtime": 12.669, "eval_samples_per_second": 718.842, "eval_steps_per_second": 1.421, "step": 750},
+     {"epoch": 1.4, "learning_rate": 2.821924024824478e-06, "loss": 0.4757, "step": 800},
+     {"epoch": 1.4, "eval_f1": 0.8161608316711845, "eval_loss": 0.6249090433120728, "eval_runtime": 12.9976, "eval_samples_per_second": 700.668, "eval_steps_per_second": 1.385, "step": 800},
+     {"epoch": 1.49, "learning_rate": 2.753264072882277e-06, "loss": 0.4594, "step": 850},
+     {"epoch": 1.49, "eval_f1": 0.8290833266193817, "eval_loss": 0.5703582167625427, "eval_runtime": 12.8799, "eval_samples_per_second": 707.072, "eval_steps_per_second": 1.398, "step": 850},
+     {"epoch": 1.58, "learning_rate": 2.6846041209400755e-06, "loss": 0.4032, "step": 900},
+     {"epoch": 1.58, "eval_f1": 0.8215012797906703, "eval_loss": 0.5891036987304688, "eval_runtime": 12.8777, "eval_samples_per_second": 707.193, "eval_steps_per_second": 1.398, "step": 900},
+     {"epoch": 1.66, "learning_rate": 2.615944168997874e-06, "loss": 0.4204, "step": 950},
+     {"epoch": 1.66, "eval_f1": 0.8327085312817779, "eval_loss": 0.5532290935516357, "eval_runtime": 12.7986, "eval_samples_per_second": 711.563, "eval_steps_per_second": 1.406, "step": 950},
+     {"epoch": 1.75, "learning_rate": 2.5472842170556725e-06, "loss": 0.4386, "step": 1000},
+     {"epoch": 1.75, "eval_f1": 0.8286389311967884, "eval_loss": 0.5586702227592468, "eval_runtime": 12.7254, "eval_samples_per_second": 715.653, "eval_steps_per_second": 1.414, "step": 1000},
+     {"epoch": 1.84, "learning_rate": 2.478624265113471e-06, "loss": 0.4258, "step": 1050},
+     {"epoch": 1.84, "eval_f1": 0.8194650098039734, "eval_loss": 0.5945160388946533, "eval_runtime": 12.8508, "eval_samples_per_second": 708.674, "eval_steps_per_second": 1.401, "step": 1050},
+     {"epoch": 1.93, "learning_rate": 2.4099643131712695e-06, "loss": 0.4544, "step": 1100},
+     {"epoch": 1.93, "eval_f1": 0.8274559023611527, "eval_loss": 0.5760587453842163, "eval_runtime": 13.4335, "eval_samples_per_second": 677.933, "eval_steps_per_second": 1.34, "step": 1100},
+     {"epoch": 2.01, "learning_rate": 2.341304361229068e-06, "loss": 0.418, "step": 1150},
+     {"epoch": 2.01, "eval_f1": 0.8304623515332191, "eval_loss": 0.5600340962409973, "eval_runtime": 13.2414, "eval_samples_per_second": 687.766, "eval_steps_per_second": 1.359, "step": 1150},
+     {"epoch": 2.1, "learning_rate": 2.2726444092868665e-06, "loss": 0.3918, "step": 1200},
+     {"epoch": 2.1, "eval_f1": 0.8230072950822092, "eval_loss": 0.5960156321525574, "eval_runtime": 12.9264, "eval_samples_per_second": 704.529, "eval_steps_per_second": 1.393, "step": 1200},
+     {"epoch": 2.19, "learning_rate": 2.203984457344665e-06, "loss": 0.4001, "step": 1250},
+     {"epoch": 2.19, "eval_f1": 0.8370136201480073, "eval_loss": 0.5412853360176086, "eval_runtime": 12.8464, "eval_samples_per_second": 708.917, "eval_steps_per_second": 1.401, "step": 1250},
+     {"epoch": 2.28, "learning_rate": 2.135324505402464e-06, "loss": 0.3369, "step": 1300},
+     {"epoch": 2.28, "eval_f1": 0.8144549401231569, "eval_loss": 0.6154327392578125, "eval_runtime": 12.8518, "eval_samples_per_second": 708.616, "eval_steps_per_second": 1.401, "step": 1300},
+     {"epoch": 2.36, "learning_rate": 2.0666645534602626e-06, "loss": 0.3639, "step": 1350},
+     {"epoch": 2.36, "eval_f1": 0.8218463781690494, "eval_loss": 0.6043773889541626, "eval_runtime": 12.8354, "eval_samples_per_second": 709.522, "eval_steps_per_second": 1.402, "step": 1350},
+     {"epoch": 2.45, "learning_rate": 1.998004601518061e-06, "loss": 0.305, "step": 1400},
+     {"epoch": 2.45, "eval_f1": 0.8330006702059005, "eval_loss": 0.5544472336769104, "eval_runtime": 12.869, "eval_samples_per_second": 707.672, "eval_steps_per_second": 1.399, "step": 1400},
+     {"epoch": 2.54, "learning_rate": 1.9293446495758596e-06, "loss": 0.4444, "step": 1450},
+     {"epoch": 2.54, "eval_f1": 0.8346019595913722, "eval_loss": 0.559039294719696, "eval_runtime": 12.8371, "eval_samples_per_second": 709.431, "eval_steps_per_second": 1.402, "step": 1450},
+     {"epoch": 2.63, "learning_rate": 1.8606846976336585e-06, "loss": 0.3428, "step": 1500},
+     {"epoch": 2.63, "eval_f1": 0.8346071667451987, "eval_loss": 0.5472803115844727, "eval_runtime": 12.6738, "eval_samples_per_second": 718.567, "eval_steps_per_second": 1.42, "step": 1500},
+     {"epoch": 2.71, "learning_rate": 1.792024745691457e-06, "loss": 0.473, "step": 1550},
+     {"epoch": 2.71, "eval_f1": 0.8339861521101763, "eval_loss": 0.5484662055969238, "eval_runtime": 12.9188, "eval_samples_per_second": 704.942, "eval_steps_per_second": 1.393, "step": 1550},
+     {"epoch": 2.8, "learning_rate": 1.7233647937492555e-06, "loss": 0.3423, "step": 1600},
+     {"epoch": 2.8, "eval_f1": 0.834766650650793, "eval_loss": 0.5558911561965942, "eval_runtime": 13.6223, "eval_samples_per_second": 668.535, "eval_steps_per_second": 1.321, "step": 1600},
+     {"epoch": 2.89, "learning_rate": 1.654704841807054e-06, "loss": 0.3275, "step": 1650},
+     {"epoch": 2.89, "eval_f1": 0.8462797776414777, "eval_loss": 0.5118501782417297, "eval_runtime": 13.6479, "eval_samples_per_second": 667.283, "eval_steps_per_second": 1.319, "step": 1650},
+     {"epoch": 2.98, "learning_rate": 1.5860448898648526e-06, "loss": 0.3701, "step": 1700},
+     {"epoch": 2.98, "eval_f1": 0.8425787867333626, "eval_loss": 0.5238479375839233, "eval_runtime": 12.8261, "eval_samples_per_second": 710.034, "eval_steps_per_second": 1.403, "step": 1700},
+     {"epoch": 3.06, "learning_rate": 1.5173849379226511e-06, "loss": 0.3139, "step": 1750},
+     {"epoch": 3.06, "eval_f1": 0.8416221241169144, "eval_loss": 0.5329692959785461, "eval_runtime": 12.7867, "eval_samples_per_second": 712.223, "eval_steps_per_second": 1.408, "step": 1750},
+     {"epoch": 3.15, "learning_rate": 1.4487249859804498e-06, "loss": 0.2975, "step": 1800},
+     {"epoch": 3.15, "eval_f1": 0.8343340529328881, "eval_loss": 0.5499953627586365, "eval_runtime": 12.7893, "eval_samples_per_second": 712.078, "eval_steps_per_second": 1.407, "step": 1800},
+     {"epoch": 3.24, "learning_rate": 1.3800650340382483e-06, "loss": 0.2981, "step": 1850},
+     {"epoch": 3.24, "eval_f1": 0.8350192040740547, "eval_loss": 0.5687865018844604, "eval_runtime": 12.92, "eval_samples_per_second": 704.877, "eval_steps_per_second": 1.393, "step": 1850},
+     {"epoch": 3.33, "learning_rate": 1.311405082096047e-06, "loss": 0.2378, "step": 1900},
+     {"epoch": 3.33, "eval_f1": 0.8413969129271989, "eval_loss": 0.5414060354232788, "eval_runtime": 12.8936, "eval_samples_per_second": 706.32, "eval_steps_per_second": 1.396, "step": 1900},
+     {"epoch": 3.42, "learning_rate": 1.2427451301538455e-06, "loss": 0.3733, "step": 1950},
+     {"epoch": 3.42, "eval_f1": 0.8405393522238971, "eval_loss": 0.5475384593009949, "eval_runtime": 12.7243, "eval_samples_per_second": 715.714, "eval_steps_per_second": 1.415, "step": 1950},
+     {"epoch": 3.5, "learning_rate": 1.1740851782116442e-06, "loss": 0.3825, "step": 2000},
+     {"epoch": 3.5, "eval_f1": 0.8412270942316189, "eval_loss": 0.5363861322402954, "eval_runtime": 12.7385, "eval_samples_per_second": 714.922, "eval_steps_per_second": 1.413, "step": 2000}
+   ],
+   "logging_steps": 50,
+   "max_steps": 2855,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 5,
+   "save_steps": 500,
+   "total_flos": 162914167744800.0,
+   "train_batch_size": 8,
+   "trial_name": null,
+   "trial_params": {"learning_rate": 3.851823303957499e-06, "num_train_epochs": 5, "per_device_train_batch_size": 8, "seed": 30}
+ }
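"is_hyper_param_search": true and the trial_params block mark run-3 as one trial of a Trainer hyperparameter search; the sampled values for this trial were learning_rate ≈ 3.85e-06, 5 epochs, per-device batch size 8 and seed 30. A hedged sketch of how such run-*/checkpoint-* trees are typically produced with trainer.hyperparameter_search (the search ranges, dataset and metric objects below are illustrative assumptions, not taken from this repo):

```python
from transformers import Trainer, TrainingArguments

# model_init, train_ds, eval_ds and compute_metrics are assumed to exist elsewhere;
# only the search call is sketched here.
args = TrainingArguments(output_dir="test-klue/ynat",
                         evaluation_strategy="steps", eval_steps=50)

trainer = Trainer(model_init=model_init, args=args,
                  train_dataset=train_ds, eval_dataset=eval_ds,
                  compute_metrics=compute_metrics)

def hp_space(trial):
    # Illustrative Optuna search space over the same keys seen in trial_params.
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 3, 5),
        "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [8, 16, 32]),
        "seed": trial.suggest_int("seed", 1, 42),
    }

best = trainer.hyperparameter_search(direction="maximize", hp_space=hp_space, n_trials=10)
print(best)
```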
run-3/checkpoint-2000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9a16d730bb2e3e7b3ca811a9c52609a11c500a75019c1d976993f937b24f628
+ size 4728
run-3/checkpoint-2000/vocab.txt ADDED
The diff for this file is too large to render. See raw diff