ngxtnhi committed
Commit 4e9dfa5
1 Parent(s): 061a84a

Upload folder using huggingface_hub

Files changed (7)
  1. config.json +28 -0
  2. model.safetensors +3 -0
  3. optimizer.pt +3 -0
  4. rng_state.pth +3 -0
  5. scheduler.pt +3 -0
  6. trainer_state.json +432 -0
  7. training_args.bin +3 -0
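
The commit message indicates the checkpoint folder was pushed with `huggingface_hub`. As a hedged sketch only, an upload of this kind typically looks like the following; the `repo_id` and `folder_path` are hypothetical placeholders, not taken from this commit.

```python
# Sketch only: pushing a local checkpoint folder to the Hub with huggingface_hub.
# repo_id and folder_path below are hypothetical placeholders.
from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` by default
api.upload_folder(
    folder_path="./XLM-V_64-multi-outputs/checkpoint-21000",  # assumed local checkpoint dir
    repo_id="ngxtnhi/xlm-v-checkpoint",                       # hypothetical target repo
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```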
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+ "_name_or_path": "facebook/xlm-v-base",
+ "architectures": [
+ "XLMRobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 514,
+ "model_type": "xlm-roberta",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "torch_dtype": "float32",
+ "transformers_version": "4.41.2",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 901629
+ }
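
The configuration describes an `XLMRobertaForSequenceClassification` head fine-tuned from `facebook/xlm-v-base` (12 layers, hidden size 768, a 901,629-token vocabulary, single-label classification). A minimal sketch of loading a checkpoint that carries this config with `transformers` is shown below; `"path/to/checkpoint"` is a placeholder for the directory holding config.json and model.safetensors from this commit, not a path from the source.

```python
# Sketch only: loading a sequence-classification checkpoint that uses this config.
# "path/to/checkpoint" is a hypothetical placeholder directory.
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

config = AutoConfig.from_pretrained("path/to/checkpoint")
print(config.model_type, config.vocab_size)  # expected: "xlm-roberta", 901629

tokenizer = AutoTokenizer.from_pretrained("facebook/xlm-v-base")  # base tokenizer (assumed)
model = AutoModelForSequenceClassification.from_pretrained("path/to/checkpoint")
```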
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9ccdbfa0867a79924388d382a8233ff27668c1729559b60d9d85a68b0ddeb5d
+ size 3114003416
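
The weight file itself is stored through Git LFS, so the diff shows only the pointer: the spec version, the SHA-256 of the blob, and its size in bytes (about 3.1 GB here). A hedged sketch of fetching the real file with `huggingface_hub` and checking it against the pointer's digest follows; the `repo_id` is a hypothetical placeholder.

```python
# Sketch only: download the LFS-backed weights and verify them against the
# sha256 recorded in the pointer above. repo_id is a hypothetical placeholder.
import hashlib
from huggingface_hub import hf_hub_download

EXPECTED_SHA256 = "e9ccdbfa0867a79924388d382a8233ff27668c1729559b60d9d85a68b0ddeb5d"

path = hf_hub_download(repo_id="ngxtnhi/xlm-v-checkpoint", filename="model.safetensors")

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED_SHA256, "downloaded file does not match the LFS pointer"
```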
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ed4b2b4d1bb9d823778935e2e89da2cab824b3d9cf062a86d0867fdf4dc4c8f
+ size 6228131858
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a36fedb56c18b8956a1a960fd61eee9346a81b45d97785ce8389eb6293135c9a
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:936c6f5bbf3a3dbe10feca3e4110e021edb85a659f5354a5f21437182e998a50
+ size 1064
trainer_state.json ADDED
@@ -0,0 +1,432 @@
+ {
+ "best_metric": 0.7877622937690213,
+ "best_model_checkpoint": "./XLM-V_64-multi-outputs/checkpoint-16000",
+ "epoch": 14.8619957537155,
+ "eval_steps": 1000,
+ "global_step": 21000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.7077140835102619,
+ "grad_norm": 0.7589670419692993,
+ "learning_rate": 4.7169811320754717e-07,
+ "loss": 0.6944,
+ "step": 1000
+ },
+ {
+ "epoch": 0.7077140835102619,
+ "eval_accuracy": 0.5096086826645425,
+ "eval_f1": 0.6748960327414351,
+ "eval_loss": 0.6923578381538391,
+ "eval_precision": 0.5093155325296403,
+ "eval_recall": 1.0,
+ "eval_runtime": 57.7937,
+ "eval_samples_per_second": 173.773,
+ "eval_steps_per_second": 2.717,
+ "step": 1000
+ },
+ {
+ "epoch": 1.4154281670205238,
+ "grad_norm": 6.16334867477417,
+ "learning_rate": 9.433962264150943e-07,
+ "loss": 0.6848,
+ "step": 2000
+ },
+ {
+ "epoch": 1.4154281670205238,
+ "eval_accuracy": 0.6086826645424674,
+ "eval_f1": 0.7141402385801571,
+ "eval_loss": 0.6570077538490295,
+ "eval_precision": 0.5684344603983326,
+ "eval_recall": 0.9602895148669797,
+ "eval_runtime": 57.4971,
+ "eval_samples_per_second": 174.67,
+ "eval_steps_per_second": 2.731,
+ "step": 2000
+ },
+ {
+ "epoch": 2.1231422505307855,
+ "grad_norm": 2.09871768951416,
+ "learning_rate": 9.538663171690695e-07,
+ "loss": 0.6613,
+ "step": 3000
+ },
+ {
+ "epoch": 2.1231422505307855,
+ "eval_accuracy": 0.6230210096584686,
+ "eval_f1": 0.7231241772707327,
+ "eval_loss": 0.6287678480148315,
+ "eval_precision": 0.5774351786965662,
+ "eval_recall": 0.9671361502347418,
+ "eval_runtime": 57.6578,
+ "eval_samples_per_second": 174.183,
+ "eval_steps_per_second": 2.723,
+ "step": 3000
+ },
+ {
+ "epoch": 2.8308563340410475,
+ "grad_norm": 1.726118803024292,
+ "learning_rate": 9.014416775884666e-07,
+ "loss": 0.6505,
+ "step": 4000
+ },
+ {
+ "epoch": 2.8308563340410475,
+ "eval_accuracy": 0.635965348999303,
+ "eval_f1": 0.7311764705882353,
+ "eval_loss": 0.615561306476593,
+ "eval_precision": 0.5857681432610744,
+ "eval_recall": 0.9726134585289515,
+ "eval_runtime": 57.6722,
+ "eval_samples_per_second": 174.139,
+ "eval_steps_per_second": 2.722,
+ "step": 4000
+ },
+ {
+ "epoch": 3.538570417551309,
+ "grad_norm": 18.405258178710938,
+ "learning_rate": 8.490170380078637e-07,
+ "loss": 0.6453,
+ "step": 5000
+ },
+ {
+ "epoch": 3.538570417551309,
+ "eval_accuracy": 0.6402469381658867,
+ "eval_f1": 0.7314353675760054,
+ "eval_loss": 0.6071421504020691,
+ "eval_precision": 0.5898573312552452,
+ "eval_recall": 0.9624413145539906,
+ "eval_runtime": 57.5099,
+ "eval_samples_per_second": 174.631,
+ "eval_steps_per_second": 2.73,
+ "step": 5000
+ },
+ {
+ "epoch": 4.246284501061571,
+ "grad_norm": 1.6453580856323242,
+ "learning_rate": 7.965923984272608e-07,
+ "loss": 0.632,
+ "step": 6000
+ },
+ {
+ "epoch": 4.246284501061571,
+ "eval_accuracy": 0.6659364731653888,
+ "eval_f1": 0.7478770571879462,
+ "eval_loss": 0.5868598222732544,
+ "eval_precision": 0.6071995118974984,
+ "eval_recall": 0.97339593114241,
+ "eval_runtime": 58.3002,
+ "eval_samples_per_second": 172.263,
+ "eval_steps_per_second": 2.693,
+ "step": 6000
+ },
+ {
+ "epoch": 4.953998584571833,
+ "grad_norm": 3.636608362197876,
+ "learning_rate": 7.44167758846658e-07,
+ "loss": 0.6232,
+ "step": 7000
+ },
+ {
+ "epoch": 4.953998584571833,
+ "eval_accuracy": 0.6733047894055562,
+ "eval_f1": 0.7526200708738596,
+ "eval_loss": 0.5761261582374573,
+ "eval_precision": 0.6123175070543492,
+ "eval_recall": 0.9763302034428795,
+ "eval_runtime": 57.6672,
+ "eval_samples_per_second": 174.154,
+ "eval_steps_per_second": 2.723,
+ "step": 7000
+ },
+ {
+ "epoch": 5.661712668082095,
+ "grad_norm": 24.744699478149414,
+ "learning_rate": 6.91743119266055e-07,
+ "loss": 0.6141,
+ "step": 8000
+ },
+ {
+ "epoch": 5.661712668082095,
+ "eval_accuracy": 0.688140993726974,
+ "eval_f1": 0.7601837672281776,
+ "eval_loss": 0.5643310546875,
+ "eval_precision": 0.6245596376446905,
+ "eval_recall": 0.9710485133020345,
+ "eval_runtime": 57.5859,
+ "eval_samples_per_second": 174.4,
+ "eval_steps_per_second": 2.726,
+ "step": 8000
+ },
+ {
+ "epoch": 6.369426751592357,
+ "grad_norm": 9.586795806884766,
+ "learning_rate": 6.393184796854522e-07,
+ "loss": 0.605,
+ "step": 9000
+ },
+ {
+ "epoch": 6.369426751592357,
+ "eval_accuracy": 0.6972020312655581,
+ "eval_f1": 0.7631066448547168,
+ "eval_loss": 0.553417980670929,
+ "eval_precision": 0.6340453074433657,
+ "eval_recall": 0.9581377151799687,
+ "eval_runtime": 57.7405,
+ "eval_samples_per_second": 173.933,
+ "eval_steps_per_second": 2.719,
+ "step": 9000
+ },
+ {
+ "epoch": 7.077140835102618,
+ "grad_norm": 8.45578384399414,
+ "learning_rate": 5.868938401048492e-07,
+ "loss": 0.6021,
+ "step": 10000
+ },
+ {
+ "epoch": 7.077140835102618,
+ "eval_accuracy": 0.7043712038235587,
+ "eval_f1": 0.7691829277773459,
+ "eval_loss": 0.5444474816322327,
+ "eval_precision": 0.6382402270674752,
+ "eval_recall": 0.9677230046948356,
+ "eval_runtime": 57.654,
+ "eval_samples_per_second": 174.194,
+ "eval_steps_per_second": 2.723,
+ "step": 10000
+ },
+ {
+ "epoch": 7.78485491861288,
+ "grad_norm": 10.198156356811523,
+ "learning_rate": 5.344692005242464e-07,
+ "loss": 0.5934,
+ "step": 11000
+ },
+ {
+ "epoch": 7.78485491861288,
+ "eval_accuracy": 0.7097480832420592,
+ "eval_f1": 0.7714263310593585,
+ "eval_loss": 0.5372178554534912,
+ "eval_precision": 0.6437639052480042,
+ "eval_recall": 0.962245696400626,
+ "eval_runtime": 57.4867,
+ "eval_samples_per_second": 174.701,
+ "eval_steps_per_second": 2.731,
+ "step": 11000
+ },
+ {
+ "epoch": 8.492569002123142,
+ "grad_norm": 34.88262176513672,
+ "learning_rate": 4.820445609436435e-07,
+ "loss": 0.5842,
+ "step": 12000
+ },
+ {
+ "epoch": 8.492569002123142,
+ "eval_accuracy": 0.7273722991138106,
+ "eval_f1": 0.7810650887573964,
+ "eval_loss": 0.5250558853149414,
+ "eval_precision": 0.6605355693805789,
+ "eval_recall": 0.9553990610328639,
+ "eval_runtime": 57.6716,
+ "eval_samples_per_second": 174.141,
+ "eval_steps_per_second": 2.722,
+ "step": 12000
+ },
+ {
+ "epoch": 9.200283085633403,
+ "grad_norm": 63.931617736816406,
+ "learning_rate": 4.296199213630406e-07,
+ "loss": 0.5775,
+ "step": 13000
+ },
+ {
+ "epoch": 9.200283085633403,
+ "eval_accuracy": 0.7322513193268944,
+ "eval_f1": 0.7830226740902122,
+ "eval_loss": 0.5154615640640259,
+ "eval_precision": 0.6663919791237467,
+ "eval_recall": 0.9491392801251957,
+ "eval_runtime": 57.6583,
+ "eval_samples_per_second": 174.181,
+ "eval_steps_per_second": 2.723,
+ "step": 13000
+ },
+ {
+ "epoch": 9.907997169143666,
+ "grad_norm": 18.855731964111328,
+ "learning_rate": 3.771952817824377e-07,
+ "loss": 0.5694,
+ "step": 14000
+ },
+ {
+ "epoch": 9.907997169143666,
+ "eval_accuracy": 0.7335457532609778,
+ "eval_f1": 0.7848182695400451,
+ "eval_loss": 0.5107560157775879,
+ "eval_precision": 0.6663025669033316,
+ "eval_recall": 0.9546165884194053,
+ "eval_runtime": 57.5122,
+ "eval_samples_per_second": 174.624,
+ "eval_steps_per_second": 2.73,
+ "step": 14000
+ },
+ {
+ "epoch": 10.615711252653927,
+ "grad_norm": 5.973861217498779,
+ "learning_rate": 3.247706422018349e-07,
+ "loss": 0.5645,
+ "step": 15000
+ },
+ {
+ "epoch": 10.615711252653927,
+ "eval_accuracy": 0.7354376182415613,
+ "eval_f1": 0.7858466994438623,
+ "eval_loss": 0.5042764544487,
+ "eval_precision": 0.6682659355723098,
+ "eval_recall": 0.9536384976525821,
+ "eval_runtime": 57.6441,
+ "eval_samples_per_second": 174.224,
+ "eval_steps_per_second": 2.724,
+ "step": 15000
+ },
+ {
+ "epoch": 11.32342533616419,
+ "grad_norm": 13.824345588684082,
+ "learning_rate": 2.72346002621232e-07,
+ "loss": 0.5555,
+ "step": 16000
+ },
+ {
+ "epoch": 11.32342533616419,
+ "eval_accuracy": 0.7361346211291446,
+ "eval_f1": 0.7877622937690213,
+ "eval_loss": 0.5019333362579346,
+ "eval_precision": 0.666937889883374,
+ "eval_recall": 0.9620500782472613,
+ "eval_runtime": 57.7949,
+ "eval_samples_per_second": 173.77,
+ "eval_steps_per_second": 2.717,
+ "step": 16000
+ },
+ {
+ "epoch": 12.031139419674451,
+ "grad_norm": 12.343938827514648,
+ "learning_rate": 2.199213630406291e-07,
+ "loss": 0.5517,
+ "step": 17000
+ },
+ {
+ "epoch": 12.031139419674451,
+ "eval_accuracy": 0.7386239171562282,
+ "eval_f1": 0.786879922058943,
+ "eval_loss": 0.4967314302921295,
+ "eval_precision": 0.672588480222068,
+ "eval_recall": 0.9479655712050078,
+ "eval_runtime": 57.6752,
+ "eval_samples_per_second": 174.13,
+ "eval_steps_per_second": 2.722,
+ "step": 17000
+ },
+ {
+ "epoch": 12.738853503184714,
+ "grad_norm": 10.652567863464355,
+ "learning_rate": 1.6749672346002622e-07,
+ "loss": 0.5485,
+ "step": 18000
+ },
+ {
+ "epoch": 12.738853503184714,
+ "eval_accuracy": 0.7389226326794782,
+ "eval_f1": 0.7872444011684518,
+ "eval_loss": 0.49510088562965393,
+ "eval_precision": 0.6726289517470881,
+ "eval_recall": 0.948943661971831,
+ "eval_runtime": 57.6551,
+ "eval_samples_per_second": 174.191,
+ "eval_steps_per_second": 2.723,
+ "step": 18000
+ },
+ {
+ "epoch": 13.446567586694975,
+ "grad_norm": 7.330574035644531,
+ "learning_rate": 1.1507208387942332e-07,
+ "loss": 0.5448,
+ "step": 19000
+ },
+ {
+ "epoch": 13.446567586694975,
+ "eval_accuracy": 0.7379269142686449,
+ "eval_f1": 0.7868825910931174,
+ "eval_loss": 0.49567386507987976,
+ "eval_precision": 0.6713180436584691,
+ "eval_recall": 0.9505086071987481,
+ "eval_runtime": 57.6941,
+ "eval_samples_per_second": 174.073,
+ "eval_steps_per_second": 2.721,
+ "step": 19000
+ },
+ {
+ "epoch": 14.154281670205236,
+ "grad_norm": 22.538665771484375,
+ "learning_rate": 6.264744429882045e-08,
+ "loss": 0.5441,
+ "step": 20000
+ },
+ {
+ "epoch": 14.154281670205236,
+ "eval_accuracy": 0.7379269142686449,
+ "eval_f1": 0.7872615583575816,
+ "eval_loss": 0.4930832087993622,
+ "eval_precision": 0.6707988980716253,
+ "eval_recall": 0.952660406885759,
+ "eval_runtime": 57.6062,
+ "eval_samples_per_second": 174.339,
+ "eval_steps_per_second": 2.725,
+ "step": 20000
+ },
+ {
+ "epoch": 14.8619957537155,
+ "grad_norm": 11.810145378112793,
+ "learning_rate": 1.0222804718217562e-08,
+ "loss": 0.5416,
+ "step": 21000
+ },
+ {
+ "epoch": 14.8619957537155,
+ "eval_accuracy": 0.7375286269043114,
+ "eval_f1": 0.7866968765172357,
+ "eval_loss": 0.4928380250930786,
+ "eval_precision": 0.6708528843499862,
+ "eval_recall": 0.9508998435054773,
+ "eval_runtime": 57.6166,
+ "eval_samples_per_second": 174.308,
+ "eval_steps_per_second": 2.725,
+ "step": 21000
+ }
+ ],
+ "logging_steps": 1000,
+ "max_steps": 21195,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 15,
+ "save_steps": 1000,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 3.173409489802339e+17,
+ "train_batch_size": 64,
+ "trial_name": null,
+ "trial_params": null
+ }
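
trainer_state.json records the Trainer's log history: one entry per logging step (loss, grad_norm, learning rate) interleaved with evaluation entries (accuracy, F1, precision, recall), with the best F1 of about 0.788 reached at step 16000. A minimal sketch of pulling the evaluation curve out of this file is shown below; the file path is assumed to point at a downloaded copy of trainer_state.json.

```python
# Sketch only: read trainer_state.json and list the evaluation F1 per step,
# then recover the best checkpoint recorded by the Trainer.
import json

with open("trainer_state.json") as f:  # path assumed
    state = json.load(f)

eval_entries = [e for e in state["log_history"] if "eval_f1" in e]
for e in eval_entries:
    print(f"step {e['step']:>6}: eval_f1={e['eval_f1']:.4f}  eval_loss={e['eval_loss']:.4f}")

print("best_metric:", state["best_metric"])
print("best_model_checkpoint:", state["best_model_checkpoint"])
```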
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6b36f62b0edb9b7f5539033fb40b0e4058b128ea7755e1d2e55a763b3dd6243
+ size 5048
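
training_args.bin is a pickled `TrainingArguments` object rather than human-readable JSON. A hedged sketch of inspecting it is shown below; note that recent PyTorch versions default `torch.load` to `weights_only=True`, so loading a full pickled object requires opting out, which should only be done for files you trust, and `transformers` must be importable for unpickling to succeed.

```python
# Sketch only: inspect the pickled TrainingArguments saved by the Trainer.
# weights_only=False is needed on newer PyTorch because this is a pickled
# Python object, not a tensor checkpoint; use it only on trusted files.
import torch

args = torch.load("training_args.bin", weights_only=False)  # path assumed
print(type(args).__name__)  # typically "TrainingArguments"
print(args.per_device_train_batch_size, args.num_train_epochs, args.learning_rate)
```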