josecannete committed
Commit bfd5f4c
1 Parent(s): 59da72d

adding model finetuned on POS

all_results.json ADDED
@@ -0,0 +1,17 @@
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.9839718066450924,
+ "eval_f1": 0.9833950734391069,
+ "eval_loss": 0.05716780200600624,
+ "eval_precision": 0.9829141716566866,
+ "eval_recall": 0.9838764460250544,
+ "eval_runtime": 11.7694,
+ "eval_samples": 1654,
+ "eval_samples_per_second": 140.534,
+ "eval_steps_per_second": 8.836,
+ "train_loss": 0.05827242064409416,
+ "train_runtime": 5244.9021,
+ "train_samples": 14305,
+ "train_samples_per_second": 10.91,
+ "train_steps_per_second": 0.683
+ }
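
Note: the token-level accuracy, precision, recall, and F1 above are the kind of scores produced by the seqeval metric used in Hugging Face's token-classification example scripts. A minimal sketch of that computation, assuming seqeval via the `evaluate` library (the exact evaluation code for this run is not part of the commit):

```python
# Hedged sketch: computing token-classification metrics with the seqeval
# metric from the `evaluate` library (requires `pip install evaluate seqeval`).
# The tag sequences below are toy data, not taken from this model's eval set.
import evaluate

seqeval = evaluate.load("seqeval")

references = [["DET", "NOUN", "VERB"], ["PRON", "VERB", "ADP", "NOUN"]]
predictions = [["DET", "NOUN", "VERB"], ["PRON", "VERB", "ADP", "ADJ"]]

results = seqeval.compute(predictions=predictions, references=references)
print(results["overall_precision"], results["overall_recall"],
      results["overall_f1"], results["overall_accuracy"])
```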
config.json ADDED
@@ -0,0 +1,75 @@
+ {
+ "_name_or_path": "CenIA/albert_xxlarge_spanish",
+ "architectures": [
+ "AlbertForTokenClassification"
+ ],
+ "attention_probs_dropout_prob": 0,
+ "bos_token_id": 2,
+ "classifier_dropout_prob": 0.1,
+ "down_scale_factor": 1,
+ "embedding_size": 128,
+ "eos_token_id": 3,
+ "finetuning_task": "pos",
+ "gap_size": 0,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0,
+ "hidden_size": 4096,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2",
+ "3": "LABEL_3",
+ "4": "LABEL_4",
+ "5": "LABEL_5",
+ "6": "LABEL_6",
+ "7": "LABEL_7",
+ "8": "LABEL_8",
+ "9": "LABEL_9",
+ "10": "LABEL_10",
+ "11": "LABEL_11",
+ "12": "LABEL_12",
+ "13": "LABEL_13",
+ "14": "LABEL_14",
+ "15": "LABEL_15",
+ "16": "LABEL_16",
+ "17": "LABEL_17"
+ },
+ "initializer_range": 0.01,
+ "inner_group_num": 1,
+ "intermediate_size": 16384,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_10": 10,
+ "LABEL_11": 11,
+ "LABEL_12": 12,
+ "LABEL_13": 13,
+ "LABEL_14": 14,
+ "LABEL_15": 15,
+ "LABEL_16": 16,
+ "LABEL_17": 17,
+ "LABEL_2": 2,
+ "LABEL_3": 3,
+ "LABEL_4": 4,
+ "LABEL_5": 5,
+ "LABEL_6": 6,
+ "LABEL_7": 7,
+ "LABEL_8": 8,
+ "LABEL_9": 9
+ },
+ "layer_norm_eps": 1e-12,
+ "layers_to_keep": [],
+ "max_position_embeddings": 512,
+ "model_type": "albert",
+ "net_structure_type": 0,
+ "num_attention_heads": 64,
+ "num_hidden_groups": 1,
+ "num_hidden_layers": 12,
+ "num_memory_blocks": 0,
+ "pad_token_id": 0,
+ "position_embedding_type": "absolute",
+ "torch_dtype": "float32",
+ "transformers_version": "4.12.5",
+ "type_vocab_size": 2,
+ "vocab_size": 31000
+ }
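
The config declares an `AlbertForTokenClassification` head with 18 generic labels (LABEL_0 through LABEL_17) on top of `CenIA/albert_xxlarge_spanish`. A minimal loading sketch; `"path/to/this-repo"` is a placeholder for a local clone (or the Hub id) of this fine-tuned model, which the commit itself does not name:

```python
# Hedged sketch of loading this checkpoint for POS inference.
from transformers import AutoModelForTokenClassification, AutoTokenizer, pipeline

model_dir = "path/to/this-repo"  # placeholder; not specified in this commit
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForTokenClassification.from_pretrained(model_dir)

# The shipped config maps ids only to generic names (LABEL_0 ... LABEL_17),
# so the pipeline reports those rather than human-readable POS tags.
tagger = pipeline("token-classification", model=model, tokenizer=tokenizer)
print(tagger("El presidente visitó Valparaíso ayer."))
```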
eval_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.9839718066450924,
+ "eval_f1": 0.9833950734391069,
+ "eval_loss": 0.05716780200600624,
+ "eval_precision": 0.9829141716566866,
+ "eval_recall": 0.9838764460250544,
+ "eval_runtime": 11.7694,
+ "eval_samples": 1654,
+ "eval_samples_per_second": 140.534,
+ "eval_steps_per_second": 8.836
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24bd05067ad36d961a0f3b5fe3dd2342446c843c5924ae870591f8989e41e56c
+ size 824078295
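
The weights are stored as a Git LFS pointer (about 824 MB). After cloning, `git lfs pull` fetches the real file; alternatively, a hedged sketch using `huggingface_hub`, with `"user/repo-name"` as a placeholder for this model's Hub id (not spelled out in the commit):

```python
# Hedged sketch: resolving the LFS pointer to the actual checkpoint file.
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(
    repo_id="user/repo-name",      # placeholder Hub id
    filename="pytorch_model.bin",
)
print(weights_path)  # local cache path of the ~824 MB file
```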
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": true, "remove_space": true, "keep_accents": true, "bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "CenIA/albert_xxlarge_spanish", "tokenizer_class": "AlbertTokenizer"}
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 4.0,
+ "train_loss": 0.05827242064409416,
+ "train_runtime": 5244.9021,
+ "train_samples": 14305,
+ "train_samples_per_second": 10.91,
+ "train_steps_per_second": 0.683
+ }
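
As a sanity check, the throughput figures are mutually consistent: 14305 samples over 4 epochs in roughly 5245 seconds gives about 10.91 samples per second, and with the batch size of 16 named in the checkpoint path below, about 0.68 optimizer steps per second:

```python
# Consistency check on the reported training throughput (batch size 16 is
# taken from the "epochs_4_bs_16_lr_5e-6" checkpoint path in trainer_state.json).
samples, epochs, runtime, batch_size = 14305, 4, 5244.9021, 16
print(samples * epochs / runtime)               # ~10.91 samples/s
print(samples * epochs / runtime / batch_size)  # ~0.68 steps/s
```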
trainer_state.json ADDED
@@ -0,0 +1,271 @@
+ {
+ "best_metric": 0.05716780200600624,
+ "best_model_checkpoint": "/data/jcanete/all_results/pos/albeto_xxlarge/epochs_4_bs_16_lr_5e-6/checkpoint-1800",
+ "epoch": 4.0,
+ "global_step": 3580,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.22,
+ "eval_accuracy": 0.9740770234323376,
+ "eval_f1": 0.9703136222169021,
+ "eval_loss": 0.09671590477228165,
+ "eval_precision": 0.9689025240552224,
+ "eval_recall": 0.9717288365866816,
+ "eval_runtime": 12.1146,
+ "eval_samples_per_second": 136.529,
+ "eval_steps_per_second": 8.585,
+ "step": 200
+ },
+ {
+ "epoch": 0.45,
+ "eval_accuracy": 0.978109486453974,
+ "eval_f1": 0.9763370891925068,
+ "eval_loss": 0.08312373608350754,
+ "eval_precision": 0.9753928372018824,
+ "eval_recall": 0.9772831711654113,
+ "eval_runtime": 12.0635,
+ "eval_samples_per_second": 137.107,
+ "eval_steps_per_second": 8.621,
+ "step": 400
+ },
+ {
+ "epoch": 0.56,
+ "learning_rate": 4.305865921787709e-06,
+ "loss": 0.2346,
+ "step": 500
+ },
+ {
+ "epoch": 0.67,
+ "eval_accuracy": 0.9802951491841887,
+ "eval_f1": 0.9781281181400918,
+ "eval_loss": 0.06705235689878464,
+ "eval_precision": 0.9769778149853495,
+ "eval_recall": 0.979281133244091,
+ "eval_runtime": 12.0607,
+ "eval_samples_per_second": 137.14,
+ "eval_steps_per_second": 8.623,
+ "step": 600
+ },
+ {
+ "epoch": 0.89,
+ "eval_accuracy": 0.9816167126954812,
+ "eval_f1": 0.980212056468521,
+ "eval_loss": 0.06424280256032944,
+ "eval_precision": 0.9796252319849933,
+ "eval_recall": 0.9807995844238876,
+ "eval_runtime": 12.0651,
+ "eval_samples_per_second": 137.09,
+ "eval_steps_per_second": 8.62,
+ "step": 800
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 3.607541899441341e-06,
+ "loss": 0.0581,
+ "step": 1000
+ },
+ {
+ "epoch": 1.12,
+ "eval_accuracy": 0.9821419494756104,
+ "eval_f1": 0.9813241767565355,
+ "eval_loss": 0.06397537887096405,
+ "eval_precision": 0.9805313971116253,
+ "eval_recall": 0.9821182393958162,
+ "eval_runtime": 12.0587,
+ "eval_samples_per_second": 137.163,
+ "eval_steps_per_second": 8.624,
+ "step": 1000
+ },
+ {
+ "epoch": 1.34,
+ "eval_accuracy": 0.982734958743498,
+ "eval_f1": 0.9817945902784708,
+ "eval_loss": 0.06401154398918152,
+ "eval_precision": 0.9809330062426455,
+ "eval_recall": 0.9826576891570598,
+ "eval_runtime": 12.05,
+ "eval_samples_per_second": 137.262,
+ "eval_steps_per_second": 8.631,
+ "step": 1200
+ },
+ {
+ "epoch": 1.56,
+ "eval_accuracy": 0.9834635129869029,
+ "eval_f1": 0.982266245955985,
+ "eval_loss": 0.05947747826576233,
+ "eval_precision": 0.9818152783599816,
+ "eval_recall": 0.9827176280194202,
+ "eval_runtime": 12.0881,
+ "eval_samples_per_second": 136.829,
+ "eval_steps_per_second": 8.604,
+ "step": 1400
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 2.9092178770949727e-06,
+ "loss": 0.04,
+ "step": 1500
+ },
+ {
+ "epoch": 1.79,
+ "eval_accuracy": 0.9830229918164721,
+ "eval_f1": 0.982217698479224,
+ "eval_loss": 0.05940423533320427,
+ "eval_precision": 0.9811407268595124,
+ "eval_recall": 0.9832970370222374,
+ "eval_runtime": 12.0517,
+ "eval_samples_per_second": 137.242,
+ "eval_steps_per_second": 8.629,
+ "step": 1600
+ },
+ {
+ "epoch": 2.01,
+ "eval_accuracy": 0.9839718066450924,
+ "eval_f1": 0.9833950734391069,
+ "eval_loss": 0.05716780200600624,
+ "eval_precision": 0.9829141716566866,
+ "eval_recall": 0.9838764460250544,
+ "eval_runtime": 12.0638,
+ "eval_samples_per_second": 137.105,
+ "eval_steps_per_second": 8.621,
+ "step": 1800
+ },
+ {
+ "epoch": 2.23,
+ "learning_rate": 2.2108938547486037e-06,
+ "loss": 0.0327,
+ "step": 2000
+ },
+ {
+ "epoch": 2.23,
+ "eval_accuracy": 0.983700716694058,
+ "eval_f1": 0.9826579207475964,
+ "eval_loss": 0.06271136552095413,
+ "eval_precision": 0.9820794252644183,
+ "eval_recall": 0.9832370981598769,
+ "eval_runtime": 12.0524,
+ "eval_samples_per_second": 137.234,
+ "eval_steps_per_second": 8.629,
+ "step": 2000
+ },
+ {
+ "epoch": 2.46,
+ "eval_accuracy": 0.9840734653767303,
+ "eval_f1": 0.9836376523674989,
+ "eval_loss": 0.06178496032953262,
+ "eval_precision": 0.982980166806337,
+ "eval_recall": 0.9842960180615772,
+ "eval_runtime": 12.0504,
+ "eval_samples_per_second": 137.257,
+ "eval_steps_per_second": 8.63,
+ "step": 2200
+ },
+ {
+ "epoch": 2.68,
+ "eval_accuracy": 0.9839887497670321,
+ "eval_f1": 0.983732286767129,
+ "eval_loss": 0.0652877539396286,
+ "eval_precision": 0.9833885716567504,
+ "eval_recall": 0.9840762422329224,
+ "eval_runtime": 12.0582,
+ "eval_samples_per_second": 137.168,
+ "eval_steps_per_second": 8.625,
+ "step": 2400
+ },
+ {
+ "epoch": 2.79,
+ "learning_rate": 1.5125698324022347e-06,
+ "loss": 0.022,
+ "step": 2500
+ },
+ {
+ "epoch": 2.91,
+ "eval_accuracy": 0.9843445553277647,
+ "eval_f1": 0.9838023527532006,
+ "eval_loss": 0.061138641089200974,
+ "eval_precision": 0.9834487990895842,
+ "eval_recall": 0.9841561607160696,
+ "eval_runtime": 12.0686,
+ "eval_samples_per_second": 137.05,
+ "eval_steps_per_second": 8.617,
+ "step": 2600
+ },
+ {
+ "epoch": 3.13,
+ "eval_accuracy": 0.984378441571644,
+ "eval_f1": 0.9841013042523019,
+ "eval_loss": 0.06497478485107422,
+ "eval_precision": 0.9837869137232195,
+ "eval_recall": 0.984415895786298,
+ "eval_runtime": 12.0336,
+ "eval_samples_per_second": 137.449,
+ "eval_steps_per_second": 8.642,
+ "step": 2800
+ },
+ {
+ "epoch": 3.35,
+ "learning_rate": 8.142458100558661e-07,
+ "loss": 0.0161,
+ "step": 3000
+ },
+ {
+ "epoch": 3.35,
+ "eval_accuracy": 0.9844631571813423,
+ "eval_f1": 0.9838933546357782,
+ "eval_loss": 0.06659159064292908,
+ "eval_precision": 0.9834710743801653,
+ "eval_recall": 0.984315997682364,
+ "eval_runtime": 12.058,
+ "eval_samples_per_second": 137.17,
+ "eval_steps_per_second": 8.625,
+ "step": 3000
+ },
+ {
+ "epoch": 3.58,
+ "eval_accuracy": 0.9840056928889718,
+ "eval_f1": 0.983372447196285,
+ "eval_loss": 0.06790520250797272,
+ "eval_precision": 0.9830484785560258,
+ "eval_recall": 0.9836966294379733,
+ "eval_runtime": 12.0261,
+ "eval_samples_per_second": 137.534,
+ "eval_steps_per_second": 8.648,
+ "step": 3200
+ },
+ {
+ "epoch": 3.8,
+ "eval_accuracy": 0.9842767828400061,
+ "eval_f1": 0.9837931757586651,
+ "eval_loss": 0.06859102845191956,
+ "eval_precision": 0.9833905613670846,
+ "eval_recall": 0.9841961199576432,
+ "eval_runtime": 12.0273,
+ "eval_samples_per_second": 137.521,
+ "eval_steps_per_second": 8.647,
+ "step": 3400
+ },
+ {
+ "epoch": 3.91,
+ "learning_rate": 1.1592178770949721e-07,
+ "loss": 0.012,
+ "step": 3500
+ },
+ {
+ "epoch": 4.0,
+ "step": 3580,
+ "total_flos": 5364557434522752.0,
+ "train_loss": 0.05827242064409416,
+ "train_runtime": 5244.9021,
+ "train_samples_per_second": 10.91,
+ "train_steps_per_second": 0.683
+ }
+ ],
+ "max_steps": 3580,
+ "num_train_epochs": 4,
+ "total_flos": 5364557434522752.0,
+ "trial_name": null,
+ "trial_params": null
+ }
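
trainer_state.json pins down the run's shape: 3580 steps over 4 epochs, evaluation every 200 steps, logging every 500, and the best checkpoint (lowest eval_loss, 0.0572) at step 1800; the checkpoint path also encodes the hyperparameters (epochs_4_bs_16_lr_5e-6). A hedged TrainingArguments sketch that would reproduce this shape (the authoritative values are serialized in training_args.bin below):

```python
# Hedged reconstruction of the Trainer setup implied by trainer_state.json;
# this is only a sketch, the actual arguments live in training_args.bin.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="epochs_4_bs_16_lr_5e-6",
    num_train_epochs=4,
    per_device_train_batch_size=16,
    learning_rate=5e-6,
    evaluation_strategy="steps",
    eval_steps=200,
    save_steps=200,
    logging_steps=500,
    load_best_model_at_end=True,
    metric_for_best_model="eval_loss",
    greater_is_better=False,
)
```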
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:211fc83f4ac4107250cd13c933f937281918a6fa555449d8858952b06da464f0
+ size 2863
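
training_args.bin is a small (~2.9 kB) torch-serialized TrainingArguments object, also stored as an LFS pointer. Once the real file is fetched, the exact values behind the sketch above can be inspected; a minimal example, assuming the file sits in the working directory:

```python
# Inspect the serialized TrainingArguments. On recent PyTorch versions,
# torch.load needs weights_only=False to unpickle a full Python object.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.per_device_train_batch_size, args.learning_rate)
```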