upload model
- README.md +0 -0
- all_results.json +19 -0
- config.json +39 -0
- eval_results.json +9 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
- train_results.json +8 -0
- trainer_state.json +175 -0
- training_args.bin +3 -0
- vocab.json +0 -0
README.md
ADDED
File without changes
all_results.json
ADDED
@@ -0,0 +1,19 @@
+{
+  "epoch": 10.0,
+  "eval_accuracy": 0.8090737462043762,
+  "eval_loss": 1.4761372804641724,
+  "eval_runtime": 15.9645,
+  "eval_samples": 2117,
+  "eval_samples_per_second": 132.544,
+  "eval_steps_per_second": 4.197,
+  "predict_accuracy": 0.7912139892578125,
+  "predict_loss": 1.6161744594573975,
+  "predict_runtime": 16.0928,
+  "predict_samples_per_second": 131.55,
+  "predict_steps_per_second": 4.163,
+  "train_loss": 0.1654055107068024,
+  "train_runtime": 5360.0658,
+  "train_samples": 16930,
+  "train_samples_per_second": 31.585,
+  "train_steps_per_second": 0.987
+}
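These are the aggregated metrics written by the Hugging Face Trainer at the end of the run. A minimal sketch for reading the headline numbers back out (the file path is an assumption: point it at a local copy of all_results.json):

```python
import json

# Assumption: a local copy of all_results.json is in the working directory.
with open("all_results.json") as f:
    results = json.load(f)

print(f"validation accuracy: {results['eval_accuracy']:.4f}")    # ~0.809
print(f"predict accuracy:    {results['predict_accuracy']:.4f}") # ~0.791
# Throughput is roughly samples / runtime, e.g. 2117 / 15.9645 ~= 132.6 eval samples/s.
```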
config.json
ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "BSC-TeMU/roberta-base-ca",
+  "architectures": [
+    "RobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "eos_token_id": 2,
+  "finetuning_task": "textual-entailment",
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "ENTAILMENT",
+    "1": "NEUTRAL",
+    "2": "CONTRADICTION"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "ENTAILMENT": 0,
+    "NEUTRAL": 1,
+    "CONTRADICTION": 2
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.9.1",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 52000
+}
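The config describes a RoBERTa sequence-classification head fine-tuned from BSC-TeMU/roberta-base-ca for Catalan textual entailment, with three labels (ENTAILMENT / NEUTRAL / CONTRADICTION). A minimal usage sketch with `transformers`; the repo id and the Catalan sentence pair are placeholders, not part of this upload:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Placeholder repo id -- substitute the actual id of this model repository.
model_id = "<this-repo-id>"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

premise = "El gat dorm al sofà."           # illustrative premise
hypothesis = "Hi ha un animal que dorm."   # illustrative hypothesis

# Premise and hypothesis are encoded as a sentence pair, as in standard NLI fine-tuning.
inputs = tokenizer(premise, hypothesis, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

pred = logits.argmax(dim=-1).item()
print(model.config.id2label[pred])         # ENTAILMENT / NEUTRAL / CONTRADICTION
```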
eval_results.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "epoch": 10.0,
+  "eval_accuracy": 0.8090737462043762,
+  "eval_loss": 1.4761372804641724,
+  "eval_runtime": 15.9645,
+  "eval_samples": 2116,
+  "eval_samples_per_second": 132.544,
+  "eval_steps_per_second": 4.197
+}
merges.txt
ADDED
The diff for this file is too large to render; see the raw diff.
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d56f1dcd9072d54c03b8571a6ef27332b68c6999985ac26e2bda02e09466d01
+size 504010159
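The checked-in file is only a Git LFS pointer; the ~504 MB weights live in LFS storage. A minimal sketch for fetching the real binary programmatically with `huggingface_hub` (the repo id is a placeholder):

```python
from huggingface_hub import hf_hub_download

# Placeholder repo id -- substitute the actual id of this model repository.
weights_path = hf_hub_download(repo_id="<this-repo-id>", filename="pytorch_model.bin")
print(weights_path)  # local cache path to the ~504 MB weights file
```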
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer.json
ADDED
The diff for this file is too large to render; see the raw diff.
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": true, "errors": "replace", "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "max_len": 512, "special_tokens_map_file": null, "name_or_path": "BSC-TeMU/roberta-base-ca", "tokenizer_class": "RobertaTokenizer"}
train_results.json
ADDED
@@ -0,0 +1,8 @@
+{
+  "epoch": 10.0,
+  "train_loss": 0.1654055107068024,
+  "train_runtime": 5360.0658,
+  "train_samples": 16930,
+  "train_samples_per_second": 31.585,
+  "train_steps_per_second": 0.987
+}
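The reported training throughput is consistent with the runtime: 16,930 samples over 10 epochs in 5,360 s is about 31.6 samples/s. A quick cross-check (assuming a local copy of train_results.json):

```python
import json

# Assumption: a local copy of train_results.json is in the working directory.
with open("train_results.json") as f:
    train = json.load(f)

throughput = train["train_samples"] * train["epoch"] / train["train_runtime"]
print(round(throughput, 3))               # ~31.585
print(train["train_samples_per_second"])  # 31.585, as reported
```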
trainer_state.json
ADDED
@@ -0,0 +1,175 @@
+{
+  "best_metric": 0.8090737462043762,
+  "best_model_checkpoint": "/home/ccasimiro/ccasimiro/berta/src/finetuning/te/roberta-base-ca-cased-te/checkpoint-5290",
+  "epoch": 9.999055712936732,
+  "global_step": 5290,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.94,
+      "learning_rate": 4.527410207939509e-05,
+      "loss": 0.7295,
+      "step": 500
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.7693761587142944,
+      "eval_loss": 0.5496639013290405,
+      "eval_runtime": 15.769,
+      "eval_samples_per_second": 134.188,
+      "eval_steps_per_second": 4.249,
+      "step": 529
+    },
+    {
+      "epoch": 1.89,
+      "learning_rate": 4.054820415879017e-05,
+      "loss": 0.4162,
+      "step": 1000
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.782136082649231,
+      "eval_loss": 0.5493781566619873,
+      "eval_runtime": 15.7381,
+      "eval_samples_per_second": 134.45,
+      "eval_steps_per_second": 4.257,
+      "step": 1058
+    },
+    {
+      "epoch": 2.83,
+      "learning_rate": 3.582230623818525e-05,
+      "loss": 0.237,
+      "step": 1500
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.7948960065841675,
+      "eval_loss": 0.6657664179801941,
+      "eval_runtime": 15.8042,
+      "eval_samples_per_second": 133.888,
+      "eval_steps_per_second": 4.239,
+      "step": 1587
+    },
+    {
+      "epoch": 3.78,
+      "learning_rate": 3.1096408317580344e-05,
+      "loss": 0.1316,
+      "step": 2000
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.79347825050354,
+      "eval_loss": 0.910639226436615,
+      "eval_runtime": 15.913,
+      "eval_samples_per_second": 132.973,
+      "eval_steps_per_second": 4.21,
+      "step": 2116
+    },
+    {
+      "epoch": 4.73,
+      "learning_rate": 2.637051039697543e-05,
+      "loss": 0.0853,
+      "step": 2500
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.8057655692100525,
+      "eval_loss": 1.0349944829940796,
+      "eval_runtime": 15.8236,
+      "eval_samples_per_second": 133.724,
+      "eval_steps_per_second": 4.234,
+      "step": 2645
+    },
+    {
+      "epoch": 5.67,
+      "learning_rate": 2.1644612476370512e-05,
+      "loss": 0.0593,
+      "step": 3000
+    },
+    {
+      "epoch": 6.0,
+      "eval_accuracy": 0.8010396957397461,
+      "eval_loss": 1.2216821908950806,
+      "eval_runtime": 15.8613,
+      "eval_samples_per_second": 133.407,
+      "eval_steps_per_second": 4.224,
+      "step": 3174
+    },
+    {
+      "epoch": 6.62,
+      "learning_rate": 1.6918714555765597e-05,
+      "loss": 0.0384,
+      "step": 3500
+    },
+    {
+      "epoch": 7.0,
+      "eval_accuracy": 0.8010396957397461,
+      "eval_loss": 1.3548704385757446,
+      "eval_runtime": 15.9719,
+      "eval_samples_per_second": 132.483,
+      "eval_steps_per_second": 4.195,
+      "step": 3703
+    },
+    {
+      "epoch": 7.56,
+      "learning_rate": 1.2192816635160681e-05,
+      "loss": 0.0231,
+      "step": 4000
+    },
+    {
+      "epoch": 8.0,
+      "eval_accuracy": 0.8024574518203735,
+      "eval_loss": 1.385912537574768,
+      "eval_runtime": 15.9537,
+      "eval_samples_per_second": 132.633,
+      "eval_steps_per_second": 4.2,
+      "step": 4232
+    },
+    {
+      "epoch": 8.51,
+      "learning_rate": 7.466918714555767e-06,
+      "loss": 0.0142,
+      "step": 4500
+    },
+    {
+      "epoch": 9.0,
+      "eval_accuracy": 0.8062381744384766,
+      "eval_loss": 1.4750466346740723,
+      "eval_runtime": 15.969,
+      "eval_samples_per_second": 132.507,
+      "eval_steps_per_second": 4.196,
+      "step": 4761
+    },
+    {
+      "epoch": 9.45,
+      "learning_rate": 2.741020793950851e-06,
+      "loss": 0.0118,
+      "step": 5000
+    },
+    {
+      "epoch": 10.0,
+      "eval_accuracy": 0.8090737462043762,
+      "eval_loss": 1.4761372804641724,
+      "eval_runtime": 15.9022,
+      "eval_samples_per_second": 133.064,
+      "eval_steps_per_second": 4.213,
+      "step": 5290
+    },
+    {
+      "epoch": 10.0,
+      "step": 5290,
+      "total_flos": 1.1136143848628736e+16,
+      "train_loss": 0.1654055107068024,
+      "train_runtime": 5360.0658,
+      "train_samples_per_second": 31.585,
+      "train_steps_per_second": 0.987
+    }
+  ],
+  "max_steps": 5290,
+  "num_train_epochs": 10,
+  "total_flos": 1.1136143848628736e+16,
+  "trial_name": null,
+  "trial_params": null
+}
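`log_history` interleaves training-loss entries (logged every 500 steps) with per-epoch evaluation entries; the best checkpoint is the one at step 5290 (epoch 10). A small sketch for extracting the per-epoch accuracy curve (assuming a local copy of the file):

```python
import json

# Assumption: a local copy of trainer_state.json is in the working directory.
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only evaluation entries; training-loss entries carry no eval_accuracy.
evals = [e for e in state["log_history"] if "eval_accuracy" in e]
for e in evals:
    print(f"epoch {e['epoch']}: accuracy={e['eval_accuracy']:.4f} loss={e['eval_loss']:.4f}")

best = max(evals, key=lambda e: e["eval_accuracy"])
print("best checkpoint step:", best["step"], "accuracy:", best["eval_accuracy"])  # 5290, ~0.8091
```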
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:767fab7545b0cbbfae578348bab15195548649b2efccfd04a94d1b1bdabf1723
+size 49074176
vocab.json
ADDED
The diff for this file is too large to render; see the raw diff.