model update

Files changed:
- README.md +38 -38
- analogy.forward.json +1 -1
- classification.json +1 -1
- config.json +6 -6
- finetuning_config.json +2 -2
- pytorch_model.bin +2 -2
- relation_mapping.json +0 -0
- special_tokens_map.json +15 -1
- tokenizer.json +4 -2
- tokenizer_config.json +16 -1
README.md
CHANGED
@@ -14,7 +14,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.
+      value: 0.8133333333333334
   - task:
       name: Analogy Questions (SAT full)
       type: multiple-choice-qa
@@ -25,7 +25,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.
+      value: 0.6818181818181818
   - task:
       name: Analogy Questions (SAT)
       type: multiple-choice-qa
@@ -36,7 +36,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.
+      value: 0.6824925816023739
   - task:
       name: Analogy Questions (BATS)
       type: multiple-choice-qa
@@ -47,7 +47,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.
+      value: 0.783212896053363
   - task:
       name: Analogy Questions (Google)
       type: multiple-choice-qa
@@ -58,7 +58,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.
+      value: 0.934
   - task:
       name: Analogy Questions (U2)
       type: multiple-choice-qa
@@ -69,7 +69,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.
+      value: 0.6754385964912281
   - task:
       name: Analogy Questions (U4)
       type: multiple-choice-qa
@@ -80,7 +80,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.
+      value: 0.6388888888888888
   - task:
       name: Analogy Questions (ConceptNet Analogy)
       type: multiple-choice-qa
@@ -91,7 +91,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.
+      value: 0.43288590604026844
   - task:
       name: Analogy Questions (TREX Analogy)
       type: multiple-choice-qa
@@ -102,7 +102,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.
+      value: 0.6775956284153005
   - task:
       name: Analogy Questions (NELL-ONE Analogy)
       type: multiple-choice-qa
@@ -113,7 +113,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.
+      value: 0.605
   - task:
       name: Lexical Relation Classification (BLESS)
       type: classification
@@ -124,10 +124,10 @@ model-index:
     metrics:
     - name: F1
       type: f1
-      value: 0.
+      value: 0.9148711767364774
     - name: F1 (macro)
       type: f1_macro
-      value: 0.
+      value: 0.9119056356713013
   - task:
       name: Lexical Relation Classification (CogALexV)
       type: classification
@@ -138,10 +138,10 @@ model-index:
     metrics:
     - name: F1
       type: f1
-      value: 0.
+      value: 0.8485915492957746
     - name: F1 (macro)
       type: f1_macro
-      value: 0.
+      value: 0.6811794888962958
   - task:
       name: Lexical Relation Classification (EVALution)
       type: classification
@@ -152,10 +152,10 @@ model-index:
     metrics:
     - name: F1
       type: f1
-      value: 0.
+      value: 0.6690140845070423
     - name: F1 (macro)
       type: f1_macro
-      value: 0.
+      value: 0.6624009209291007
   - task:
       name: Lexical Relation Classification (K&H+N)
       type: classification
@@ -166,10 +166,10 @@ model-index:
     metrics:
     - name: F1
       type: f1
-      value: 0.
+      value: 0.9508937886902692
     - name: F1 (macro)
       type: f1_macro
-      value: 0.
+      value: 0.8677983904224069
   - task:
       name: Lexical Relation Classification (ROOT09)
       type: classification
@@ -180,34 +180,34 @@ model-index:
     metrics:
     - name: F1
       type: f1
-      value: 0.
+      value: 0.8918834221247258
     - name: F1 (macro)
       type: f1_macro
-      value: 0.
+      value: 0.8905814580868343
 
 ---
 # relbert/relbert-roberta-base-nce-semeval2012-0
 
-RelBERT based on [roberta-
+RelBERT based on [roberta-large](https://huggingface.co/roberta-large) fine-tuned on [relbert/semeval2012_relational_similarity](https://huggingface.co/datasets/relbert/semeval2012_relational_similarity) (see the [`relbert`](https://github.com/asahi417/relbert) for more detail of fine-tuning).
 This model achieves the following results on the relation understanding tasks:
 - Analogy Question ([dataset](https://huggingface.co/datasets/relbert/analogy_questions), [full result](https://huggingface.co/relbert/relbert-roberta-base-nce-semeval2012-0/raw/main/analogy.forward.json)):
-    - Accuracy on SAT (full): 0.
-    - Accuracy on SAT: 0.
-    - Accuracy on BATS: 0.
-    - Accuracy on U2: 0.
-    - Accuracy on U4: 0.
-    - Accuracy on Google: 0.
-    - Accuracy on ConceptNet Analogy: 0.
-    - Accuracy on T-Rex Analogy: 0.
-    - Accuracy on NELL-ONE Analogy: 0.
+    - Accuracy on SAT (full): 0.6818181818181818
+    - Accuracy on SAT: 0.6824925816023739
+    - Accuracy on BATS: 0.783212896053363
+    - Accuracy on U2: 0.6754385964912281
+    - Accuracy on U4: 0.6388888888888888
+    - Accuracy on Google: 0.934
+    - Accuracy on ConceptNet Analogy: 0.43288590604026844
+    - Accuracy on T-Rex Analogy: 0.6775956284153005
+    - Accuracy on NELL-ONE Analogy: 0.605
 - Lexical Relation Classification ([dataset](https://huggingface.co/datasets/relbert/lexical_relation_classification), [full result](https://huggingface.co/relbert/relbert-roberta-base-nce-semeval2012-0/raw/main/classification.json)):
-    - Micro F1 score on BLESS: 0.
-    - Micro F1 score on CogALexV: 0.
-    - Micro F1 score on EVALution: 0.
-    - Micro F1 score on K&H+N: 0.
-    - Micro F1 score on ROOT09: 0.
+    - Micro F1 score on BLESS: 0.9148711767364774
+    - Micro F1 score on CogALexV: 0.8485915492957746
+    - Micro F1 score on EVALution: 0.6690140845070423
+    - Micro F1 score on K&H+N: 0.9508937886902692
+    - Micro F1 score on ROOT09: 0.8918834221247258
 - Relation Mapping ([dataset](https://huggingface.co/datasets/relbert/relation_mapping), [full result](https://huggingface.co/relbert/relbert-roberta-base-nce-semeval2012-0/raw/main/relation_mapping.json)):
-    - Accuracy on Relation Mapping: 0.
+    - Accuracy on Relation Mapping: 0.8133333333333334
 
 
 ### Usage
@@ -224,7 +224,7 @@ vector = model.get_embedding(['Tokyo', 'Japan']) # shape of (n_dim, )
 
 ### Training hyperparameters
 
-- model: roberta-
+- model: roberta-large
 - max_length: 64
 - epoch: 10
 - batch: 32
@@ -239,7 +239,7 @@ vector = model.get_embedding(['Tokyo', 'Japan']) # shape of (n_dim, )
 - split_valid: validation
 - loss_function: nce
 - classification_loss: False
-- loss_function_config: {'temperature': 0.05, 'num_negative':
+- loss_function_config: {'temperature': 0.05, 'num_negative': 100, 'num_positive': 10}
 - augment_negative_by_positive: True
 
 See the full configuration at [config file](https://huggingface.co/relbert/relbert-roberta-base-nce-semeval2012-0/raw/main/finetuning_config.json).
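The README's Usage section is visible above only as hunk context (`vector = model.get_embedding(['Tokyo', 'Japan'])`). For reference, a minimal usage sketch assuming the `RelBERT` class from the [relbert](https://github.com/asahi417/relbert) package linked in the card; the constructor call is taken from that repository, not from this diff:

```python
# Minimal usage sketch (assumes `pip install relbert`; the RelBERT class is provided
# by https://github.com/asahi417/relbert and is not itself part of this diff).
from relbert import RelBERT

model = RelBERT("relbert/relbert-roberta-base-nce-semeval2012-0")
vector = model.get_embedding(['Tokyo', 'Japan'])  # shape of (n_dim, )
print(len(vector))
```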
analogy.forward.json
CHANGED
@@ -1 +1 @@
-{"semeval2012_relational_similarity/validation": 0.
+{"semeval2012_relational_similarity/validation": 0.7468354430379747, "scan/test": 0.2592821782178218, "sat_full/test": 0.6818181818181818, "sat/test": 0.6824925816023739, "u2/test": 0.6754385964912281, "u4/test": 0.6388888888888888, "google/test": 0.934, "bats/test": 0.783212896053363, "t_rex_relational_similarity/test": 0.6775956284153005, "conceptnet_relational_similarity/test": 0.43288590604026844, "nell_relational_similarity/test": 0.605}
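The updated analogy.forward.json is a flat mapping from benchmark split to accuracy. A small sketch for pulling the file from the Hub and printing the scores, using the standard `huggingface_hub` client and the repo id from the links above:

```python
# Sketch: download and inspect the per-benchmark analogy accuracies in analogy.forward.json.
import json
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="relbert/relbert-roberta-base-nce-semeval2012-0",
    filename="analogy.forward.json",
)
with open(path) as f:
    results = json.load(f)

for dataset, accuracy in sorted(results.items()):
    print(f"{dataset}: {accuracy:.4f}")
```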
classification.json
CHANGED
@@ -1 +1 @@
-{"lexical_relation_classification/BLESS": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.
+{"lexical_relation_classification/BLESS": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.9148711767364773, "test/f1_macro": 0.9119056356713013, "test/f1_micro": 0.9148711767364774, "test/p_macro": 0.908504306097206, "test/p_micro": 0.9148711767364773, "test/r_macro": 0.9162009423798682, "test/r_micro": 0.9148711767364773, "test/f1/attri": 0.9090909090909092, "test/p/attri": 0.9130434782608695, "test/r/attri": 0.9051724137931034, "test/f1/coord": 0.9614309670206819, "test/p/coord": 0.948180815876516, "test/r/coord": 0.9750566893424036, "test/f1/event": 0.8515742128935532, "test/p/event": 0.8145315487571702, "test/r/event": 0.8921465968586387, "test/f1/hyper": 0.9331395348837209, "test/p/hyper": 0.9497041420118343, "test/r/hyper": 0.9171428571428571, "test/f1/mero": 0.8878566688785667, "test/p/mero": 0.8791064388961892, "test/r/mero": 0.8967828418230563, "test/f1/random": 0.9283415212603762, "test/p/random": 0.9464594127806563, "test/r/random": 0.910904255319149}, "lexical_relation_classification/CogALexV": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.8485915492957746, "test/f1_macro": 0.6811794888962958, "test/f1_micro": 0.8485915492957746, "test/p_macro": 0.716549900315324, "test/p_micro": 0.8485915492957746, "test/r_macro": 0.6520962513564919, "test/r_micro": 0.8485915492957746, "test/f1/ANT": 0.7326440177252584, "test/p/ANT": 0.7823343848580442, "test/r/ANT": 0.6888888888888889, "test/f1/HYPER": 0.6017441860465116, "test/p/HYPER": 0.6764705882352942, "test/r/HYPER": 0.5418848167539267, "test/f1/PART_OF": 0.6872037914691943, "test/p/PART_OF": 0.7323232323232324, "test/r/PART_OF": 0.6473214285714286, "test/f1/RANDOM": 0.926255562619199, "test/p/RANDOM": 0.9013300340241261, "test/r/RANDOM": 0.952598888525662, "test/f1/SYN": 0.4580498866213152, "test/p/SYN": 0.49029126213592233, "test/r/SYN": 0.4297872340425532}, "lexical_relation_classification/EVALution": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.6690140845070423, "test/f1_macro": 0.6624009209291007, "test/f1_micro": 0.6690140845070423, "test/p_macro": 0.6762790775482931, "test/p_micro": 0.6690140845070423, "test/r_macro": 0.6547629962331695, "test/r_micro": 0.6690140845070423, "test/f1/Antonym": 0.7820512820512822, "test/p/Antonym": 0.8356164383561644, "test/r/Antonym": 0.7349397590361446, "test/f1/HasA": 0.6754098360655738, "test/p/HasA": 0.6319018404907976, "test/r/HasA": 0.7253521126760564, "test/f1/HasProperty": 0.8161434977578476, "test/p/HasProperty": 0.7867435158501441, "test/r/HasProperty": 0.8478260869565217, "test/f1/IsA": 0.6150670794633643, "test/p/IsA": 0.5843137254901961, "test/r/IsA": 0.6492374727668845, "test/f1/MadeOf": 0.6282051282051283, "test/p/MadeOf": 0.7, "test/r/MadeOf": 0.5697674418604651, "test/f1/PartOf": 0.706766917293233, "test/p/PartOf": 0.7768595041322314, "test/r/PartOf": 0.6482758620689655, "test/f1/Synonym": 0.41316270566727603, "test/p/Synonym": 0.4185185185185185, "test/r/Synonym": 0.40794223826714804}, "lexical_relation_classification/K&H+N": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.9508937886902692, "test/f1_macro": 0.8677983904224069, "test/f1_micro": 0.9508937886902692, "test/p_macro": 0.8771720388507485, "test/p_micro": 0.9508937886902692, "test/r_macro": 0.8593517976091283, "test/r_micro": 0.9508937886902692, "test/f1/false": 0.960130428338521, "test/p/false": 0.9599881446354476, "test/r/false": 0.9602727542247258, "test/f1/hypo": 0.9278557114228457, "test/p/hypo": 0.9706498951781971, "test/r/hypo": 0.8886756238003839, "test/f1/mero": 0.6263048016701461, "test/p/mero": 0.6276150627615062, "test/r/mero": 0.625, "test/f1/sibl": 0.956902620258115, "test/p/sibl": 0.9504350528278434, "test/r/sibl": 0.9634588124114034}, "lexical_relation_classification/ROOT09": {"classifier_config": {"activation": "relu", "alpha": 0.0001, "batch_size": "auto", "beta_1": 0.9, "beta_2": 0.999, "early_stopping": false, "epsilon": 1e-08, "hidden_layer_sizes": [100], "learning_rate": "constant", "learning_rate_init": 0.001, "max_fun": 15000, "max_iter": 200, "momentum": 0.9, "n_iter_no_change": 10, "nesterovs_momentum": true, "power_t": 0.5, "random_state": 0, "shuffle": true, "solver": "adam", "tol": 0.0001, "validation_fraction": 0.1, "verbose": false, "warm_start": false}, "test/accuracy": 0.8918834221247258, "test/f1_macro": 0.8905814580868343, "test/f1_micro": 0.8918834221247258, "test/p_macro": 0.8915743175976042, "test/p_micro": 0.8918834221247258, "test/r_macro": 0.8896098856499345, "test/r_micro": 0.8918834221247258, "test/f1/COORD": 0.971709717097171, "test/p/COORD": 0.9753086419753086, "test/r/COORD": 0.9681372549019608, "test/f1/HYPER": 0.8047117172969622, "test/p/HYPER": 0.8072139303482587, "test/r/HYPER": 0.8022249690976514, "test/f1/RANDOM": 0.8953229398663697, "test/p/RANDOM": 0.8922003804692454, "test/r/RANDOM": 0.8984674329501916}}
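The `classifier_config` recorded for every dataset in classification.json matches scikit-learn's default `MLPClassifier` settings (one hidden layer of 100 units, relu, adam, max_iter 200). Below is only a rough sketch of such a probe; the relation-embedding features are random placeholders, since the actual feature construction from word-pair embeddings is not shown in this diff:

```python
# Sketch: an MLP probe with the hyperparameters recorded in classification.json.
# X_train here is a random stand-in for relation embeddings of word pairs.
import numpy as np
from sklearn.neural_network import MLPClassifier

rng = np.random.default_rng(0)
X_train = rng.normal(size=(200, 1024))   # placeholder 1024-d relation embeddings
y_train = rng.integers(0, 5, size=200)   # placeholder relation labels

clf = MLPClassifier(
    hidden_layer_sizes=(100,), activation="relu", solver="adam",
    alpha=0.0001, learning_rate_init=0.001, max_iter=200, random_state=0,
)
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
```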
config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "roberta-
+  "_name_or_path": "roberta-large",
   "architectures": [
     "RobertaModel"
   ],
@@ -9,14 +9,14 @@
   "eos_token_id": 2,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
-  "hidden_size":
+  "hidden_size": 1024,
   "initializer_range": 0.02,
-  "intermediate_size":
+  "intermediate_size": 4096,
   "layer_norm_eps": 1e-05,
   "max_position_embeddings": 514,
   "model_type": "roberta",
-  "num_attention_heads":
-  "num_hidden_layers":
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
   "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "relbert_config": {
@@ -24,7 +24,7 @@
     "template": "Today, I finally discovered the relation between <subj> and <obj> : <subj> is the <mask> of <obj>"
   },
   "torch_dtype": "float32",
-  "transformers_version": "4.
+  "transformers_version": "4.26.1",
   "type_vocab_size": 1,
   "use_cache": true,
   "vocab_size": 50265
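The config.json changes above switch the encoder dimensions to roberta-large values (hidden size 1024, 24 layers, 16 heads). A quick sketch to confirm them from the published config using the standard `transformers` API:

```python
# Sketch: read back the committed config and check the roberta-large dimensions above.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("relbert/relbert-roberta-base-nce-semeval2012-0")
print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)  # 1024 24 16
print(config.architectures)  # ['RobertaModel']
```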
finetuning_config.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "template": "Today, I finally discovered the relation between <subj> and <obj> : <subj> is the <mask> of <obj>",
-  "model": "roberta-
+  "model": "roberta-large",
   "max_length": 64,
   "epoch": 10,
   "batch": 32,
@@ -17,7 +17,7 @@
   "classification_loss": false,
   "loss_function_config": {
     "temperature": 0.05,
-    "num_negative":
+    "num_negative": 100,
     "num_positive": 10
   },
   "augment_negative_by_positive": true
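finetuning_config.json fixes an NCE loss with temperature 0.05, 100 negatives, and 10 positives per relation. The exact objective lives in the relbert codebase; the snippet below is only a generic InfoNCE-style sketch showing how those three settings typically enter such a loss, not the repository's implementation:

```python
# Generic InfoNCE-style sketch (NOT the exact relbert objective): an anchor relation
# embedding is contrasted against positive and negative relation embeddings, with
# cosine similarities scaled by the temperature from finetuning_config.json.
import torch
import torch.nn.functional as F

def nce_style_loss(anchor, positives, negatives, temperature=0.05):
    # anchor: (d,), positives: (num_positive, d), negatives: (num_negative, d)
    pos_sim = F.cosine_similarity(anchor.unsqueeze(0), positives) / temperature
    neg_sim = F.cosine_similarity(anchor.unsqueeze(0), negatives) / temperature
    # Contrast each positive against all negatives; the positive sits at index 0.
    logits = torch.cat(
        [pos_sim.unsqueeze(1), neg_sim.unsqueeze(0).expand(len(positives), -1)], dim=1
    )
    labels = torch.zeros(len(positives), dtype=torch.long)
    return F.cross_entropy(logits, labels)

anchor = torch.randn(1024)
loss = nce_style_loss(anchor, torch.randn(10, 1024), torch.randn(100, 1024))
print(loss.item())
```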
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:3222067087f440c0c63e7bec1c581c12ba961c9c47a777787f99bad40f363360
+size 1421575277
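pytorch_model.bin is stored through Git LFS, so the diff only updates the pointer (object hash and byte size). A sketch for checking a locally downloaded copy against that pointer; the local path is an assumption:

```python
# Sketch: verify a downloaded pytorch_model.bin against the LFS pointer above.
import hashlib
import os

expected_sha256 = "3222067087f440c0c63e7bec1c581c12ba961c9c47a777787f99bad40f363360"
expected_size = 1421575277
path = "pytorch_model.bin"  # path to the downloaded file (assumed)

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

print("size ok:", os.path.getsize(path) == expected_size)
print("sha256 ok:", h.hexdigest() == expected_sha256)
```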
relation_mapping.json
CHANGED
The diff for this file is too large to render; see the raw diff.
special_tokens_map.json
CHANGED
@@ -1 +1,15 @@
-{
+{
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
tokenizer.json
CHANGED
@@ -53,7 +53,8 @@
   "pre_tokenizer": {
     "type": "ByteLevel",
     "add_prefix_space": false,
-    "trim_offsets": true
+    "trim_offsets": true,
+    "use_regex": true
   },
   "post_processor": {
     "type": "RobertaProcessing",
@@ -71,7 +72,8 @@
   "decoder": {
     "type": "ByteLevel",
     "add_prefix_space": true,
-    "trim_offsets": true
+    "trim_offsets": true,
+    "use_regex": true
   },
   "model": {
     "type": "BPE",
tokenizer_config.json
CHANGED
@@ -1 +1,16 @@
-{
+{
+  "add_prefix_space": false,
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "errors": "replace",
+  "mask_token": "<mask>",
+  "model_max_length": 512,
+  "name_or_path": "roberta-large",
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "special_tokens_map_file": null,
+  "tokenizer_class": "RobertaTokenizer",
+  "trim_offsets": true,
+  "unk_token": "<unk>"
+}
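With special_tokens_map.json and tokenizer_config.json now fully populated for `RobertaTokenizer`, a quick sketch to confirm the loaded tokenizer reflects them:

```python
# Sketch: load the tokenizer and check the special tokens and max length recorded in
# the special_tokens_map.json / tokenizer_config.json diffs above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("relbert/relbert-roberta-base-nce-semeval2012-0")
print(type(tokenizer).__name__)  # RobertaTokenizerFast (or RobertaTokenizer)
print(tokenizer.mask_token, tokenizer.pad_token, tokenizer.model_max_length)  # <mask> <pad> 512
```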