asahi417 committed on
Commit 1bce5f8
1 Parent(s): db9b2cd

model update

README.md CHANGED
@@ -14,7 +14,7 @@ model-index:
  metrics:
  - name: Accuracy
  type: accuracy
- value: 0.7843055555555556
+ value: None
  - task:
  name: Analogy Questions (SAT full)
  type: multiple-choice-qa
@@ -25,7 +25,7 @@ model-index:
  metrics:
  - name: Accuracy
  type: accuracy
- value: 0.4786096256684492
+ value: None
  - task:
  name: Analogy Questions (SAT)
  type: multiple-choice-qa
@@ -36,7 +36,7 @@ model-index:
  metrics:
  - name: Accuracy
  type: accuracy
- value: 0.47774480712166173
+ value: None
  - task:
  name: Analogy Questions (BATS)
  type: multiple-choice-qa
@@ -47,7 +47,7 @@ model-index:
  metrics:
  - name: Accuracy
  type: accuracy
- value: 0.6842690383546415
+ value: None
  - task:
  name: Analogy Questions (Google)
  type: multiple-choice-qa
@@ -58,7 +58,7 @@ model-index:
  metrics:
  - name: Accuracy
  type: accuracy
- value: 0.824
+ value: None
  - task:
  name: Analogy Questions (U2)
  type: multiple-choice-qa
@@ -69,7 +69,7 @@ model-index:
  metrics:
  - name: Accuracy
  type: accuracy
- value: 0.4692982456140351
+ value: None
  - task:
  name: Analogy Questions (U4)
  type: multiple-choice-qa
@@ -80,7 +80,7 @@ model-index:
  metrics:
  - name: Accuracy
  type: accuracy
- value: 0.4675925925925926
+ value: None
  - task:
  name: Lexical Relation Classification (BLESS)
  type: classification
@@ -91,10 +91,10 @@ model-index:
  metrics:
  - name: F1
  type: f1
- value: 0.9056802772336899
+ value: None
  - name: F1 (macro)
  type: f1_macro
- value: 0.8978336210952347
+ value: None
  - task:
  name: Lexical Relation Classification (CogALexV)
  type: classification
@@ -105,10 +105,10 @@ model-index:
  metrics:
  - name: F1
  type: f1
- value: 0.8429577464788731
+ value: None
  - name: F1 (macro)
  type: f1_macro
- value: 0.6640675224037798
+ value: None
  - task:
  name: Lexical Relation Classification (EVALution)
  type: classification
@@ -119,10 +119,10 @@ model-index:
  metrics:
  - name: F1
  type: f1
- value: 0.6690140845070423
+ value: None
  - name: F1 (macro)
  type: f1_macro
- value: 0.6550072748489306
+ value: None
  - task:
  name: Lexical Relation Classification (K&H+N)
  type: classification
@@ -133,10 +133,10 @@ model-index:
  metrics:
  - name: F1
  type: f1
- value: 0.960283786603603
+ value: None
  - name: F1 (macro)
  type: f1_macro
- value: 0.8858405261189973
+ value: None
  - task:
  name: Lexical Relation Classification (ROOT09)
  type: classification
@@ -147,10 +147,10 @@ model-index:
  metrics:
  - name: F1
  type: f1
- value: 0.8884362268881228
+ value: None
  - name: F1 (macro)
  type: f1_macro
- value: 0.8833158049359685
+ value: None

  ---
  # relbert/relbert-roberta-large-semeval2012-v6-mask-prompt-b-nce-0
@@ -160,20 +160,20 @@ RelBERT fine-tuned from [roberta-large](https://huggingface.co/roberta-large) on
  Fine-tuning is done via [RelBERT](https://github.com/asahi417/relbert) library (see the repository for more detail).
  It achieves the following results on the relation understanding tasks:
  - Analogy Question ([dataset](https://huggingface.co/datasets/relbert/analogy_questions), [full result](https://huggingface.co/relbert/relbert-roberta-large-semeval2012-v6-mask-prompt-b-nce-0/raw/main/analogy.json)):
- - Accuracy on SAT (full): 0.4786096256684492
- - Accuracy on SAT: 0.47774480712166173
- - Accuracy on BATS: 0.6842690383546415
- - Accuracy on U2: 0.4692982456140351
- - Accuracy on U4: 0.4675925925925926
- - Accuracy on Google: 0.824
+ - Accuracy on SAT (full): None
+ - Accuracy on SAT: None
+ - Accuracy on BATS: None
+ - Accuracy on U2: None
+ - Accuracy on U4: None
+ - Accuracy on Google: None
  - Lexical Relation Classification ([dataset](https://huggingface.co/datasets/relbert/lexical_relation_classification), [full result](https://huggingface.co/relbert/relbert-roberta-large-semeval2012-v6-mask-prompt-b-nce-0/raw/main/classification.json)):
- - Micro F1 score on BLESS: 0.9056802772336899
- - Micro F1 score on CogALexV: 0.8429577464788731
- - Micro F1 score on EVALution: 0.6690140845070423
- - Micro F1 score on K&H+N: 0.960283786603603
- - Micro F1 score on ROOT09: 0.8884362268881228
+ - Micro F1 score on BLESS: None
+ - Micro F1 score on CogALexV: None
+ - Micro F1 score on EVALution: None
+ - Micro F1 score on K&H+N: None
+ - Micro F1 score on ROOT09: None
  - Relation Mapping ([dataset](https://huggingface.co/datasets/relbert/relation_mapping), [full result](https://huggingface.co/relbert/relbert-roberta-large-semeval2012-v6-mask-prompt-b-nce-0/raw/main/relation_mapping.json)):
- - Accuracy on Relation Mapping: 0.7843055555555556
+ - Accuracy on Relation Mapping: None


  ### Usage
@@ -202,7 +202,7 @@ The following hyperparameters were used during training:
  - classification_loss: False
  - temperature_nce_constant: 0.05
  - temperature_nce_rank: {'min': 0.01, 'max': 0.05, 'type': 'linear'}
- - epoch: 3
+ - epoch: 6
  - batch: 128
  - lr: 5e-06
  - lr_decay: False
@@ -210,7 +210,7 @@ The following hyperparameters were used during training:
  - weight_decay: 0
  - random_seed: 0
  - exclude_relation: None
- - n_sample: 320
+ - n_sample: 640
  - gradient_accumulation: 8
  - relation_level: None

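For context on the card's `### Usage` section (unchanged in this commit): a minimal sketch of loading the updated checkpoint through the RelBERT library linked in the README. The `RelBERT` class and `get_embedding` call are assumptions based on that library's documented interface, not part of this diff.

```python
# Minimal sketch, assuming the relbert package exposes a RelBERT wrapper
# with a get_embedding() method as documented in the library repository.
from relbert import RelBERT

model = RelBERT("relbert/relbert-roberta-large-semeval2012-v6-mask-prompt-b-nce-0")
# Embed one word pair; RelBERT returns a single relation vector per pair.
vector = model.get_embedding(["Tokyo", "Japan"])
print(len(vector))  # hidden size of roberta-large, i.e. 1024
```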
config.json CHANGED
@@ -1,5 +1,5 @@
  {
- "_name_or_path": "relbert_output/models/semeval2012-v6.b.nce_logout.mask.roberta-large.0.000005.8.0.05.640.0/best_model",
+ "_name_or_path": "roberta-large",
  "architectures": [
  "RobertaModel"
  ],
tokenizer_config.json CHANGED
@@ -6,7 +6,7 @@
  "errors": "replace",
  "mask_token": "<mask>",
  "model_max_length": 512,
- "name_or_path": "relbert_output/models/semeval2012-v6.b.nce_logout.mask.roberta-large.0.000005.8.0.05.640.0/best_model",
+ "name_or_path": "roberta-large",
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "special_tokens_map_file": null,
trainer_config.json CHANGED
@@ -1 +1 @@
- {"model": "roberta-large", "max_length": 64, "mode": "mask", "data": "relbert/semeval2012_relational_similarity_v6", "split": "train", "split_eval": "validation", "template_mode": "manual", "template": "Today, I finally discovered the relation between <subj> and <obj> : <obj> is <subj>'s <mask>", "loss_function": "nce_logout", "classification_loss": false, "temperature_nce_constant": 0.05, "temperature_nce_rank": {"min": 0.01, "max": 0.05, "type": "linear"}, "epoch": 3, "batch": 128, "lr": 5e-06, "lr_decay": false, "lr_warmup": 1, "weight_decay": 0, "random_seed": 0, "exclude_relation": null, "n_sample": 320, "gradient_accumulation": 8, "relation_level": null}
+ {"model": "roberta-large", "max_length": 64, "mode": "mask", "data": "relbert/semeval2012_relational_similarity_v6", "split": "train", "split_eval": "validation", "template_mode": "manual", "template": "Today, I finally discovered the relation between <subj> and <obj> : <obj> is <subj>'s <mask>", "loss_function": "nce_logout", "classification_loss": false, "temperature_nce_constant": 0.05, "temperature_nce_rank": {"min": 0.01, "max": 0.05, "type": "linear"}, "epoch": 6, "batch": 128, "lr": 5e-06, "lr_decay": false, "lr_warmup": 1, "weight_decay": 0, "random_seed": 0, "exclude_relation": null, "n_sample": 640, "gradient_accumulation": 8, "relation_level": null}
validation_loss.json CHANGED
@@ -1 +1 @@
- {"split": "validation", "loss": 13.770311405474391, "data": "relbert/semeval2012_relational_similarity_v6", "exclude_relation": null, "relation_level": null}
+ {"split": "validation", "loss": 13.479636465386397, "data": "relbert/semeval2012_relational_similarity_v6", "exclude_relation": null, "relation_level": null}