Add evaluation results on the squad_v2 config of squad_v2
#1 opened by autoevaluator (HF staff)

README.md CHANGED
@@ -2,6 +2,26 @@
 datasets:
 - squad_v2
 license: cc-by-4.0
+model-index:
+- name: deepset/minilm-uncased-squad2
+  results:
+  - task:
+      type: question-answering
+      name: Question Answering
+    dataset:
+      name: squad_v2
+      type: squad_v2
+      config: squad_v2
+      split: validation
+    metrics:
+    - name: Exact Match
+      type: exact_match
+      value: 76.1921
+      verified: true
+    - name: F1
+      type: f1
+      value: 79.5483
+      verified: true
 ---

 # MiniLM-L12-H384-uncased for QA
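For context (not part of the autoevaluator PR itself): the metrics above describe the deepset/minilm-uncased-squad2 checkpoint, which can be loaded with the transformers question-answering pipeline. The snippet below is a minimal sketch, assuming transformers is installed; the question and context strings are invented purely for illustration.

```python
# Minimal sketch (assumption: the transformers library is installed).
# The question/context below are made-up examples, not from the PR.
from transformers import pipeline

qa = pipeline("question-answering", model="deepset/minilm-uncased-squad2")

answer = qa(
    question="Which dataset split was the model evaluated on?",
    context=(
        "The model-index metadata reports Exact Match and F1 on the "
        "validation split of squad_v2."
    ),
    # squad_v2 contains unanswerable questions; this flag lets the pipeline
    # return an empty answer when nothing in the context fits.
    handle_impossible_answer=True,
)
print(answer)  # dict with 'score', 'start', 'end', 'answer'
```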