Add evaluation results on the adversarialQA config of adversarial_qa

#1
by autoevaluator HF staff - opened
Files changed (1)
  1. README.md +18 -1
README.md CHANGED
@@ -4,7 +4,24 @@ tags:
4
  - generated_from_trainer
5
  model-index:
6
  - name: rob-base-superqa
7
- results: []
8
  task:
9
  - question-answering
10
  datasets:
4
  - generated_from_trainer
5
  model-index:
6
  - name: rob-base-superqa
7
+ results:
8
+ - task:
9
+ type: question-answering
10
+ name: Question Answering
11
+ dataset:
12
+ name: adversarial_qa
13
+ type: adversarial_qa
14
+ config: adversarialQA
15
+ split: validation
16
+ metrics:
17
+ - name: Exact Match
18
+ type: exact_match
19
+ value: 43.8667
20
+ verified: true
21
+ - name: F1
22
+ type: f1
23
+ value: 55.135
24
+ verified: true
25
  task:
26
  - question-answering
27
  datasets: