eladsegal committed
Commit 21ec5fe
1 Parent(s): 51564ff

Update metrics/scrolls.py

Files changed (1)
  1. metrics/scrolls.py +55 -14
metrics/scrolls.py CHANGED
@@ -1,4 +1,4 @@
-""" Scrolls benchmark metric. """
+""" SCROLLS benchmark metric. """
 
 from collections import defaultdict
 from copy import deepcopy
@@ -15,10 +15,9 @@ _CITATION = """\
 """
 
 _DESCRIPTION = """\
-Scrolls: Standardized CompaRison Over Long Language Sequences
-Recent progress in NLP has created models that can process long inputs consisting of thousands of words.
-But how well do these models understand the information in the input text?
-The Scrolls benchmark aims to measure the ability of models to semantically understand long texts.
+SCROLLS: Standardized CompaRison Over Long Language Sequences.
+A suite of natural language datasets that require reasoning over long texts.
+https://scrolls-benchmark.com/
 """
 
 _KWARGS_DESCRIPTION = """
@@ -53,14 +52,46 @@ Examples:
 """
 
 DATASET_TO_METRICS = {
-    "contract_nli": {"metrics_to_compute": ["exact_match"], "score": "exact_match"},
-    "gov_report": {"metrics_to_compute": ["rouge"], "score": "rouge/geometric_mean"},
-    "narrative_qa": {"metrics_to_compute": ["f1"], "score": "f1"},
-    "qasper": {"metrics_to_compute": ["f1"], "score": "f1"},
-    "qmsum": {"metrics_to_compute": ["rouge"], "score": "rouge/geometric_mean"},
-    "summ_screen_fd": {"metrics_to_compute": ["rouge"], "score": "rouge/geometric_mean"},
-    "quality": {"metrics_to_compute": ["exact_match"], "score": "exact_match"},
-    "quality_hard": {"metrics_to_compute": ["exact_match"], "score": "exact_match"},
+    "contract_nli": {
+        "metrics_to_compute": ["exact_match"],
+        "scrolls_score_key": "exact_match",
+        "display_keys": ["exact_match"],
+    },
+    "gov_report": {
+        "metrics_to_compute": ["rouge"],
+        "scrolls_score_key": "rouge/geometric_mean",
+        "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
+    },
+    "narrative_qa": {
+        "metrics_to_compute": ["f1"],
+        "scrolls_score_key": "f1",
+        "display_keys": ["f1"],
+    },
+    "qasper": {
+        "metrics_to_compute": ["f1"],
+        "scrolls_score_key": "f1",
+        "display_keys": ["f1"],
+    },
+    "qmsum": {
+        "metrics_to_compute": ["rouge"],
+        "scrolls_score_key": "rouge/geometric_mean",
+        "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
+    },
+    "summ_screen_fd": {
+        "metrics_to_compute": ["rouge"],
+        "scrolls_score_key": "rouge/geometric_mean",
+        "display_keys": ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"],
+    },
+    "quality": {
+        "metrics_to_compute": ["exact_match"],
+        "scrolls_score_key": "exact_match",
+        "display_keys": ["exact_match"],
+    },
+    "quality_hard": {
+        "metrics_to_compute": ["exact_match"],
+        "scrolls_score_key": None,
+        "display_keys": ["exact_match"],
+    },
 }
 
 
@@ -153,7 +184,17 @@ class Scrolls(datasets.Metric):
         metrics = {key: round(value, 4) for key, value in metrics.items()}
 
         if self.config_name in DATASET_TO_METRICS:
-            metrics["score"] = metrics[DATASET_TO_METRICS[self.config_name]["score"]]
+            scrolls_score_key = DATASET_TO_METRICS[self.config_name]["scrolls_score_key"]
+            if scrolls_score_key is not None:
+                metrics["scrolls_score"] = metrics[scrolls_score_key]
+            else:
+                metrics["scrolls_score"] = None
+
+            display_keys = DATASET_TO_METRICS[self.config_name]["display_keys"]
+            metrics["display_keys"] = display_keys
+            metrics["display"] = []
+            for display_key in display_keys:
+                metrics["display"].append(metrics[display_key])
 
         return metrics
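For context, a minimal usage sketch of the metric after this change. This is not part of the commit: the local script path, the example strings, and the list-of-lists reference format are assumptions, and it uses the legacy datasets.load_metric API that datasets.Metric scripts like this one target.

import datasets

# Load the metric script from a local checkout; the config name selects
# the SCROLLS dataset, which determines the metrics and score keys.
metric = datasets.load_metric("metrics/scrolls.py", "gov_report")

# Hypothetical inputs; each prediction is assumed to come with a list of
# gold references.
predictions = ["The committee approved the annual budget."]
references = [["The committee passed the yearly budget."]]

results = metric.compute(predictions=predictions, references=references)

# With this commit, the single "score" key is replaced by three keys:
#   results["scrolls_score"] -> the official score (here rouge/geometric_mean;
#                               None for quality_hard)
#   results["display_keys"]  -> ["rouge/rouge1", "rouge/rouge2", "rouge/rougeL"]
#   results["display"]       -> the values of those keys, in the same order
print(results["scrolls_score"], results["display"])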