eladsegal committed
Commit 94ecabc
1 Parent(s): b4f5fc1

Create scrolls.py

Files changed (1):
  1. metrics/scrolls.py +261 -0
metrics/scrolls.py ADDED
@@ -0,0 +1,261 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Scrolls benchmark metric. """

from collections import defaultdict
from copy import deepcopy
import datasets

# fmt: off
from .rouge import compute_rouge, postprocess_text as rouge_postprocess_text  # From: https://huggingface.co/datasets/tau/scrolls/raw/main/metrics/rouge.py
from .exact_match import compute_exact_match  # From: https://huggingface.co/datasets/tau/scrolls/raw/main/metrics/exact_match.py
from .f1 import compute_f1  # From: https://huggingface.co/datasets/tau/scrolls/raw/main/metrics/f1.py
# fmt: on

_CITATION = """\
# TODO: Add citation
"""

_DESCRIPTION = """\
Scrolls: Standardized CompaRison Over Long Language Sequences
Recent progress in NLP has created models that can process long inputs consisting of thousands of words.
But how well do these models understand the information in the input text?
The Scrolls benchmark aims to measure the ability of models to semantically understand long texts.
"""

_KWARGS_DESCRIPTION = """
Compute Scrolls evaluation metric associated to each Scrolls dataset.
Args:
    predictions: list of predictions to score.
        Each prediction should be a string.
    references: list of lists of references for each example.
        Each reference should be a string.
Returns: depending on the Scrolls subset, one or several of:
    "exact_match": Exact Match score
    "f1": F1 score
    "rouge": ROUGE score
Examples:
    predictions = ["exact match example", "hello there", "general kenobi"]  # List[str]
    references = [["exact match example"], ["hello", "hi there"], ["commander kenobi"]]  # List[List[str]]

    >>> scrolls_metric = datasets.load_metric('src/metrics/scrolls.py', 'gov_report')  # 'gov_report' or any of ["qmsum", "summ_screen_fd"]
    >>> results = scrolls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'rouge/rouge1': 72.2222, 'rouge/rouge2': 33.3333, 'rouge/rougeL': 72.2222, 'rouge/rougeLsum': 72.2222, 'rouge/geometric_mean': 55.8136, 'num_predicted': 3, 'mean_prediction_length_characters': 14.6667}

    >>> scrolls_metric = datasets.load_metric('src/metrics/scrolls.py', 'contract_nli')  # 'contract_nli' or any of ["quality", "quality_hard"]
    >>> results = scrolls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'exact_match': 33.3333, 'num_predicted': 3, 'mean_prediction_length_characters': 14.6667}

    >>> scrolls_metric = datasets.load_metric('src/metrics/scrolls.py', 'narrative_qa')  # 'narrative_qa' or any of ["qasper"]
    >>> results = scrolls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'f1': 72.2222, 'num_predicted': 3, 'mean_prediction_length_characters': 14.6667}
"""

DATASET_TO_METRICS = {
    "contract_nli": {"metrics_to_compute": ["exact_match"], "score": "exact_match"},
    "gov_report": {"metrics_to_compute": ["rouge"], "score": "rouge/geometric_mean"},
    "narrative_qa": {"metrics_to_compute": ["f1"], "score": "f1"},
    "qasper": {"metrics_to_compute": ["f1"], "score": "f1"},
    "qmsum": {"metrics_to_compute": ["rouge"], "score": "rouge/geometric_mean"},
    "summ_screen_fd": {"metrics_to_compute": ["rouge"], "score": "rouge/geometric_mean"},
    "quality": {"metrics_to_compute": ["exact_match"], "score": "exact_match"},
    "quality_hard": {"metrics_to_compute": ["exact_match"], "score": "exact_match"},
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Scrolls(datasets.Metric):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self._compute_helper_kwargs_fn = {
            "rouge": lambda: {
                "metric_fn": compute_rouge,
                "agg_fn": max,
                "metric_fn_kwargs": {"use_stemmer": False},
                "metric_returns_per_example": True,
                "transform_single_input_fn": lambda text: rouge_postprocess_text(text),
                "transform_result_fn": lambda output: {
                    key: (value[0] if isinstance(value, list) else value).fmeasure * 100
                    for key, value in output.items()
                },
                "transform_aggregated_result_fn": lambda output: output.update(
                    {"geometric_mean": (output["rouge1"] * output["rouge2"] * output["rougeL"]) ** (1.0 / 3.0)}
                )
                or output,
            },
            "exact_match": lambda: {
                "metric_fn": compute_exact_match,
                "agg_fn": None,  # compute_exact_match already takes max
                "transform_result_fn": lambda output: {None: output},
            },
            "f1": lambda: {
                "metric_fn": compute_f1,
                "agg_fn": None,  # compute_f1 already takes max
                "transform_result_fn": lambda output: {None: output},
            },
        }

        custom_metrics = (
            [metric for metric in self.config_name.split(",") if len(metric) > 0]
            if self.config_name.startswith(",")
            else None
        )
        if custom_metrics is not None:
            for metric in custom_metrics:
                if metric not in self._compute_helper_kwargs_fn:
                    raise KeyError(
                        f"You should supply a metric name selected in {list(self._compute_helper_kwargs_fn.keys())}"
                    )
            self._metrics_to_compute = custom_metrics
        else:
            if self.config_name not in DATASET_TO_METRICS:
                raise KeyError(f"You should supply a configuration name selected in {list(DATASET_TO_METRICS.keys())}")
            self._metrics_to_compute = DATASET_TO_METRICS[self.config_name]["metrics_to_compute"]

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
        )

    def convert_from_map_format(self, id_to_pred, id_to_labels):
        index_to_id = list(id_to_pred.keys())
        predictions = [id_to_pred[id_] for id_ in index_to_id]
        references = [id_to_labels[id_] for id_ in index_to_id]
        return {"predictions": predictions, "references": references}

    def _compute(self, predictions, references):
        metrics = {}
        for metric in self._metrics_to_compute:
            result = _compute_helper(
                deepcopy(predictions),
                deepcopy(references),
                **self._compute_helper_kwargs_fn[metric](),
            )
            metrics.update(
                {(f"{metric}/{key}" if key is not None else metric): value for key, value in result.items()}
            )
        metrics["num_predicted"] = len(predictions)
        prediction_lengths = [len(prediction) for prediction in predictions]
        metrics["mean_prediction_length_characters"] = sum(prediction_lengths) / len(prediction_lengths)

        metrics = {key: round(value, 4) for key, value in metrics.items()}

        if self.config_name in DATASET_TO_METRICS:
            metrics["score"] = metrics[DATASET_TO_METRICS[self.config_name]["score"]]

        return metrics


def _compute_helper(
    predictions,
    references,
    metric_fn,
    agg_fn,
    metric_fn_kwargs=None,
    transform_single_input_fn=None,
    transform_result_fn=None,
    transform_aggregated_result_fn=None,
    metric_returns_per_example=False,
):
    if metric_fn_kwargs is None:
        metric_fn_kwargs = {}

    if agg_fn is None:
        assert metric_returns_per_example is False

    if transform_single_input_fn is not None:
        predictions = [transform_single_input_fn(prediction) for prediction in predictions]
        references = [
            [transform_single_input_fn(reference) for reference in reference_list] for reference_list in references
        ]

    if transform_result_fn is None:
        transform_result_fn = lambda x: x
        do_transform_result = False
    else:
        do_transform_result = True

    if transform_aggregated_result_fn is None:
        transform_aggregated_result_fn = lambda x: x

    if agg_fn is not None:
        # Required when the metric doesn't do the aggregation we need
        scores = defaultdict(list)
        if metric_returns_per_example is False:
            # If when given a list of prediction and references the metric returns an aggregated score,
            # we need to compute the metric for each prediction and reference and then aggregate the results.
            # This is only an issue when we want to get the best aggregated score (e.g. max) for prediction
            # with multiple references.
            for prediction, reference_list in zip(predictions, references):
                prediction_scores = defaultdict(list)
                for reference in reference_list:
                    result = transform_result_fn(metric_fn([prediction], [reference], **metric_fn_kwargs))
                    for key in result:
                        prediction_scores[key].append(result[key])
                for key in prediction_scores:
                    scores[key].append(agg_fn(prediction_scores[key]))
        else:
            # Flatten the references and then aggregate per prediction with agg_fn
            mapping = [[] for _ in range(len(predictions))]
            flattened_predictions = []
            flattened_references = []
            for i, prediction in enumerate(predictions):
                for reference in references[i]:
                    flattened_predictions.append(prediction)
                    flattened_references.append(reference)
                    mapping[i].append(len(flattened_references) - 1)

            results = metric_fn(flattened_predictions, flattened_references, **metric_fn_kwargs)
            if isinstance(results, dict):
                # Convert a dictionary with lists per key to a list with dictionary with the same keys per element
                results_list = [{k: None for k in results} for _ in range(len(flattened_predictions))]
                for k, v in results.items():
                    for i in range(len(v)):
                        results_list[i][k] = v[i]
            else:
                results_list = results

            if do_transform_result:
                for i in range(len(results_list)):
                    results_list[i] = transform_result_fn(results_list[i])

            for reference_indexes in mapping:
                prediction_scores = defaultdict(list)
                for reference_index in reference_indexes:
                    result = results_list[reference_index]
                    for key in result:
                        prediction_scores[key].append(result[key])
                for key in prediction_scores:
                    scores[key].append(agg_fn(prediction_scores[key]))

        return transform_aggregated_result_fn({key: sum(value) / len(value) for key, value in scores.items()})
    else:
        return transform_aggregated_result_fn(
            transform_result_fn(metric_fn(predictions, references, **metric_fn_kwargs))
        )
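
A minimal usage sketch of the metric added in this commit. It assumes the file is saved as metrics/scrolls.py next to the rouge.py, exact_match.py, and f1.py helpers it imports, and a datasets version that still exposes load_metric; the example ids, texts, and printed values are purely illustrative and not part of scrolls.py.

# Usage sketch (illustrative; paths and example data are assumptions, not part of this commit)
import datasets

# Load the metric with a config name matching a SCROLLS subset; DATASET_TO_METRICS
# decides which metrics are computed and which key becomes the aggregated "score".
scrolls_metric = datasets.load_metric("metrics/scrolls.py", "narrative_qa")

# convert_from_map_format turns id -> prediction and id -> list-of-references maps
# into the predictions/references lists that compute() expects.
id_to_pred = {"ex1": "general kenobi", "ex2": "hello there"}                 # hypothetical ids/texts
id_to_labels = {"ex1": ["commander kenobi"], "ex2": ["hello", "hi there"]}
inputs = scrolls_metric.convert_from_map_format(id_to_pred, id_to_labels)

results = scrolls_metric.compute(**inputs)
print(results)  # e.g. {'f1': ..., 'num_predicted': 2, 'mean_prediction_length_characters': ..., 'score': ...}

Per the __init__ logic, a config name that starts with a comma (e.g. ",rouge,f1") bypasses DATASET_TO_METRICS and computes exactly the listed metrics; since such a name is not a dataset preset, no aggregated "score" key is added to the result in that case.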