# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ANLS - Average Normalized Levenshtein Similarity"""

import datasets
import evaluate

# compute_score.py ships alongside this script in the metric repository.
from compute_score import compute_score


_CITATION = """\
@article{levenshtein1966binary,
    title = {Binary codes capable of correcting deletions, insertions, and reversals},
    journal = {Soviet physics doklady},
    volume = {10},
    number = {8},
    pages = {707--710},
    year = {1966},
    url = {https://nymity.ch/sybilhunting/pdf/Levenshtein1966a.pdf},
    author = {V. I. Levenshtein},
}
"""

_DESCRIPTION = """\
ANLS refers to the Average Normalized Levenshtein Similarity. Instead of
requiring an exact string match, it scores each prediction by its normalized
Levenshtein similarity to the closest gold answer, averaged over all questions.
"""


_KWARGS_DESCRIPTION = """
Computes Average Normalized Levenshtein Similarity (ANLS).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair as given in the references (see below)
        - 'prediction_text': the text of the answer
    references: List of question-answers dictionaries with the following key-values:
        - 'id': id of the question-answer pair (see above),
        - 'answers': a Dict in the SQuAD dataset format
            {
                'text': list of possible texts for the answer, as a list of strings
                'answer_start': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    'anls_score': The ANLS score of the predicted answers versus the gold answers
Examples:
    >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
    >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
    >>> anls_metric = evaluate.load("anls")
    >>> results = anls_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'anls_score': 100.0}
"""


@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Anls(evaluate.Metric):
    def _info(self):
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {"id": datasets.Value("string"), "prediction_text": datasets.Value("string")},
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            )
        )

    def _compute(self, predictions, references):
        # Map each question id to its predicted answer text.
        prediction_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # Re-shape the references into the SQuAD-style nested structure
        # expected by compute_score; answer_start offsets are ignored.
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        return compute_score(dataset=dataset, predictions=prediction_dict)
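

# A minimal smoke test mirroring the doctest above; assumes this script is
# resolvable via `evaluate.load("anls")` (e.g. run from the metric repository).
if __name__ == "__main__":
    predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
    references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
    anls_metric = evaluate.load("anls")
    print(anls_metric.compute(predictions=predictions, references=references))  # {'anls_score': 100.0}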