Sasha committed on
Commit 0a279d3 • 1 Parent(s): 6902f2e

initial metric card explorer commit

Files changed (7)
  1. README.md +6 -7
  2. app.py +69 -0
  3. bleu.py +111 -0
  4. bleu_metric_card.md +123 -0
  5. requirements.txt +2 -0
  6. rouge.py +130 -0
  7. rouge_metric_card.md +121 -0
README.md CHANGED
@@ -1,13 +1,12 @@
  ---
- title: Metric Explorer
- emoji: 🚀
- colorFrom: red
- colorTo: blue
- sdk: streamlit
- sdk_version: 1.2.0
+ title: Accuracy_metric
+ emoji: 📈
+ colorFrom: green
+ colorTo: red
+ sdk: gradio
+ sdk_version: 2.8.13
  app_file: app.py
  pinned: false
- license: cc-by-sa-4.0
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py ADDED
@@ -0,0 +1,69 @@
+ from bleu import Bleu
+ from rouge import Rouge
+ from datasets import load_metric
+ from pathlib import Path
+ import streamlit as st
+ import streamlit.components.v1 as components
+ # from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
+
+ # Local metric classes (not used directly below; the app loads the canonical
+ # metrics through datasets.load_metric).
+ rouge = Rouge()
+ bleu = Bleu()
+
+
+ def read_markdown_file(markdown_file):
+     return Path(markdown_file).read_text()
+
+
+ def compute(data):
+     # Note: this helper is not called anywhere below; the app calls
+     # metric.compute directly on the selected metric.
+     return metric.compute(predictions=data["predictions"], references=data["references"])["accuracy"]
+
+
+ with st.sidebar.expander("Metric", expanded=True):
+     metrics = ['rouge', 'bleu']
+     metric_name = st.selectbox(
+         "Choose metric to explore:",
+         metrics)
+
+ metric = load_metric(metric_name)
+ st.markdown("# You chose " + metric_name.upper())
+
+ st.markdown("## You can test it out below:")
+
+ reference = st.text_input('Input a reference sentence here:')
+ prediction = st.text_input('Input a prediction sentence here:')
+
+ # BLEU expects tokenized input: one token list per prediction, plus a list of
+ # reference token lists per prediction.
+ predictions = []
+ predictions.append(prediction.split())
+ # print(predictions)
+ references = []
+ references.append(reference.split())
+ # print(references)
+
+ if metric_name == "bleu":
+     score = metric.compute(predictions=predictions, references=[references])
+     col1, col2, col3 = st.columns(3)
+     col1.metric("BLEU", score['bleu'])
+     col2.metric("Brevity penalty", score['brevity_penalty'])
+     col3.metric('Length Ratio', score['length_ratio'])
+
+
+ if metric_name == "rouge":
+     # ROUGE takes untokenized strings (see rouge.py), so pass the raw sentences.
+     score = metric.compute(predictions=[prediction], references=[reference])
+     # print(score)
+     col1, col2, col3 = st.columns(3)
+     col1.metric("Rouge 1 Precision", score['rouge1'].mid.precision)
+     col2.metric("Rouge 1 Recall", score['rouge1'].mid.recall)
+     col3.metric("Rouge 1 FMeasure", score['rouge1'].mid.fmeasure)
+
+     col4, col5, col6 = st.columns(3)
+     col4.metric("Rouge 2 Precision", score['rouge2'].mid.precision)
+     col5.metric("Rouge 2 Recall", score['rouge2'].mid.recall)
+     col6.metric("Rouge 2 FMeasure", score['rouge2'].mid.fmeasure)
+
+
+ # col1.metric("BLEU", score['bleu'])
+ # col2.metric("Brevity penalty", score['brevity_penalty'])
+ # col3.metric('Length Ratio', score['length_ratio'])
+
+ st.markdown('===================================================================================')
+ # components.html("""<hr style="height:10px;border:none;color:#333;background-color:#333;" /> """)
+
+ st.markdown(read_markdown_file(metric_name + "_metric_card.md"))
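The two metrics expect differently shaped inputs, which is why app.py nests `references` an extra level for BLEU but passes plain strings to ROUGE. A minimal sketch of both call shapes, following the docstrings in bleu.py and rouge.py below (the sample sentences are only illustrative; `load_metric` is the same `datasets` loader the app uses):

```python
from datasets import load_metric

prediction = "hello there general kenobi"
reference = "hello there general kenobi"

# BLEU: one token list per prediction, and a list of reference token lists
# per prediction (several references per prediction are allowed).
bleu = load_metric("bleu")
bleu_results = bleu.compute(
    predictions=[prediction.split()],
    references=[[reference.split()]],
)
print(bleu_results["bleu"], bleu_results["brevity_penalty"])

# ROUGE: plain strings, one per prediction and one per reference.
rouge = load_metric("rouge")
rouge_results = rouge.compute(predictions=[prediction], references=[reference])
print(rouge_results["rouge1"].mid.fmeasure)
```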
bleu.py ADDED
@@ -0,0 +1,111 @@
+ """ BLEU metric. """
+
+ import datasets
+
+
+ _CITATION = """\
+ @INPROCEEDINGS{Papineni02bleu:a,
+     author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
+     title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
+     booktitle = {},
+     year = {2002},
+     pages = {311--318}
+ }
+ @inproceedings{lin-och-2004-orange,
+     title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
+     author = "Lin, Chin-Yew and
+       Och, Franz Josef",
+     booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
+     month = "aug 23{--}aug 27",
+     year = "2004",
+     address = "Geneva, Switzerland",
+     publisher = "COLING",
+     url = "https://www.aclweb.org/anthology/C04-1072",
+     pages = "501--507",
+ }
+ """
+
+ _DESCRIPTION = """\
+ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
+ Quality is considered to be the correspondence between a machine's output and that of a human: "the closer a machine translation is to a professional human translation,
+ the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
+ remains one of the most popular automated and inexpensive metrics.
+
+ Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
+ Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility and grammatical correctness
+ are not taken into account.
+
+ BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
+ representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
+ reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
+ reference translations will increase the BLEU score.
+ """
+
+ _KWARGS_DESCRIPTION = """
+ Computes BLEU score of translated segments against one or more references.
+ Args:
+     predictions: list of translations to score.
+         Each translation should be tokenized into a list of tokens.
+     references: list of lists of references for each translation.
+         Each reference should be tokenized into a list of tokens.
+     max_order: Maximum n-gram order to use when computing BLEU score.
+     smooth: Whether or not to apply Lin et al. 2004 smoothing.
+ Returns:
+     'bleu': bleu score,
+     'precisions': n-gram precisions (one value per order, up to max_order),
+     'brevity_penalty': brevity penalty,
+     'length_ratio': ratio of lengths,
+     'translation_length': translation length,
+     'reference_length': reference length
+ Examples:
+
+     >>> predictions = [
+     ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample
+     ...     ["foo", "bar", "foobar"]                  # tokenized prediction of the second sample
+     ... ]
+     >>> references = [
+     ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)
+     ...     [["foo", "bar", "foobar"]]                                           # tokenized references for the second sample (1 reference)
+     ... ]
+     >>> bleu = datasets.load_metric("bleu")
+     >>> results = bleu.compute(predictions=predictions, references=references)
+     >>> print(results["bleu"])
+     1.0
+ """
+
+
+ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+ class Bleu(datasets.Metric):
+     def _info(self):
+         return datasets.MetricInfo(
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             inputs_description=_KWARGS_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
+                     "references": datasets.Sequence(
+                         datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
+                     ),
+                 }
+             ),
+             codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
+             reference_urls=[
+                 "https://en.wikipedia.org/wiki/BLEU",
+                 "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
+             ],
+         )
+
+     def _compute(self, predictions, references, max_order=4, smooth=False):
+         # compute_bleu comes from the TensorFlow NMT script listed in codebase_urls
+         # (nmt/scripts/bleu.py); it is not defined or imported in this file.
+         score = compute_bleu(
+             reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
+         )
+         (bleu, precisions, bp, ratio, translation_length, reference_length) = score
+         return {
+             "bleu": bleu,
+             "precisions": precisions,
+             "brevity_penalty": bp,
+             "length_ratio": ratio,
+             "translation_length": translation_length,
+             "reference_length": reference_length,
+         }
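For reference, the fields returned by `_compute` map onto the usual BLEU formulation, with uniform weights `w_n = 1/N` and `N` equal to `max_order`; a sketch of the standard definition, where `p_n` are the modified n-gram precisions returned in `precisions`, and `c`/`r` are the candidate and reference lengths behind `length_ratio`:

```latex
\[
\mathrm{BLEU} \;=\; \mathrm{BP} \cdot \exp\Big(\sum_{n=1}^{N} w_n \log p_n\Big),
\qquad
\mathrm{BP} \;=\;
\begin{cases}
1 & \text{if } c > r \\
e^{\,1 - r/c} & \text{if } c \le r
\end{cases}
\]
```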
bleu_metric_card.md ADDED
@@ -0,0 +1,123 @@
+ # Metric Card for BLEU
+
+
+ ## Metric Description
+ BLEU (Bilingual Evaluation Understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine's output and that of a human: "the closer a machine translation is to a professional human translation, the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics.
+
+ Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Neither intelligibility nor grammatical correctness is taken into account.
+
+ ## Intended Uses
+ BLEU and BLEU-derived metrics are most often used for machine translation.
+
+ ## How to Use
+
+ This metric takes as input lists of predicted sentences and reference sentences:
+
+ ```python
+ >>> predictions = [
+ ...     ["hello", "there", "general", "kenobi"],
+ ...     ["foo", "bar", "foobar"]
+ ... ]
+ >>> references = [
+ ...     [["hello", "there", "general", "kenobi"]],
+ ...     [["foo", "bar", "foobar"]]
+ ... ]
+ >>> bleu = datasets.load_metric("bleu")
+ >>> results = bleu.compute(predictions=predictions, references=references)
+ >>> print(results)
+ {'bleu': 1.0, 'precisions': [1.0, 1.0, 1.0, 1.0], 'brevity_penalty': 1.0, 'length_ratio': 1.0, 'translation_length': 7, 'reference_length': 7}
+ ```
+
+ ### Inputs
+ - **predictions** (`list` of `list`s): Translations to score. Each translation should be tokenized into a list of tokens.
+ - **references** (`list` of `list`s): References for each translation. Each reference should be tokenized into a list of tokens.
+ - **max_order** (`int`): Maximum n-gram order to use when computing BLEU score. Defaults to `4`.
+ - **smooth** (`boolean`): Whether or not to apply Lin et al. 2004 smoothing. Defaults to `False`.
+
+ ### Output Values
+ - **bleu** (`float`): bleu score
+ - **precisions** (`list` of `float`s): n-gram precisions (one value per n-gram order, up to `max_order`)
+ - **brevity_penalty** (`float`): brevity penalty
+ - **length_ratio** (`float`): ratio of translation length to reference length
+ - **translation_length** (`int`): translation length
+ - **reference_length** (`int`): reference length
+
+ Output Example:
+ ```python
+ {'bleu': 1.0, 'precisions': [1.0, 1.0, 1.0, 1.0], 'brevity_penalty': 1.0, 'length_ratio': 1.167, 'translation_length': 7, 'reference_length': 6}
+ ```
+
+ BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score.
+
+ #### Values from Popular Papers
+ The [original BLEU paper](https://aclanthology.org/P02-1040/) (Papineni et al. 2002) compares BLEU scores of five different models on the same 500-sentence corpus. These scores ranged from 0.0527 to 0.2571.
+
+ The [Attention is All you Need paper](https://proceedings.neurips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf) (Vaswani et al. 2017) got a BLEU score of 0.284 on the WMT 2014 English-to-German translation task, and 0.41 on the WMT 2014 English-to-French translation task.
+
+ ### Examples
+
+ Example where each sample has 1 reference:
+ ```python
+ >>> predictions = [
+ ...     ["hello", "there", "general", "kenobi"],
+ ...     ["foo", "bar", "foobar"]
+ ... ]
+ >>> references = [
+ ...     [["hello", "there", "general", "kenobi"]],
+ ...     [["foo", "bar", "foobar"]]
+ ... ]
+ >>> bleu = datasets.load_metric("bleu")
+ >>> results = bleu.compute(predictions=predictions, references=references)
+ >>> print(results)
+ {'bleu': 1.0, 'precisions': [1.0, 1.0, 1.0, 1.0], 'brevity_penalty': 1.0, 'length_ratio': 1.0, 'translation_length': 7, 'reference_length': 7}
+ ```
+
+ Example where the first sample has 2 references:
+ ```python
+ >>> predictions = [
+ ...     ["hello", "there", "general", "kenobi"],
+ ...     ["foo", "bar", "foobar"]
+ ... ]
+ >>> references = [
+ ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],
+ ...     [["foo", "bar", "foobar"]]
+ ... ]
+ >>> bleu = datasets.load_metric("bleu")
+ >>> results = bleu.compute(predictions=predictions, references=references)
+ >>> print(results)
+ {'bleu': 1.0, 'precisions': [1.0, 1.0, 1.0, 1.0], 'brevity_penalty': 1.0, 'length_ratio': 1.1666666666666667, 'translation_length': 7, 'reference_length': 6}
+ ```
+
+ ## Limitations and Bias
+ This metric has multiple known limitations and biases:
+ - BLEU compares overlap in tokens from the predictions and references, instead of comparing meaning. This can lead to discrepancies between BLEU scores and human ratings.
+ - BLEU scores are not comparable across different datasets, nor are they comparable across different languages.
+ - BLEU scores can vary greatly depending on which parameters are used to generate the scores, especially when different tokenization and normalization techniques are used. It is therefore not possible to compare BLEU scores generated using different parameters, or when these parameters are unknown.
+ - Shorter predicted translations achieve higher scores than longer ones, simply due to how the score is calculated. A brevity penalty is introduced to attempt to counteract this.
+
+
+ ## Citation
+ ```bibtex
+ @INPROCEEDINGS{Papineni02bleu:a,
+     author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
+     title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
+     booktitle = {},
+     year = {2002},
+     pages = {311--318}
+ }
+ @inproceedings{lin-och-2004-orange,
+     title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
+     author = "Lin, Chin-Yew and
+       Och, Franz Josef",
+     booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
+     month = "aug 23{--}aug 27",
+     year = "2004",
+     address = "Geneva, Switzerland",
+     publisher = "COLING",
+     url = "https://www.aclweb.org/anthology/C04-1072",
+     pages = "501--507",
+ }
+ ```
+
+ ## Further References
+ - This Hugging Face implementation uses [this Tensorflow implementation](https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py)
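The Inputs section of the card lists `max_order` and `smooth`, but none of the examples exercise them. A minimal sketch of passing both (the sentences are made up for illustration; the parameter names come from bleu.py above):

```python
import datasets

bleu = datasets.load_metric("bleu")

predictions = [["the", "cat", "sat", "on", "the", "mat"]]
references = [[["the", "cat", "is", "on", "the", "mat"]]]

# Score only unigrams and bigrams, with Lin et al. (2004) smoothing, which
# avoids a zero overall score when some n-gram order has no matches.
results = bleu.compute(
    predictions=predictions,
    references=references,
    max_order=2,
    smooth=True,
)
print(results["precisions"])  # one precision per n-gram order (here: 2 values)
```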
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ datasets
+ sklearn
rouge.py ADDED
@@ -0,0 +1,130 @@
+ # Copyright 2020 The HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ ROUGE metric from Google Research github repo. """
+
+ # The dependencies in https://github.com/google-research/google-research/blob/master/rouge/requirements.txt
+ import absl  # Here to have a nice missing dependency error message early on
+ import nltk  # Here to have a nice missing dependency error message early on
+ import numpy  # Here to have a nice missing dependency error message early on
+ import six  # Here to have a nice missing dependency error message early on
+ from rouge_score import rouge_scorer, scoring
+
+ import datasets
+
+
+ _CITATION = """\
+ @inproceedings{lin-2004-rouge,
+     title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
+     author = "Lin, Chin-Yew",
+     booktitle = "Text Summarization Branches Out",
+     month = jul,
+     year = "2004",
+     address = "Barcelona, Spain",
+     publisher = "Association for Computational Linguistics",
+     url = "https://www.aclweb.org/anthology/W04-1013",
+     pages = "74--81",
+ }
+ """
+
+ _DESCRIPTION = """\
+ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
+ evaluating automatic summarization and machine translation software in natural language processing.
+ The metrics compare an automatically produced summary or translation against a reference (human-produced) summary or translation, or against a set of references.
+
+ Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
+
+ This metric is a wrapper around the Google Research reimplementation of ROUGE:
+ https://github.com/google-research/google-research/tree/master/rouge
+ """
+
+ _KWARGS_DESCRIPTION = """
+ Calculates average rouge scores for a list of hypotheses and references
+ Args:
+     predictions: list of predictions to score. Each prediction
+         should be a string with tokens separated by spaces.
+     references: list of references, one for each prediction. Each
+         reference should be a string with tokens separated by spaces.
+     rouge_types: A list of rouge types to calculate.
+         Valid names:
+         `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`): n-gram based scoring,
+         `"rougeL"`: Longest common subsequence based scoring.
+         `"rougeLsum"`: rougeLsum splits text using `"\n"`.
+         See details in https://github.com/huggingface/datasets/issues/617
+     use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
+     use_agregator: Return aggregates if this is set to True
+ Returns:
+     rouge1: rouge_1 (precision, recall, f1),
+     rouge2: rouge_2 (precision, recall, f1),
+     rougeL: rouge_l (precision, recall, f1),
+     rougeLsum: rouge_lsum (precision, recall, f1)
+ Examples:
+
+     >>> rouge = datasets.load_metric('rouge')
+     >>> predictions = ["hello there", "general kenobi"]
+     >>> references = ["hello there", "general kenobi"]
+     >>> results = rouge.compute(predictions=predictions, references=references)
+     >>> print(list(results.keys()))
+     ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
+     >>> print(results["rouge1"])
+     AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
+     >>> print(results["rouge1"].mid.fmeasure)
+     1.0
+ """
+
+
+ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+ class Rouge(datasets.Metric):
+     def _info(self):
+         return datasets.MetricInfo(
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             inputs_description=_KWARGS_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "predictions": datasets.Value("string", id="sequence"),
+                     "references": datasets.Value("string", id="sequence"),
+                 }
+             ),
+             codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
+             reference_urls=[
+                 "https://en.wikipedia.org/wiki/ROUGE_(metric)",
+                 "https://github.com/google-research/google-research/tree/master/rouge",
+             ],
+         )
+
+     def _compute(self, predictions, references, rouge_types=None, use_agregator=True, use_stemmer=False):
+         if rouge_types is None:
+             rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
+
+         scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
+         if use_agregator:
+             aggregator = scoring.BootstrapAggregator()
+         else:
+             scores = []
+
+         for ref, pred in zip(references, predictions):
+             score = scorer.score(ref, pred)
+             if use_agregator:
+                 aggregator.add_scores(score)
+             else:
+                 scores.append(score)
+
+         if use_agregator:
+             result = aggregator.aggregate()
+         else:
+             result = {}
+             for key in scores[0]:
+                 result[key] = list(score[key] for score in scores)
+
+         return result
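`_compute` above delegates per-pair scoring to `rouge_scorer.RougeScorer` and aggregation to `scoring.BootstrapAggregator` from the `rouge_score` package. A minimal sketch of driving those two pieces directly, which can be handy when debugging individual sentence pairs (the sentences are illustrative):

```python
from rouge_score import rouge_scorer, scoring

# Per-pair scoring, as in the loop inside _compute: score(reference, prediction).
scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=False)
pair = scorer.score("the cat sat on the mat", "the cat is on the mat")
print(pair["rouge1"].precision, pair["rouge1"].recall, pair["rouge1"].fmeasure)

# Aggregation, as in the use_agregator=True branch: add each pair's scores,
# then aggregate into low/mid/high bootstrap estimates.
aggregator = scoring.BootstrapAggregator()
aggregator.add_scores(pair)
aggregator.add_scores(scorer.score("hello there", "hello there"))
aggregated = aggregator.aggregate()
print(aggregated["rouge1"].mid.fmeasure)
```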
rouge_metric_card.md ADDED
@@ -0,0 +1,121 @@
+ # Metric Card for ROUGE
+
+ ## Metric Description
+ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference (human-produced) summary or translation, or against a set of references.
+
+ Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
+
+ This metric is a wrapper around the [Google Research reimplementation of ROUGE](https://github.com/google-research/google-research/tree/master/rouge).
+
+ ## How to Use
+ At minimum, this metric takes as input a list of predictions and a list of references:
+ ```python
+ >>> rouge = datasets.load_metric('rouge')
+ >>> predictions = ["hello there", "general kenobi"]
+ >>> references = ["hello there", "general kenobi"]
+ >>> results = rouge.compute(predictions=predictions,
+ ...                         references=references)
+ >>> print(list(results.keys()))
+ ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
+ >>> print(results["rouge1"])
+ AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
+ >>> print(results["rouge1"].mid.fmeasure)
+ 1.0
+ ```
+
+ ### Inputs
+ - **predictions** (`list`): list of predictions to score. Each prediction should be a string with tokens separated by spaces.
+ - **references** (`list`): list of references, one for each prediction. Each reference should be a string with tokens separated by spaces.
+ - **rouge_types** (`list`): A list of rouge types to calculate. Defaults to `['rouge1', 'rouge2', 'rougeL', 'rougeLsum']`.
+     - Valid rouge types:
+         - `"rouge1"`: unigram (1-gram) based scoring
+         - `"rouge2"`: bigram (2-gram) based scoring
+         - `"rougeL"`: Longest common subsequence based scoring
+         - `"rougeLsum"`: splits text using `"\n"`
+         - See [here](https://github.com/huggingface/datasets/issues/617) for more information
+ - **use_agregator** (`boolean`): If True, returns aggregates. Defaults to `True`. (Note the single "g", which matches the parameter name in the implementation.)
+ - **use_stemmer** (`boolean`): If `True`, uses Porter stemmer to strip word suffixes. Defaults to `False`.
+
+ ### Output Values
+ The output is a dictionary with one entry for each rouge type in the input list `rouge_types`. If `use_agregator=False`, each dictionary entry is a list of Score objects, with one score for each sentence. Each Score object includes the `precision`, `recall`, and `fmeasure`. E.g. if `rouge_types=['rouge1', 'rouge2']` and `use_agregator=False`, the output is:
+
+ ```python
+ {'rouge1': [Score(precision=1.0, recall=0.5, fmeasure=0.6666666666666666), Score(precision=1.0, recall=1.0, fmeasure=1.0)], 'rouge2': [Score(precision=0.0, recall=0.0, fmeasure=0.0), Score(precision=1.0, recall=1.0, fmeasure=1.0)]}
+ ```
+
+ If `rouge_types=['rouge1', 'rouge2']` and `use_agregator=True`, the output is of the following format:
+ ```python
+ {'rouge1': AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)), 'rouge2': AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))}
+ ```
+
+ The `precision`, `recall`, and `fmeasure` values all have a range of 0 to 1.
+
+
+ #### Values from Popular Papers
+
+
+ ### Examples
+ An example without aggregation:
+ ```python
+ >>> rouge = datasets.load_metric('rouge')
+ >>> predictions = ["hello goodbye", "ankh morpork"]
+ >>> references = ["goodbye", "general kenobi"]
+ >>> results = rouge.compute(predictions=predictions,
+ ...                         references=references,
+ ...                         use_agregator=False)
+ >>> print(list(results.keys()))
+ ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
+ >>> print(results["rouge1"])
+ [Score(precision=0.5, recall=1.0, fmeasure=0.6666666666666666), Score(precision=0.0, recall=0.0, fmeasure=0.0)]
+ ```
+
+ The same example, but with aggregation:
+ ```python
+ >>> rouge = datasets.load_metric('rouge')
+ >>> predictions = ["hello goodbye", "ankh morpork"]
+ >>> references = ["goodbye", "general kenobi"]
+ >>> results = rouge.compute(predictions=predictions,
+ ...                         references=references,
+ ...                         use_agregator=True)
+ >>> print(list(results.keys()))
+ ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
+ >>> print(results["rouge1"])
+ AggregateScore(low=Score(precision=0.0, recall=0.0, fmeasure=0.0), mid=Score(precision=0.25, recall=0.25, fmeasure=0.25), high=Score(precision=0.5, recall=0.5, fmeasure=0.5))
+ ```
+
+ The same example, but only calculating `rouge1`:
+ ```python
+ >>> rouge = datasets.load_metric('rouge')
+ >>> predictions = ["hello goodbye", "ankh morpork"]
+ >>> references = ["goodbye", "general kenobi"]
+ >>> results = rouge.compute(predictions=predictions,
+ ...                         references=references,
+ ...                         rouge_types=['rouge1'],
+ ...                         use_agregator=True)
+ >>> print(list(results.keys()))
+ ['rouge1']
+ >>> print(results["rouge1"])
+ AggregateScore(low=Score(precision=0.0, recall=0.0, fmeasure=0.0), mid=Score(precision=0.25, recall=0.25, fmeasure=0.25), high=Score(precision=0.5, recall=0.5, fmeasure=0.5))
+ ```
+
+ ## Limitations and Bias
+ See [Schluter (2017)](https://aclanthology.org/E17-2007/) for an in-depth discussion of many of ROUGE's limitations.
+
+ ## Citation
+ ```bibtex
+ @inproceedings{lin-2004-rouge,
+     title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
+     author = "Lin, Chin-Yew",
+     booktitle = "Text Summarization Branches Out",
+     month = jul,
+     year = "2004",
+     address = "Barcelona, Spain",
+     publisher = "Association for Computational Linguistics",
+     url = "https://www.aclweb.org/anthology/W04-1013",
+     pages = "74--81",
+ }
+ ```
+
+ ## Further References
+ - This metric is a wrapper around the [Google Research reimplementation of ROUGE](https://github.com/google-research/google-research/tree/master/rouge)
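The card reports `precision`, `recall`, and `fmeasure` without spelling out how they relate. For ROUGE-N they follow the usual definitions; a sketch in LaTeX, where `m` is the number of overlapping n-grams, `p` the number of n-grams in the prediction, and `r` the number in the reference:

```latex
\[
\text{precision} = \frac{m}{p},
\qquad
\text{recall} = \frac{m}{r},
\qquad
\text{fmeasure} = \frac{2 \cdot \text{precision} \cdot \text{recall}}
                       {\text{precision} + \text{recall}}
\]
```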