Elad committed
Commit ed00236 • 1 Parent(s): 850cbd8
add metrics
- metrics/bleu.py +110 -0
- metrics/rouge.py +39 -0
metrics/bleu.py
ADDED
@@ -0,0 +1,110 @@
+# Copied from https://github.com/tensorflow/nmt/blob/0be864257a76c151eef20ea689755f08bc1faf4e/nmt/scripts/bleu.py
+
+# Copyright 2017 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+"""Python implementation of BLEU and smooth-BLEU.
+
+This module provides a Python implementation of BLEU and smooth-BLEU.
+Smooth BLEU is computed following the method outlined in the paper:
+Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
+evaluation metrics for machine translation. COLING 2004.
+"""
+
+import collections
+import math
+
+
+def _get_ngrams(segment, max_order):
+    """Extracts all n-grams up to a given maximum order from an input segment.
+
+    Args:
+      segment: text segment from which n-grams will be extracted.
+      max_order: maximum length in tokens of the n-grams returned by this
+        method.
+
+    Returns:
+      The Counter containing all n-grams up to max_order in segment
+      with a count of how many times each n-gram occurred.
+    """
+    ngram_counts = collections.Counter()
+    for order in range(1, max_order + 1):
+        for i in range(0, len(segment) - order + 1):
+            ngram = tuple(segment[i : i + order])
+            ngram_counts[ngram] += 1
+    return ngram_counts
+
+
+def compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False):
+    """Computes BLEU score of translated segments against one or more references.
+
+    Args:
+      reference_corpus: list of lists of references for each translation. Each
+        reference should be tokenized into a list of tokens.
+      translation_corpus: list of translations to score. Each translation
+        should be tokenized into a list of tokens.
+      max_order: Maximum n-gram order to use when computing BLEU score.
+      smooth: Whether or not to apply Lin et al. 2004 smoothing.
+
+    Returns:
+      6-tuple with the BLEU score, n-gram precisions, brevity penalty, length
+      ratio, translation length and reference length.
+    """
+    matches_by_order = [0] * max_order
+    possible_matches_by_order = [0] * max_order
+    reference_length = 0
+    translation_length = 0
+    for (references, translation) in zip(reference_corpus, translation_corpus):
+        reference_length += min(len(r) for r in references)
+        translation_length += len(translation)
+
+        merged_ref_ngram_counts = collections.Counter()
+        for reference in references:
+            merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
+        translation_ngram_counts = _get_ngrams(translation, max_order)
+        overlap = translation_ngram_counts & merged_ref_ngram_counts
+        for ngram in overlap:
+            matches_by_order[len(ngram) - 1] += overlap[ngram]
+        for order in range(1, max_order + 1):
+            possible_matches = len(translation) - order + 1
+            if possible_matches > 0:
+                possible_matches_by_order[order - 1] += possible_matches
+
+    precisions = [0] * max_order
+    for i in range(0, max_order):
+        if smooth:
+            precisions[i] = (matches_by_order[i] + 1.0) / (possible_matches_by_order[i] + 1.0)
+        else:
+            if possible_matches_by_order[i] > 0:
+                precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
+            else:
+                precisions[i] = 0.0
+
+    if min(precisions) > 0:
+        p_log_sum = sum((1.0 / max_order) * math.log(p) for p in precisions)
+        geo_mean = math.exp(p_log_sum)
+    else:
+        geo_mean = 0
+
+    ratio = float(translation_length) / reference_length
+
+    if ratio > 1.0:
+        bp = 1.0
+    else:
+        bp = math.exp(1 - 1.0 / ratio)
+
+    bleu = geo_mean * bp
+
+    return (bleu, precisions, bp, ratio, translation_length, reference_length)
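A minimal usage sketch of compute_bleu, not part of the commit: inputs must be pre-tokenized, with a list of reference token lists per translation; the toy corpus below is made up for illustration.

    references = [[["the", "cat", "sat", "on", "the", "mat"]]]
    translations = [["the", "cat", "sat", "on", "the", "mat"]]
    # Exact match: every n-gram precision is 1.0 and the brevity penalty is 1.0.
    bleu, precisions, bp, ratio, trans_len, ref_len = compute_bleu(references, translations)
    print(bleu)  # 1.0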
metrics/rouge.py
ADDED
@@ -0,0 +1,39 @@
+# Copied from https://github.com/huggingface/datasets/blob/d3c7b9481d427ce41256edaf6773c47570f06f3b/metrics/rouge/rouge.py
+
+import nltk
+from rouge_score import rouge_scorer, scoring
+
+
+def compute_rouge(predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
+    if rouge_types is None:
+        rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
+
+    scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
+    if use_aggregator:
+        aggregator = scoring.BootstrapAggregator()
+    else:
+        scores = []
+
+    for ref, pred in zip(references, predictions):
+        score = scorer.score(ref, pred)
+        if use_aggregator:
+            aggregator.add_scores(score)
+        else:
+            scores.append(score)
+
+    if use_aggregator:
+        result = aggregator.aggregate()
+    else:
+        result = {}
+        for key in scores[0]:
+            result[key] = list(score[key] for score in scores)
+
+    return result
+
+
+# TODO: Check if it is necessary
+# Copied from https://github.com/huggingface/transformers/blob/3977b58437b8ce1ea1da6e31747d888efec2419b/examples/pytorch/summarization/run_summarization.py#L520
+def rouge_postprocess_text(texts):
+    # rougeLsum expects a newline after each sentence
+    texts = ["\n".join(nltk.sent_tokenize(text)) for text in texts]
+    return texts
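A minimal usage sketch of compute_rouge, not part of the commit: it assumes the rouge_score package is installed and that nltk's punkt tokenizer data has been downloaded (needed by rouge_postprocess_text); the strings are illustrative. With the default aggregator, each entry of the result is a rouge_score AggregateScore with low/mid/high bootstrap bounds.

    import nltk
    nltk.download("punkt")

    preds = rouge_postprocess_text(["The cat sat on the mat. It purred."])
    refs = rouge_postprocess_text(["A cat was sitting on the mat. It was purring."])
    result = compute_rouge(predictions=preds, references=refs)
    print(result["rougeLsum"].mid.fmeasure)  # aggregated mid-point F1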