Elad committed
Commit • fcc2240
Parent(s): ed00236

add metrics
Files changed:
- metrics/bart_score.py +45 -0
- metrics/bleu.py +13 -1
- metrics/exact_match.py +40 -0
- metrics/f1.py +56 -0
- metrics/rouge.py +6 -7
metrics/bart_score.py
ADDED
@@ -0,0 +1,45 @@
+import os
+from BARTScore.bart_score import BARTScorer
+
+
+def get_scorers():
+    assert os.path.isfile(
+        os.path.join("BARTScore", "bart.pth")
+    ), "You must download `bart.pth` to use BARTScore.\nUse `gdown --id 1_7JfF7KOInb7ZrxKHIigTMR4ChVET01m --output bart.pth`"
+
+    scorers = {}
+
+    scorers["vanilla"] = BARTScorer(device="cuda:0", checkpoint="facebook/bart-large")
+
+    scorers["cnn"] = BARTScorer(device="cuda:0", checkpoint="facebook/bart-large-cnn")
+
+    # For the ParaBank model, first init a BART-CNN model, then load the local paraphrase weights from BARTScore/bart.pth.
+    # See the documentation at https://github.com/neulab/BARTScore for reference.
+    scorers["para"] = BARTScorer(device="cuda:0", checkpoint="facebook/bart-large-cnn")
+    scorers["para"].load(path="BARTScore/bart.pth")
+
+    return scorers
+
+
+def compute_bart_score_for_scorer(predictions, references, scorer_name, scorer):
+    precisions = scorer.score(predictions, references, batch_size=4)
+    recalls = scorer.score(references, predictions, batch_size=4)
+    f_scores = [0.5 * (p + r) for p, r in zip(precisions, recalls)]
+
+    return [
+        {
+            f"{scorer_name}_f_score": f_scores[i],
+            f"{scorer_name}_precision": precisions[i],
+            f"{scorer_name}_recall": recalls[i],
+        }
+        for i in range(len(predictions))
+    ]
+
+
+def compute_bart_score(predictions, references, scorers):
+    result = [{} for _ in range(len(predictions))]
+    for scorer_name, scorer in scorers.items():
+        scorer_result = compute_bart_score_for_scorer(predictions, references, scorer_name, scorer)
+        for i, element in enumerate(scorer_result):
+            result[i].update(element)
+    return result
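A minimal usage sketch for the new wrapper, not part of this commit: it assumes the neulab/BARTScore repo is cloned into `BARTScore/`, `bart.pth` has been downloaded, a CUDA device is available, and that `metrics/` is importable as a package; the example strings are made up.

    from metrics.bart_score import get_scorers, compute_bart_score

    predictions = ["a man is playing a guitar", "someone plays music"]
    references = ["a person plays the guitar", "a person plays the guitar"]

    scorers = get_scorers()  # builds the "vanilla", "cnn" and "para" BARTScorer instances
    results = compute_bart_score(predictions, references, scorers)

    # One dict per example, e.g. results[0]["para_f_score"], results[0]["cnn_precision"], ...
    print(results[0])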
metrics/bleu.py
CHANGED
@@ -107,4 +107,16 @@ def compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False

     bleu = geo_mean * bp

-    return
+    return {
+        "bleu": bleu,
+        **{f"precision-{i+1}": round(p, 4) for i, p in enumerate(precisions)},
+        "brevity_penalty": bp,
+        "length_ratio": ratio,
+        "translation_length": translation_length,
+        "reference_length": reference_length,
+    }
+
+
+def bleu_postprocess_text(text):
+    # TODO: Tokenize properly
+    return text.split()
metrics/exact_match.py
ADDED
@@ -0,0 +1,40 @@
+import re
+import string
+
+
+def normalize_answer(s):
+    """Lower text and remove punctuation, articles and extra whitespace."""
+
+    def remove_articles(text):
+        return re.sub(r"\b(a|an|the)\b", " ", text)
+
+    def white_space_fix(text):
+        return " ".join(text.split())
+
+    def remove_punc(text):
+        exclude = set(string.punctuation)
+        return "".join(ch for ch in text if ch not in exclude)
+
+    def lower(text):
+        return text.lower()
+
+    return white_space_fix(remove_articles(remove_punc(lower(s))))
+
+
+def exact_match_score(prediction, ground_truth):
+    return normalize_answer(prediction) == normalize_answer(ground_truth)
+
+
+def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
+    scores_for_ground_truths = []
+    for ground_truth in ground_truths:
+        score = metric_fn(prediction, ground_truth)
+        scores_for_ground_truths.append(score)
+    return max(scores_for_ground_truths)
+
+
+def compute_exact_match(predictions, references):
+    exact_match = 0
+    for prediction, ground_truths in zip(predictions, references):
+        exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
+    return 100.0 * exact_match / len(predictions)
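A small sanity-check sketch (made-up answers): `references` holds one list of acceptable answers per prediction, and the score is the percentage of predictions that match at least one of them after normalization.

    from metrics.exact_match import compute_exact_match

    predictions = ["Barack Obama", "Paris"]
    references = [["Barack Obama", "Obama"], ["Paris, France"]]

    print(compute_exact_match(predictions, references))  # 50.0 - only the first prediction matches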
metrics/f1.py
ADDED
@@ -0,0 +1,56 @@
+# Copied from https://github.com/huggingface/datasets/blob/d3c7b9481d427ce41256edaf6773c47570f06f3b/metrics/squad/evaluate.py
+
+import re
+import string
+from collections import Counter
+
+
+def normalize_answer(s):
+    """Lower text and remove punctuation, articles and extra whitespace."""
+
+    def remove_articles(text):
+        return re.sub(r"\b(a|an|the)\b", " ", text)
+
+    def white_space_fix(text):
+        return " ".join(text.split())
+
+    def remove_punc(text):
+        exclude = set(string.punctuation)
+        return "".join(ch for ch in text if ch not in exclude)
+
+    def lower(text):
+        return text.lower()
+
+    return white_space_fix(remove_articles(remove_punc(lower(s))))
+
+
+def f1_score(prediction, ground_truth):
+    prediction_tokens = normalize_answer(prediction).split()
+    ground_truth_tokens = normalize_answer(ground_truth).split()
+    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
+    num_same = sum(common.values())
+    if num_same == 0:
+        return 0
+    precision = 1.0 * num_same / len(prediction_tokens)
+    recall = 1.0 * num_same / len(ground_truth_tokens)
+    f1 = (2 * precision * recall) / (precision + recall)
+    return f1
+
+
+def exact_match_score(prediction, ground_truth):
+    return normalize_answer(prediction) == normalize_answer(ground_truth)
+
+
+def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
+    scores_for_ground_truths = []
+    for ground_truth in ground_truths:
+        score = metric_fn(prediction, ground_truth)
+        scores_for_ground_truths.append(score)
+    return max(scores_for_ground_truths)
+
+
+def compute_f1(predictions, references):
+    f1 = 0
+    for prediction, ground_truths in zip(predictions, references):
+        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
+    return 100.0 * f1 / len(predictions)
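An analogous sketch for the token-level F1 (made-up strings): each prediction is scored against its best-matching reference and the result is averaged on a 0-100 scale.

    from metrics.f1 import compute_f1

    predictions = ["the patriots won the super bowl"]
    references = [["New England Patriots"]]

    # One shared token after normalization ("patriots"): precision 1/4, recall 1/3, F1 ~ 28.57
    print(compute_f1(predictions, references))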
metrics/rouge.py
CHANGED
@@ -4,24 +4,24 @@ import nltk
 from rouge_score import rouge_scorer, scoring


-def compute_rouge(predictions, references, rouge_types=None, use_agregator=True, use_stemmer=False):
+def compute_rouge(predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
     if rouge_types is None:
         rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

     scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
-    if use_agregator:
+    if use_aggregator:
         aggregator = scoring.BootstrapAggregator()
     else:
         scores = []

     for ref, pred in zip(references, predictions):
         score = scorer.score(ref, pred)
-        if use_agregator:
+        if use_aggregator:
             aggregator.add_scores(score)
         else:
             scores.append(score)

-    if use_agregator:
+    if use_aggregator:
         result = aggregator.aggregate()
     else:
         result = {}

@@ -33,7 +33,6 @@ def compute_rouge(predictions, references, rouge_types=None, use_agregator=True,

 # TODO: Check if it is necessary
 # Copied from https://github.com/huggingface/transformers/blob/3977b58437b8ce1ea1da6e31747d888efec2419b/examples/pytorch/summarization/run_summarization.py#L520
-def rouge_postprocess_text(texts):
+def rouge_postprocess_text(text):
     # rougeLSum expects newline after each sentence
-
-    return texts
+    return "\n".join(nltk.sent_tokenize(text))
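A usage sketch of the fixed signature (hypothetical texts; `nltk.download("punkt")` may be needed for `sent_tokenize`, and the exact shape of the returned value depends on the part of `compute_rouge` that lies outside this hunk):

    import nltk
    from metrics.rouge import compute_rouge, rouge_postprocess_text

    nltk.download("punkt")  # sentence tokenizer used by rouge_postprocess_text

    predictions = ["The cat sat on the mat. It looked tired."]
    references = ["A cat was sitting on the mat. The cat seemed tired."]

    # rougeLsum expects one sentence per line, hence the postprocessing step.
    predictions = [rouge_postprocess_text(p) for p in predictions]
    references = [rouge_postprocess_text(r) for r in references]

    result = compute_rouge(predictions, references, use_aggregator=True, use_stemmer=True)
    print(result)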