# metrics/bart_score.py
import os
import numpy as np
from BARTScore.bart_score import BARTScorer


def get_scorers():
    """Instantiate one BARTScorer per variant: vanilla, CNN, and ParaBank."""
    assert os.path.isfile(
        os.path.join("BARTScore", "bart.pth")
    ), "You must download `bart.pth` to use BARTScore.\nUse `gdown --id 1_7JfF7KOInb7ZrxKHIigTMR4ChVET01m --output bart.pth`"
    scorers = {}
    scorers["vanilla"] = BARTScorer(device="cuda:0", checkpoint="facebook/bart-large")
    scorers["cnn"] = BARTScorer(device="cuda:0", checkpoint="facebook/bart-large-cnn")
    # For the ParaBank model, first initialize a CNN-finetuned BART model, then load
    # the local ParaBank weights from BARTScore/bart.pth.
    # See the documentation at https://github.com/neulab/BARTScore for reference.
    scorers["para"] = BARTScorer(device="cuda:0", checkpoint="facebook/bart-large-cnn")
    scorers["para"].load(path="BARTScore/bart.pth")
    return scorers


def compute_bart_score_for_scorer(predictions, references, scorer_name, scorer):
    """Score each (prediction, reference) pair with a single BARTScore scorer."""
    # precisions = np.array(scorer.score(references, predictions, batch_size=4))
    # Recall: log-likelihood of the reference conditioned on the prediction.
    recalls = np.array(scorer.score(predictions, references, batch_size=4))
    # f_scores = 0.5 * (precisions + recalls)
    # Baseline: log-likelihood of the reference conditioned on itself.
    baselines = np.array(scorer.score(references, references, batch_size=4))
    normalized = baselines / recalls
    diffs = recalls - baselines
    expdiffs = np.exp(diffs)
    return [
        {
            # f"{scorer_name}_f_score": f_scores[i],
            # f"{scorer_name}_precision": precisions[i],
            f"{scorer_name}_recall": recalls[i],
            f"{scorer_name}_normalized": normalized[i],
            f"{scorer_name}_diffs": diffs[i],
            f"{scorer_name}_expdiffs": expdiffs[i],
        }
        for i in range(len(predictions))
    ]


def compute_bart_score(predictions, references, scorers):
    """Merge the per-scorer metrics into one dict per prediction."""
    result = [{} for _ in range(len(predictions))]
    for scorer_name, scorer in scorers.items():
        scorer_result = compute_bart_score_for_scorer(predictions, references, scorer_name, scorer)
        for i, element in enumerate(scorer_result):
            result[i].update(element)
    return result
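

# Minimal usage sketch, assuming `bart.pth` has already been downloaded into
# BARTScore/ and a CUDA device (cuda:0) is available. The example strings are
# illustrative only and not part of the metric itself.
if __name__ == "__main__":
    example_predictions = ["The cat sat on the mat."]
    example_references = ["A cat was sitting on the mat."]
    all_scorers = get_scorers()
    scores = compute_bart_score(example_predictions, example_references, all_scorers)
    # `scores` holds one dict per prediction, with keys such as
    # "vanilla_recall", "cnn_normalized", "para_expdiffs", ...
    print(scores[0])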