Datasets:
File size: 1,262 Bytes
1643ce6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 |
from functools import lru_cache
def lev_dist(prediction, ground_truth):
    """Return the Levenshtein edit distance between two strings.

    The distance is the minimum number of single-character insertions,
    deletions, and substitutions needed to turn ``prediction`` into
    ``ground_truth``.

    Uses an iterative two-row dynamic program instead of the previous
    memoized recursion: recursion depth grew with the combined string
    length and raised RecursionError on long inputs, while this version
    runs in O(len(a) * len(b)) time and O(len(b)) extra space.
    """
    m, n = len(prediction), len(ground_truth)
    # Row for the empty prefix of `prediction`: distance is j deletions.
    prev = list(range(n + 1))
    for i in range(1, m + 1):
        # curr[0]: turning the first i chars of `prediction` into "" costs i.
        curr = [i] + [0] * n
        for j in range(1, n + 1):
            if prediction[i - 1] == ground_truth[j - 1]:
                curr[j] = prev[j - 1]  # characters match, no edit needed
            else:
                curr[j] = 1 + min(
                    prev[j],        # delete from prediction
                    curr[j - 1],    # insert into prediction
                    prev[j - 1],    # substitute
                )
        prev = curr
    return prev[n]
def edit_sim_score(a, b):
    """Return the normalized edit similarity of two strings in [0, 1].

    1.0 means identical strings; 0.0 means every character differs.
    Fixed: the original divided by ``max(len(a), len(b))`` unconditionally,
    raising ZeroDivisionError when both strings were empty — two empty
    strings are identical, so that case now returns 1.0.
    """
    longest = max(len(a), len(b))
    if longest == 0:
        return 1.0  # both empty: identical by definition
    return 1 - lev_dist(a, b) / longest
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Score ``prediction`` against every reference and return the best.

    ``metric_fn`` is called as ``metric_fn(prediction, ground_truth)`` for
    each entry of ``ground_truths``; the maximum score is returned.
    """
    return max(metric_fn(prediction, reference) for reference in ground_truths)
def compute_edit_sim(predictions, references):
    """Return the corpus-level edit similarity as a percentage (0-100).

    Each prediction is scored via ``edit_sim_score`` against its list of
    reference strings, keeping the best match per example, and the scores
    are averaged over ``len(predictions)`` and scaled by 100.
    """
    total = sum(
        metric_max_over_ground_truths(edit_sim_score, prediction, ground_truths)
        for prediction, ground_truths in zip(predictions, references)
    )
    return 100.0 * total / len(predictions)