Elad committed on
Commit 47cfd77
1 Parent(s): ba92bd5

fix metrics

Files changed (3)
  1. metrics/bleu.py +3 -3
  2. metrics/exact_match.py +1 -1
  3. metrics/f1.py +1 -1
metrics/bleu.py CHANGED
@@ -47,14 +47,14 @@ def _get_ngrams(segment, max_order):
   return ngram_counts
 
 
-def compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False):
+def compute_bleu(translation_corpus, reference_corpus, max_order=4, smooth=False):
   """Computes BLEU score of translated segments against one or more references.
 
   Args:
-    reference_corpus: list of lists of references for each translation. Each
-      reference should be tokenized into a list of tokens.
     translation_corpus: list of translations to score. Each translation
       should be tokenized into a list of tokens.
+    reference_corpus: list of lists of references for each translation. Each
+      reference should be tokenized into a list of tokens.
     max_order: Maximum n-gram order to use when computing BLEU score.
     smooth: Whether or not to apply Lin et al. 2004 smoothing.
 
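Note: the hunk above swaps the first two parameters so that translations come first, matching the Args order in the updated docstring. A minimal call sketch under the new signature; the tokenized inputs and the captured return value are illustrative assumptions, not part of the diff:

# Hypothetical inputs: one translation and two references for it,
# each pre-tokenized into a list of tokens as the docstring requires.
translations = [["the", "cat", "sat", "on", "the", "mat"]]
references = [[["the", "cat", "sat", "on", "the", "mat"],
               ["a", "cat", "was", "sitting", "on", "the", "mat"]]]

# After this commit, the translation corpus is the first argument.
result = compute_bleu(translations, references, max_order=4, smooth=True)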
metrics/exact_match.py CHANGED
@@ -37,4 +37,4 @@ def compute_exact_match(predictions, references):
   exact_match = 0
   for prediction, ground_truths in zip(predictions, references):
     exact_match += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
-  return {"exact_match": 100.0 * exact_match / len(predictions)}
+  return 100.0 * exact_match / len(predictions)
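Note: the return value changes from a dict to a bare float, so any call site that indexed the old dict needs updating. A hypothetical before/after call site (the inputs are made up for illustration):

predictions = ["Paris", "42"]
references = [["Paris"], ["forty-two", "42"]]

# Before this commit:
#   score = compute_exact_match(predictions, references)["exact_match"]
# After this commit the percentage is returned directly:
score = compute_exact_match(predictions, references)  # 100.0 here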
metrics/f1.py CHANGED
@@ -53,4 +53,4 @@ def compute_f1(predictions, references):
   f1 = 0
   for prediction, ground_truths in zip(predictions, references):
     f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
-  return {"f1": 100.0 * f1 / len(predictions)}
+  return 100.0 * f1 / len(predictions)
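Note: both compute_exact_match and compute_f1 reduce per-example scores through metric_max_over_ground_truths, which is not shown in this diff. A sketch of that helper under the standard SQuAD-eval convention (the body is an assumption; only the name and call shape appear above):

def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # Score the prediction against every acceptable ground truth and
    # keep the best, so matching any one answer yields full credit.
    return max(metric_fn(prediction, gt) for gt in ground_truths)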