fasterinnerlooper commited on
Commit
e90329b
1 Parent(s): 41db68b

Upload codebleu.py

Browse files

Reworked code so that it can run the metric. Added comments for fellow developers to use the metric correctly.

Files changed (1) hide show
  1. codebleu.py +3 -3
codebleu.py CHANGED
@@ -41,7 +41,7 @@ Args:
41
  should be a string with tokens separated by spaces.
42
  references: list of reference for each prediction. Each
43
  reference should be a string with tokens separated by spaces.
44
- language: programming language in ['java','js','c_sharp','php','c','python','cpp'].
45
  weights: tuple of 4 floats to use as weights for scores. Defaults to (0.25, 0.25, 0.25, 0.25).
46
  Returns:
47
  codebleu: resulting `CodeBLEU` score,
@@ -53,7 +53,7 @@ Examples:
53
  >>> metric = evaluate.load("k4black/codebleu")
54
  >>> ref = "def sum ( first , second ) :\n return second + first"
55
  >>> pred = "def add ( a , b ) :\n return a + b"
56
- >>> results = metric.compute(references=[ref], predictions=[pred], language="python")
57
  >>> print(results)
58
  """
59
 
@@ -113,7 +113,7 @@ class codebleu(evaluate.Metric):
113
  return self.codebleu_package.calc_codebleu(
114
  references=references,
115
  predictions=predictions,
116
- lang=lang,
117
  weights=weights,
118
  tokenizer=tokenizer,
119
  )
 
41
  should be a string with tokens separated by spaces.
42
  references: list of reference for each prediction. Each
43
  reference should be a string with tokens separated by spaces.
44
+ language: programming language in ['java','js','c_sharp','php','c','python','cpp']. Please note that, due to the way Datasets works, the number of entries in the language array must match the number of entries in the predictions and references arrays, but only the first value from the languages array will be used. This means that you will not be able to compute a metric for different languages at the same time, but must do them as sequential calls to CodeBLEU.
45
  weights: tuple of 4 floats to use as weights for scores. Defaults to (0.25, 0.25, 0.25, 0.25).
46
  Returns:
47
  codebleu: resulting `CodeBLEU` score,
 
53
  >>> metric = evaluate.load("k4black/codebleu")
54
  >>> ref = "def sum ( first , second ) :\n return second + first"
55
  >>> pred = "def add ( a , b ) :\n return a + b"
56
+ >>> results = metric.compute(references=[ref], predictions=[pred], language=["python"])
57
  >>> print(results)
58
  """
59
 
 
113
  return self.codebleu_package.calc_codebleu(
114
  references=references,
115
  predictions=predictions,
116
+ lang=lang[0],
117
  weights=weights,
118
  tokenizer=tokenizer,
119
  )