Update codebleu.py
codebleu.py CHANGED (+3, -76)
@@ -15,11 +15,7 @@
 
 import evaluate
 import datasets
-from .
-from .weighted_ngram_match import *
-from .syntax_match import *
-from .dataflow_match import *
-from tree_sitter import Language, Parser
+from .calc_code_bleu import calculate
 import os
 
 
@@ -92,79 +88,10 @@ class CodeBLEU(evaluate.Metric):
         # TODO: Download external resources if needed
         if self.config_name == "python":
             Language.build_library('./parser/my-languages.so',['tree-sitter-python'])
-        elif self.config_name == "go":
-            Language.build_library('./parser/my-languages.so',['tree-sitter-go'])
-        elif self.config_name == "javascript":
-            Language.build_library('./parser/my-languages.so',['tree-sitter-javascript'])
-        elif self.config_name == "php":
-            Language.build_library('./parser/my-languages.so',['tree-sitter-php'])
-        elif self.config_name == "java":
-            Language.build_library('./parser/my-languages.so',['tree-sitter-java'])
-        elif self.config_name == "ruby":
-            Language.build_library('./parser/my-languages.so',['tree-sitter-ruby'])
-        elif self.config_name == "c-sharp":
-            Language.build_library('./parser/my-languages.so',['tree-sitter-c-sharp'])
         elif self.config_name == "cpp":
             Language.build_library('./parser/my-languages.so',['tree-sitter-cpp'])
 
 
     def _compute(self, predictions, references, language="python", alpha=0.25, beta=0.25, gamma=0.25, theta=0.25):
-
-
-        pre_references = [[s.strip() for s in my_list] for my_list in references]
-        #pre_references = [[x.strip() for x in open(file, 'r', encoding='utf-8').readlines()] for file in references]
-        hypothesis = [s.strip() for s in predictions]
-        #hypothesis = [x.strip() for x in open(predictions, 'r', encoding='utf-8').readlines()]
-
-        for i in range(len(pre_references)):
-            assert len(hypothesis) == len(pre_references[i])
-
-        references = []
-        for i in range(len(hypothesis)):
-            ref_for_instance = []
-            for j in range(len(pre_references)):
-                ref_for_instance.append(pre_references[j][i])
-            references.append(ref_for_instance)
-        assert len(references) == len(pre_references)*len(hypothesis)
-
-
-        # calculate ngram match (BLEU)
-        tokenized_hyps = [x.split() for x in hypothesis]
-        tokenized_refs = [[x.split() for x in reference] for reference in references]
-
-        ngram_match_score = corpus_bleu(tokenized_refs,tokenized_hyps)
-
-        # calculate weighted ngram match
-        # from os import listdir
-        # from os.path import isfile, join
-        # onlyfiles = [f for f in listdir("./keywords") if isfile(join("keywords", f))]
-        # print(onlyfiles)
-        curr_path = os.path.dirname(os.path.abspath(__file__))
-        keywords = [x.strip() for x in open(curr_path + language +'.txt', 'r', encoding='utf-8').readlines()]
-        def make_weights(reference_tokens, key_word_list):
-            return {token:1 if token in key_word_list else 0.2 \
-                for token in reference_tokens}
-        tokenized_refs_with_weights = [[[reference_tokens, make_weights(reference_tokens, keywords)]\
-            for reference_tokens in reference] for reference in tokenized_refs]
-
-        weighted_ngram_match_score = corpus_weighted_ngram_match(tokenized_refs_with_weights,tokenized_hyps)
-
-        # calculate syntax match
-        syntax_match_score = corpus_syntax_match(references, hypothesis, language)
-
-        # calculate dataflow match
-        dataflow_match_score = corpus_dataflow_match(references, hypothesis, language)
-
-
-
-        code_bleu_score = alpha*ngram_match_score\
-            + beta*weighted_ngram_match_score\
-            + gamma*syntax_match_score\
-            + theta*dataflow_match_score
-        return {
-            "ngram_match_score": ngram_match_score,
-            "weighted_ngram_match_score": weighted_ngram_match_score,
-            "syntax_match_score": syntax_match_score,
-            "dataflow_match_score": dataflow_match_score,
-            "code_bleu_score": code_bleu_score
-        }
+
+        return calculate(predictions, references, language, alpha, beta, gamma, theta)
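For reference, the removed _compute body combined four component scores into code_bleu_score as alpha*ngram + beta*weighted_ngram + gamma*syntax + theta*dataflow, and after this change calc_code_bleu.calculate is expected to return the same keys. A minimal, self-contained sketch of that final combination (the component values below are placeholders, not outputs of the real matchers):

# Sketch of the weighted combination the removed body computed; the four
# component scores passed in are placeholder numbers, not real match results.
def combine_codebleu(ngram_match, weighted_ngram_match, syntax_match, dataflow_match,
                     alpha=0.25, beta=0.25, gamma=0.25, theta=0.25):
    return (alpha * ngram_match
            + beta * weighted_ngram_match
            + gamma * syntax_match
            + theta * dataflow_match)

print(combine_codebleu(0.8, 0.7, 0.9, 0.6))  # 0.75 with the default equal weights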
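A usage sketch for the refactored metric, assuming the script sits in a local ./codebleu directory (the load path and the example inputs are illustrative, not part of this commit). The language and weight keyword arguments are forwarded to _compute, which now delegates to calculate:

import evaluate

# Hypothetical local path to the metric script; adjust to wherever codebleu.py lives.
codebleu = evaluate.load("./codebleu", config_name="python")

predictions = ["def add(a, b):\n    return a + b"]
# One reference set, containing one reference per prediction, matching the
# layout the pre-change _compute body expected.
references = [["def add(a, b):\n    return a + b"]]

result = codebleu.compute(predictions=predictions, references=references,
                          language="python", alpha=0.25, beta=0.25, gamma=0.25, theta=0.25)
print(result)  # per the removed body: ngram_match_score, weighted_ngram_match_score,
               # syntax_match_score, dataflow_match_score, code_bleu_score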