add metric
- README.md +0 -2
- __pycache__/tldr_eval.cpython-37.pyc +0 -0
- app.py +1 -1
- bash_eval.py +0 -95
- requirements.txt +2 -1
- run.py +31 -0
- test_results_test_same.json +0 -0
- tests.py +0 -17
- tldr_eval.py +183 -0
README.md
CHANGED
@@ -1,7 +1,5 @@
 ---
 title: bash_eval
-datasets:
--
 tags:
 - evaluate
 - metric
__pycache__/tldr_eval.cpython-37.pyc
ADDED
Binary file (5.46 kB)
app.py
CHANGED
@@ -2,5 +2,5 @@ import evaluate
 from evaluate.utils import launch_gradio_widget
 
 
-module = evaluate.load("
+module = evaluate.load("neulab/tldr_eval")
 launch_gradio_widget(module)
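With this change app.py points the Gradio widget at the hosted metric id. For a quick sanity check outside the widget, the module can also be called directly; a minimal sketch (the command strings below are made-up examples, not taken from this repo):

```python
import evaluate

# Same metric id that app.py now loads.
module = evaluate.load("neulab/tldr_eval")

# Hypothetical bash one-liners with {{placeholder}} arguments.
results = module.compute(
    predictions=["tar -xvf {{archive.tar}}"],
    references=["sudo tar -xvf {{path/to/archive.tar}}"],
)
print(results)  # template_matching, command_accuracy, token_*, bleu_char
```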
bash_eval.py
DELETED
@@ -1,95 +0,0 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""TODO: Add a description here."""
-
-import evaluate
-import datasets
-
-
-# TODO: Add BibTeX citation
-_CITATION = """\
-@InProceedings{huggingface:module,
-title = {A great new module},
-authors={huggingface, Inc.},
-year={2020}
-}
-"""
-
-# TODO: Add description of the module here
-_DESCRIPTION = """\
-This new module is designed to solve this great ML task and is crafted with a lot of care.
-"""
-
-
-# TODO: Add description of the arguments of the module here
-_KWARGS_DESCRIPTION = """
-Calculates how good are predictions given some references, using certain scores
-Args:
-    predictions: list of predictions to score. Each predictions
-        should be a string with tokens separated by spaces.
-    references: list of reference for each prediction. Each
-        reference should be a string with tokens separated by spaces.
-Returns:
-    accuracy: description of the first score,
-    another_score: description of the second score,
-Examples:
-    Examples should be written in doctest format, and should illustrate how
-    to use the function.
-
-    >>> my_new_module = evaluate.load("my_new_module")
-    >>> results = my_new_module.compute(references=[0, 1], predictions=[0, 1])
-    >>> print(results)
-    {'accuracy': 1.0}
-"""
-
-# TODO: Define external resources urls if needed
-BAD_WORDS_URL = "http://url/to/external/resource/bad_words.txt"
-
-
-@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
-class bash_eval(evaluate.Metric):
-    """TODO: Short description of my evaluation module."""
-
-    def _info(self):
-        # TODO: Specifies the evaluate.EvaluationModuleInfo object
-        return evaluate.MetricInfo(
-            # This is the description that will appear on the modules page.
-            module_type="metric",
-            description=_DESCRIPTION,
-            citation=_CITATION,
-            inputs_description=_KWARGS_DESCRIPTION,
-            # This defines the format of each prediction and reference
-            features=datasets.Features({
-                'predictions': datasets.Value('int64'),
-                'references': datasets.Value('int64'),
-            }),
-            # Homepage of the module for documentation
-            homepage="http://module.homepage",
-            # Additional links to the codebase or references
-            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
-            reference_urls=["http://path.to.reference.url/new_module"]
-        )
-
-    def _download_and_prepare(self, dl_manager):
-        """Optional: download external resources useful to compute the scores"""
-        # TODO: Download external resources if needed
-        pass
-
-    def _compute(self, predictions, references):
-        """Returns the scores"""
-        # TODO: Compute the different scores of the module
-        accuracy = sum(i == j for i, j in zip(predictions, references)) / len(predictions)
-        return {
-            "accuracy": accuracy,
-        }
requirements.txt
CHANGED
@@ -1 +1,2 @@
-git+https://github.com/huggingface/evaluate@main
+git+https://github.com/huggingface/evaluate@main
+sacrebleu
run.py
ADDED
@@ -0,0 +1,31 @@
+import json
+from tldr_eval import TLDREval
+from collections import Counter
+
+with open('test_results_test_same.json', 'r') as f:
+    data = []
+    for line in f:
+        item = json.loads(line)
+        data.append(item)
+
+split_data = [[] for _ in range(10)]
+qid_counter = Counter()
+for item in data:
+    if item['question_id'] in ['9931', '7895', '3740', '8077', '4737', '7057', '9530']:
+        continue
+    split_idx = qid_counter[item['question_id']]
+    split_data[split_idx].append(item)
+    qid_counter[item['question_id']] += 1
+assert all([len(x) in [918, 1845, 0] for x in split_data])
+print([len(x) for x in split_data])
+
+refs = []
+preds = []
+for item in split_data[0]:
+    refs.append(item['gold'].replace('\n', ""))
+    preds.append(item['clean_code'].replace('\n', ""))
+
+
+evaluator = TLDREval()
+metrics = evaluator._compute(preds, refs)
+print(metrics)
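run.py calls the private `_compute` hook on TLDREval directly. Going through the public `evaluate` API instead would look roughly like this (a sketch with hypothetical inputs, not part of this commit):

```python
from tldr_eval import TLDREval

# Hypothetical inputs; in run.py these come from test_results_test_same.json.
preds = ["ls -la {{path/to/dir}}"]
refs = ["ls -l {{path/to/directory}}"]

evaluator = TLDREval()
# compute() batches inputs according to the features declared in _info()
# and then dispatches to _compute() internally.
print(evaluator.compute(predictions=preds, references=refs))
```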
test_results_test_same.json
ADDED
The diff for this file is too large to render.
tests.py
DELETED
@@ -1,17 +0,0 @@
-test_cases = [
-    {
-        "predictions": [0, 0],
-        "references": [1, 1],
-        "result": {"metric_score": 0}
-    },
-    {
-        "predictions": [1, 1],
-        "references": [1, 1],
-        "result": {"metric_score": 1}
-    },
-    {
-        "predictions": [1, 0],
-        "references": [1, 1],
-        "result": {"metric_score": 0.5}
-    }
-]
tldr_eval.py
ADDED
@@ -0,0 +1,183 @@
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""TODO: Add a description here."""
+
+import evaluate
+import datasets
+from collections import defaultdict, Counter
+import re
+import numpy as np
+from sacrebleu.metrics import BLEU
+
+# TODO: Add BibTeX citation
+_CITATION = """\
+@article{zhou2022doccoder,
+title={DocCoder: Generating Code by Retrieving and Reading Docs},
+author={Zhou, Shuyan and Alon, Uri and Xu, Frank F and Jiang, Zhengbao and Neubig, Graham},
+journal={arXiv preprint arXiv:2207.05987},
+year={2022}
+}
+"""
+
+_DESCRIPTION = """\
+This metric is used to evaluate the quality of a generated bash script.
+"""
+
+
+_KWARGS_DESCRIPTION = """
+predictions: list of str. The predictions
+references: list of str. The references
+
+Return
+
+"""
+
+VAR_STR = "[[VAR]]"
+
+
+def clean_command(s):
+    s = s.replace("sudo", "").strip()
+    s = s.replace("`", "").replace('"', "").replace("'", "")
+    # '>', '|', '+'
+    s = s.replace("|", " ").replace(">", " ").replace("<", " ")
+    s = " ".join(s.split())
+    return s
+
+def anonymize_command(s):
+    s = s.replace("={", " {")
+    var_to_pc_holder = defaultdict(lambda: len(var_to_pc_holder))
+    for var in re.findall("{{(.*?)}}", s):
+        _ = var_to_pc_holder[var]
+    for var, id in var_to_pc_holder.items():
+        var_str = "{{%s}}" % var
+        s = s.replace(var_str, f"{VAR_STR}_{id}")
+    # s = re.sub("{{.*?}}", VAR_STR, s)
+    return s
+
+def clean_anonymize_command(s):
+    return anonymize_command(clean_command(s))
+
+
+def get_bag_of_words(cmd):
+    cmd = clean_anonymize_command(cmd)
+    tokens = cmd.strip().split()
+    return tokens
+
+def calc_template_matching(gold, pred):
+    ag = clean_anonymize_command(gold)
+    ap = clean_anonymize_command(pred)
+    m = {'template_matching': int(ag == ap)}
+    return m
+
+def token_prf(tok_gold, tok_pred, match_blank=False):
+    if match_blank and len(tok_gold) == 0:  # do not generate anything
+        if len(tok_pred) == 0:
+            m = {'r': 1, 'p': 1, 'f1': 1}
+        else:
+            m = {'r': 0, 'p': 0, 'f1': 0}
+    else:
+        tok_gold_dict = Counter(tok_gold)
+        tok_pred_dict = Counter(tok_pred)
+        tokens = set([*tok_gold_dict] + [*tok_pred_dict])
+        hit = 0
+        for token in tokens:
+            hit += min(tok_gold_dict.get(token, 0), tok_pred_dict.get(token, 0))
+        p = hit / (sum(tok_pred_dict.values()) + 1e-10)
+        r = hit / (sum(tok_gold_dict.values()) + 1e-10)
+        f1 = 2 * p * r / (p + r + 1e-10)
+        m = {'r': r, 'p': p, 'f1': f1}
+    return m
+
+def measure_bag_of_word(gold, pred):
+    tok_gold = get_bag_of_words(gold)
+    tok_pred = get_bag_of_words(pred)
+    m = token_prf(tok_gold, tok_pred)  # whole sentence
+    gold_cmd = tok_gold[0] if len(tok_gold) else "NONE_GOLD"
+    pred_cmd = tok_pred[0] if len(tok_pred) else "NONE_PRED"
+    m = {**m, 'command_accuracy': int(gold_cmd == pred_cmd)}
+
+    return m
+
+def tldr_metrics(references, predictions):
+    assert len(references) == len(predictions)
+    metric_list = defaultdict(list)
+    for ref, pred in zip(references, predictions):
+        for k, v in calc_template_matching(ref, pred).items():
+            metric_list[k].append(v)
+        for k, v in measure_bag_of_word(ref, pred).items():
+            metric_list[k].append(v)
+
+    for k, v in metric_list.items():
+        metric_list[k] = np.mean(v)
+
+    def clean_for_bleu(s):
+        s = s.replace("sudo", "").strip()
+        s = s.replace("`", "").replace('"', "").replace("'", "")
+        # '>', '|', '+'
+        s = s.replace("|", " ").replace(">", " ").replace("<", " ")
+        s = " ".join(s.split())
+        s = s.replace("={", " {")
+        var_to_pc_holder = defaultdict(lambda: len(var_to_pc_holder))
+        for var in re.findall("{{(.*?)}}", s):
+            _ = var_to_pc_holder[var]
+        for var, id in var_to_pc_holder.items():
+            var_str = "{{%s}}" % var
+            s = s.replace(var_str, f"${id}")
+        # s = re.sub("{{.*?}}", VAR_STR, s)
+        # print(s)
+        return s
+
+    def to_characters(s):
+        # s = s.replace(" ", "")
+        # s = ' '.join(list(s))
+        return s
+    # character level
+    bleu = BLEU(tokenize='char')
+    predictions = [to_characters(clean_for_bleu(x)) for x in predictions]
+    references = [to_characters(clean_for_bleu(x)) for x in references]
+    bleu_score = bleu.corpus_score(predictions, [references]).score
+    metric_list['bleu_char'] = bleu_score
+    return metric_list
+
+@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
+class TLDREval(evaluate.Metric):
+    """Evaluate Bash scripts."""
+
+    def _info(self):
+        return evaluate.MetricInfo(
+            # This is the description that will appear on the modules page.
+            module_type="metric",
+            description=_DESCRIPTION,
+            citation=_CITATION,
+            inputs_description=_KWARGS_DESCRIPTION,
+            # This defines the format of each prediction and reference
+            features=datasets.Features({
+                "predictions": datasets.Value("string", id="sequence"),
+                "references": datasets.Value("string", id="sequence"),
+            }),
+            # Homepage of the module for documentation
+            homepage="https://github.com/shuyanzhou/docprompting",
+            # Additional links to the codebase or references
+            codebase_urls=["https://github.com/shuyanzhou/docprompting"],
+            reference_urls=["https://github.com/shuyanzhou/docprompting"]
+        )
+
+    def _compute(self, predictions, references):
+        """Returns the scores"""
+        metrics = tldr_metrics(references, predictions)
+        # rename for better display
+        metrics['token_recall'] = metrics.pop('r')
+        metrics['token_precision'] = metrics.pop('p')
+        metrics['token_f1'] = metrics.pop('f1')
+        return dict(metrics)
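To make the scoring above concrete, here is a small illustrative sketch of the normalization and token-overlap steps (the command strings are invented; the function names come from tldr_eval.py):

```python
from tldr_eval import clean_anonymize_command, get_bag_of_words, token_prf

gold = 'sudo tar -xvf "{{path/to/archive.tar}}"'   # hypothetical reference
pred = "tar -xvf {{archive.tar}} | less"           # hypothetical prediction

# sudo, quotes and pipes are stripped, and each {{...}} slot becomes [[VAR]]_<id>:
print(clean_anonymize_command(gold))   # tar -xvf [[VAR]]_0
print(clean_anonymize_command(pred))   # tar -xvf [[VAR]]_0 less

# token_prf compares the normalized bags of words; the extra "less" token
# in the prediction lowers precision and F1 while recall stays at 1.
print(token_prf(get_bag_of_words(gold), get_bag_of_words(pred)))
```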