# exebench/exebench.py
# coding=utf-8
# Copyright 2022 ExeBench authors
# The code required to produce and load this dataset is licensed under MIT License.
# The code samples included in this dataset keep their own licenses, which can be retrieved via their metadata.
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Please note that the dataset release is still a work in progress.
"""The ExeBench dataset."""
import json
import datasets
from pathlib import Path
_CITATION = """\
@inproceedings{10.1145/3520312.3534867,
author = {Armengol-Estap\'{e}, Jordi and Woodruff, Jackson and Brauckmann, Alexander and Magalh\~{a}es, Jos\'{e} Wesley de Souza and O'Boyle, Michael F. P.},
title = {ExeBench: An ML-Scale Dataset of Executable C Functions},
year = {2022},
isbn = {9781450392730},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3520312.3534867},
doi = {10.1145/3520312.3534867},
abstract = {Machine-learning promises to transform compilation and software engineering, yet is frequently limited by the scope of available datasets. In particular, there is a lack of runnable, real-world datasets required for a range of tasks ranging from neural program synthesis to machine learning-guided program optimization. We introduce a new dataset, ExeBench, which attempts to address this. It tackles two key issues with real-world code: references to external types and functions and scalable generation of IO examples. ExeBench is the first publicly available dataset that pairs real-world C code taken from GitHub with IO examples that allow these programs to be run. We develop a toolchain that scrapes GitHub, analyzes the code, and generates runnable snippets of code. We analyze our benchmark suite using several metrics, and show it is representative of real-world code. ExeBench contains 4.5M compilable and 700k executable C functions. This scale of executable, real functions will enable the next generation of machine learning-based programming tasks.},
booktitle = {Proceedings of the 6th ACM SIGPLAN International Symposium on Machine Programming},
pages = {50–59},
numpages = {10},
keywords = {Code Dataset, Program Synthesis, Mining Software Repositories, C, Machine Learning for Code, Compilers},
location = {San Diego, CA, USA},
series = {MAPS 2022}
}
"""
_DESCRIPTION = """\
An ML-scale dataset of executable C functions
""" # TODO: expand
_HOMEPAGE = "https://github.com/jordiae/exebench"
_LICENSE = "Multiple: see each function license (fields 'ref' and 'path')"
_URL = "" # "https://huggingface.co/datasets/jordiae/exebench-test/resolve/main/"
_REMOVED_FEATURES = ["doc", "angha_error", "real_error", "angha_io_error", "real_io_error",
"angha_io_pairs_are_trivial", "real_io_pairs_are_trivial"]
_RENAMED_FEATURES = {"angha_deps": "synth_deps", "angha_io_pairs": "synth_io_pairs",
"angha_exe_wrapper": "synth_exe_wrapper", "angha_iospec": "synth_iospec"}
_FEATURES = datasets.Features(
{
"path": datasets.Value("string"),
"func_def": datasets.Value("string"),
"func_head": datasets.Value("string"),
"func_head_types": datasets.Value("string"),
"fname": datasets.Value("string"),
"signature": datasets.Sequence(datasets.Value("string")),
# "doc": datasets.Value("string"),
# "angha_error": datasets.Value("string"),
# "real_error": datasets.Value("string"),
"asm": datasets.Sequence({'target': datasets.Value("string"), 'code': datasets.Value("string")}), # unflat dict#Optional[Dict[str, Optional[FuncAsm]]] = None
"synth_deps": datasets.Value("string"),
"real_deps": datasets.Value("string"),
"synth_io_pairs": datasets.Sequence({
"input": datasets.Sequence({'var': datasets.Value("string"), 'value': datasets.Value("string")}),
"output": datasets.Sequence({'var': datasets.Value("string"), 'value': datasets.Value("string")}),
"dummy_funcs": datasets.Value("string"),
"dummy_funcs_seed": datasets.Value("int64")
}),
"real_io_pairs": datasets.Sequence({
"input": datasets.Sequence({'var': datasets.Value("string"), 'value': datasets.Value("string")}),
"output": datasets.Sequence({'var': datasets.Value("string"), 'value': datasets.Value("string")}),
"dummy_funcs": datasets.Value("string"),
"dummy_funcs_seed": datasets.Value("int64")
}),
# "angha_io_error": datasets.Value("string"),
# "real_io_error": datasets.Value("string"),
"synth_exe_wrapper": datasets.Value("string"),
"real_exe_wrapper": datasets.Value("string"),
# "angha_io_pairs_are_trivial": datasets.Value("bool"),
# "real_io_pairs_are_trivial": datasets.Value("bool"),
"ref": datasets.Value("string"),
"synth_iospec": datasets.Value("string"), # serialized, TODO: improve
"real_iospec": datasets.Value("string")
}
)
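# Arrow-backed features cannot hold arbitrarily nested, heterogeneous dicts, so the IO
# pairs are flattened into parallel {var, value} records and every value is stored as a
# JSON-encoded string (see _generate_examples). Consumers are expected to json.loads()
# each "value"; an illustrative decoding helper is sketched at the bottom of this file.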
class ExeBenchConfig(datasets.BuilderConfig):
"""BuilderConfig for ExeBench."""
def __init__(self, *args, **kwargs):
"""BuilderConfig for The Pile.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super().__init__(
*args,
**kwargs,
)
class ExeBench(datasets.GeneratorBasedBuilder):
"""ExeBench dataset"""
BUILDER_CONFIGS = [
ExeBenchConfig(
name="ExeBench",
version=datasets.Version("1.0.4"),
description="Executable C dataset"
),
]
def _info(self):
"""Give information and typings for the dataset."""
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types
features=_FEATURES,
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"train_not_compilable": f"{_URL}train_no_compilable.tar.gz",
"train_synth_compilable": f"{_URL}train_synth_compilable.tar.gz",
"train_real_compilable": f"{_URL}train_real_compilable.tar.gz",
"train_synth_simple_io": f"{_URL}train_synth_simple_io.tar.gz",
"train_real_simple_io": f"{_URL}train_real_simple_io.tar.gz",
"train_synth_rich_io": f"{_URL}train_synth_rich_io.tar.gz",
"valid_synth": f"{_URL}valid_synth.tar.gz",
"valid_real": f"{_URL}valid_real.tar.gz",
"test_synth": f"{_URL}test_synth.tar.gz",
"test_real": f"{_URL}test_real.tar.gz",
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
datasets.SplitGenerator(name='train_not_compilable',
gen_kwargs={"files": downloaded_files["train_not_compilable"]}),
datasets.SplitGenerator(name='train_synth_compilable',
gen_kwargs={"files": downloaded_files["train_synth_compilable"]}),
datasets.SplitGenerator(name='train_real_compilable',
gen_kwargs={"files": downloaded_files["train_real_compilable"]}),
datasets.SplitGenerator(name='train_synth_simple_io',
gen_kwargs={"files": downloaded_files["train_synth_simple_io"]}),
datasets.SplitGenerator(name='train_real_simple_io',
gen_kwargs={"files": downloaded_files["train_real_simple_io"]}),
datasets.SplitGenerator(name='train_synth_rich_io',
gen_kwargs={"files": downloaded_files["train_synth_rich_io"]}),
datasets.SplitGenerator(name='valid_synth',
gen_kwargs={"files": downloaded_files["valid_synth"]}),
datasets.SplitGenerator(name='valid_real',
gen_kwargs={"files": downloaded_files["valid_real"]}),
datasets.SplitGenerator(name='test_synth',
gen_kwargs={"files": downloaded_files["test_synth"]}),
datasets.SplitGenerator(name='test_real',
gen_kwargs={"files": downloaded_files["test_real"]}),
]
def _generate_examples(self, files):
"""Yield examples as (key, example) tuples."""
key = 0
import zstandard as zstd
for path in Path(files).rglob('*.jsonl.zst'):
            with zstd.open(str(path), "rt", encoding="utf-8") as f:
for row in f:
data = json.loads(row)
data = data['text']
data = self._fixes(data)
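                    # Flatten each IO pair into {var, value} records, JSON-encoding the
                    # values so heterogeneous types (ints, strings, nested arrays, ...)
                    # can share a single string column.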
for io_pairs_kind in ('synth_io_pairs', 'real_io_pairs'):
if data[io_pairs_kind]:
new_io_pairs = []
for e in data[io_pairs_kind]:
new_e = {}
new_e['input'] = [{'var': var, 'value': json.dumps(value)} for (var, value) in e['input'].items()] if e['input'] else []
new_e['output'] = [{'var': var, 'value': json.dumps(value)} for (var, value) in e['output'].items()] if e['output'] else []
new_e['dummy_funcs'] = e['dummy_funcs']
new_e['dummy_funcs_seed'] = e['dummy_funcs_seed']
new_io_pairs.append(new_e)
data[io_pairs_kind] = new_io_pairs
data['synth_iospec'] = json.dumps(data['synth_iospec'])
data['real_iospec'] = json.dumps(data['real_iospec'])
yield key, data
key += 1
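    # _fixes normalizes older records: it fills in fields that may be missing, flattens the
    # per-target asm dict into a list of {target, code} entries, drops the features listed in
    # _REMOVED_FEATURES, and renames the legacy "angha_*" fields to their "synth_*" names.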
def _fixes(self, row):
if 'angha_iospec' not in row:
row['angha_iospec'] = None
if 'real_iospec' not in row:
row['real_iospec'] = None
if 'func_head_types' not in row:
row['func_head_types'] = ''
row['asm'] = [{'target': target, 'code': code['func_asm'] if code else None} for (target, code) in
row['asm'].items()] # TODO: pre_asm etc
for removed_key in _REMOVED_FEATURES:
if removed_key in row:
del row[removed_key]
for original_key, new_key in _RENAMED_FEATURES.items():
row[new_key] = row[original_key]
del row[original_key]
return row
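
# The helper below is an illustrative sketch (not used by the builder) of how a consumer
# might turn one decoded example's IO pairs back into plain Python dicts. It assumes the
# default datasets decoding, in which a Sequence of dicts comes back as a dict of lists
# (e.g. example["synth_io_pairs"]["input"][i] == {"var": [...], "value": [...]}).
def _decode_io_pairs(example, kind="synth_io_pairs"):
    """Return a list of (inputs, outputs) dicts with JSON-decoded values."""
    pairs = example[kind]
    decoded = []
    for pair_inputs, pair_outputs in zip(pairs["input"], pairs["output"]):
        inputs = {var: json.loads(val) for var, val in zip(pair_inputs["var"], pair_inputs["value"])}
        outputs = {var: json.loads(val) for var, val in zip(pair_outputs["var"], pair_outputs["value"])}
        decoded.append((inputs, outputs))
    return decoded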