Dataset card metadata:
  Tasks: Text Generation
  Modalities: Text
  Sub-tasks: document-retrieval
  Size: 100K - 1M
  ArXiv: 2306.03091
  Tags: code
  License: Apache License 2.0
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RepoBench: Benchmarking Repository-Level Code Auto-Completion Systems""" | |
import gzip | |
import pickle | |
import textwrap | |
import datasets | |
# BibTeX entry for citing the RepoBench paper (arXiv:2306.03091).
_CITATION = """\
@misc{liu2023repobench,
    title={RepoBench: Benchmarking Repository-Level Code Auto-Completion Systems},
    author={Tianyang Liu and Canwen Xu and Julian McAuley},
    year={2023},
    eprint={2306.03091},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

# Short summary surfaced through DatasetInfo / the dataset card.
_DESCRIPTION = """\
RepoBench is a dataset that benchmarks repository-level code auto-completion systems.
RepoBench-C denotes RepoBench for code completion,
which is subtask of RepoBench for next-line code prediction given both cross-file and in-file context.
"""

_HOMEPAGE = "https://github.com/Leolty/repobench"

_LICENSE = "Apache License 2.0"

# Download URLs keyed by config name "<language>_<subtask>":
#   cff = cross_file_first, cfr = cross_file_random, if = in_file.
# Each file is a gzip-compressed pickle containing "train"/"dev"/"test" splits
# (see RepoBenchR._generate_examples for how it is read).
_URLs = {
    "python_cff": "https://raw.githubusercontent.com/Leolty/repobench/main/data/code_completion/python/cross_file_first.gz",
    "python_cfr": "https://raw.githubusercontent.com/Leolty/repobench/main/data/code_completion/python/cross_file_random.gz",
    "python_if": "https://raw.githubusercontent.com/Leolty/repobench/main/data/code_completion/python/in_file.gz",
    "java_cff": "https://raw.githubusercontent.com/Leolty/repobench/main/data/code_completion/java/cross_file_first.gz",
    "java_cfr": "https://raw.githubusercontent.com/Leolty/repobench/main/data/code_completion/java/cross_file_random.gz",
    "java_if": "https://raw.githubusercontent.com/Leolty/repobench/main/data/code_completion/java/in_file.gz",
}
def construct_prompt(data_point: dict, language: str) -> str:
    """Assemble the model prompt for one RepoBench-C example.

    The prompt is the cross-file context, a comment line naming the file
    path (using the comment syntax of *language*), the import statement,
    and the in-file code, joined by newlines.

    Args:
        data_point: Example dict with keys "context", "file_path",
            "import_statement", and "code".
        language: Either "python" or "java"; selects "#" vs "//" for the
            path comment line.

    Returns:
        The prompt string.

    Raises:
        ValueError: If *language* is not "python" or "java". (The
            original code left ``path`` unbound and crashed with a
            confusing NameError in that case.)
    """
    if language == "python":
        comment_prefix = "#"
    elif language == "java":
        comment_prefix = "//"
    else:
        raise ValueError(f"Unsupported language: {language!r}; expected 'python' or 'java'")

    path = f"{comment_prefix} Path: {data_point['file_path']}"
    return (
        f"{data_point['context']}\n"
        f"{path}\n"
        f"{data_point['import_statement']}\n"
        f"{data_point['code']}"
    )
class RepoBenchR(datasets.GeneratorBasedBuilder):
    """RepoBench-C: repository-level next-line code completion (Python / Java).

    Config names follow "<language>_<subtask>", where the subtask controls
    how the masked target line was chosen:
      - cff (cross_file_first): the line where a cross-file module is first used
      - cfr (cross_file_random): a random later use of a cross-file module
      - if  (in_file): a random line with no cross-file module
    """

    VERSION = datasets.Version("1.0.0")

    # One config per (language, subtask) pair; order and names must match
    # the keys of _URLs. ("the the" typo in the cff descriptions fixed.)
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="python_cff",
            description=textwrap.dedent(
                """
                cff: cross_file_first -> mask the line where a cross-file module is first used
                """
            ),
        ),
        datasets.BuilderConfig(
            name="python_cfr",
            description=textwrap.dedent(
                """
                cfr: cross_file_random -> mask a random line that a cross-file module is used (not the first time)
                """
            ),
        ),
        datasets.BuilderConfig(
            name="python_if",
            description=textwrap.dedent(
                """
                if: in_file -> mask a random line with no cross-file module
                """
            ),
        ),
        datasets.BuilderConfig(
            name="java_cff",
            description=textwrap.dedent(
                """
                cff: cross_file_first -> mask the line where a cross-file module is first used
                """
            ),
        ),
        datasets.BuilderConfig(
            name="java_cfr",
            description=textwrap.dedent(
                """
                cfr: cross_file_random -> mask a random line that a cross-file module is used (not the first time)
                """
            ),
        ),
        datasets.BuilderConfig(
            name="java_if",
            description=textwrap.dedent(
                """
                if: in_file -> mask a random line with no cross-file module
                """
            ),
        ),
    ]

    def _info(self):
        """Declare the feature schema and dataset-level metadata."""
        features = datasets.Features(
            {
                "repo_name": datasets.Value("string"),
                "file_path": datasets.Value("string"),
                # NOTE(review): declared as a sequence of strings here, but
                # construct_prompt interpolates it directly into an f-string —
                # confirm the pickled "context" field is really a list.
                "context": datasets.Sequence(datasets.Value("string")),
                "import_statement": datasets.Value("string"),
                "code": datasets.Value("string"),
                "prompt": datasets.Value("string"),
                "next_line": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the archive for this config and declare the three splits.

        A single gzip file holds all splits; _generate_examples selects the
        requested one by key.
        """
        archive_path = dl_manager.download(_URLs[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split(split),
                gen_kwargs={"data_dir": archive_path, "split": split},
            )
            for split in ("train", "dev", "test")
        ]

    def _generate_examples(self, data_dir, split):
        """Yield (index, example) pairs for *split* from the downloaded archive.

        Args:
            data_dir: Local path of the downloaded gzip archive.
            split: One of "train", "dev", "test" — a key into the pickled dict.
        """
        # SECURITY NOTE: pickle.load executes arbitrary code from the
        # downloaded file; this is safe only because the URL is pinned to the
        # dataset authors' repository. Do not point _URLs at untrusted hosts.
        with gzip.open(data_dir, "rb") as f:
            data = pickle.load(f)

        # Config names are "<language>_<subtask>"; the language selects the
        # comment syntax used for the prompt's path line.
        language = self.config.name.split("_")[0]
        for i, example in enumerate(data[split]):
            yield i, {
                "repo_name": example["repo_name"],
                "file_path": example["file_path"],
                "context": example["context"],
                "import_statement": example["import_statement"],
                "code": example["code"],
                "prompt": construct_prompt(example, language),
                "next_line": example["next_line"],
            }