Datasets:
Tasks:
Text Retrieval
Modalities:
Text
Sub-tasks:
document-retrieval
Languages:
code
Size:
100K - 1M
ArXiv:
2306.03091
License:
apache-2.0
| # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. | |
| # | |
| # Licensed under the Apache License, Version 2.0 (the "License"); | |
| # you may not use this file except in compliance with the License. | |
| # You may obtain a copy of the License at | |
| # | |
| # http://www.apache.org/licenses/LICENSE-2.0 | |
| # | |
| # Unless required by applicable law or agreed to in writing, software | |
| # distributed under the License is distributed on an "AS IS" BASIS, | |
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
| # See the License for the specific language governing permissions and | |
| # limitations under the License. | |
| """RepoBench: Benchmarking Repository-Level Code Auto-Completion Systems""" | |
| import pickle | |
| import gzip | |
| import textwrap | |
| import datasets | |
| _CITATION = """\ | |
| @misc{liu2023repobench, | |
| title={RepoBench: Benchmarking Repository-Level Code Auto-Completion Systems}, | |
| author={Tianyang Liu and Canwen Xu and Julian McAuley}, | |
| year={2023}, | |
| eprint={2306.03091}, | |
| archivePrefix={arXiv}, | |
| primaryClass={cs.CL} | |
| } | |
| """ | |
| _DESCRIPTION = """\ | |
| RepoBench is a dataset that benchmarks repository-level code auto-completion systems. | |
| RepoBench-R denotes RepoBench for Retrieval, which is a sub-task of RepoBench, | |
| aiming to evaluate the ability of code auto-completion systems to retrieve | |
| relevant code snippets for next-line code completion. | |
| """ | |
| _HOMEPAGE = "https://github.com/Leolty/repobench" | |
| _LICENSE = "Apache License 2.0" | |
| _URLs = { | |
| "python-cff": "https://raw.githubusercontent.com/Leolty/repobench/main/data/retrieval/python/cross_file_first.gz", | |
| "python-cfr": "https://raw.githubusercontent.com/Leolty/repobench/main/data/retrieval/python/cross_file_random.gz", | |
| "java-cff": "https://raw.githubusercontent.com/Leolty/repobench/main/data/retrieval/java/cross_file_first.gz", | |
| "java-cfr": "https://raw.githubusercontent.com/Leolty/repobench/main/data/retrieval/java/cross_file_random.gz" | |
| } | |
class RepoBench(datasets.GeneratorBasedBuilder):
    """RepoBench-R: repository-level code retrieval benchmark.

    Each configuration pairs a language (``python``/``java``) with a
    line-masking strategy (``cff``/``cfr``); every configuration exposes
    two splits, ``easy`` and ``hard``.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="python-cff",
            description=textwrap.dedent(
                """
                cff: cross_file_first -> mask the line that a cross-file module is first used
                """
            ),
        ),
        datasets.BuilderConfig(
            name="python-cfr",
            description=textwrap.dedent(
                """
                cfr: cross_file_random -> mask a random line that a cross-file module is used (not the first time)
                """
            ),
        ),
        datasets.BuilderConfig(
            name="java-cff",
            description=textwrap.dedent(
                """
                cff: cross_file_first -> mask the line that a cross-file module is first used
                """
            ),
        ),
        datasets.BuilderConfig(
            name="java-cfr",
            description=textwrap.dedent(
                """
                cfr: cross_file_random -> mask a random line that a cross-file module is used (not the first time)
                """
            ),
        ),
    ]

    def _info(self):
        """Return dataset metadata: one retrieval example per record."""
        features = datasets.Features(
            {
                "file_path": datasets.Value("string"),
                # Candidate code snippets the retriever must rank.
                "context": datasets.Sequence(datasets.Value("string")),
                "import_statement": datasets.Value("string"),
                "code": datasets.Value("string"),
                # Ground-truth next line of `code`.
                "next_line": datasets.Value("string"),
                # Index into `context` of the gold (relevant) snippet.
                "gold_snippet_index": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the single archive for this config and expose both splits."""
        config_urls = _URLs[self.config.name]
        # One gzipped pickle holds both splits; each generator selects its own.
        data_dir = dl_manager.download(config_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split("easy"),
                gen_kwargs={"data_dir": data_dir, "split": "easy"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split("hard"),
                gen_kwargs={"data_dir": data_dir, "split": "hard"},
            ),
        ]

    def _generate_examples(self, data_dir, split):
        """Yield ``(key, example)`` pairs for the requested split.

        Args:
            data_dir: Local path of the downloaded gzipped pickle archive.
            split: Either ``"easy"`` or ``"hard"`` — a top-level key of the
                unpickled payload.

        Raises:
            Exception: If the archive cannot be opened or unpickled; the
                original cause is chained for debugging.
        """
        # NOTE(security): pickle can execute arbitrary code during load. The
        # data comes from the pinned repository URL above, but a compromised
        # source would compromise the consumer — flagged, not replaced, to
        # keep the published data format working.
        try:
            with gzip.open(data_dir, "rb") as f:
                data = pickle.load(f)
        # Narrow catch (was a bare `except:`): gzip/IO errors surface as
        # OSError/EOFError; corrupt payloads as UnpicklingError.
        except (OSError, EOFError, pickle.UnpicklingError) as e:
            raise Exception(f"Cannot load data from {data_dir}!") from e

        for i, example in enumerate(data[split]):
            yield i, {
                "file_path": example["file_path"],
                "context": example["context"],
                "import_statement": example["import_statement"],
                "code": example["code"],
                "next_line": example["next_line"],
                "gold_snippet_index": example["gold_snippet_index"],
            }