|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""RepoBench: Benchmarking Repository-Level Code Auto-Completion Systems""" |
|
|
|
import gzip |
|
import pickle |
|
import textwrap |
|
import datasets |
|
|
|
_CITATION = """\ |
|
@misc{liu2023repobench, |
|
title={RepoBench: Benchmarking Repository-Level Code Auto-Completion Systems}, |
|
author={Tianyang Liu and Canwen Xu and Julian McAuley}, |
|
year={2023}, |
|
eprint={2306.03091}, |
|
archivePrefix={arXiv}, |
|
primaryClass={cs.CL} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
RepoBench is a dataset that benchmarks repository-level code auto-completion systems. |
|
|
|
RepoBench-P denotes RepoBench for pipeline, |
|
which is subtask of RepoBench including both relevant code retrieval and next-line code prediction. |
|
""" |
|
|
|
_HOMEPAGE = "https://github.com/Leolty/repobench" |
|
|
|
_LICENSE = "Apache License 2.0" |
|
|
|
_URLs = { |
|
"python": "https://raw.githubusercontent.com/Leolty/repobench/main/data/pipeline/python/", |
|
"java": "https://raw.githubusercontent.com/Leolty/repobench/main/data/pipeline/java/" |
|
} |
|
|
|
class RepoBenchP(datasets.GeneratorBasedBuilder):
    """RepoBench-P: the pipeline subtask of RepoBench, combining relevant
    code retrieval with next-line code prediction.

    One BuilderConfig per supported language (``python``, ``java``); each
    config exposes three splits keyed by retrieval setting:

    - ``cff``: cross_file_first
    - ``cfr``: cross_file_random
    - ``if``:  in_file
    """

    VERSION = datasets.Version("1.0.0")

    # One config per language. textwrap.dedent strips the common leading
    # whitespace, so the resulting description strings are independent of
    # the source indentation here.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            description=textwrap.dedent(
                f"""
                This part of RepoBench-P is for {lang} language.
                """
            ),
        )
        for lang in ("python", "java")
    ]

    def _info(self):
        """Return dataset metadata: feature schema, homepage, license, citation."""
        features = datasets.Features(
            {
                "repo_name": datasets.Value("string"),
                "file_path": datasets.Value("string"),
                # Candidate code snippets retrieved from elsewhere in the repo.
                "context": datasets.Sequence(
                    feature={
                        "path": datasets.Value("string"),
                        "identifier": datasets.Value("string"),
                        "snippet": datasets.Value("string"),
                    }
                ),
                "import_statement": datasets.Value("string"),
                # In-file code preceding the line to be predicted.
                "code": datasets.Value("string"),
                "next_line": datasets.Value("string"),
                # Index into "context" of the snippet relevant to next_line.
                "gold_snippet_index": datasets.Value("int32"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three gzipped pickle files and declare one split each."""
        base_url = _URLs[self.config.name]
        # Maps split key -> local path of the downloaded (still gzipped) file.
        data_dir = dl_manager.download(
            {
                "cff": base_url + "cross_file_first.gz",
                "cfr": base_url + "cross_file_random.gz",
                "if": base_url + "in_file.gz",
            }
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split(split),
                gen_kwargs={"data_dir": data_dir, "split": split},
            )
            for split in ("cff", "cfr", "if")
        ]

    def _generate_examples(self, data_dir, split):
        """Yield (index, example) pairs for the requested split.

        SECURITY NOTE: pickle.load executes arbitrary code embedded in the
        payload. This is only acceptable because the files come from the
        dataset authors' own repository; do not point _URLs at untrusted
        sources.
        """
        with gzip.open(data_dir[split], "rb") as f:
            data = pickle.load(f)

        for idx, example in enumerate(data):
            yield idx, example
|
|
|
|