Datasets:

Modalities:
Text
Languages:
code
ArXiv:
Tags:
License:
tianyang committed on
Commit
f2f8fe4
1 Parent(s): 6c9c30d

upload repobench

Browse files
Files changed (1) hide show
  1. repobench.py +147 -0
repobench.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """RepoBench: Benchmarking Repository-Level Code Auto-Completion Systems"""
16
+
17
+ import pickle
18
+ import gzip
19
+ import textwrap
20
+ import datasets
21
+
22
+ _CITATION = """\
23
+ @misc{liu2023repobench,
24
+ title={RepoBench: Benchmarking Repository-Level Code Auto-Completion Systems},
25
+ author={Tianyang Liu and Canwen Xu and Julian McAuley},
26
+ year={2023},
27
+ eprint={2306.03091},
28
+ archivePrefix={arXiv},
29
+ primaryClass={cs.CL}
30
+ }
31
+ """
32
+
33
+ _DESCRIPTION = """\
34
+ RepoBench is a dataset that benchmarks repository-level code auto-completion systems.
35
+
36
+ RepoBench-R denotes RepoBench for Retrieval, which is a sub-task of RepoBench,
37
+ aiming to evaluate the ability of code auto-completion systems to retrieve
38
+ relevant code snippets for next-line code completion.
39
+ """
40
+
41
+ _HOMEPAGE = "https://github.com/Leolty/repobench"
42
+
43
+ _LICENSE = "Apache License 2.0"
44
+
45
+ _URLs = {
46
+ "python-cff": "https://raw.githubusercontent.com/Leolty/repobench/main/data/retrieval/python/cross_file_first.gz",
47
+ "python-cfr": "https://raw.githubusercontent.com/Leolty/repobench/main/data/retrieval/python/cross_file_random.gz",
48
+ "java-cff": "https://raw.githubusercontent.com/Leolty/repobench/main/data/retrieval/java/cross_file_first.gz",
49
+ "java-cfr": "https://raw.githubusercontent.com/Leolty/repobench/main/data/retrieval/java/cross_file_random.gz"
50
+ }
51
+
52
+
53
+
54
class RepoBench(datasets.GeneratorBasedBuilder):
    """RepoBench-R: retrieve the gold cross-file snippet for next-line completion.

    Config names are "<language>-<task>" with language in {python, java} and
    task in {cff (cross_file_first), cfr (cross_file_random)}.  Each config
    exposes an "easy" and a "hard" split.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="python-cff",
            description=textwrap.dedent(
                """
                cff: cross_file_first -> mask the line that a cross-file module is first used
                """
            ),
        ),
        datasets.BuilderConfig(
            name="python-cfr",
            description=textwrap.dedent(
                """
                cfr: cross_file_random -> mask a random line that a cross-file module is used (not the first time)
                """
            ),
        ),
        datasets.BuilderConfig(
            name="java-cff",
            description=textwrap.dedent(
                """
                cff: cross_file_first -> mask the line that a cross-file module is first used
                """
            ),
        ),
        datasets.BuilderConfig(
            name="java-cfr",
            description=textwrap.dedent(
                """
                cfr: cross_file_random -> mask a random line that a cross-file module is used (not the first time)
                """
            ),
        ),
    ]

    def _info(self):
        """Return the dataset metadata, including the feature schema."""
        features = datasets.Features(
            {
                "file_path": datasets.Value("string"),
                # FIX: datasets.Value("list", items_type=...) is invalid —
                # Value accepts only a primitive dtype string and has no
                # items_type parameter; a list of strings is declared with
                # datasets.Sequence.
                "context": datasets.Sequence(datasets.Value("string")),
                "import_statement": datasets.Value("string"),
                "code": datasets.Value("string"),
                "next_line": datasets.Value("string"),
                "gold_snippet_index": datasets.Value("int32"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download this config's payload and declare the "easy"/"hard" splits.

        Both splits share the same downloaded file; the split name is forwarded
        to _generate_examples, which selects the matching entry of the pickle.
        """
        config_urls = _URLs[self.config.name]
        data_dir = dl_manager.download_and_extract(config_urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split("easy"),
                gen_kwargs={"data_dir": data_dir, "split": "easy"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split("hard"),
                gen_kwargs={"data_dir": data_dir, "split": "hard"},
            ),
        ]

    def _generate_examples(self, data_dir, split):
        """Yield (index, example) pairs for `split` ("easy" or "hard").

        The payload is a pickled dict mapping split name -> list of example
        dicts.  SECURITY NOTE: pickle.load can execute arbitrary code and the
        payload is fetched from a remote GitHub repository — load only trusted
        revisions of that repo.
        """
        # download_and_extract may already have decompressed the .gz (gzip is
        # an extractable format), so fall back to reading a plain pickle when
        # the gzip magic number is absent.
        try:
            with gzip.open(data_dir, "rb") as f:
                data = pickle.load(f)
        except gzip.BadGzipFile:
            with open(data_dir, "rb") as f:
                data = pickle.load(f)

        for i, example in enumerate(data[split]):
            yield i, {
                "file_path": example["file_path"],
                "context": example["context"],
                "import_statement": example["import_statement"],
                "code": example["code"],
                "next_line": example["next_line"],
                "gold_snippet_index": example["gold_snippet_index"],
            }
144
+
145
+
146
+
147
+