Datasets:
Tasks:
Text Retrieval
Modalities:
Text
Sub-tasks:
document-retrieval
Languages:
code
Size:
100K - 1M
ArXiv:
License:
change back
Browse files — repobench-r.py (+35, −12)
repobench-r.py
CHANGED
@@ -57,12 +57,18 @@ _LICENSE = "Apache License 2.0"
|
|
57 |
# }
|
58 |
|
59 |
_URLs = {
|
60 |
-
"java-cff": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/java_cff.gz",
|
61 |
-
"java-cfr": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/java_cfr.gz",
|
62 |
-
"python-cff": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/python_cff.gz",
|
63 |
-
"python-cfr": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/python_cfr.gz"
|
64 |
}
|
65 |
|
|
|
|
|
|
|
|
|
|
|
|
|
66 |
|
67 |
|
68 |
|
@@ -111,8 +117,6 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
|
|
111 |
def _info(self):
|
112 |
features = datasets.Features(
|
113 |
{
|
114 |
-
"level": datasets.Value("string"),
|
115 |
-
"repo_name": datasets.Value("string"),
|
116 |
"file_path": datasets.Value("string"),
|
117 |
"context": datasets.Sequence(datasets.Value("string")),
|
118 |
"import_statement": datasets.Value("string"),
|
@@ -137,12 +141,20 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
|
|
137 |
|
138 |
return [
|
139 |
datasets.SplitGenerator(
|
140 |
-
name=datasets.Split("train"),
|
141 |
-
gen_kwargs={"data_dir": data_dir, "split": "train"},
|
|
|
|
|
|
|
|
|
142 |
),
|
143 |
datasets.SplitGenerator(
|
144 |
-
name=datasets.Split("test"),
|
145 |
-
gen_kwargs={"data_dir": data_dir, "split": "test"},
|
|
|
|
|
|
|
|
|
146 |
)
|
147 |
]
|
148 |
|
@@ -150,6 +162,17 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
|
|
150 |
""" Yields examples. """
|
151 |
with gzip.open(data_dir, "rb") as f:
|
152 |
data = pickle.load(f)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
153 |
|
154 |
-
for i, example in enumerate(data[split]):
|
155 |
-
yield i, example
|
|
|
57 |
# }
|
58 |
|
59 |
_URLs = {
|
60 |
+
"java-cff": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/java_cff.gz",
|
61 |
+
"java-cfr": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/java_cfr.gz",
|
62 |
+
"python-cff": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/python_cff.gz",
|
63 |
+
"python-cfr": "https://huggingface.co/datasets/tianyang/repobench-r/resolve/main/data/python_cfr.gz"
|
64 |
}
|
65 |
|
66 |
+
# _URLs = {
|
67 |
+
# "java-cff": "https://huggingface.co/datasets/tianyang/repobench-r/blob/main/json_data/java_cff.json.gz",
|
68 |
+
# "java-cfr": "https://huggingface.co/datasets/tianyang/repobench-r/blob/main/json_data/java_cfr.json.gz",
|
69 |
+
# "python-cff": "https://huggingface.co/datasets/tianyang/repobench-r/blob/main/json_data/python_cff.json.gz",
|
70 |
+
# "python-cfr": "https://huggingface.co/datasets/tianyang/repobench-r/blob/main/json_data/python_cfr.json.gz"
|
71 |
+
# }
|
72 |
|
73 |
|
74 |
|
|
|
117 |
def _info(self):
|
118 |
features = datasets.Features(
|
119 |
{
|
|
|
|
|
120 |
"file_path": datasets.Value("string"),
|
121 |
"context": datasets.Sequence(datasets.Value("string")),
|
122 |
"import_statement": datasets.Value("string"),
|
|
|
141 |
|
142 |
return [
|
143 |
datasets.SplitGenerator(
|
144 |
+
name=datasets.Split("train_easy"),
|
145 |
+
gen_kwargs={"data_dir": data_dir, "split": "train_easy"},
|
146 |
+
),
|
147 |
+
datasets.SplitGenerator(
|
148 |
+
name=datasets.Split("train_hard"),
|
149 |
+
gen_kwargs={"data_dir": data_dir, "split": "train_hard"},
|
150 |
),
|
151 |
datasets.SplitGenerator(
|
152 |
+
name=datasets.Split("test_easy"),
|
153 |
+
gen_kwargs={"data_dir": data_dir, "split": "test_easy"},
|
154 |
+
),
|
155 |
+
datasets.SplitGenerator(
|
156 |
+
name=datasets.Split("test_hard"),
|
157 |
+
gen_kwargs={"data_dir": data_dir, "split": "test_hard"},
|
158 |
)
|
159 |
]
|
160 |
|
|
|
162 |
""" Yields examples. """
|
163 |
with gzip.open(data_dir, "rb") as f:
|
164 |
data = pickle.load(f)
|
165 |
+
|
166 |
+
subset, level = split.split("_")
|
167 |
+
|
168 |
+
for i, example in enumerate(data[subset][level]):
|
169 |
+
yield i, {
|
170 |
+
"repo_name": example["repo_name"],
|
171 |
+
"file_path": example["file_path"],
|
172 |
+
"context": example["context"],
|
173 |
+
"import_statement": example["import_statement"],
|
174 |
+
"code": example["code"],
|
175 |
+
"next_line": example["next_line"],
|
176 |
+
"gold_snippet_index": example["golden_snippet_index"]
|
177 |
+
}
|
178 |
|
|
|
|