Commit 0260143 by tianyang (1 parent: b59fcbe)

update repobench-r

Files changed (2):
  1. .gitignore     +1 -0
  2. repobench-r.py +19 -9
.gitignore ADDED
@@ -0,0 +1 @@
+/retrieval
repobench-r.py CHANGED
@@ -43,10 +43,10 @@ _HOMEPAGE = "https://github.com/Leolty/repobench"
 _LICENSE = "Apache License 2.0"
 
 _URLs = {
-    "python-cff": "https://raw.githubusercontent.com/Leolty/repobench/main/data/retrieval/python/cross_file_first.gz",
-    "python-cfr": "https://raw.githubusercontent.com/Leolty/repobench/main/data/retrieval/python/cross_file_random.gz",
-    "java-cff": "https://raw.githubusercontent.com/Leolty/repobench/main/data/retrieval/java/cross_file_first.gz",
-    "java-cfr": "https://raw.githubusercontent.com/Leolty/repobench/main/data/retrieval/java/cross_file_random.gz"
+    "java-cff": "https://drive.google.com/uc?export=download&id=1IJMQubP-74foQfF-hviFwfkvBf4rzRrN",
+    "java-cfr": "https://drive.google.com/uc?export=download&id=1zJGLhzA4am1aXErp4KDrL5m2nqwxZhGp",
+    "python-cff": "https://drive.google.com/uc?export=download&id=1RxF0BfmfdkQ5gTOVaCKzbmwjVRdZYUw4",
+    "python-cfr": "https://drive.google.com/uc?export=download&id=1Bg_NQ00m0KCZ6KAtJ3v0cpzsWLj0HwKN"
 }
 
 
@@ -118,12 +118,20 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
 
         return [
            datasets.SplitGenerator(
-                name=datasets.Split("easy"),
-                gen_kwargs={"data_dir": data_dir, "split": "easy"},
+                name=datasets.Split("train_easy"),
+                gen_kwargs={"data_dir": data_dir, "split": "train_easy"},
            ),
            datasets.SplitGenerator(
-                name=datasets.Split("hard"),
-                gen_kwargs={"data_dir": data_dir, "split": "hard"},
+                name=datasets.Split("train_hard"),
+                gen_kwargs={"data_dir": data_dir, "split": "train_hard"},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split("test_easy"),
+                gen_kwargs={"data_dir": data_dir, "split": "test_easy"},
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split("test_hard"),
+                gen_kwargs={"data_dir": data_dir, "split": "test_hard"},
            )
         ]
 
@@ -132,8 +140,10 @@ class RepoBenchR(datasets.GeneratorBasedBuilder):
         with gzip.open(data_dir, "rb") as f:
             data = pickle.load(f)
 
-        for i, example in enumerate(data[split]):
+        split, level = split.split("_")
+        for i, example in enumerate(data[split][level]):
             yield i, {
+                "repo_name": example["repo_name"],
                 "file_path": example["file_path"],
                 "context": example["context"],
                 "import_statement": example["import_statement"],