Sub-tasks: extractive-qa
Languages: English
Multilinguality: monolingual
Size Categories: 1M<n<10M, n<1K
Language Creators: crowdsourced
Annotations Creators: expert-generated
Source Datasets: original
Commit 5332c46 (1 parent: 6eea7bf)
Committed by system (HF staff)

Update files from the datasets library (from 1.16.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
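
The 1.16.0 release is the library-wide pass that rewrote Hub dataset scripts to support streaming, which is exactly what the neural_code_search.py change below does. A minimal usage sketch; the `search_corpus` config name is an assumption inferred from the script's `_URLs` handling (only `evaluation_dataset` is named explicitly in the diff), and streaming requires datasets >= 1.16.0:

```python
from datasets import load_dataset

# Stream the corpus without extracting the archives locally.
# Config name "search_corpus" is assumed, not confirmed by this diff.
corpus = load_dataset("neural_code_search", "search_corpus", streaming=True)

# Each example carries the fields yielded by _generate_examples below.
first = next(iter(corpus["train"]))
print(first["method_name"], first["url"])
```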

Files changed (2):
  1. README.md (+1, -0)
  2. neural_code_search.py (+53, -49)
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: Neural Code Search
 annotations_creators:
 - expert-generated
 language_creators:
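
The README change itself is a single metadata line: a human-readable `pretty_name` in the card's YAML front matter. A quick way to confirm the field after the update, as a sketch (assumes PyYAML and a local copy of the updated README.md):

```python
import yaml

with open("README.md", encoding="utf-8") as f:
    text = f.read()

# The card metadata sits between the first two "---" fence lines.
front_matter = text.split("---")[1]
meta = yaml.safe_load(front_matter)
assert meta["pretty_name"] == "Neural Code Search"
```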
neural_code_search.py CHANGED
@@ -16,7 +16,7 @@
 
 
 import json
-import os
+from itertools import chain
 
 import datasets
 
@@ -118,53 +118,57 @@ class NeuralCodeSearch(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
-        my_urls = [url for config, url in _URLs.items() if config.startswith(self.config.name)]
-        data_dir = dl_manager.download_and_extract(my_urls)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "datapath": data_dir,
-                    "split": "train",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, datapath, split):
+        if self.config.name == "evaluation_dataset":
+            filepath = dl_manager.download_and_extract(_URLs[self.config.name])
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={"filepath": filepath},
+                ),
+            ]
+        else:
+            my_urls = [url for config, url in _URLs.items() if config.startswith(self.config.name)]
+            archives = dl_manager.download(my_urls)
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "files": chain(*(dl_manager.iter_archive(archive) for archive in archives)),
+                    },
+                ),
+            ]
+
+    def _generate_examples(self, filepath=None, files=None):
         """Yields examples."""
         id_ = 0
-        for dp in datapath:
-            if self.config.name == "evaluation_dataset":
-                with open(dp, encoding="utf-8") as f:
-                    data = json.load(f)
-                    for row in data:
-                        yield id_, {
-                            "stackoverflow_id": row["stackoverflow_id"],
-                            "question": row["question"],
-                            "question_url": row["question_url"],
-                            "question_author": row["question_author"],
-                            "question_author_url": row["question_author_url"],
-                            "answer": row["answer"],
-                            "answer_url": row["answer_url"],
-                            "answer_author": row["answer_author"],
-                            "answer_author_url": row["answer_author_url"],
-                            "examples": row["examples"],
-                            "examples_url": row["examples_url"],
-                        }
-                        id_ += 1
-            else:
-                for dirpath, _, fnames in sorted(os.walk(dp)):
-                    for fname in sorted(fnames):
-                        with open(os.path.join(dirpath, fname), encoding="utf-8") as f:
-                            for row in f:
-                                data_dict = json.loads(row)
-                                yield id_, {
-                                    "id": data_dict["id"],
-                                    "filepath": data_dict["filepath"],
-                                    "method_name": data_dict["method_name"],
-                                    "start_line": data_dict["start_line"],
-                                    "end_line": data_dict["end_line"],
-                                    "url": data_dict["url"],
-                                }
-                                id_ += 1
+        if self.config.name == "evaluation_dataset":
+            with open(filepath, encoding="utf-8") as f:
+                data = json.load(f)
+                for row in data:
+                    yield id_, {
+                        "stackoverflow_id": row["stackoverflow_id"],
+                        "question": row["question"],
+                        "question_url": row["question_url"],
+                        "question_author": row["question_author"],
+                        "question_author_url": row["question_author_url"],
+                        "answer": row["answer"],
+                        "answer_url": row["answer_url"],
+                        "answer_author": row["answer_author"],
+                        "answer_author_url": row["answer_author_url"],
+                        "examples": row["examples"],
+                        "examples_url": row["examples_url"],
+                    }
+                    id_ += 1
+        else:
+            for _, f in files:
+                for row in f:
+                    data_dict = json.loads(row.decode("utf-8"))
+                    yield id_, {
+                        "id": data_dict["id"],
+                        "filepath": data_dict["filepath"],
+                        "method_name": data_dict["method_name"],
+                        "start_line": data_dict["start_line"],
+                        "end_line": data_dict["end_line"],
+                        "url": data_dict["url"],
+                    }
+                    id_ += 1
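
The substance of the Python change is the switch to streaming-friendly I/O: the corpus archives are now only downloaded (`dl_manager.download`) and read in order with `dl_manager.iter_archive`, which yields `(path, file_object)` pairs of raw bytes, instead of being extracted to disk and walked with `os.walk`. That is also why `_generate_examples` now decodes each line before `json.loads`. A standalone sketch of the same pattern over local tarballs; the helper and the archive names are hypothetical stand-ins, not the datasets API:

```python
import json
import tarfile
from itertools import chain

def iter_archive(path):
    # Stand-in for dl_manager.iter_archive: yield (member_path, file_obj)
    # for each regular file in a tar archive, without extracting to disk.
    with tarfile.open(path) as tar:
        for member in tar:
            if member.isfile():
                yield member.name, tar.extractfile(member)

# Mirror the gen_kwargs in _split_generators: one chained stream over all
# archives; the archive names here are hypothetical local files.
files = chain(*(iter_archive(p) for p in ["corpus_part1.tar.gz", "corpus_part2.tar.gz"]))

for _, f in files:
    for row in f:
        # Archive members are binary streams, hence the decode before
        # json.loads, exactly as in the updated _generate_examples.
        data_dict = json.loads(row.decode("utf-8"))
        print(data_dict["method_name"])
```

Chaining the per-archive iterators into a single `files` stream is what lets `_generate_examples` keep one flat loop regardless of how many archives a config ships.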