system (HF staff) committed on
Commit a0a167f
1 Parent(s): 7a13110

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed (2):
  1. README.md +1 -0
  2. py_ast.py +21 -18
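
In short, the py_ast.py change swaps dl_manager.download_and_extract for dl_manager.download plus dl_manager.iter_archive, so examples are read directly out of the downloaded archive and the dataset can be loaded in streaming mode. A minimal usage sketch (assumption: the dataset's Hub id is py_ast; only the "ast" feature name is taken from the script below):

# Sketch: streaming the dataset after this change (dataset id assumed).
from datasets import load_dataset

ds = load_dataset("py_ast", split="train", streaming=True)
example = next(iter(ds))
# example["ast"] is a list of node dicts; the script guarantees the
# "value" and "children" keys, filling in "N/A" / [] when absent.
print(example["ast"][:2])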
README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: PyAst
 annotations_creators:
 - machine-generated
 language_creators:
py_ast.py CHANGED
@@ -16,7 +16,6 @@
 
 
 import json
-import os
 
 import datasets
 
@@ -118,37 +117,41 @@ class PyAst(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         my_urls = _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(my_urls)
+        archive = dl_manager.download(my_urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(data_dir, "python100k_train.json"),
-                    "split": "train",
+                    "filepath": "python100k_train.json",
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, "python50k_eval.json"), "split": "test"},
+                gen_kwargs={
+                    "filepath": "python50k_eval.json",
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
         ]
 
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, filepath, files):
         """Yields examples."""
         # TODO: This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
         # It is in charge of opening the given file and yielding (key, example) tuples from the dataset
         # The key is not important, it's more here for legacy reason (legacy from tfds)
-
-        with open(filepath, encoding="utf-8") as f:
-            for id_, row in enumerate(f):
-                row_data = json.loads(row)
-                for node in row_data:
-                    if "value" not in node:
-                        node["value"] = "N/A"
-                    if "children" not in node:
-                        node["children"] = []
-                yield id_, {
-                    "ast": row_data,
-                }
+        for path, f in files:
+            if path == filepath:
+                for id_, row in enumerate(f):
+                    row_data = json.loads(row.decode("utf-8"))
+                    for node in row_data:
+                        if "value" not in node:
+                            node["value"] = "N/A"
+                        if "children" not in node:
+                            node["children"] = []
+                    yield id_, {
+                        "ast": row_data,
+                    }
+                break
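
For context on the new pattern: dl_manager.iter_archive(archive) walks the downloaded archive without extracting it to disk, yielding (path, file object) pairs in archive order. The file objects produce raw bytes, which is why the rewritten generator decodes each row before json.loads, and the break ends the scan once the split's file has been consumed. A rough stand-in for that iteration contract, approximated here with tarfile (assumptions: the archive is a tarball at a local path; the real helper also handles remote, streamed archives):

# Approximate stand-in for dl_manager.iter_archive, for intuition only.
import tarfile

def iter_archive_like(archive_path):
    # Yield (member_path, file_object) pairs in archive order,
    # mirroring what _generate_examples consumes above.
    with tarfile.open(archive_path) as tar:
        for member in tar:
            if member.isfile():
                f = tar.extractfile(member)
                if f is not None:
                    yield member.name, f

# Hypothetical local archive name; member names match the gen_kwargs above.
for path, f in iter_archive_like("py_ast_data.tar.gz"):
    if path == "python100k_train.json":
        first_row = f.readline().decode("utf-8")  # rows are bytes, hence decode
        break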