Datasets:
Update files from the datasets library (from 1.16.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Changed file: wi_locness.py (+21 -10)
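This commit reworks the wi_locness loading script so that the downloaded archive is no longer extracted to disk: _split_generators now fetches the raw archive with dl_manager.download(_URL) and hands each split an iterator over the archive's members via dl_manager.iter_archive, and _generate_examples reads the JSON files straight out of the archive. The pathlib import goes away together with the extracted-folder logic: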
@@ -26,7 +26,6 @@ of their systems on the full range of English levels and abilities."""
 
 
 import json
-from pathlib import Path
 
 import datasets
 
@@ -160,19 +159,24 @@ submissions and assigned them a CEFR level."""
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        data_dir = dl_manager.download_and_extract(_URL)
+        archive = dl_manager.download(_URL)
+        data_dir = "wi+locness/json"
 
         if self.config.name == "wi":
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
                     # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"data_dir": data_dir, "split": "train"},
+                    gen_kwargs={"data_dir": data_dir, "split": "train", "files": dl_manager.iter_archive(archive)},
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"data_dir": data_dir, "split": "validation"},
+                    gen_kwargs={
+                        "data_dir": data_dir,
+                        "split": "validation",
+                        "files": dl_manager.iter_archive(archive),
+                    },
                 ),
             ]
         elif self.config.name == "locness":
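The gen_kwargs contract changes accordingly: each split still receives data_dir and split, but data_dir is now a path prefix inside the archive, and a new "files" entry carries the member iterator. dl_manager.iter_archive lazily yields (path, file object) pairs for the archive's members, with paths relative to the archive root. A rough standard-library stand-in (the helper name is hypothetical, and it assumes a tar archive) behaves like this:

import tarfile

def iter_archive_sketch(archive_path):
    """Yield (member_path, binary file object) pairs, mimicking dl_manager.iter_archive."""
    with tarfile.open(archive_path) as tar:
        for member in tar:
            if member.isfile():
                # paths are archive-relative, matching the f"{data_dir}/{level}.{split}.json" pattern used below
                yield member.name, tar.extractfile(member)

The locness configuration and the _generate_examples signature get the same treatment: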
@@ -180,13 +184,17 @@ submissions and assigned them a CEFR level."""
                 datasets.SplitGenerator(
                     name=datasets.Split.VALIDATION,
                     # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"data_dir": data_dir, "split": "validation"},
+                    gen_kwargs={
+                        "data_dir": data_dir,
+                        "split": "validation",
+                        "files": dl_manager.iter_archive(archive),
+                    },
                 ),
             ]
         else:
             assert False
 
-    def _generate_examples(self, data_dir, split):
+    def _generate_examples(self, data_dir, split, files):
         """Yields examples."""
 
         if split == "validation":
@@ -198,12 +206,15 @@ submissions and assigned them a CEFR level."""
             levels = ["N"]
         else:
             assert False
-        data_dir = Path(data_dir) / "wi+locness" / "json"
+        filepaths = [f"{data_dir}/{level}.{split}.json" for level in levels]
         id_ = 0
-        for level in levels:
-            with open(data_dir / f"{level}.{split}.json", encoding="utf-8") as fp:
+        for path, fp in files:
+            if not filepaths:
+                break
+            if path in filepaths:
+                filepaths.remove(path)
                 for line in fp:
-                    o = json.loads(line)
+                    o = json.loads(line.decode("utf-8"))
 
                     edits = []
                     for (start, end, text) in o["edits"][0][1:][0]:
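Because the loader no longer needs an extracted copy of the data, it also works in streaming mode. A typical call might look like this (the wi and locness config names come from this script; exact behavior depends on the installed datasets version):

from datasets import load_dataset

# stream examples straight from the downloaded archive, with no extraction step
ds = load_dataset("wi_locness", "wi", split="train", streaming=True)
print(next(iter(ds)))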