system (HF staff) committed
Commit: 3bd79fe
Parent(s): 1d6aeb3

Update files from the datasets library (from 1.16.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
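The substantive change in this update is that `_split_generators` no longer extracts the per-language tarball to disk: it builds the archive path and passes `dl_manager.iter_archive(lang_archive)` into `_generate_examples`, which reads the archive members directly. As a rough mental model only (an illustrative sketch, not the actual `datasets` implementation), `iter_archive` can be thought of as a generator of `(member_path, file_object)` pairs over a tar archive:

```python
import tarfile

# Illustrative stand-in for DownloadManager.iter_archive (assumption:
# the real implementation differs, but the yielded shape is the same):
# walk a tar archive in place and yield (member_path, file_object)
# pairs without extracting anything to disk.
def iter_tar_archive(archive_path):
    with tarfile.open(archive_path) as tar:
        for member in tar:
            if member.isfile():
                # extractfile returns a binary file object, which is why
                # the new _generate_examples decodes each line from UTF-8.
                yield member.name, tar.extractfile(member)
```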

Files changed (1):
  1. wikiann.py (+31, -28)
wikiann.py CHANGED
```diff
@@ -326,24 +326,24 @@ class Wikiann(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         wikiann_dl_dir = dl_manager.download_and_extract(_DATA_URL)
         lang = self.config.name
-        lang_folder = dl_manager.extract(os.path.join(wikiann_dl_dir, lang + ".tar.gz"))
+        lang_archive = os.path.join(wikiann_dl_dir, lang + ".tar.gz")
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
-                gen_kwargs={"filepath": os.path.join(lang_folder, "dev")},
+                gen_kwargs={"filepath": "dev", "files": dl_manager.iter_archive(lang_archive)},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={"filepath": os.path.join(lang_folder, "test")},
+                gen_kwargs={"filepath": "test", "files": dl_manager.iter_archive(lang_archive)},
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                gen_kwargs={"filepath": os.path.join(lang_folder, "train")},
+                gen_kwargs={"filepath": "train", "files": dl_manager.iter_archive(lang_archive)},
             ),
         ]
 
-    def _generate_examples(self, filepath):
+    def _generate_examples(self, filepath, files):
         """Reads line by line format of the NER dataset and generates examples.
         Input Format:
         en:rick B-PER
@@ -365,27 +365,30 @@ class Wikiann(datasets.GeneratorBasedBuilder):
         Examples with the format listed above.
         """
         guid_index = 1
-        with open(filepath, encoding="utf-8") as f:
-            tokens = []
-            ner_tags = []
-            langs = []
-            for line in f:
-                if line == "" or line == "\n":
-                    if tokens:
-                        spans = self._get_spans(tokens, ner_tags)
-                        yield guid_index, {"tokens": tokens, "ner_tags": ner_tags, "langs": langs, "spans": spans}
-                        guid_index += 1
-                        tokens = []
-                        ner_tags = []
-                        langs = []
-                else:
-                    # wikiann data is tab separated
-                    splits = line.split("\t")
-                    # strip out en: prefix
-                    langs.append(splits[0].split(":")[0])
-                    tokens.append(":".join(splits[0].split(":")[1:]))
-                    if len(splits) > 1:
-                        ner_tags.append(splits[-1].replace("\n", ""))
+        for path, f in files:
+            if path == filepath:
+                tokens = []
+                ner_tags = []
+                langs = []
+                for line in f:
+                    line = line.decode("utf-8")
+                    if line == "" or line == "\n":
+                        if tokens:
+                            spans = self._get_spans(tokens, ner_tags)
+                            yield guid_index, {"tokens": tokens, "ner_tags": ner_tags, "langs": langs, "spans": spans}
+                            guid_index += 1
+                            tokens = []
+                            ner_tags = []
+                            langs = []
                     else:
-                        # examples have no label in test set
-                        ner_tags.append("O")
+                        # wikiann data is tab separated
+                        splits = line.split("\t")
+                        # strip out en: prefix
+                        langs.append(splits[0].split(":")[0])
+                        tokens.append(":".join(splits[0].split(":")[1:]))
+                        if len(splits) > 1:
+                            ner_tags.append(splits[-1].replace("\n", ""))
+                        else:
+                            # examples have no label in test set
+                            ner_tags.append("O")
+                break
```
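Because each split's generator now scans the archive iterator and stops (`break`) once it has consumed its target member ("dev", "test", or "train"), the loader never needs an extracted folder on disk, which is what makes it usable in streaming mode. A minimal usage sketch, assuming the standard `datasets.load_dataset` entry point (`"en"` is one of the existing WikiANN language configs):

```python
from datasets import load_dataset

# Stream the English validation split; with iter_archive in place the
# tarball is read on the fly rather than extracted up front.
ds = load_dataset("wikiann", "en", split="validation", streaming=True)

# Each example carries the keys yielded by _generate_examples.
example = next(iter(ds))
print(example["tokens"], example["ner_tags"], example["langs"])
```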