system HF staff committed on
Commit
b5ea44f
1 Parent(s): 954af59

Update files from the datasets library (from 1.6.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.6.0

Files changed (3)
  1. README.md +5 -2
  2. dataset_infos.json +0 -0
  3. wikiann.py +78 -2
README.md CHANGED
@@ -429,7 +429,10 @@ WikiANN (sometimes called PAN-X) is a multilingual named entity recognition data
 
 ### Data Fields
 
-[More Information Needed]
+- `tokens`: a `list` of `string` features.
+- `langs`: a `list` of `string` features that correspond to the language of each token.
+- `ner_tags`: a `list` of classification labels, with possible values including `O` (0), `B-PER` (1), `I-PER` (2), `B-ORG` (3), `I-ORG` (4), `B-LOC` (5), `I-LOC` (6).
+- `spans`: a `list` of `string` features: the named entities in the input text, formatted as `<TAG>: <mention>`.
 
 ### Data Splits
 
@@ -535,4 +538,4 @@ while the 176 languages supported in this version are associated with the follow
 
 ### Contributions
 
-Thanks to [@lewtun](https://github.com/lewtun) for adding this dataset.
+Thanks to [@lewtun](https://github.com/lewtun) and [@rabeehk](https://github.com/rabeehk) for adding this dataset.
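
As a quick check of the new fields documented above, one can load a single WikiANN configuration with the `datasets` library and inspect an example. This is a minimal sketch, assuming `datasets` >= 1.6.0 and access to the Hub; the printed values are illustrative, not actual dataset contents:

```python
from datasets import load_dataset

# Load the English configuration of WikiANN (also known as PAN-X).
wikiann = load_dataset("wikiann", "en", split="validation")

example = wikiann[0]
print(example["tokens"])  # list of token strings
print(example["langs"])   # one language code per token, e.g. ["en", "en", ...]
print(example["spans"])   # e.g. ["PER: rick", "PER: morty"]

# `ner_tags` are stored as integer class ids; the ClassLabel feature maps
# them back to tag names such as "B-PER".
labels = wikiann.features["ner_tags"].feature
print([labels.int2str(tag) for tag in example["ner_tags"]])
```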
dataset_infos.json CHANGED
The diff for this file is too large to render. See raw diff.
 
wikiann.py CHANGED
@@ -15,7 +15,6 @@
 
 """The WikiANN dataset for multilingual named entity recognition"""
 
-from __future__ import absolute_import, division, print_function
 
 import os
 
@@ -241,6 +240,61 @@ class Wikiann(datasets.GeneratorBasedBuilder):
         WikiannConfig(name=lang, description=f"WikiANN NER examples in language {lang}") for lang in _LANGS
     ]
 
+    def _tags_to_spans(self, tags):
+        """Convert tags to spans."""
+        spans = set()
+        span_start = 0
+        span_end = 0
+        active_conll_tag = None
+        for index, string_tag in enumerate(tags):
+            # Actual BIO tag.
+            bio_tag = string_tag[0]
+            assert bio_tag in ["B", "I", "O"], "Invalid Tag"
+            conll_tag = string_tag[2:]
+            if bio_tag == "O":
+                # The span has ended.
+                if active_conll_tag:
+                    spans.add((active_conll_tag, (span_start, span_end)))
+                active_conll_tag = None
+                # We don't care about tags we are
+                # told to ignore, so we do nothing.
+                continue
+            elif bio_tag == "B":
+                # We are entering a new span; reset indices and active tag to new span.
+                if active_conll_tag:
+                    spans.add((active_conll_tag, (span_start, span_end)))
+                active_conll_tag = conll_tag
+                span_start = index
+                span_end = index
+            elif bio_tag == "I" and conll_tag == active_conll_tag:
+                # We're inside a span.
+                span_end += 1
+            else:
+                # This is the case the bio label is an "I", but either:
+                # 1) the span hasn't started - i.e. an ill formed span.
+                # 2) We have IOB1 tagging scheme.
+                # We'll process the previous span if it exists, but also include this
+                # span. This is important, because otherwise, a model may get a perfect
+                # F1 score whilst still including false positive ill-formed spans.
+                if active_conll_tag:
+                    spans.add((active_conll_tag, (span_start, span_end)))
+                active_conll_tag = conll_tag
+                span_start = index
+                span_end = index
+        # Last token might have been a part of a valid span.
+        if active_conll_tag:
+            spans.add((active_conll_tag, (span_start, span_end)))
+        # Return sorted list of spans
+        return sorted(list(spans), key=lambda x: x[1][0])
+
+    def _get_spans(self, tokens, tags):
+        """Convert tags to textspans."""
+        spans = self._tags_to_spans(tags)
+        text_spans = [x[0] + ": " + " ".join([tokens[i] for i in range(x[1][0], x[1][1] + 1)]) for x in spans]
+        if not text_spans:
+            text_spans = ["None"]
+        return text_spans
+
     def _info(self):
         features = datasets.Features(
             {
@@ -259,6 +313,7 @@ class Wikiann(datasets.GeneratorBasedBuilder):
                     )
                 ),
                 "langs": datasets.Sequence(datasets.Value("string")),
+                "spans": datasets.Sequence(datasets.Value("string")),
             }
         )
         return datasets.DatasetInfo(
@@ -290,6 +345,26 @@ class Wikiann(datasets.GeneratorBasedBuilder):
         ]
 
     def _generate_examples(self, filepath):
+        """Reads line by line format of the NER dataset and generates examples.
+        Input Format:
+        en:rick B-PER
+        en:and O
+        en:morty B-PER
+        en:are O
+        en:cool O
+        en:. O
+        Output Format:
+        {
+        'tokens': ["rick", "and", "morty", "are", "cool", "."],
+        'ner_tags': ["B-PER", "O", "B-PER", "O", "O", "O"],
+        'langs': ["en", "en", "en", "en", "en", "en"]
+        'spans': ["PER: rick", "PER: morty"]
+        }
+        Args:
+        filepath: Path to file with line by line NER format.
+        Returns:
+        Examples with the format listed above.
+        """
         guid_index = 1
         with open(filepath, encoding="utf-8") as f:
             tokens = []
@@ -298,7 +373,8 @@ class Wikiann(datasets.GeneratorBasedBuilder):
             for line in f:
                 if line == "" or line == "\n":
                     if tokens:
-                        yield guid_index, {"tokens": tokens, "ner_tags": ner_tags, "langs": langs}
+                        spans = self._get_spans(tokens, ner_tags)
+                        yield guid_index, {"tokens": tokens, "ner_tags": ner_tags, "langs": langs, "spans": spans}
                         guid_index += 1
                         tokens = []
                        ner_tags = []
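
The `_tags_to_spans`/`_get_spans` pair added in this commit converts a BIO tag sequence into the human-readable `<TAG>: <mention>` strings stored in the new `spans` field. The sketch below re-implements the same logic outside the builder class so the docstring example can be checked directly; the helper name `tags_to_text_spans` is hypothetical, not part of the dataset script:

```python
# Hypothetical standalone re-implementation of _tags_to_spans + _get_spans.
def tags_to_text_spans(tokens, tags):
    spans = []  # closed spans as (label, start, end) tuples
    active, start = None, None
    for i, tag in enumerate(tags):
        bio, label = tag[0], tag[2:]
        # Close the open span on "O", on a new "B", or on an "I" whose
        # label differs from the active span (ill-formed / IOB1 input).
        if active is not None and (bio in ("O", "B") or label != active):
            spans.append((active, start, i - 1))
            active = None
        # Open a new span on "B", or on a dangling "I" with no open span.
        if bio == "B" or (bio == "I" and active is None):
            active, start = label, i
    if active is not None:
        spans.append((active, start, len(tags) - 1))
    # Format as "<TAG>: <mention>", with ["None"] when no entity is found.
    return [f"{label}: {' '.join(tokens[s:e + 1])}" for label, s, e in spans] or ["None"]


tokens = ["rick", "and", "morty", "are", "cool", "."]
tags = ["B-PER", "O", "B-PER", "O", "O", "O"]
print(tags_to_text_spans(tokens, tags))  # ['PER: rick', 'PER: morty']
```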