imvladikon committed on
Commit
c3cdfca
1 Parent(s): b14a0f3

Update nemo_corpus.py

Browse files
Files changed (1) hide show
  1. nemo_corpus.py +102 -4
nemo_corpus.py CHANGED
@@ -1,10 +1,7 @@
1
- #!/usr/bin/env python3
2
- # -*- coding: utf-8 -*-
3
-
4
  import os
5
  import tempfile
6
  from pathlib import Path
7
-
8
  import datasets
9
 
10
  logger = datasets.logging.get_logger(__name__)
@@ -31,6 +28,93 @@ _DESCRIPTION = """\
31
  URL = "https://github.com/OnlpLab/NEMO-Corpus"
32
 
33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
  class NemoCorpusConfig(datasets.BuilderConfig):
35
  """BuilderConfig for NemoCorpus"""
36
 
@@ -48,6 +132,7 @@ class NemoCorpusConfig(datasets.BuilderConfig):
48
  {
49
  "id": datasets.Value("string"),
50
  "tokens": datasets.Sequence(datasets.Value("string")),
 
51
  "ner_tags": datasets.Sequence(
52
  datasets.features.ClassLabel(
53
  names=['S-ANG', 'B-ANG', 'I-ANG', 'E-ANG',
@@ -62,6 +147,14 @@ class NemoCorpusConfig(datasets.BuilderConfig):
62
  'B-WOA', 'E-WOA', 'I-WOA', 'S-WOA']
63
  )
64
  ),
 
 
 
 
 
 
 
 
65
  }
66
  )
67
 
@@ -138,6 +231,7 @@ class NemoCorpus(datasets.GeneratorBasedBuilder):
138
  "validation": dl_manager.download(folder / "morph_gold_dev.bmes"),
139
  "test": dl_manager.download(folder / "morph_gold_test.bmes"),
140
  }
 
141
  return [
142
  datasets.SplitGenerator(name=datasets.Split.TRAIN,
143
  gen_kwargs={"filepath": data_files["train"]}),
@@ -164,8 +258,10 @@ class NemoCorpus(datasets.GeneratorBasedBuilder):
164
  if tokens:
165
  yield guid, {
166
  "id": str(guid),
 
167
  "tokens": tokens,
168
  "ner_tags": ner_tags,
 
169
  }
170
  guid += 1
171
  tokens = []
@@ -177,8 +273,10 @@ class NemoCorpus(datasets.GeneratorBasedBuilder):
177
  # last example
178
  yield guid, {
179
  "id": str(guid),
 
180
  "tokens": tokens,
181
  "ner_tags": ner_tags,
 
182
  }
183
 
184
  def _generate_examples_nested(self, filepath, sep=" "):
 
 
 
 
1
  import os
2
  import tempfile
3
  from pathlib import Path
4
+ from typing import Iterable, Tuple, List
5
  import datasets
6
 
7
  logger = datasets.logging.get_logger(__name__)
 
28
  URL = "https://github.com/OnlpLab/NEMO-Corpus"
29
 
30
 
31
def tokens_with_tags_to_spans(tags: Iterable[str], tokens: Iterable[str]) -> List[dict]:
    """
    Convert parallel sequences of tokens and tags into entity spans for
    BIOSE/BILOU tagging schemes.

    Args:
        tags: sequence of entity tags (e.g. "B-PER", "E-PER", "S-ORG", "O").
        tokens: sequence of tokens aligned with ``tags``.

    Returns:
        A list of dicts with keys ``span`` (the phrase), ``start``/``end``
        (token indices; ``end`` is exclusive, so no need to increment it),
        ``entity`` (the entity type), and ``start_char``/``end_char``
        (character offsets computed as if the tokens were joined with
        single spaces; ``end_char`` is exclusive).

    Raises:
        ValueError: if an "I"/"E"/"L" tag appears without a preceding "B",
            or if a tag matches none of the recognized prefixes.
    """
    entities = []
    start = None       # token index where the current multi-token entity began
    start_char = None  # character offset of that entity's first token
    words = []         # tokens accumulated for the current entity
    curr_pos = 0       # character offset of the current token
    for i, (tag, token) in enumerate(zip(tags, tokens)):
        if tag is None or tag.startswith("-"):
            if start is not None:
                # Abandon the entity that was in progress.
                start = None
                start_char = None
                words = []
            else:
                # Emit a typeless single-token span.
                end_pos = curr_pos + len(token)
                words.append(token)
                entities.append({
                    "entity": "",
                    "span": " ".join(words),
                    "start": i,
                    "end": i + 1,
                    "start_char": curr_pos,
                    "end_char": end_pos
                })
        elif tag.startswith("O"):
            pass  # outside any entity
        elif tag.startswith("I"):
            words.append(token)
            if start is None:
                raise ValueError(
                    "Invalid BILUO tag sequence: Got a tag starting with {start} "
                    "without a preceding 'B' (beginning of an entity). "
                    "Tag sequence:\n{tags}".format(start="I", tags=list(tags)[: i + 1])
                )
        elif tag.startswith("U") or tag.startswith("S"):
            # Unit/Single tag: a complete one-token entity.
            end_pos = curr_pos + len(token)
            entities.append({
                "entity": tag[2:],
                "span": token,
                "start": i,
                "end": i + 1,
                "start_char": curr_pos,
                "end_char": end_pos
            })
        elif tag.startswith("B"):
            start = i
            start_char = curr_pos
            words.append(token)
        elif tag.startswith("L") or tag.startswith("E"):
            if start is None:
                raise ValueError(
                    "Invalid BILUO tag sequence: Got a tag starting with {start} "
                    "without a preceding 'B' (beginning of an entity). "
                    "Tag sequence:\n{tags}".format(start="L", tags=list(tags)[: i + 1])
                )
            end_pos = curr_pos + len(token)
            words.append(token)
            entities.append({
                "entity": tag[2:],
                "span": " ".join(words),
                "start": start,
                "end": i + 1,
                "start_char": start_char,
                "end_char": end_pos
            })
            start = None
            start_char = None
            words = []
        else:
            raise ValueError("Invalid BILUO tag: '{}'.".format(tag))
        curr_pos += len(token) + len(" ")
    return entities
116
+
117
+
118
  class NemoCorpusConfig(datasets.BuilderConfig):
119
  """BuilderConfig for NemoCorpus"""
120
 
 
132
  {
133
  "id": datasets.Value("string"),
134
  "tokens": datasets.Sequence(datasets.Value("string")),
135
+ "sentence": datasets.Value("string"),
136
  "ner_tags": datasets.Sequence(
137
  datasets.features.ClassLabel(
138
  names=['S-ANG', 'B-ANG', 'I-ANG', 'E-ANG',
 
147
  'B-WOA', 'E-WOA', 'I-WOA', 'S-WOA']
148
  )
149
  ),
150
+ "spans": datasets.Sequence({
151
+ "span": datasets.Value("string"),
152
+ "start": datasets.Value("int32"),
153
+ "end": datasets.Value("int32"),
154
+ "entity": datasets.Value("string"),
155
+ "start_char": datasets.Value("int32"),
156
+ "end_char": datasets.Value("int32"),
157
+ })
158
  }
159
  )
160
 
 
231
  "validation": dl_manager.download(folder / "morph_gold_dev.bmes"),
232
  "test": dl_manager.download(folder / "morph_gold_test.bmes"),
233
  }
234
+
235
  return [
236
  datasets.SplitGenerator(name=datasets.Split.TRAIN,
237
  gen_kwargs={"filepath": data_files["train"]}),
 
258
  if tokens:
259
  yield guid, {
260
  "id": str(guid),
261
+ "sentence": " ".join(tokens),
262
  "tokens": tokens,
263
  "ner_tags": ner_tags,
264
+ "spans": tokens_with_tags_to_spans(ner_tags, tokens)
265
  }
266
  guid += 1
267
  tokens = []
 
273
  # last example
274
  yield guid, {
275
  "id": str(guid),
276
+ "sentence": " ".join(tokens),
277
  "tokens": tokens,
278
  "ner_tags": ner_tags,
279
+ "spans": tokens_with_tags_to_spans(ner_tags, tokens)
280
  }
281
 
282
  def _generate_examples_nested(self, filepath, sep=" "):