NazaGara committed on
Commit
b61ef82
1 Parent(s): 516ea84

keep pos tags

Browse files
Files changed (1) hide show
  1. wikiner.py +287 -5
wikiner.py CHANGED
@@ -62,6 +62,285 @@ class Wikiner(datasets.GeneratorBasedBuilder):
62
  {
63
  "id": datasets.Value("string"),
64
  "tokens": datasets.Sequence(datasets.Value("string")),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
65
  "ner_tags": datasets.Sequence(
66
  datasets.features.ClassLabel(
67
  names=[
@@ -100,29 +379,32 @@ class Wikiner(datasets.GeneratorBasedBuilder):
100
 
101
  def _generate_examples(self, filepath):
102
  logging.info("⏳ Generating examples from = %s", filepath)
103
- with open(filepath, encoding="utf-8") as f:
104
  guid = 0
105
- tokens = []
106
- ner_tags = []
107
  for line in f:
108
  if line.startswith("-DOCSTART-") or line == "" or line == "\n":
109
  if tokens:
110
  yield guid, {
111
  "id": str(guid),
112
  "tokens": tokens,
 
113
  "ner_tags": ner_tags,
114
  }
115
  guid += 1
116
  tokens = []
 
117
  ner_tags = []
118
  else:
119
- splits = line.split(' ')
120
  tokens.append(splits[0])
121
- ner_tags.append(splits[1].rstrip())
 
122
  # last example
123
  if tokens:
124
  yield guid, {
125
  "id": str(guid),
126
  "tokens": tokens,
 
127
  "ner_tags": ner_tags,
128
  }
 
62
  {
63
  "id": datasets.Value("string"),
64
  "tokens": datasets.Sequence(datasets.Value("string")),
65
+ "pos_tags": datasets.Sequence(
66
+ datasets.features.ClassLabel(
67
+ names=[
68
+ "ACRNM",
69
+ "ADJ",
70
+ "ADV",
71
+ "ALFS",
72
+ "ART",
73
+ "BACKSLASH",
74
+ "CARD",
75
+ "CC",
76
+ "CCAD",
77
+ "CCNEG",
78
+ "CM",
79
+ "CODE",
80
+ "COLON",
81
+ "CQUE",
82
+ "CSUBF",
83
+ "CSUBI",
84
+ "CSUBX",
85
+ "DM",
86
+ "DOTS",
87
+ "FS",
88
+ "INT",
89
+ "LP",
90
+ "NC",
91
+ "NEG",
92
+ "NMEA",
93
+ "NMON",
94
+ "NP",
95
+ "ORD",
96
+ "PAL",
97
+ "PDEL",
98
+ "PE",
99
+ "PERCT",
100
+ "PPC",
101
+ "PPO",
102
+ "PPX",
103
+ "PREP",
104
+ "QT",
105
+ "QU",
106
+ "REL",
107
+ "RP",
108
+ "SE",
109
+ "SEMICOLON",
110
+ "SLASH",
111
+ "SYM",
112
+ "UMMX",
113
+ "VCLIfin",
114
+ "VCLIger",
115
+ "VCLIinf",
116
+ "VEadj",
117
+ "VEfin",
118
+ "VEger",
119
+ "VEinf",
120
+ "VHadj",
121
+ "VHfin",
122
+ "VHger",
123
+ "VHinf",
124
+ "VLadj",
125
+ "VLfin",
126
+ "VLger",
127
+ "VLinf",
128
+ "VMadj",
129
+ "VMfin",
130
+ "VMger",
131
+ "VMinf",
132
+ "VSadj",
133
+ "VSfin",
134
+ "VSger",
135
+ "VSinf",
136
+ ]
137
+ )
138
+ ),
139
+ "ner_tags": datasets.Sequence(
140
+ datasets.features.ClassLabel(
141
+ names=[
142
+ "O",
143
+ "B-PER",
144
+ "I-PER",
145
+ "B-ORG",
146
+ "I-ORG",
147
+ "B-LOC",
148
+ "I-LOC",
149
+ "B-MISC",
150
+ "I-MISC",
151
+ ]
152
+ )
153
+ ),
154
+ }
155
+ ),
156
+ supervised_keys=None,
157
+ homepage=_URL,
158
+ citation=_CITATION,
159
+ )
160
+
161
def _split_generators(self, dl_manager):
    """Download the training file and return the TRAIN split generator."""
    train_url = f"{_URL}{_TRAINING_FILE}"
    paths = dl_manager.download_and_extract({"train": train_url})
    train_split = datasets.SplitGenerator(
        name=datasets.Split.TRAIN,
        gen_kwargs={"filepath": paths["train"]},
    )
    return [train_split]
174
+
175
+ def _generate_examples(self, filepath):
176
+ logging.info("⏳ Generating examples from = %s", filepath)
177
+ with open(filepath, encoding="utf-8") as f:
178
+ guid = 0
179
+ tokens, pos_tags, ner_tags = [], [], []
180
+ for line in f:
181
+ if line.startswith("-DOCSTART-") or line == "" or line == "\n":
182
+ if tokens:
183
+ yield guid, {
184
+ "id": str(guid),
185
+ "tokens": tokens,
186
+ "pos_tags": pos_tags,
187
+ "ner_tags": ner_tags,
188
+ }
189
+ guid += 1
190
+ tokens = []
191
+ pos_tags = []
192
+ ner_tags = []
193
+ else:
194
+ splits = line.split(" ")
195
+ tokens.append(splits[0])
196
+ pos_tags.append(splits[1])
197
+ ner_tags.append(splits[2].rstrip())
198
+ # last example
199
+ if tokens:
200
+ yield guid, {
201
+ "id": str(guid),
202
+ "tokens": tokens,
203
+ "pos_tags": pos_tags,
204
+ "ner_tags": ner_tags,
205
+ }
206
+ # coding=utf-8
207
+ # Copyright 2020 HuggingFace Datasets Authors.
208
+ #
209
+ # Licensed under the Apache License, Version 2.0 (the "License");
210
+ # you may not use this file except in compliance with the License.
211
+ # You may obtain a copy of the License at
212
+ #
213
+ # http://www.apache.org/licenses/LICENSE-2.0
214
+ #
215
+ # Unless required by applicable law or agreed to in writing, software
216
+ # distributed under the License is distributed on an "AS IS" BASIS,
217
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
218
+ # See the License for the specific language governing permissions and
219
+ # limitations under the License.
220
+
221
+ import logging
222
+
223
+ import datasets
224
+
225
+
226
+ _CITATION = """\
227
+ @inproceedings{,
228
+ title = "",
229
+ author = "Garagiola, Nazareno",
230
+ year = "2022",
231
+ url = ""
232
+ }
233
+ """
234
+
235
+ _DESCRIPTION = """Dataset used to train a NER model"""
236
+
237
+ _URL = "https://raw.githubusercontent.com/NazaGara/betoNER/main/data/wikiner/"
238
+ _TRAINING_FILE = "train.conllu"
239
+
240
+
241
class WikinerConfig(datasets.BuilderConfig):
    """Configuration for the Wikiner dataset builder."""

    def __init__(self, **kwargs):
        """Create a config, forwarding all keyword arguments.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
250
+
251
+
252
+ class Wikiner(datasets.GeneratorBasedBuilder):
253
+ """Wikiner dataset."""
254
+
255
+ BUILDER_CONFIGS = [
256
+ WikinerConfig(
257
+ name="wikiner",
258
+ version=datasets.Version("1.0.0"),
259
+ description="wikiner dataset",
260
+ ),
261
+ ]
262
+
263
+ def _info(self):
264
+ return datasets.DatasetInfo(
265
+ description=_DESCRIPTION,
266
+ features=datasets.Features(
267
+ {
268
+ "id": datasets.Value("string"),
269
+ "tokens": datasets.Sequence(datasets.Value("string")),
270
+ "pos_tags": datasets.Sequence(
271
+ datasets.features.ClassLabel(
272
+ names=[
273
+ "ACRNM",
274
+ "ADJ",
275
+ "ADV",
276
+ "ALFS",
277
+ "ART",
278
+ "BACKSLASH",
279
+ "CARD",
280
+ "CC",
281
+ "CCAD",
282
+ "CCNEG",
283
+ "CM",
284
+ "CODE",
285
+ "COLON",
286
+ "CQUE",
287
+ "CSUBF",
288
+ "CSUBI",
289
+ "CSUBX",
290
+ "DM",
291
+ "DOTS",
292
+ "FS",
293
+ "INT",
294
+ "LP",
295
+ "NC",
296
+ "NEG",
297
+ "NMEA",
298
+ "NMON",
299
+ "NP",
300
+ "ORD",
301
+ "PAL",
302
+ "PDEL",
303
+ "PE",
304
+ "PERCT",
305
+ "PPC",
306
+ "PPO",
307
+ "PPX",
308
+ "PREP",
309
+ "QT",
310
+ "QU",
311
+ "REL",
312
+ "RP",
313
+ "SE",
314
+ "SEMICOLON",
315
+ "SLASH",
316
+ "SYM",
317
+ "UMMX",
318
+ "VCLIfin",
319
+ "VCLIger",
320
+ "VCLIinf",
321
+ "VEadj",
322
+ "VEfin",
323
+ "VEger",
324
+ "VEinf",
325
+ "VHadj",
326
+ "VHfin",
327
+ "VHger",
328
+ "VHinf",
329
+ "VLadj",
330
+ "VLfin",
331
+ "VLger",
332
+ "VLinf",
333
+ "VMadj",
334
+ "VMfin",
335
+ "VMger",
336
+ "VMinf",
337
+ "VSadj",
338
+ "VSfin",
339
+ "VSger",
340
+ "VSinf",
341
+ ]
342
+ )
343
+ ),
344
  "ner_tags": datasets.Sequence(
345
  datasets.features.ClassLabel(
346
  names=[
 
379
 
380
  def _generate_examples(self, filepath):
381
  logging.info("⏳ Generating examples from = %s", filepath)
382
+ with open(filepath, encoding="utf-8") as f:
383
  guid = 0
384
+ tokens, pos_tags, ner_tags = [], [], []
 
385
  for line in f:
386
  if line.startswith("-DOCSTART-") or line == "" or line == "\n":
387
  if tokens:
388
  yield guid, {
389
  "id": str(guid),
390
  "tokens": tokens,
391
+ "pos_tags": pos_tags,
392
  "ner_tags": ner_tags,
393
  }
394
  guid += 1
395
  tokens = []
396
+ pos_tags = []
397
  ner_tags = []
398
  else:
399
+ splits = line.split(" ")
400
  tokens.append(splits[0])
401
+ pos_tags.append(splits[1])
402
+ ner_tags.append(splits[2].rstrip())
403
  # last example
404
  if tokens:
405
  yield guid, {
406
  "id": str(guid),
407
  "tokens": tokens,
408
+ "pos_tags": pos_tags,
409
  "ner_tags": ner_tags,
410
  }