davanstrien (HF staff) committed
Commit 0dc3436
1 Parent(s): d357e48

switch to using sentence segmented v1.4 datasets

Files changed (1):
  hipe2020.py (+24 -9)
hipe2020.py CHANGED
@@ -17,6 +17,7 @@
 """TODO"""
 
 from datetime import datetime
+from typing import Optional
 import datasets
 import re
 
@@ -29,20 +30,20 @@ _DESCRIPTION = """\
 TODO
 """
 
-_BASE_URL_TRAIN_DEV = "https://raw.githubusercontent.com/impresso/CLEF-HIPE-2020/master/data/training-v1.2/"
+_BASE_URL_TRAIN_DEV = "https://raw.githubusercontent.com/impresso/CLEF-HIPE-2020/master/data/v1.4/"
 
 
 _URLs = {
     "EN": {
-        "dev": _BASE_URL_TRAIN_DEV + "en/HIPE-data-v1.2-dev-en.tsv?raw=true"
+        "dev": _BASE_URL_TRAIN_DEV + "en/HIPE-data-v1.4-dev-en.tsv?raw=true"
     }, # English only has dev
     "DE": {
-        "dev": _BASE_URL_TRAIN_DEV + "de/HIPE-data-v1.2-dev-de.tsv?raw=true",
-        "train": _BASE_URL_TRAIN_DEV + "de/HIPE-data-v1.2-train-de.tsv?raw=true",
+        "dev": _BASE_URL_TRAIN_DEV + "de/HIPE-data-v1.4-dev-de.tsv?raw=true",
+        "train": _BASE_URL_TRAIN_DEV + "de/HIPE-data-v1.4-train-de.tsv?raw=true",
     },
     "FR": {
-        "dev": _BASE_URL_TRAIN_DEV + "fr/HIPE-data-v1.2-dev-fr.tsv?raw=true",
-        "train": _BASE_URL_TRAIN_DEV + "fr/HIPE-data-v1.2-train-fr.tsv?raw=true",
+        "dev": _BASE_URL_TRAIN_DEV + "fr/HIPE-data-v1.4-dev-fr.tsv?raw=true",
+        "train": _BASE_URL_TRAIN_DEV + "fr/HIPE-data-v1.4-train-fr.tsv?raw=true",
     },
 }
 
@@ -50,9 +51,8 @@ _URLs = {
 class HIPE2020Config(datasets.BuilderConfig):
     """BuilderConfig for HIPE2020"""
 
-    def __init__(self, data_urls, **kwargs):
+    def __init__(self, data_urls,**kwargs):
         """BuilderConfig for HIPE2020.
-
         Args:
             **kwargs: keyword arguments forwarded to super.
         """
@@ -106,6 +106,7 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                                 "I-pers",
                                 "I-prod",
                                 "I-time",
+                                "_",
                             ]
                         )
                     ),
@@ -120,6 +121,7 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                                 "I-loc",
                                 "I-org",
                                 "I-pers",
+                                "_",
                             ]
                         )
                     ),
@@ -179,6 +181,7 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                                 "I-prod.media",
                                 "I-time",
                                 "I-time.date.abs",
+                                "_",
                             ]
                         )
                     ),
@@ -206,6 +209,7 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                                 "I-org.ent",
                                 "I-pers",
                                 "I-pers.ind",
+                                "_",
                             ]
                         )
                     ),
@@ -223,6 +227,7 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                                 "I-comp.name",
                                 "I-comp.qualifier",
                                 "I-comp.title",
+                                "_",
                             ]
                         )
                     ),
@@ -258,6 +263,7 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                                 "I-org.adm",
                                 "I-org.ent",
                                 "I-pers.ind",
+                                "_",
                             ]
                         )
                     ),
@@ -265,6 +271,7 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                     "NEL_METO_ID": datasets.Sequence(datasets.Value("string")),
                     "no_space_after": datasets.Sequence(datasets.Value("bool")),
                     "end_of_line": datasets.Sequence(datasets.Value("bool")),
+                    "PySBDSegment":datasets.Sequence(datasets.Value("bool")),
                     "date": datasets.Value("timestamp[s]"),
                     "title": datasets.Value("string"),
                     "document_id": datasets.Value("string"),
@@ -276,7 +283,7 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
+        """Returns SplitGenerators."""
         downloaded_files = dl_manager.download_and_extract(self.config.data_urls)
         if self.config.name != "en":
             data_files = {
@@ -323,6 +330,7 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
            NEL_METO_ID = []
            no_space_after = []
            end_of_line = []
+           pysdbsegment = []
            for line in f:
                if line.startswith(
                    "TOKEN NE-COARSE-LIT NE-COARSE-METO NE-FINE-LIT NE-FINE-METO NE-FINE-COMP NE-NESTED NEL-LIT NEL-METO MISC"
@@ -353,6 +361,7 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                        "NEL_METO_ID": NEL_METO_ID,
                        "no_space_after": no_space_after,
                        "end_of_line": end_of_line,
+                        "PySBDSegment":pysdbsegment,
                        "date": date,
                        "title": title,
                        "document_id": document_id,
@@ -369,6 +378,7 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                    NEL_METO_ID = []
                    no_space_after = []
                    end_of_line = []
+                    pysdbsegment = []
                else:
                    # HIPE 2020 tokens are tab separated
                    splits = line.split(
@@ -386,8 +396,11 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                    misc = splits[-1]
                    is_space = "NoSpaceAfter" in misc
                    is_end_of_line = "EndOfLine" in misc
+                    PySBDSegment = "PySBDSegment" in misc
                    no_space_after.append(is_space)
                    end_of_line.append(is_end_of_line)
+                    pysdbsegment.append(PySBDSegment)
+
 
            # last example
            yield guid, {
@@ -403,7 +416,9 @@ class HIPE2020(datasets.GeneratorBasedBuilder):
                "NEL_METO_ID": NEL_METO_ID,
                "no_space_after": no_space_after,
                "end_of_line": end_of_line,
+                "PySBDSegment":pysdbsegment,
                "date": date,
                "title": title,
                "document_id": document_id,
            }
+
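
Below is a minimal, hypothetical sketch of how the new per-token PySBDSegment booleans added in this commit could be consumed downstream. It assumes the flag marks the last token of a pysbd-detected sentence; the loading script only records the raw boolean from the MISC column, so treat that boundary convention as an assumption. The function and the toy data are illustrative, not part of the dataset script.

from typing import List


def split_into_sentences(tokens: List[str], pysbd_segment: List[bool]) -> List[List[str]]:
    """Group a document's tokens into sentences using the PySBDSegment flags.

    Assumption: pysbd_segment[i] is True on the last token of a sentence,
    mirroring the per-token booleans the script collects from the MISC column.
    """
    sentences, current = [], []
    for token, is_boundary in zip(tokens, pysbd_segment):
        current.append(token)
        if is_boundary:
            sentences.append(current)
            current = []
    if current:  # keep any trailing tokens that never hit a boundary flag
        sentences.append(current)
    return sentences


if __name__ == "__main__":
    # Toy example with hand-made flags (not real HIPE data).
    tokens = ["Paris", "is", "nice", ".", "So", "is", "Lyon", "."]
    flags = [False, False, False, True, False, False, False, True]
    print(split_into_sentences(tokens, flags))
    # -> [['Paris', 'is', 'nice', '.'], ['So', 'is', 'Lyon', '.']]

If the flag instead marks the first token of each segment, the grouping would flip to start a new sentence before appending the token.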