albertvillanova committed
Commit 738d9ce
1 Parent(s): 1b477e0

Convert dataset to Parquet (#3)


- Convert dataset to Parquet (b889f014742f7388c8257801741bef3cd9e729b2)
- Add 'nl-kb' config data files (6e52a93c52eec58ef4ed9c05850216eadb48b08a)
- Add 'de-sbb' config data files (470baf4e6d7a29de17aeaa8bc7362e5b4a54a352)
- Add 'de-onb' config data files (5030def27a385a09b1388a71ad29b6192cd85c04)
- Add 'de-lft' config data files (269c1682b0a43919dffbeab42c5a61fccbe5460d)
- Delete loading script (49b2e1c050d0abee069b9b4d360f23ea73e1d6cf)
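With the loading script deleted, the dataset is now served straight from per-config Parquet shards. The conversion in this commit was produced by the Hub's automated tooling; a minimal manual equivalent might look like the sketch below (the repository id `euronews` and a `datasets` version that still executes loading scripts are assumptions, not confirmed by this commit).

```python
from datasets import load_dataset

# Sketch only: re-export every config of the script-based dataset as Parquet.
for config in ["de-lft", "de-onb", "de-sbb", "fr-bnf", "nl-kb"]:
    ds = load_dataset("euronews", config, split="train")  # runs the old loading script
    ds.to_parquet(f"{config}/train-00000-of-00001.parquet")  # one shard per config
```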

README.md CHANGED

@@ -22,7 +22,7 @@ task_ids:
 paperswithcode_id: europeana-newspapers
 pretty_name: Europeana Newspapers
 dataset_info:
-- config_name: fr-bnf
+- config_name: de-lft
   features:
   - name: id
     dtype: string
@@ -41,11 +41,11 @@ dataset_info:
           '6': I-LOC
   splits:
   - name: train
-    num_bytes: 3340299
+    num_bytes: 1263426
     num_examples: 1
-  download_size: 1542418
-  dataset_size: 3340299
-- config_name: nl-kb
+  download_size: 394615
+  dataset_size: 1263426
+- config_name: de-onb
   features:
   - name: id
     dtype: string
@@ -64,10 +64,10 @@ dataset_info:
           '6': I-LOC
   splits:
   - name: train
-    num_bytes: 3104213
+    num_bytes: 502353
     num_examples: 1
-  download_size: 1502162
-  dataset_size: 3104213
+  download_size: 165235
+  dataset_size: 502353
 - config_name: de-sbb
   features:
   - name: id
@@ -87,11 +87,11 @@ dataset_info:
           '6': I-LOC
   splits:
   - name: train
-    num_bytes: 817295
+    num_bytes: 817279
     num_examples: 1
-  download_size: 407756
-  dataset_size: 817295
-- config_name: de-onb
+  download_size: 200613
+  dataset_size: 817279
+- config_name: fr-bnf
   features:
   - name: id
     dtype: string
@@ -110,11 +110,11 @@ dataset_info:
           '6': I-LOC
   splits:
   - name: train
-    num_bytes: 502369
+    num_bytes: 3340283
     num_examples: 1
-  download_size: 271252
-  dataset_size: 502369
-- config_name: de-lft
+  download_size: 687579
+  dataset_size: 3340283
+- config_name: nl-kb
   features:
   - name: id
     dtype: string
@@ -133,10 +133,31 @@ dataset_info:
           '6': I-LOC
   splits:
   - name: train
-    num_bytes: 1263429
+    num_bytes: 3104197
     num_examples: 1
-  download_size: 677779
-  dataset_size: 1263429
+  download_size: 695197
+  dataset_size: 3104197
+configs:
+- config_name: de-lft
+  data_files:
+  - split: train
+    path: de-lft/train-*
+- config_name: de-onb
+  data_files:
+  - split: train
+    path: de-onb/train-*
+- config_name: de-sbb
+  data_files:
+  - split: train
+    path: de-sbb/train-*
+- config_name: fr-bnf
+  data_files:
+  - split: train
+    path: fr-bnf/train-*
+- config_name: nl-kb
+  data_files:
+  - split: train
+    path: nl-kb/train-*
 ---
 
 # Dataset Card for Europeana Newspapers
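The new `configs:` block maps each config name to its Parquet shards, so the dataset can now be loaded without any custom code. A short usage sketch (the repository id `euronews` is again an assumption):

```python
from datasets import load_dataset

# The config name selects the matching data_files entry ("nl-kb/train-*") from the README.
ds = load_dataset("euronews", "nl-kb", split="train")
print(ds.features["ner_tags"].feature.names)  # ['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC']
```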
de-lft/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af6f511600a7c0f5851f2f382c0e332480e6a3a691fdaeba1c46d617034f060e
+size 394615
de-onb/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:905cee04c8995d99232778b61f426e6c1dc80c69bcf5303c036e0308946d79e3
+size 165235
de-sbb/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2fef5df64cebc5972cc13d522aa2130d01e61b3a27c0c4f241db389b6573f0f
+size 200613
euronews.py DELETED

@@ -1,161 +0,0 @@
-# coding=utf-8
-# Copyright 2020 HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""Named Entity Recognition corpora for Dutch, French, German from Europeana Newspapers."""
-
-import datasets
-
-
-logger = datasets.logging.get_logger(__name__)
-
-
-_CITATION = """\
-@InProceedings{NEUDECKER16.110,
-    author = {Clemens Neudecker},
-    title = {An Open Corpus for Named Entity Recognition in Historic Newspapers},
-    booktitle = {Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC 2016)},
-    year = {2016},
-    month = {may},
-    date = {23-28},
-    location = {Portorož, Slovenia},
-    editor = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Thierry Declerck and Sara Goggi and Marko Grobelnik and Bente Maegaard and Joseph Mariani and Helene Mazo and Asuncion Moreno and Jan Odijk and Stelios Piperidis},
-    publisher = {European Language Resources Association (ELRA)},
-    address = {Paris, France},
-    isbn = {978-2-9517408-9-1},
-    language = {english}
-    }
-"""
-
-_DESCRIPTION = """\
-The corpora comprise of files per data provider that are encoded in the IOB format (Ramshaw & Marcus, 1995). The IOB format is a simple text chunking format that divides texts into single tokens per line, and, separated by a whitespace, tags to mark named entities. The most commonly used categories for tags are PER (person), LOC (location) and ORG (organization). To mark named entities that span multiple tokens, the tags have a prefix of either B- (beginning of named entity) or I- (inside of named entity). O (outside of named entity) tags are used to mark tokens that are not a named entity.
-"""
-
-_URL = "https://raw.githubusercontent.com/EuropeanaNewspapers/ner-corpora/master/"
-_FR_BNF = "enp_FR.bnf.bio/enp_FR.bnf.bio"
-_NL_KB = "enp_NL.kb.bio/enp_NL.kb.bio"
-_DE_SBB = "enp_DE.sbb.bio/enp_DE.sbb.bio"
-_DE_ONB = "enp_DE.onb.bio/enp_DE.onb.bio"
-_DE_LFT = "enp_DE.lft.bio/enp_DE.lft.bio"
-
-_TAGS = [
-    "O",
-    "B-PER",
-    "I-PER",
-    "B-ORG",
-    "I-ORG",
-    "B-LOC",
-    "I-LOC",
-]
-
-
-class EuroNewsConfig(datasets.BuilderConfig):
-    """BuilderConfig for Europana Newspaper"""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for Europana Newspaper.
-
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(EuroNewsConfig, self).__init__(**kwargs)
-
-
-class Euronews(datasets.GeneratorBasedBuilder):
-    """Europana Newspaper dataset."""
-
-    BUILDER_CONFIGS = [
-        EuroNewsConfig(
-            name="fr-bnf", version=datasets.Version("1.0.0"), description="National Library of France Dataset"
-        ),
-        EuroNewsConfig(
-            name="nl-kb", version=datasets.Version("1.0.0"), description="National Library of the Netherlands Dataset"
-        ),
-        EuroNewsConfig(name="de-sbb", version=datasets.Version("1.0.0"), description="Berlin State Library Dataset"),
-        EuroNewsConfig(
-            name="de-onb", version=datasets.Version("1.0.0"), description="Austrian National Library Dataset"
-        ),
-        EuroNewsConfig(
-            name="de-lft", version=datasets.Version("1.0.0"), description="Dr Friedrich Teßmann Library Dataset"
-        ),
-    ]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("string"),
-                    "tokens": datasets.Sequence(datasets.Value("string")),
-                    "ner_tags": datasets.Sequence(datasets.features.ClassLabel(names=_TAGS)),
-                }
-            ),
-            supervised_keys=None,
-            homepage="https://github.com/EuropeanaNewspapers/ner-corpora",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        if self.config.name == "fr-bnf":
-            url_to_download = _URL + _FR_BNF
-        elif self.config.name == "nl-kb":
-            url_to_download = _URL + _NL_KB
-        elif self.config.name == "de-sbb":
-            url_to_download = _URL + _DE_SBB
-        elif self.config.name == "de-onb":
-            url_to_download = _URL + _DE_ONB
-        elif self.config.name == "de-lft":
-            url_to_download = _URL + _DE_LFT
-
-        downloaded_files = dl_manager.download_and_extract(url_to_download)
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files}),
-        ]
-
-    def _generate_examples(self, filepath):
-        logger.info("⏳ Generating examples from = %s", filepath)
-        with open(filepath, encoding="utf-8") as f:
-            guid = 0
-            tokens = []
-            ner_tags = []
-            for line in f:
-                splits = line.split()
-                if len(splits) != 2:
-                    continue
-                if line == "" or line == "\n":
-                    if tokens:
-                        yield guid, {
-                            "id": str(guid),
-                            "tokens": tokens,
-                            "ner_tags": ner_tags,
-                        }
-                        guid += 1
-                        tokens = []
-                        ner_tags = []
-                else:
-                    # Europana Newspaper tokens are space separated
-                    tag = splits[1].rstrip().upper()
-                    if tag not in _TAGS:
-                        continue
-                    tokens.append(splits[0])
-                    ner_tags.append(tag)
-            # last example
-            yield guid, {
-                "id": str(guid),
-                "tokens": tokens,
-                "ner_tags": ner_tags,
-            }
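Note that the deleted parser skips every line that does not split into exactly two fields before it ever reaches the blank-line check, so sentence boundaries are never emitted and each config ends up as a single large example, which is consistent with `num_examples: 1` in the metadata above. The label order from `_TAGS` is preserved as a `ClassLabel` feature in the Parquet-backed dataset; a small sketch for mapping the stored integer ids back to tag strings (the repository id is an assumption):

```python
from datasets import load_dataset

ds = load_dataset("euronews", "de-lft", split="train")  # repository id is an assumption
id2tag = ds.features["ner_tags"].feature.int2str        # ClassLabel id -> "O", "B-PER", ...

example = ds[0]
for token, tag_id in zip(example["tokens"][:5], example["ner_tags"][:5]):
    print(token, id2tag(tag_id))
```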
fr-bnf/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2271efedd6b273c9f46ca9b1a0017dac4862571b807c121bbe3a6bc714c265b2
+size 687579
nl-kb/train-00000-of-00001.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2ebaad94abca0725be9438f715e4acbca7507bcb65b960226a992e8d72a39e7
+size 695197
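Each `*.parquet` entry in this commit is a Git LFS pointer: the repository itself stores only the spec version, the SHA-256 of the real file, and its size in bytes. A hedged sketch for checking a locally downloaded shard against its pointer (the local path is hypothetical):

```python
import hashlib
from pathlib import Path

# Hypothetical local copy of the nl-kb shard; adjust the path as needed.
data = Path("nl-kb/train-00000-of-00001.parquet").read_bytes()

# Compare against the pointer's "size" and "oid sha256:..." fields above.
print("size ok:", len(data) == 695197)
print("sha256 ok:", hashlib.sha256(data).hexdigest()
      == "e2ebaad94abca0725be9438f715e4acbca7507bcb65b960226a992e8d72a39e7")
```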