albertvillanova committed
Commit fd671e6
1 Parent(s): 81c6434

Convert dataset to Parquet (#4)


- Convert dataset to Parquet (0c62fd9ce0d93056eaadad0e0eb4c78210a19591)
- Delete loading script (0285e7448d0d5a4e904a7f7b407330f36bfc603d)
- Delete data file (feaf59b31f8453f4f0fddb59d7f2e1112a6f623e)

README.md CHANGED
@@ -76,8 +76,14 @@ dataset_info:
   - name: train
     num_bytes: 1611597178
     num_examples: 537558
-  download_size: 1224029060
+  download_size: 1033457256
   dataset_size: 1611597178
+configs:
+- config_name: processed
+  data_files:
+  - split: train
+    path: processed/train-*
+  default: true
 ---
 
 # Dataset Card for BnL Historical Newspapers
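The `configs` block added above maps the new Parquet shards to the `processed` config and its `train` split, so the data can be served without executing any script. A minimal loading sketch, assuming the dataset lives at the repository id `bnl_newspapers` (the repo id is not stated in this diff):

```python
from datasets import load_dataset

# Loads the default "processed" config directly from the Parquet shards;
# no loading script runs anymore. The repository id is an assumption.
ds = load_dataset("bnl_newspapers", "processed", split="train")
print(ds[0]["title"], ds[0]["pub_date"])
```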
bnl_newspapers.py DELETED
@@ -1,184 +0,0 @@
-# coding=utf-8
-# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Digitised historic newspapers from the BnL."""
-
-import os
-import xml.etree.ElementTree as ET
-from datetime import datetime
-
-import datasets
-from datasets.tasks import LanguageModeling
-
-
-_CITATION = """\
-@misc{bnl_newspapers,
-title={Historical Newspapers},
-url={https://data.bnl.lu/data/historical-newspapers/},
-author={Bibliothèque nationale du Luxembourg}}
-"""
-
-_DESCRIPTION = """\
-Digitised historic newspapers from the Bibliothèque nationale (BnL) - the National Library of Luxembourg.
-"""
-
-_HOMEPAGE = "https://data.bnl.lu/data/historical-newspapers/"
-
-_LICENSE = "CC0"
-
-
-# Source: https://data.bnl.lu/open-data/digitization/newspapers/export01-newspapers1841-1878.zip
-_URLs = {"processed": "data/export01-newspapers1841-1878.zip"}
-
-
-class BNLNewspapersConfig(datasets.BuilderConfig):
-    """Builder config for BNLNewspapers."""
-
-    def __init__(self, data_url, citation, url, **kwargs):
-        """
-        Args:
-            data_url: `string`, url to download the zip file from.
-            citation: `string`, citation for the data set.
-            url: `string`, url for information about the data set.
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(BNLNewspapersConfig, self).__init__(version=datasets.Version("1.17.0"), **kwargs)
-        self.data_url = data_url
-        self.citation = citation
-        self.url = url
-
-
-class BNLNewspapers(datasets.GeneratorBasedBuilder):
-    """Historic newspapers from the BnL."""
-
-    BUILDER_CONFIGS = [
-        BNLNewspapersConfig(
-            name="processed",
-            description="""This dataset covers the 'processed newspapers' portion of the BnL newspapers.
-            These newspapers cover 38 years of news (1841-1878) and include 510,505 extracted articles.
-            """,
-            data_url=_URLs["processed"],
-            citation=_CITATION,
-            url=_HOMEPAGE,
-        ),
-    ]
-
-    DEFAULT_CONFIG_NAME = "processed"
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "id": datasets.Value("string"),
-                "source": datasets.Value("string"),
-                "url": datasets.Value("string"),
-                "title": datasets.Value("string"),
-                "ispartof": datasets.Value("string"),
-                "text": datasets.Value("string"),
-                "pub_date": datasets.Value("timestamp[s]"),
-                "publisher": datasets.Value("string"),
-                "language": datasets.Value("string"),
-                "article_type": datasets.ClassLabel(
-                    names=[
-                        "ADVERTISEMENT_SECTION",
-                        "BIBLIOGRAPHY",
-                        "CHAPTER",
-                        "INDEX",
-                        "CONTRIBUTION",
-                        "TABLE_OF_CONTENTS",
-                        "WEATHER",
-                        "SHIPPING",
-                        "SECTION",
-                        "ARTICLE",
-                        "TITLE_SECTION",
-                        "DEATH_NOTICE",
-                        "SUPPLEMENT",
-                        "TABLE",
-                        "ADVERTISEMENT",
-                        "CHART_DIAGRAM",
-                        "ILLUSTRATION",
-                        "ISSUE",
-                    ]
-                ),
-                "extent": datasets.Value("int32"),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-            task_templates=[LanguageModeling(text_column="text")],
-        )
-
-    def _split_generators(self, dl_manager):
-        _URL = self.config.data_url
-        data_dir = dl_manager.download_and_extract(_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "paths": dl_manager.iter_files([data_dir]),
-                },
-            ),
-        ]
-
-    def _generate_examples(self, paths):
-        key = 0
-        for path in paths:
-            if os.path.basename(path).endswith(".xml"):
-                data = parse_xml(path)
-                yield key, data
-                key += 1
-
-
-def parse_xml(path):
-    ns = {
-        "": "http://www.openarchives.org/OAI/2.0/",
-        "xsi": "http://www.w3.org/2001/XMLSchema-instance",
-        "oai_dc": "http://www.openarchives.org/OAI/2.0/oai_dc/",
-        "dc": "http://purl.org/dc/elements/1.1/",
-        "dcterms": "http://purl.org/dc/terms/",
-    }
-    tree = ET.parse(path)
-    source = tree.find(".//dc:source", ns).text
-    ark_id = tree.find(".//dc:identifier", ns).text
-    ispartof = tree.find(".//dcterms:isPartOf", ns).text
-    date = tree.find(".//dc:date", ns).text
-    if date:
-        date = datetime.strptime(date, "%Y-%m-%d")
-    publisher = tree.find(".//dc:publisher", ns)
-    if publisher is not None:
-        publisher = publisher.text
-    hasversion = tree.find(".//dcterms:hasVersion", ns).text
-    description = tree.find(".//dc:description", ns).text
-    title = tree.find(".//dc:title", ns).text
-    article_type = tree.find(".//dc:type", ns).text
-    extent = tree.find(".//dcterms:extent", ns).text
-    language = tree.find(".//dc:language", ns)
-    if language is not None:
-        language = language.text
-    return {
-        "id": ark_id,
-        "source": source,
-        "url": hasversion,
-        "title": title,
-        "text": description,
-        "pub_date": date,
-        "publisher": publisher,
-        "article_type": article_type,
-        "extent": extent,
-        "ispartof": ispartof,
-        "language": language,
-    }
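With the data served as Parquet, the loading script above is no longer needed. A sketch of how a conversion like this can be reproduced with the public `datasets` API; the shard count mirrors the four files added below, but the actual tooling used for this commit may differ:

```python
from datasets import load_dataset

# Materialise the dataset once (here via the old loading script), then
# write it back out as four contiguous Parquet shards.
ds = load_dataset("bnl_newspapers", "processed", split="train")
num_shards = 4
for i in range(num_shards):
    shard = ds.shard(num_shards=num_shards, index=i, contiguous=True)
    shard.to_parquet(f"processed/train-{i:05d}-of-{num_shards:05d}.parquet")
```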
data/export01-newspapers1841-1878.zip → processed/train-00000-of-00004.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:72032d30d93eb367c567651db970bdb6f645fa6377b2d6a2453d30cf870df946
-size 1224029060
+oid sha256:24f4b6b59284f334268cd35bb4dcf0c24a569f20368a4f2addea32092aeefa13
+size 258201852
processed/train-00001-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec530099cea0d255cdfe968c2bf72686a24130824ad5849f63bea634ba3d83fc
+size 260525746
processed/train-00002-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c81a729be9c75a86368dda2a3770e05ec21ed453a49a2ae329824272db4e85bc
+size 258528289
processed/train-00003-of-00004.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d5eeac8a900ec0f08b749f9817ec6464e48a2d2192d94afd551d3c2fe3157636
+size 256201369
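The four pointers above replace the single 1.2 GB zip with roughly 1 GB of Parquet (their sizes sum to the new `download_size` of 1033457256 bytes). Once pulled from LFS, the shards can also be read directly, e.g. with pyarrow; the local paths below assume a checkout of this repository:

```python
import pyarrow.parquet as pq

# Read all four shards as one Arrow table.
files = [f"processed/train-{i:05d}-of-00004.parquet" for i in range(4)]
table = pq.ParquetDataset(files).read()
print(table.num_rows)  # should match num_examples: 537558 from the README
```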