guilhermelmello committed on
Commit 09ef168
1 Parent(s): 556ac02

Add loading script.

Files changed (1)
  1. corpus-carolina.py +210 -0
corpus-carolina.py ADDED
@@ -0,0 +1,210 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset
+ # script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Carolina Corpus"""
+
+ from lxml import etree
+ import os
+ import datasets
+
+
+ _HOMEPAGE = "https://sites.usp.br/corpuscarolina/"
+
+
+ _DESCRIPTION = """
+ Carolina is an Open Corpus for Linguistics and Artificial Intelligence with a
+ robust volume of texts of varied typology in contemporary Brazilian Portuguese
+ (1970-2021).
+ """
+
+
+ _CITATION = r"""
+ @misc{corpusCarolinaV1.1,
+     title={
+         Carolina:
+         The Open Corpus for Linguistics and Artificial Intelligence},
+     author={
+         Finger, Marcelo and
+         Paixão de Sousa, Maria Clara and
+         Namiuti, Cristiane and
+         Martins do Monte, Vanessa and
+         Costa, Aline Silva and
+         Serras, Felipe Ribas and
+         Sturzeneker, Mariana Lourenço and
+         Guets, Raquel de Paula and
+         Mesquita, Renata Morais and
+         Mello, Guilherme Lamartine de and
+         Crespo, Maria Clara Ramos Morales and
+         Rocha, Maria Lina de Souza Jeannine and
+         Brasil, Patrícia and
+         Silva, Mariana Marques da and
+         Palma, Mayara Feliciano},
+     howpublished={\url{https://sites.usp.br/corpuscarolina/corpus}},
+     year={2022},
+     note={Version 1.1 (Ada)},
+ }
+ """
+
+
+ _LICENSE = """
+ The Open Corpus for Linguistics and Artificial Intelligence (Carolina) was
+ compiled for academic purposes, namely linguistic and computational analysis.
+ It is composed of texts assembled in various digital repositories, whose
+ licenses are multiple and therefore should be observed when making use of the
+ corpus. The Carolina headers are licensed under Creative Commons
+ Attribution-NonCommercial-ShareAlike 4.0 International.
+ """
+
+
+ def _taxonomies():
+     """Creates a map between taxonomy code and name
+
+     Returns
+     -------
+     dict
+         The dictionary of codes and names.
+     """
+     return dict(
+         dat="datasets and other corpora",
+         jud="judicial branch",
+         leg="legislative branch",
+         pub="public domain works",
+         soc="social media",
+         uni="university domains",
+         wik="wikis",
+     )
+
+
+ _VERSION = "1.1.0"
+ _CORPUS_URL = "corpus/{taxonomy}/"
+ _CHECKSUM_FNAME = _CORPUS_URL + "checksum.sha256"
+
+
+ class CarolinaConfig(datasets.BuilderConfig):
+     """Carolina Configuration."""
+     def __init__(self, taxonomy: str = None, **kwargs):
+         """BuilderConfig for Carolina
+
+         Parameters
+         ----------
+         taxonomy : str
+             The taxonomy code (3 letters). The code defines the taxonomy
+             to download. If `None`, all taxonomies will be downloaded.
+         **kwargs
+             Arguments passed to super.
+         """
+         # validates taxonomy
+         if taxonomy is None:
+             taxonomy = "all"
+         elif taxonomy != "all" and taxonomy not in _taxonomies():
+             raise ValueError(f"Invalid taxonomy: {taxonomy}")
+
+         # custom name and description
+         description = "Carolina corpus."
+         if taxonomy == "all":
+             name = "carolina"
+             description += " Using all taxonomies."
+         else:
+             name = _taxonomies()[taxonomy]
+             description += f" Using taxonomy {taxonomy}."
+
+         super(CarolinaConfig, self).__init__(
+             name=name, description=description, **kwargs)
+
+         # Carolina attributes
+         self.taxonomy = taxonomy
+         self.version = datasets.Version(_VERSION)
+
+
+ class Carolina(datasets.GeneratorBasedBuilder):
+     """Carolina Downloader and Builder"""
+
+     BUILDER_CONFIG_CLASS = CarolinaConfig
+
+     def _info(self):
+         features = datasets.Features({
+             "meta": datasets.Value("string"),
+             "text": datasets.Value("string")
+         })
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             features=features,
+             license=_LICENSE
+         )
+
+     def _split_generators(self, dl_manager):
+         # list taxonomies to download
+         if self.config.taxonomy == "all":
+             taxonomies = _taxonomies().values()
+         else:
+             taxonomies = [_taxonomies()[self.config.taxonomy]]
+
+         zip_urls = dict()
+         for taxonomy in taxonomies:
+             # download checksum file
+             checksum_path = _CHECKSUM_FNAME.format(taxonomy=taxonomy)
+             checksum_path = dl_manager.download(checksum_path)
+
+             tax_url = _CORPUS_URL.format(taxonomy=taxonomy)
+
+             # extract and build zip urls
+             with open(checksum_path, encoding="utf-8") as cfile:
+                 for line in cfile:
+                     fname = line.split()[1]
+                     if fname.endswith(".xml.zip"):
+                         zip_url = tax_url + fname  # download url
+                         fname = os.path.split(fname)[1]  # removes subdirs
+                         fname = fname[:-4]  # removes .zip
+                         zip_urls[fname] = zip_url  # xml -> zip url
+
+         # extractions are made in cache folders and
+         # the path returned is the folder path, not the
+         # extracted file (or files). It is necessary to
+         # build the xml file path. It is made using the
+         # zip_urls dict structure.
+         extracted = dl_manager.download_and_extract(zip_urls)
+         xml_files = [os.path.join(v, k) for k, v in extracted.items()]
+         xml_files = sorted(xml_files)
+
+         return [
+             datasets.SplitGenerator(
+                 name="corpus",
+                 gen_kwargs={"filepaths": xml_files}
+             )
+         ]
+
+     def _generate_examples(self, filepaths):
+         TEI_NS = "{http://www.tei-c.org/ns/1.0}"
+         parser_params = dict(
+             huge_tree=True,
+             encoding="utf-8",
+             tag=f"{TEI_NS}TEI"
+         )
+
+         _key = 0
+         for path in filepaths:
+             # parse xml file
+             for _, tei in etree.iterparse(path, **parser_params):
+                 header = tei.find(f"{TEI_NS}teiHeader")
+
+                 example = {
+                     "meta": etree.tostring(
+                         header, encoding="utf-8").decode("utf-8"),
+                     "text": tei.find(f".//{TEI_NS}body/{TEI_NS}p").text
+                 }
+                 yield _key, example
+                 _key += 1
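
For context, a minimal usage sketch (not part of the commit): once this script is hosted on the Hub, the corpus can be loaded through the `datasets` library. The repository id "carolina-c4ai/corpus-carolina" is an assumption for illustration; the `taxonomy` keyword reaches `CarolinaConfig.__init__` because `load_dataset` forwards extra keyword arguments to the builder config.

from datasets import load_dataset

# load all taxonomies (default configuration); the script defines a
# single split named "corpus"
corpus = load_dataset("carolina-c4ai/corpus-carolina", split="corpus")  # repo id assumed

# or load a single taxonomy by its 3-letter code, e.g. "wik" for wikis
wikis = load_dataset("carolina-c4ai/corpus-carolina", taxonomy="wik", split="corpus")

print(wikis[0]["meta"])  # TEI header as an XML string
print(wikis[0]["text"])  # text of the first <p> in the TEI body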