Datasets:
Languages:
Portuguese
Multilinguality:
monolingual
Size Categories:
1B<n<10B
Language Creators:
crowdsourced
Annotations Creators:
no-annotation
Source Datasets:
original
License:
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset
# script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Carolina Corpus""" | |
from lxml import etree | |
import os | |
import datasets | |
_HOMEPAGE = "https://sites.usp.br/corpuscarolina/" | |
_DESCRIPTION = """ | |
Carolina is an Open Corpus for Linguistics and Artificial Intelligence with a | |
robust volume of texts of varied typology in contemporary Brazilian Portuguese | |
(1970-2021). | |
""" | |
_CITATION = r""" | |
@misc{corpusCarolinaV1.1, | |
title={ | |
Carolina: | |
The Open Corpus for Linguistics and Artificial Intelligence}, | |
author={ | |
Finger, Marcelo and | |
Paixão de Sousa, Maria Clara and | |
Namiuti, Cristiane and | |
Martins do Monte, Vanessa and | |
Costa, Aline Silva and | |
Serras, Felipe Ribas and | |
Sturzeneker, Mariana Lourenço and | |
Guets, Raquel de Paula and | |
Mesquita, Renata Morais and | |
Mello, Guilherme Lamartine de and | |
Crespo, Maria Clara Ramos Morales and | |
Rocha, Maria Lina de Souza Jeannine and | |
Brasil, Patrícia and | |
Silva, Mariana Marques da and | |
Palma, Mayara Feliciano}, | |
howpublished={\url{https://sites.usp.br/corpuscarolina/corpus}}, | |
year={2022}, | |
note={Version 1.1 (Ada)}, | |
} | |
""" | |
_LICENSE = """ | |
The Open Corpus for Linguistics and Artificial Intelligence (Carolina) was | |
compiled for academic purposes, namely linguistic and computational analysis. | |
It is composed of texts assembled in various digital repositories, whose | |
licenses are multiple and therefore should be observed when making use of the | |
corpus. The Carolina headers are licensed under Creative Commons | |
Attribution-NonCommercial-ShareAlike 4.0 International." | |
""" | |
def _taxonomies():
    """Create a map between taxonomy code and name.

    Returns
    -------
    dict
        The dictionary of codes (3-letter keys) and human-readable names.
    """
    return dict(
        dat="datasets and other corpora",
        jud="judicial branch",
        leg="legislative branch",
        pub="public domain works",
        soc="social media",
        # NOTE(review): underscore here is inconsistent with the other
        # space-separated names, but the value doubles as a config name,
        # so it is kept as-is for backward compatibility.
        uni="university_domains",
        wik="wikis",
    )
_VERSION = "1.1.0" | |
_CORPUS_URL = "corpus/{taxonomy}/" | |
_CHECKSUM_FNAME = _CORPUS_URL + "checksum.sha256" | |
class CarolinaConfig(datasets.BuilderConfig):
    """Carolina Configuration."""

    def __init__(self, taxonomy: str = None, **kwargs):
        """BuilderConfig for Carolina.

        Parameters
        ----------
        taxonomy : str, optional
            The taxonomy code (3 letters). The code defines the taxonomy
            to download. If `None` (or `"all"`), all taxonomies will be
            downloaded.
        **kwargs
            Arguments passed to the parent `BuilderConfig`.

        Raises
        ------
        ValueError
            If `taxonomy` is neither `"all"` nor a known taxonomy code.
        """
        # Validates the taxonomy; `None` is an alias for "all".
        if taxonomy is None:
            taxonomy = "all"
        elif taxonomy != "all" and taxonomy not in _taxonomies():
            raise ValueError(f"Invalid taxonomy: {taxonomy}")

        # Derive config name and description from the chosen taxonomy.
        description = "Carolina corpus."
        if taxonomy == "all":
            name = "carolina"
            description += " Using all taxonomies."
        else:
            name = _taxonomies()[taxonomy]
            description += f" Using taxonomy {taxonomy}"

        super().__init__(name=name, description=description, **kwargs)

        # Carolina attributes.
        self.taxonomy = taxonomy
        self.version = datasets.Version(_VERSION)
class Carolina(datasets.GeneratorBasedBuilder):
    """Carolina Downloader and Builder."""

    BUILDER_CONFIG_CLASS = CarolinaConfig

    def _info(self):
        """Return the dataset metadata (features, citation, license)."""
        features = datasets.Features({
            "meta": datasets.Value("string"),
            "text": datasets.Value("string"),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            features=features,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the checksum files, derive the zip URLs from them and
        extract the XML corpus files.
        """
        # List taxonomies to download.
        if self.config.taxonomy == "all":
            taxonomies = _taxonomies().values()
        else:
            taxonomies = [_taxonomies()[self.config.taxonomy]]

        zip_urls = dict()
        for taxonomy in taxonomies:
            # Download the checksum file, which lists one corpus file
            # per line ("<sha256> <path>").
            checksum_path = _CHECKSUM_FNAME.format(taxonomy=taxonomy)
            checksum_path = dl_manager.download(checksum_path)
            tax_url = _CORPUS_URL.format(taxonomy=taxonomy)

            # Extract file names and build the zip download URLs.
            with open(checksum_path, encoding="utf-8") as cfile:
                for line in cfile:
                    fname = line.split()[1]
                    if fname.endswith(".xml.zip"):
                        zip_url = tax_url + fname        # download url
                        fname = os.path.split(fname)[1]  # removes subdirs
                        fname = fname[:-4]               # removes .zip
                        zip_urls[fname] = zip_url        # xml -> zip url

        # Extractions are made in cache folders and the path returned is
        # the folder path, not the extracted file (or files). It is
        # necessary to build the xml file path. It is made using the
        # zip_urls dict structure.
        extracted = dl_manager.download_and_extract(zip_urls)
        xml_files = sorted(os.path.join(v, k) for k, v in extracted.items())

        return [
            datasets.SplitGenerator(
                name="corpus",
                gen_kwargs={"filepaths": xml_files},
            )
        ]

    def _generate_examples(self, filepaths):
        """Yield `(key, example)` pairs, one example per TEI document."""
        TEI_NS = "{http://www.tei-c.org/ns/1.0}"
        parser_params = dict(
            huge_tree=True,
            encoding="utf-8",
            tag=f"{TEI_NS}TEI",
        )

        _key = 0
        for path in filepaths:
            # Stream-parse the xml file one <TEI> element at a time.
            for _, tei in etree.iterparse(path, **parser_params):
                header = tei.find(f"{TEI_NS}teiHeader")
                example = {
                    "meta": etree.tostring(
                        header, encoding="utf-8").decode("utf-8"),
                    "text": tei.find(f".//{TEI_NS}body/{TEI_NS}p").text,
                }
                yield _key, example
                _key += 1
                # iterparse keeps already-seen elements in memory unless
                # they are cleared; drop the element and its processed
                # siblings to keep memory bounded on large files.
                tei.clear()
                parent = tei.getparent()
                if parent is not None:
                    while tei.getprevious() is not None:
                        del parent[0]