# coding=utf-8
# Copyright 2022 esCorpius authors
#
# The code required to produce and load this dataset is licensed under the MIT License.
# The code samples included in this dataset keep their own licenses, which can be retrieved via their metadata.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Please note that the dataset release is still work in progress.
"""The esCorpius dataset."""

import json
from pathlib import Path

import datasets


_CITATION = """\
@misc{TODO
}
"""

_DESCRIPTION = """\
Spanish dataset
"""  # TODO: expand

_HOMEPAGE = "https://huggingface.co/datasets/LHF/escorpius"

_LICENSE = "CC BY-NC-ND 4.0"

_URL = ""

_FEATURES = datasets.Features(
    {
        "id": datasets.Value("string"),
        "text": datasets.Value("string"),
        "url_warc": datasets.Value("string"),
        "url": datasets.Value("string"),
    }
)


class EsCorpiusConfig(datasets.BuilderConfig):
    """BuilderConfig for esCorpius."""

    def __init__(self, *args, **kwargs):
        """BuilderConfig for esCorpius.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(
            *args,
            **kwargs,
        )


class EsCorpius(datasets.GeneratorBasedBuilder):
    """The esCorpius dataset."""

    BUILDER_CONFIGS = [
        EsCorpiusConfig(
            name="esCorpius",
            version=datasets.Version("1.0.1"),
            description="Spanish dataset",
        ),
    ]

    def _info(self):
        """Return information and feature typings for the dataset."""
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            features=_FEATURES,
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            # License for the dataset if available.
            license=_LICENSE,
            # Citation for the dataset.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # JSONL corpus split into shards named es_corpus.jsonl.aa .. es_corpus.jsonl.bg.
        urls_to_download = [
            'es_corpus.jsonl.aa', 'es_corpus.jsonl.ab', 'es_corpus.jsonl.ac', 'es_corpus.jsonl.ad',
            'es_corpus.jsonl.ae', 'es_corpus.jsonl.af', 'es_corpus.jsonl.ag', 'es_corpus.jsonl.ah',
            'es_corpus.jsonl.ai', 'es_corpus.jsonl.aj', 'es_corpus.jsonl.ak', 'es_corpus.jsonl.al',
            'es_corpus.jsonl.am', 'es_corpus.jsonl.an', 'es_corpus.jsonl.ao', 'es_corpus.jsonl.ap',
            'es_corpus.jsonl.aq', 'es_corpus.jsonl.ar', 'es_corpus.jsonl.as', 'es_corpus.jsonl.at',
            'es_corpus.jsonl.au', 'es_corpus.jsonl.av', 'es_corpus.jsonl.aw', 'es_corpus.jsonl.ax',
            'es_corpus.jsonl.ay', 'es_corpus.jsonl.az', 'es_corpus.jsonl.ba', 'es_corpus.jsonl.bb',
            'es_corpus.jsonl.bc', 'es_corpus.jsonl.bd', 'es_corpus.jsonl.be', 'es_corpus.jsonl.bf',
            'es_corpus.jsonl.bg',
        ]
        # urls_to_download = [urls_to_download[-1]]  # uncomment to test with a single shard
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": downloaded_files}),
        ]

    def _generate_examples(self, files):
        """Yield examples as (key, example) tuples."""
        key = 0
        for path in files:
            with open(path, "r", encoding="utf-8") as f:
                for row in f:
                    data = json.loads(row)
                    yield key, data
                    key += 1
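

# A minimal usage sketch, kept as comments so it does not execute on import.
# It assumes this script is hosted in the LHF/escorpius repository on the
# Hugging Face Hub (the repo id comes from _HOMEPAGE above; availability and
# exact split naming are assumptions, not guarantees):
#
#   from datasets import load_dataset
#
#   # Streaming avoids downloading all es_corpus.jsonl.* shards up front.
#   ds = load_dataset("LHF/escorpius", split="train", streaming=True)
#   for example in ds:
#       # Each example exposes the fields declared in _FEATURES.
#       print(example["id"], example["url"], example["text"][:80])
#       break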