"""MultiLegalPile Chunks 500""" import json import datasets from huggingface_hub.file_download import hf_hub_url from datasets.download.streaming_download_manager import xopen try: import lzma as xz except ImportError: import pylzma as xz datasets.logging.set_verbosity_info() logger = datasets.logging.get_logger(__name__) _CITATION = """ """ _DESCRIPTION = """ A chunked version of the MultiLegalPile dataset. """ _URL = "https://huggingface.co/datasets/joelito/MultiLegalPile_Chunks_500" _LANGUAGES = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu", "it", "lt", "lv", "mt", "nl", "pl", "pt", "ro", "sk", "sl", "sv"] _TYPES = ["caselaw", "contracts", "legislation", "other"] _JURISDICTIONS = ["Austria", "Belgium", "Bulgaria", "Croatia", "Czechia", "Denmark", "Estonia", "Finland", "France", "Germany", "Greece", "Hungary", "Ireland", "Italy", "Latvia", "Lithuania", "Luxembourg", "Malta", "Netherlands", "Poland", "Portugal", "Romania", "Slovakia", "Slovenia", "Spain", "Sweden", "EU", "Switzerland", "UK", "US", "Canada", "N/A"] # IMPORTANT: Increase this once larger datasets are available (pt_caselaw has 35 at the moment) _HIGHEST_NUMBER_OF_SHARDS = 35 class MultiLegalPileChunks500Config(datasets.BuilderConfig): """BuilderConfig for MultiLegalPileChunks500.""" def __init__(self, name: str, **kwargs): """BuilderConfig for MultiLegalPileChunks500. Args: name: combination of language and type with _ language: One of bg,cs,da,de,el,en,es,et,fi,fr,ga,hr,hu,it,lt,lv,mt,nl,pl,pt,ro,sk,sl,sv or all type: One of caselaw,contracts,legislation,other or all **kwargs: keyword arguments forwarded to super. """ super(MultiLegalPileChunks500Config, self).__init__(**kwargs) self.name = name self.language = name.split("_")[0] self.type = name.split("_")[1] class MultiLegalPileChunks500(datasets.GeneratorBasedBuilder): """ MultiLegalPileChunks500: A filtered dataset of multilingual legal data in the EU languages """ BUILDER_CONFIG_CLASS = MultiLegalPileChunks500Config BUILDER_CONFIGS = [MultiLegalPileChunks500Config(f"{language}_{type}") for type in _TYPES + ["all"] for language in _LANGUAGES + ["all"]] def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "language": datasets.Value("string"), # one of _LANGUAGES "type": datasets.Value("string"), # one of _TYPES "jurisdiction": datasets.Value("string"), # one of _JURISDICTIONS "text": datasets.Value("string"), } ), supervised_keys=None, homepage=_URL, citation=_CITATION, ) def _split_generators(self, dl_manager): def download_url(file_name): url = hf_hub_url(repo_id="joelito/MultiLegalPile_Chunks_500", filename=f"data/{file_name}.jsonl.xz", repo_type="dataset") return dl_manager.download(url) languages = _LANGUAGES if self.config.language == "all" else [self.config.language] types = _TYPES if self.config.type == "all" else [self.config.type] split_generators = [] for split in [datasets.Split.TRAIN, datasets.Split.VALIDATION]: filepaths = [] for language in languages: for type in types: for shard in range(1, _HIGHEST_NUMBER_OF_SHARDS + 1): try: filepaths.append(download_url(f"{language}_{type}_{split}_{shard}")) except: break # we found the last shard split_generators.append( datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": filepaths}) ) return split_generators def _generate_examples(self, filepaths): """This function returns the examples in the raw (text) form by iterating on all the files.""" id_ = 0 for filepath in filepaths: logger.info("Generating examples from = %s", 
filepath) try: with xz.open(xopen(filepath, "rb"), "rt", encoding="utf-8") as f: for line in f: if line: example = json.loads(line) if example is not None and isinstance(example, dict): yield id_, example id_ += 1 except Exception: logger.debug("Error while processing file %s", filepath)
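

# Usage sketch (an addition, not part of the original script): a minimal example
# of loading one configuration of this dataset with `datasets.load_dataset`.
# The "de_caselaw" config name and streaming mode are illustrative assumptions;
# any "<language>_<type>" combination built above (including "all_all") works
# the same way. The __main__ guard keeps this from running when the datasets
# library imports this file as a loading script. Note that recent versions of
# `datasets` may additionally require trust_remote_code=True for script-based
# datasets.
if __name__ == "__main__":
    streamed = datasets.load_dataset(
        "joelito/MultiLegalPile_Chunks_500",  # or the local path to this script
        "de_caselaw",                         # config: German caselaw chunks
        split="train",
        streaming=True,                       # iterate shards without downloading everything first
    )
    # take() is available on streaming (iterable) datasets; print a few examples.
    for example in streamed.take(3):
        print(example["language"], example["type"], example["jurisdiction"])
        print(example["text"][:200])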