diff --git a/MultiLegalPile_Wikipedia_Filtered.py b/MultiLegalPile_Wikipedia_Filtered.py
deleted file mode 100644
index d98e189d0da89b67b033a5608bad964349d3a97d..0000000000000000000000000000000000000000
--- a/MultiLegalPile_Wikipedia_Filtered.py
+++ /dev/null
@@ -1,121 +0,0 @@
-"""MultiLegalPile Wikipedia Filtered"""
-
-import json
-
-import datasets
-from huggingface_hub.file_download import hf_hub_url
-
-try:
-    import lzma as xz
-except ImportError:
-    import pylzma as xz
-
-datasets.logging.set_verbosity_info()
-logger = datasets.logging.get_logger(__name__)
-
-_CITATION = """
-"""
-
-_DESCRIPTION = """
-A filtered version of the MultiLegalPile dataset, together with wikipedia articles.
-"""
-
-_URL = "https://huggingface.co/datasets/joelito/MultiLegalPile_Wikipedia_Filtered"
-
-_LANGUAGES = ["bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr",
-              "hu", "it", "lt", "lv", "mt", "nl", "pl", "pt", "ro", "sk", "sl", "sv"]
-
-_TYPES = ["caselaw", "contracts", "legislation", "other", "wikipedia"]
-
-_JURISDICTONS = ["Austria", "Belgium", "Bulgaria", "Croatia", "Czechia", "Denmark", "Estonia", "Finland",
-                 "France", "Germany", "Greece", "Hungary", "Ireland", "Italy", "Latvia", "Lithuania", "Luxembourg",
-                 "Malta", "Netherlands", "Poland", "Portugal", "Romania", "Slovakia", "Slovenia", "Spain", "Sweden",
-                 "EU", "Switzerland", "UK", "US", "Canada", "N/A"]
-
-# IMPORTANT: Increase this once larger datasets are available (en_caselaw has 11 at the moment)
-_HIGHEST_NUMBER_OF_SHARDS = 11
-
-
-class MultiLegalPileWikipediaFilteredConfig(datasets.BuilderConfig):
-    """BuilderConfig for MultiLegalPileWikipediaFiltered."""
-
-    def __init__(self, name: str, **kwargs):
-        """BuilderConfig for MultiLegalPileWikipediaFiltered.
-        Args:
-            name: combination of language and type with _
-            language: One of bg,cs,da,de,el,en,es,et,fi,fr,ga,hr,hu,it,lt,lv,mt,nl,pl,pt,ro,sk,sl,sv or all
-            type: One of caselaw,contracts,legislation,other,wikipedia or all
-            **kwargs: keyword arguments forwarded to super.
- """ - super(MultiLegalPileWikipediaFilteredConfig, self).__init__(**kwargs) - self.name = name - self.language = name.split("_")[0] - self.type = name.split("_")[1] - - -class MultiLegalPileWikipediaFiltered(datasets.GeneratorBasedBuilder): - """ - MultiLegalPileWikipediaFiltered: - A filtered dataset of multilingual legal data and wikipedias in the EU languages - """ - BUILDER_CONFIG_CLASS = MultiLegalPileWikipediaFilteredConfig - - BUILDER_CONFIGS = [MultiLegalPileWikipediaFilteredConfig(f"{language}_{type}") - for type in _TYPES + ["all"] - for language in _LANGUAGES + ["all"]] - - def _info(self): - return datasets.DatasetInfo( - description=_DESCRIPTION, - features=datasets.Features( - { - "language": datasets.Value("string"), # one of _LANGUAGES - "type": datasets.Value("string"), # one of _TYPES - "jurisdiction": datasets.Value("string"), # one of _JURISDICTONS - "text": datasets.Value("string"), - } - ), - supervised_keys=None, - homepage=_URL, - citation=_CITATION, - ) - - def _split_generators(self, dl_manager): - def download_url(file_name): - url = hf_hub_url(repo_id="joelito/MultiLegalPile_Wikipedia_Filtered", - filename=f"data/{file_name}.jsonl.xz", repo_type="dataset") - return dl_manager.download(url) - - languages = _LANGUAGES if self.config.language == "all" else [self.config.language] - types = _TYPES if self.config.type == "all" else [self.config.type] - - split_generators = [] - for split in [datasets.Split.TRAIN, datasets.Split.VALIDATION]: - filepaths = [] - for language in languages: - for type in types: - for shard in range(_HIGHEST_NUMBER_OF_SHARDS): - try: - filepaths.append(download_url(f"{language}_{type}_{split}_{shard}")) - except: - break # we found the last shard - split_generators.append( - datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": filepaths}) - ) - return split_generators - - def _generate_examples(self, filepaths): - """This function returns the examples in the raw (text) form by iterating on all the files.""" - id_ = 0 - for filepath in filepaths: - logger.info("Generating examples from = %s", filepath) - try: - with xz.open(open(filepath, "rb"), "rt", encoding="utf-8") as f: - for line in f: - if line: - example = json.loads(line) - if example is not None and isinstance(example, dict): - yield id_, example - id_ += 1 - except Exception: - logger.exception("Error while processing file %s", filepath) diff --git a/README.md b/README.md deleted file mode 100644 index ec4466650dfe1aa10aae1c9ee8532b715a317357..0000000000000000000000000000000000000000 --- a/README.md +++ /dev/null @@ -1,209 +0,0 @@ ---- -annotations_creators: -- other -language_creators: -- found -language: -- bg -- cs -- da -- de -- el -- en -- es -- et -- fi -- fr -- ga -- hr -- hu -- it -- lt -- lv -- mt -- nl -- pl -- pt -- ro -- sk -- sl -- sv -license: -- cc-by-4.0 -multilinguality: -- multilingual -paperswithcode_id: null -pretty_name: "MultiLegalPile_Wikipedia_Filtered: A filtered version of the MultiLegalPile dataset, together with wikipedia articles." 
diff --git a/README.md b/README.md
deleted file mode 100644
index ec4466650dfe1aa10aae1c9ee8532b715a317357..0000000000000000000000000000000000000000
--- a/README.md
+++ /dev/null
@@ -1,209 +0,0 @@
----
-annotations_creators:
-- other
-language_creators:
-- found
-language:
-- bg
-- cs
-- da
-- de
-- el
-- en
-- es
-- et
-- fi
-- fr
-- ga
-- hr
-- hu
-- it
-- lt
-- lv
-- mt
-- nl
-- pl
-- pt
-- ro
-- sk
-- sl
-- sv
-license:
-- cc-by-4.0
-multilinguality:
-- multilingual
-paperswithcode_id: null
-pretty_name: "MultiLegalPile_Wikipedia_Filtered: A filtered version of the MultiLegalPile dataset, together with wikipedia articles."
-size_categories:
-- 10M<n<100M
[...]
diff --git a/prepare_legal_data.py b/prepare_legal_data.py
deleted file mode 100644
--- a/prepare_legal_data.py
+++ /dev/null
-# ~100B tokens
-# max file size: 4GB because of huggingface
-# validation set: ~100M tokens ==> 200-400MB
-
-import glob
-import json
-import multiprocessing
-
-import tqdm
-import os
-import re
-from multiprocessing import Pool
-
-from datasets import load_dataset
-from tokenizers import normalizers
-
-_LANGUAGES = ['bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'fi', 'fr', 'ga', 'hr',
-              'hu', 'it', 'lt', 'lv', 'mt', 'nl', 'pl', 'pt', 'ro', 'sk', 'sl', 'sv']
-_DOMAIN_TYPES = ['legislation', 'caselaw', 'contracts', 'other', 'wikipedia']
-
-custom_normalizer = normalizers.NFKD()
-
-VALIDATION_SIZE = 1_000  # ~1MB per configuration ==> some low-resource configs will only have a validation file
-
-filtered_dir = os.path.join('data', 'filtered')
-os.makedirs(filtered_dir, exist_ok=True)
-
-
-def preprocess_dataset(languages=None, domain_types=None):
-    lang_type_datasets = []
-    # set defaults if they are not set
-    if languages is None:
-        languages = _LANGUAGES
-    if domain_types is None:
-        domain_types = _DOMAIN_TYPES
-
-    for LANG in languages:
-        for DOMAIN_TYPE in domain_types:
-            try:
-                if DOMAIN_TYPE == 'wikipedia':
-                    # get from EU_Wikipedias
-                    dataset = load_dataset("joelito/EU_Wikipedias", date="20221120", language=LANG,
-                                           split='train', streaming=True, use_auth_token=True)
-                else:
-                    # get from Multi_Legal_Pile
-                    dataset = load_dataset("joelito/Multi_Legal_Pile", f'{LANG}_{DOMAIN_TYPE}',
-                                           split='train', streaming=True, use_auth_token=True)
-                dataset = dataset.shuffle(seed=42, buffer_size=10_000)
-                print(f'Found data for `{DOMAIN_TYPE}` in language `{LANG}`.')
-            except:
-                print(f'There is no data for `{DOMAIN_TYPE}` in language `{LANG}`.')
-                continue
-            lang_type_datasets.append(dataset)
-    return lang_type_datasets
-
-
-def write_samples(dataset_number):
-    dataset, dataset_name = dataset_number
-    if len(dataset_name.split('_')) == 1:  # wikipedia
-        language = dataset_name.split('.')[1]
-        domain_type = "wikipedia"
-        dataset_name = f"{language}_{domain_type}"  # reformat the config name so that we have wikipedia in the name
-    else:
-        language, domain_type = dataset_name.split('_')
-    total_count, temp_count, all_samples, file_number = 0, 0, 0, 0
-    out_file = open_file(dataset_name, file_number, "validation")  # we save the first examples to the validation set
-    print(f'Processing for dataset {dataset_name} started!')
-    # Read each document
-    for sample in tqdm.tqdm(dataset):
-        try:
-            text = normalize_text(sample['text'])
-            if "validation" in out_file.name and temp_count > VALIDATION_SIZE:
-                # if we are saving to eval, and we have enough samples in the eval set, switch to train
-                out_file.close()
-                temp_count = 0
-                out_file = open_file(dataset_name, file_number, "train")
-            # on average approx. 2GB per file, compresses (with xz) to around ~500MB (xz: ~75% compression ratio)
-            if "train" in out_file.name and temp_count > 500_000:  # err on the small side of the file size
-                # if we are saving to train, and we reached the max size per file, switch to the next file
-                out_file.close()
-                file_number += 1
-                temp_count = 0
-                out_file = open_file(dataset_name, file_number, "train")
-            # if the text is usable for pretraining, save it
-            if is_text_usable(text):
-                jurisdiction = sample.get('jurisdiction', "N/A")  # set defaults for wikipedia
-                type = sample.get("type", "wikipedia")  # set defaults for wikipedia
-                entry = {"language": sample["language"], "type": type, "jurisdiction": jurisdiction, "text": text}
-                out_file.write(json.dumps(entry) + '\n')
-                total_count += 1
-                temp_count += 1
-            all_samples += 1
-        except:
-            continue
-
-    try:
-        out_file.close()
-    except:
-        pass
-
-    print(f'Processing for dataset {dataset_name} finished with {total_count}/{all_samples}!')
-    return
-
-
-def is_text_usable(text):
-    # Compute percentage of alphabetical characters in relation to full sequence length
-    punctuation = '!\"#$%&\'()*+,\-\./:;<=>?@\[\\\]\^_`{\|}~'
-    alpha_text = re.sub(rf'[{punctuation}\d]', '', text)  # remove numbers and punctuation
-    alpha_percent = len(alpha_text) / len(text)
-    # Compute total chunk length
-    text_length = len(text.split())
-    # Ignore sequences with more than 30% numbers or short sequences (less than 64 tokens)
-    return alpha_percent > 0.7 and text_length > 64
-
-
-def normalize_text(text):
-    # Normalize the document
-    text = custom_normalizer.normalize_str(text)
-    # Replace multiple newline and whitespaces
-    return re.sub(r'(\n )+', r'\n ', re.sub(r'( *[\n\r]+ *)+', r'\n ', re.sub(r'[\t ]+', r' ', text)))
-
-
-def open_file(dataset_name, file_number, split):
-    return open(os.path.join(filtered_dir, f'{dataset_name}_{split}_{file_number}.jsonl'), 'w', encoding='utf8')
-
-
-def clean_and_filter_documents():
-    # Load all datasets across languages and types
-    lang_type_datasets = preprocess_dataset(languages=None, domain_types=None)
-    # also pass in dataset_name
-    lang_type_datasets = [(dataset, dataset.config_name) for dataset in lang_type_datasets]
-    print(lang_type_datasets)
-
-    # Launch pool to preprocess datasets in parallel
-    max_num_processes = min(multiprocessing.cpu_count() - 2, len(lang_type_datasets))
-    num_processes = max(max_num_processes, 1)
-    print(f'Launching a Pool with maximum {num_processes} processes...')
-    with Pool(num_processes) as pool:
-        pool.map(write_samples, lang_type_datasets)
-
-    # Compress datasets
-    print(f"Compressing datasets at {filtered_dir}")
-    # Do this at the end because we use multithreading
-    for path in glob.glob(os.path.join(filtered_dir, '*.jsonl')):
-        print(f"Compressing {path}")
-        os.system(f'xz -zkf -T0 {path}')  # -T0 to use multithreading
-        print(f"Removing uncompressed file at {path}")
-        os.system(f'rm {path}')  # remove uncompressed file to save space
-
-    print(f"Finished preparing legal data")
-
-
-if __name__ == '__main__':
-    """
-    Run with
-    export PYTHONPATH=. && python prepare_legal_data.py | tee prepare_legal_data.log
-    """
-    clean_and_filter_documents()
-
-# Get locally
-# def get_file(LANG, DOMAIN_TYPE, split, number):
-#     base_folder = "data/mlm_dataset/chunks_512"
-#     return f'{base_folder}/{LANG}_{DOMAIN_TYPE}_{split}_{number}.jsonl.xz'
-
-# files = [get_file(LANG, DOMAIN_TYPE, 'train', i) for i in range(1, 5)]
-# files = [f for f in files if os.path.exists(f)]  # make sure the file actually exists
-# dataset = load_dataset("json", data_files={'train': files}, split='train', streaming=True)
-
-# TODO write dataset cards for chunked, eu wikipedia and filtered dataset
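To sanity-check the output of `prepare_legal_data.py`, the compressed shards it writes can be read back the same way the deleted loader's `_generate_examples` does. A minimal sketch, assuming a shard named `de_caselaw_train_0.jsonl.xz` exists under `data/filtered` (the file name is hypothetical):

```python
import json
import lzma
import os

# Hedged sketch: read one record from a shard produced by write_samples()
# and compressed with `xz` at the end of clean_and_filter_documents().
path = os.path.join("data", "filtered", "de_caselaw_train_0.jsonl.xz")  # hypothetical shard name
with lzma.open(path, "rt", encoding="utf-8") as f:
    for line in f:
        if line.strip():
            example = json.loads(line)
            print(example["language"], example["type"], example["jurisdiction"], example["text"][:80])
            break
```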