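"""Prepare multilingual legal pretraining data.

Streams documents from the Multi_Legal_Pile (and EU_Wikipedias for the
`wikipedia` domain), normalizes and filters them, writes them into sharded
JSONL files with a small validation split per language/domain pair, and
compresses the resulting shards with xz.
"""
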
import glob
import json
import multiprocessing
import os
import re
from multiprocessing import Pool

import tqdm
from datasets import load_dataset
from tokenizers import normalizers

_LANGUAGES = ['bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'fi', 'fr', 'ga', 'hr',
              'hu', 'it', 'lt', 'lv', 'mt', 'nl', 'pl', 'pt', 'ro', 'sk', 'sl', 'sv']
_DOMAIN_TYPES = ['legislation', 'caselaw', 'contracts', 'other', 'wikipedia']

# Unicode NFKD normalization (canonical decomposition with compatibility mappings)
custom_normalizer = normalizers.NFKD()

# Number of documents reserved for the validation split of each language/domain pair
VALIDATION_SIZE = 1_000

filtered_dir = os.path.join('data', 'filtered')
os.makedirs(filtered_dir, exist_ok=True)


def preprocess_dataset(languages=None, domain_types=None):
    """Load a streaming dataset for every available (language, domain) pair."""
    lang_type_datasets = []

    if languages is None:
        languages = _LANGUAGES
    if domain_types is None:
        domain_types = _DOMAIN_TYPES

    for lang in languages:
        for domain_type in domain_types:
            try:
                if domain_type == 'wikipedia':
                    # Wikipedia pages come from the EU_Wikipedias dump
                    dataset = load_dataset("joelito/EU_Wikipedias", date="20221120", language=lang,
                                           split='train', streaming=True, use_auth_token=True)
                else:
                    # All legal domains come from the Multi_Legal_Pile
                    dataset = load_dataset("joelito/Multi_Legal_Pile", f'{lang}_{domain_type}',
                                           split='train', streaming=True, use_auth_token=True)
                # Shuffle the stream with a fixed seed for reproducibility
                dataset = dataset.shuffle(seed=42, buffer_size=10_000)
                print(f'Found data for `{domain_type}` in language `{lang}`.')
            except Exception:
                print(f'There is no data for `{domain_type}` in language `{lang}`.')
                continue
            lang_type_datasets.append(dataset)
    return lang_type_datasets


def write_samples(dataset_with_name):
    """Stream one dataset and write its usable documents to sharded JSONL files."""
    dataset, dataset_name = dataset_with_name
    if len(dataset_name.split('_')) == 1:
        # Wikipedia config names look like `<date>.<language>`, e.g. `20221120.en`
        language = dataset_name.split('.')[1]
        domain_type = "wikipedia"
        dataset_name = f"{language}_{domain_type}"
    else:
        language, domain_type = dataset_name.split('_')
    total_count, temp_count, all_samples, file_number = 0, 0, 0, 0
    # The first VALIDATION_SIZE documents go to the validation split
    out_file = open_file(dataset_name, file_number, "validation")
    print(f'Processing for dataset {dataset_name} started!')

    for sample in tqdm.tqdm(dataset):
        # Count every streamed sample so the final kept/seen ratio is meaningful
        all_samples += 1
        try:
            text = normalize_text(sample['text'])
            # Once the validation split is full, switch to the train split
            if "validation" in out_file.name and temp_count > VALIDATION_SIZE:
                out_file.close()
                temp_count = 0
                out_file = open_file(dataset_name, file_number, "train")

            # Start a new train shard every 500k documents
            if "train" in out_file.name and temp_count > 500_000:
                out_file.close()
                file_number += 1
                temp_count = 0
                out_file = open_file(dataset_name, file_number, "train")

            if is_text_usable(text):
                jurisdiction = sample.get('jurisdiction', "N/A")
                doc_type = sample.get("type", "wikipedia")  # avoid shadowing the `type` builtin
                entry = {"language": sample["language"], "type": doc_type,
                         "jurisdiction": jurisdiction, "text": text}
                out_file.write(json.dumps(entry) + '\n')
                total_count += 1
                temp_count += 1
        except Exception:
            continue

    try:
        out_file.close()
    except Exception:
        pass

    print(f'Processing for dataset {dataset_name} finished with {total_count}/{all_samples}!')


def is_text_usable(text):
    """Heuristically decide whether a document is clean enough to keep."""
    # string.punctuation, escaped by hand for use inside a regex character class
    punctuation = '!\"#$%&\'()*+,\-\./:;<=>?@\[\\\]\^_`{\|}~'
    # Fraction of characters that are neither punctuation nor digits
    alpha_text = re.sub(rf'[{punctuation}\d]', '', text)
    alpha_percent = len(alpha_text) / len(text) if text else 0

    text_length = len(text.split())

    # Keep documents that are mostly letters and longer than 64 words
    return alpha_percent > 0.7 and text_length > 64


def normalize_text(text):
    """Apply NFKD normalization and collapse redundant whitespace."""
    text = custom_normalizer.normalize_str(text)
    # Collapse runs of tabs/spaces into single spaces, turn runs of newlines
    # into single `\n ` separators, then de-duplicate the resulting `\n ` runs
    return re.sub(r'(\n )+', r'\n ', re.sub(r'( *[\n\r]+ *)+', r'\n ', re.sub(r'[\t ]+', r' ', text)))


def open_file(dataset_name, file_number, split):
    return open(os.path.join(filtered_dir, f'{dataset_name}_{split}_{file_number}.jsonl'), 'w', encoding='utf8')


def clean_and_filter_documents():
    # Collect one streaming dataset per available (language, domain) pair
    lang_type_datasets = preprocess_dataset(languages=None, domain_types=None)

    # Pair each dataset with its config name, which encodes language and domain
    lang_type_datasets = [(dataset, dataset.config_name) for dataset in lang_type_datasets]
    print(lang_type_datasets)

    # Use one worker per dataset, leaving two cores free, but always at least one
    max_num_processes = min(multiprocessing.cpu_count() - 2, len(lang_type_datasets))
    num_processes = max(max_num_processes, 1)
    print(f'Launching a Pool with maximum {num_processes} processes...')
    with Pool(num_processes) as pool:
        pool.map(write_samples, lang_type_datasets)

    print(f"Compressing datasets at {filtered_dir}")
    # Compress each shard with multithreaded xz (keeping the original), then remove the original
    for path in glob.glob(os.path.join(filtered_dir, '*.jsonl')):
        print(f"Compressing {path}")
        os.system(f'xz -zkf -T0 {path}')
        print(f"Removing uncompressed file at {path}")
        os.system(f'rm {path}')

    print("Finished preparing legal data")


if __name__ == '__main__':
    """
    Run with
    export PYTHONPATH=. && python prepare_legal_data.py | tee prepare_legal_data.log
    """
    clean_and_filter_documents()
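
# For reference, a minimal sketch of how a produced shard can be inspected.
# The shard name below is only an example; actual names follow the pattern
# `{language}_{domain}_{split}_{number}.jsonl.xz` under data/filtered:
#
#   import json, lzma
#   with lzma.open('data/filtered/en_legislation_train_0.jsonl.xz') as f:
#       doc = json.loads(f.readline())
#       print(doc['language'], doc['type'], doc['jurisdiction'])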