# No chunks, one document per line

# normalize whitespace and newlines, etc.
# create a corpus of min. 200-400 GB ==> ~100B tokens
# max file size: 4 GB because of the Hugging Face Hub limit
# validation set: ~100M tokens ==> 200-400 MB
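# Output layout (see open_file() below): data/filtered/{lang}_{domain}_{split}_{file_number}.jsonl,
# xz-compressed to .jsonl.xz at the end of the run.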

import glob
import json
import multiprocessing
import os
import re
import string
from multiprocessing import Pool

import tqdm
from datasets import load_dataset
from tokenizers import normalizers

_LANGUAGES = ['bg', 'cs', 'da', 'de', 'el', 'en', 'es', 'et', 'fi', 'fr', 'ga', 'hr',
              'hu', 'it', 'lt', 'lv', 'mt', 'nl', 'pl', 'pt', 'ro', 'sk', 'sl', 'sv']
_DOMAIN_TYPES = ['legislation', 'caselaw', 'contracts', 'other', 'wikipedia']

custom_normalizer = normalizers.NFKD()

VALIDATION_SIZE = 1_000  # ~1MB per configuration ==> some low-resource configs will only have a validation file

filtered_dir = os.path.join('data', 'filtered')
os.makedirs(filtered_dir, exist_ok=True)


def preprocess_dataset(languages=None, domain_types=None):
    lang_type_datasets = []
    # set defaults if they are not set
    if languages is None:
        languages = _LANGUAGES
    if domain_types is None:
        domain_types = _DOMAIN_TYPES

    for LANG in languages:
        for DOMAIN_TYPE in domain_types:
            try:
                if DOMAIN_TYPE == 'wikipedia':
                    # get from EU_Wikipedias
                    dataset = load_dataset("joelito/EU_Wikipedias", date="20221120", language=LANG,
                                           split='train', streaming=True, use_auth_token=True)
                else:
                    # get from Multi_Legal_Pile
                    dataset = load_dataset("joelito/Multi_Legal_Pile", f'{LANG}_{DOMAIN_TYPE}',
                                           split='train', streaming=True, use_auth_token=True)
                dataset = dataset.shuffle(seed=42, buffer_size=10_000)
                print(f'Found data for `{DOMAIN_TYPE}` in language `{LANG}`.')
            except Exception:
                print(f'There is no data for `{DOMAIN_TYPE}` in language `{LANG}`.')
                continue
            lang_type_datasets.append(dataset)
    return lang_type_datasets


def write_samples(dataset_number):
    dataset, dataset_name = dataset_number
    if len(dataset_name.split('_')) == 1:  # wikipedia configs (e.g. `20221120.de`) contain no underscore
        language = dataset_name.split('.')[1]
        domain_type = "wikipedia"
        dataset_name = f"{language}_{domain_type}"  # reformat the config name so that we have wikipedia in the name
    else:
        language, domain_type = dataset_name.split('_')
    total_count, temp_count, all_samples, file_number = 0, 0, 0, 0
    out_file = open_file(dataset_name, file_number, "validation")  # we save the first examples to the validation set
    print(f'Processing for dataset {dataset_name} started!')
    # Read each document
    for sample in tqdm.tqdm(dataset):
        try:
            text = normalize_text(sample['text'])
            if "validation" in out_file.name and temp_count > VALIDATION_SIZE:
                # if we are saving to eval, and we have enough samples in the eval set, switch to train
                out_file.close()
                temp_count = 0
                out_file = open_file(dataset_name, file_number, "train")
            # ~500k documents ==> approx. 2 GB per file on average, which xz compresses to around ~500 MB (~75% compression ratio)
            if "train" in out_file.name and temp_count > 500_000:  # err on the small side of the 4 GB limit
                # if we are saving to train, and we reached the max size per file, switch to the next file
                out_file.close()
                file_number += 1
                temp_count = 0
                out_file = open_file(dataset_name, file_number, "train")
            # if the text is usable for pretraining, save it
            if is_text_usable(text):
                jurisdiction = sample.get('jurisdiction', "N/A")  # default for wikipedia samples
                doc_type = sample.get("type", "wikipedia")  # default for wikipedia samples (avoid shadowing the builtin `type`)
                entry = {"language": sample["language"], "type": doc_type, "jurisdiction": jurisdiction, "text": text}
                out_file.write(json.dumps(entry) + '\n')
                total_count += 1
                temp_count += 1
            all_samples += 1
        except Exception:
            continue  # skip malformed samples

    try:
        out_file.close()
    except Exception:
        pass

    print(f'Processing for dataset {dataset_name} finished with {total_count}/{all_samples}!')
    return


def is_text_usable(text):
    if not text:
        return False
    # Compute the share of characters that are neither punctuation nor digits
    punctuation = re.escape(string.punctuation)
    alpha_text = re.sub(rf'[{punctuation}\d]', '', text)  # remove digits and punctuation
    alpha_percent = len(alpha_text) / len(text)
    # Compute the document length in whitespace-separated words
    text_length = len(text.split())
    # Keep documents with less than 30% digits/punctuation and more than 64 words
    return alpha_percent > 0.7 and text_length > 64
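# For illustration: a four-word header such as "Art. 3 (1) 2021" is rejected
# (fewer than 64 words), while a full paragraph of legal prose passes both checks.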


def normalize_text(text):
    # Apply NFKD unicode normalization to the document
    text = custom_normalizer.normalize_str(text)
    # Collapse runs of tabs/spaces, then runs of newlines, into single separators
    text = re.sub(r'[\t ]+', r' ', text)
    text = re.sub(r'( *[\n\r]+ *)+', r'\n ', text)
    return re.sub(r'(\n )+', r'\n ', text)
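# Illustrative example (not part of the pipeline):
#   normalize_text("Art. 1\t Scope\n\n  This Regulation applies ...")
#   returns "Art. 1 Scope\n This Regulation applies ..."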


def open_file(dataset_name, file_number, split):
    return open(os.path.join(filtered_dir, f'{dataset_name}_{split}_{file_number}.jsonl'), 'w', encoding='utf8')


def clean_and_filter_documents():
    # Load all datasets across languages and types
    lang_type_datasets = preprocess_dataset(languages=None, domain_types=None)
    # also pass in dataset_name
    lang_type_datasets = [(dataset, dataset.config_name) for dataset in lang_type_datasets]
    print(lang_type_datasets)

    # Launch pool to preprocess datasets in parallel
    max_num_processes = min(multiprocessing.cpu_count() - 2, len(lang_type_datasets))
    num_processes = max(max_num_processes, 1)
    print(f'Launching a Pool with {num_processes} processes...')
    with Pool(num_processes) as pool:
        pool.map(write_samples, lang_type_datasets)

    # Compress datasets
    print(f"Compressing datasets at {filtered_dir}")
    # Do this at the end, after the multiprocessing pool has finished writing all files
    for path in glob.glob(os.path.join(filtered_dir, '*.jsonl')):
        print(f"Compressing {path}")
        os.system(f'xz -zkf -T0 {path}')  # -T0 uses all available cores for compression
        print(f"Removing uncompressed file at {path}")
        os.remove(path)  # remove the uncompressed file to save space

    print(f"Finished preparing legal data")



if __name__ == '__main__':
    """
    Run with 
    export PYTHONPATH=. && python prepare_legal_data.py | tee prepare_legal_data.log 
    """
    clean_and_filter_documents()

# Get locally
# def get_file(LANG, DOMAIN_TYPE, split, number):
#    base_folder = "data/mlm_dataset/chunks_512"
#    return f'{base_folder}/{LANG}_{DOMAIN_TYPE}_{split}_{number}.jsonl.xz'

# files = [get_file(LANG, DOMAIN_TYPE, 'train', i) for i in range(1, 5)]
# files = [f for f in files if os.path.exists(f)] # make sure the file actually exists
# dataset = load_dataset("json", data_files={'train': files}, split='train', streaming=True)
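# Quick sanity check of the compressed output (illustrative sketch; the path below is just an example):
# import lzma, json
# with lzma.open('data/filtered/de_legislation_train_0.jsonl.xz', 'rt', encoding='utf8') as f:
#     sample = json.loads(next(f))
#     print(sample['language'], sample['type'], sample['jurisdiction'], len(sample['text']))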

# TODO: write dataset cards for the chunked, EU Wikipedia, and filtered datasets