import os
from glob import glob
from pathlib import Path

import numpy as np
import pandas as pd

from web_anno_tsv import open_web_anno_tsv
from web_anno_tsv.web_anno_tsv import ReadException, Annotation

pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', None)

annotation_labels = {'ADDRESS': ['building', 'city', 'country', 'place', 'postcode', 'street', 'territory'],
                     'AMOUNT': ['unit', 'value'],
                     'DATE': ['year', 'standard abbreviation', 'month', 'day of the week', 'day', 'calender event'],
                     'PERSON': ['age', 'email', 'ethnic category', 'family name', 'financial', 'given name – female',
                                'given name – male',
                                'health insurance number', 'id document number', 'initial name', 'marital status',
                                'medical record number',
                                'nationality', 'profession', 'role', 'social security number', 'title', 'url'],
                     'ORGANISATION': [],
                     'TIME': [],
                     'VEHICLE': ['build year', 'colour', 'license plate number', 'model', 'type']}

# make all labels upper case
annotation_labels = {key.upper(): [label.upper() for label in labels] for key, labels in annotation_labels.items()}
print(annotation_labels)
print("coarse_grained:", list(annotation_labels.keys()))
print("fine_grained:",
      [finegrained for finegrained in [finegrained_list for finegrained_list in annotation_labels.values()]])

base_path = Path("extracted")

# TODO future work can add these datasets too to make it larger
special_paths = {
    "EL": ["EL/ANNOTATED_DATA/LEGAL/AREIOSPAGOS1/annotated/full_dataset"],
    "EN": ["EN/ANNOTATED_DATA/ADMINISTRATIVE-LEGAL/annotated/full_dataset"],
    "FR": ["FR/ANNOTATED_DATA/LEGAL/COUR_CASSATION1/annotated/full_dataset/Civil",
           "FR/ANNOTATED_DATA/LEGAL/COUR_CASSATION1/annotated/full_dataset/Commercial",
           "FR/ANNOTATED_DATA/LEGAL/COUR_CASSATION1/annotated/full_dataset/Criminal",
           "FR/ANNOTATED_DATA/LEGAL/COUR_CASSATION2/annotated/full_dataset",
           "FR/ANNOTATED_DATA/MEDICAL/CAS1/annotated/full_dataset"],
    "IT": ["IT/ANNOTATED_DATA/Corte_Suprema_di_Cassazione/annotated"],
    "MT": ["MT/ANNOTATED_DATA/ADMINISTRATIVE/annotated/full_dataset",
           "MT/ANNOTATED_DATA/GENERAL_NEWS/News_1/annotated/full_dataset",
           "MT/ANNOTATED_DATA/LEGAL/Jurisprudence_1/annotated/full_dataset"],
}
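
# Note: special_paths is currently unused; parse_files() below only reads the per-language
# EUR_LEX subfolder returned by get_path().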


def get_path(language):
    return base_path / language / "ANNOTATED_DATA/EUR_LEX/annotated/full_dataset"


def get_coarse_grained_for_fine_grained(label):
    for coarse_grained, fine_grained_set in annotation_labels.items():
        if label in fine_grained_set:
            return coarse_grained
    return None  # raise ValueError(f"Did not find fine_grained label {label}")


def is_fine_grained(label):
    for coarse_grained, fine_grained_set in annotation_labels.items():
        if label.upper() in fine_grained_set:
            return True
    return False


def is_coarse_grained(label):
    return label.upper() in annotation_labels.keys()


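# Wrapper around web_anno_tsv's Annotation that copies its fields and defines __eq__/__hash__,
# so duplicate annotations can be dropped via dict.fromkeys() in get_token_annotations() below.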
class HashableAnnotation(Annotation):
    def __init__(self, annotation):
        # Copy only the fields needed for equality and hashing.
        self.label = annotation.label
        self.start = annotation.start
        self.stop = annotation.stop
        self.text = annotation.text

    def __eq__(self, other):
        return self.label == other.label and self.start == other.start and self.stop == other.stop and self.text == other.text

    def __hash__(self):
        return hash(('label', self.label, 'start', self.start, 'stop', self.stop, 'text', self.text))


def get_token_annotations(token, annotations):
    annotations = list(dict.fromkeys([HashableAnnotation(ann) for ann in annotations]))  # remove duplicate annotations
    coarse_grained = "O"
    fine_grained = "o"
    for annotation in annotations:
        label = annotation.label
        # if token.start == annotation.start and token.stop == annotation.stop:  # exact-span match (fine-grained only)
        if token.start >= annotation.start and token.stop <= annotation.stop:  # token lies within the annotation span
            # We do not support multi-label annotations per token, for simplicity:
            # once a token has a coarse-grained or fine-grained label, we do not overwrite it.
            if coarse_grained == "O" and is_coarse_grained(label):
                coarse_grained = label
            elif fine_grained == "o" and is_fine_grained(label):
                # Some DATE annotations are mislabeled as 'day'; this is hard to correct, so we leave them as-is.
                fine_grained = label

    return coarse_grained.upper(), fine_grained.upper()


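# Convert a flat label sequence into IOB notation, e.g.:
#   ["PERSON", "PERSON", "O", "ADDRESS"] -> ["B-PERSON", "I-PERSON", "O", "B-ADDRESS"]
# Note that adjacent entities of the same type are merged into one span.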
def generate_IOB_labelset(series, casing_function):
    last_ent = ""
    new_series = []
    for ent in series:
        if ent in ["o", "O"]:
            ent_to_add = ent
        else:
            if ent != last_ent:  # first token of a new entity span
                ent_to_add = "B-" + ent
            else:
                ent_to_add = "I-" + ent
        new_series.append(casing_function(ent_to_add))
        last_ent = ent
    return new_series


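# Build a sentence record with parallel lists: the tokens plus their coarse-grained and
# fine-grained labels, both converted to IOB notation.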
def get_annotated_sentence(result_sentence, sentence):
    result_sentence["tokens"] = []
    result_sentence["coarse_grained"] = []
    result_sentence["fine_grained"] = []
    for token in sentence.tokens:
        coarse_grained, fine_grained = get_token_annotations(token, sentence.annotations)
        token = token.text.replace(u'\xa0', u' ').strip()  # replace non-breaking spaces
        if token:  # skip tokens that are empty after stripping (i.e. consisted only of whitespace)
            result_sentence["tokens"].append(token)
            result_sentence["coarse_grained"].append(coarse_grained)
            result_sentence["fine_grained"].append(fine_grained)
    result_sentence["coarse_grained"] = generate_IOB_labelset(result_sentence["coarse_grained"], str.upper)
    result_sentence["fine_grained"] = generate_IOB_labelset(result_sentence["fine_grained"], str.upper)
    return result_sentence


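# The languages to process are inferred from the top-level directory names under `extracted/`.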
languages = sorted([Path(file).stem for file in glob(str(base_path / "*"))])


def parse_files(language):
    data_path = get_path(language.upper())
    result_sentences = []
    not_parsable_files = 0
    file_names = sorted(list(glob(str(data_path / "*.tsv"))))
    for file in file_names:
        try:
            with open_web_anno_tsv(file) as f:
                for i, sentence in enumerate(f):
                    result_sentence = {"language": language, "type": "EUR-LEX",
                                       "file_name": Path(file).stem, "sentence_number": i}
                    result_sentence = get_annotated_sentence(result_sentence, sentence)
                    result_sentences.append(result_sentence)
            print(f"Successfully parsed file {file}")
        except ReadException as e:
            print(f"Could not parse file {file}: {e}")
            not_parsable_files += 1
    print("Not parsable files: ", not_parsable_files)
    return pd.DataFrame(result_sentences), not_parsable_files


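# Parse each language, split the documents 80%/10%/10% into train/validation/test,
# and collect per-language statistics.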
stats = []
train_dfs, validation_dfs, test_dfs = [], [], []
for language in languages:
    language = language.lower()
    print(f"Parsing language {language}")
    df, not_parsable_files = parse_files(language)
    file_names = df.file_name.unique()

    # debug helper: df.coarse_grained.apply(lambda x: print(set(x)))

    # split 80%/10%/10% by file_name so that all sentences of a document stay in the same split
    num_fn = len(file_names)
    train_fn, validation_fn, test_fn = np.split(np.array(file_names), [int(.8 * num_fn), int(.9 * num_fn)])

    lang_train = df[df.file_name.isin(train_fn)]
    lang_validation = df[df.file_name.isin(validation_fn)]
    lang_test = df[df.file_name.isin(test_fn)]

    train_dfs.append(lang_train)
    validation_dfs.append(lang_validation)
    test_dfs.append(lang_test)

    lang_stats = {"language": language}

    lang_stats["# train files"] = len(train_fn)
    lang_stats["# validation files"] = len(validation_fn)
    lang_stats["# test files"] = len(test_fn)

    lang_stats["# train sentences"] = len(lang_train.index)
    lang_stats["# validation sentences"] = len(lang_validation.index)
    lang_stats["# test sentences"] = len(lang_test.index)

    stats.append(lang_stats)

stat_df = pd.DataFrame(stats)
print(stat_df.to_markdown(index=False))

train = pd.concat(train_dfs)
validation = pd.concat(validation_dfs)
test = pd.concat(test_dfs)

df = pd.concat([train, validation, test])
print(f"The final coarse grained tagset (in IOB notation) is the following: "
      f"`{list(df.coarse_grained.explode().unique())}`")
print(f"The final fine grained tagset (in IOB notation) is the following: "
      f"`{list(df.fine_grained.explode().unique())}`")


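# Each JSONL record carries the fields language, type, file_name, sentence_number,
# tokens, coarse_grained and fine_grained, e.g. (values purely illustrative):
#   {"language": "en", "type": "EUR-LEX", "file_name": "...", "sentence_number": 0,
#    "tokens": [...], "coarse_grained": ["B-PERSON", "I-PERSON", "O", ...], "fine_grained": ["B-FAMILY NAME", ...]}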
# save splits
def save_splits_to_jsonl(config_name):
    # save to jsonl files for huggingface
    if config_name:
        os.makedirs(config_name, exist_ok=True)
    train.to_json(os.path.join(config_name, "train.jsonl"), lines=True, orient="records", force_ascii=False)
    validation.to_json(os.path.join(config_name, "validation.jsonl"), lines=True, orient="records", force_ascii=False)
    test.to_json(os.path.join(config_name, "test.jsonl"), lines=True, orient="records", force_ascii=False)


save_splits_to_jsonl("")