# -*- coding: utf-8 -*-
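"""Split an XML corpus of Complex-Simplified text pairs into train/val/test
JSON Lines files, either randomly or along seen/unseen transformation types
or source datasets.

Usage sketch (script and data file names are hypothetical, shown for
illustration only):
    python split.py --data_path data/corpus_V2.xml --out_dir out \
        --training_ratio 0.7 --split_criteria random
"""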

import os
import sys
import json
import random
import argparse
import xml.etree.ElementTree as ET

def load_xml_file(path):
    """ Load an XML file.

    Args:
        path (:obj:`str`): Path where the XML file is stored.

    Returns:
        tree (:obj:`ElementTree`): Parsed tree structure of the XML file.

    """
    tree = ET.parse(path)

    return tree


def save_json(data, path):
    """ Save data as a JSON file.

    Args:
        data (:obj:`list`): Data to store as JSON.
        path (:obj:`str`): Path where data will be stored.

    Returns:
        None

    """
    # Use a context manager so the file handle is flushed and closed.
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(data, f)


def save_jsonl(data, path):
    """ Save data as a JSON Lines file where each line is one dictionary.

    Args:
        data (:obj:`list`): Data to store as JSON Lines.
        path (:obj:`str`): Path where data will be stored.

    Returns:
        None

    """
    with open(path, 'w', encoding='utf-8') as f:
        for sample_dict in data:
            f.write(json.dumps(sample_dict))
            f.write('\n')


def get_types(xml):
    """ Extract the different transformation types.

    Args:
        xml (:obj:`ElementTree`): XML as tree.

    Returns:
        transformations_types (:obj:`dict`): Mapping of transformation IDs to their type.

    Example:
        >>> assert transformations_types[34] == 'Transformation - Noun to Verb'

    """
    transformations_types = {}
    root = xml.getroot()
    types = root[0]  # the first child of the root holds the type definitions
    for curr_type in types:
        transformations_types[int(curr_type.attrib['id'])] = curr_type.text

    return transformations_types


def create_pair(text, simplified_text, transformation_id, transformation_type, source_dataset):
    """ Instantiate a pair given the parameters.

    Args:
        text (:obj:`str`): Raw text.
        simplified_text (:obj:`str`): Simplified text.
        transformation_id (:obj:`int`): Transformation ID.
        transformation_type (:obj:`str`): Transformation category.
        source_dataset (:obj:`str`): Source dataset from which the pair comes.

    Returns:
        pair (:obj:`dict`): Complex-Simplified text pair with corresponding information.

    """
    pair = {
        'transformation_id': transformation_id,
        'transformation_type': transformation_type,
        'source_dataset': source_dataset,
        'text': text,
        'simplified_text': simplified_text,
    }
    return pair
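
# An illustrative pair (values assumed for demonstration; transformation 34 is
# taken from the doctest in get_types above):
# create_pair('raw text', 'simplified text', 34,
#             'Transformation - Noun to Verb', 'itwiki')
# --> {'transformation_id': 34,
#      'transformation_type': 'Transformation - Noun to Verb',
#      'source_dataset': 'itwiki',
#      'text': 'raw text',
#      'simplified_text': 'simplified text'}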


def fill_pairs_by_transformation(pairs_by_transformation, pair):
    """ Add a pair to the pairs_by_transformation dict, under its transformation ID.

    Args:
        pairs_by_transformation (:obj:`dict`): Dictionary where pairs are organized by their
                                               transformation types.
        pair (:obj:`dict`): Complex-Simplified text pair with corresponding information.

    Returns:
        None

    """
    transformation_id = pair['transformation_id']
    pairs_by_transformation.setdefault(transformation_id, []).append(pair)


def fill_pairs_by_source_dataset(pairs_by_source_dataset, pair):
    """ Add a pair to the pairs_by_source_dataset dict, under its source dataset.

    Args:
        pairs_by_source_dataset (:obj:`dict`): Dictionary where pairs are organized by their
                                               source dataset.
        pair (:obj:`dict`): Complex-Simplified text pair with corresponding information.

    Returns:
        None

    """
    source_dataset = pair['source_dataset']
    pairs_by_source_dataset.setdefault(source_dataset, []).append(pair)


def get_pairs(xml, types):
    """ Return Complex-Simplified pairs extracted from the XML.

    Args:
        xml (:obj:`ElementTree`): XML simplification pairs as a tree structure.
        types (:obj:`dict`): Mapping of transformation IDs to their transformation category.

    Returns:
        pairs (:obj:`list`): List of pairs (:obj:`dict`) without any ordering.
        pairs_by_transformation (:obj:`dict`): Pairs clustered by their transformation type.
        pairs_by_source_dataset (:obj:`dict`): Pairs clustered by their source dataset.

    """
    root = xml.getroot()
    simplifications = root[1]  # the second child of the root holds the pairs

    pairs = []
    pairs_by_transformation = {}
    pairs_by_source_dataset = {}
    for simplification in simplifications:
        transformation_id = int(simplification.attrib['type'])
        source = simplification.attrib['origin']
        raw_text = simplification[0].text
        simplified_text = simplification[1].text

        curr_pair = create_pair(raw_text, simplified_text, transformation_id,
                                types[transformation_id], source)

        pairs.append(curr_pair)
        fill_pairs_by_transformation(pairs_by_transformation, curr_pair)
        fill_pairs_by_source_dataset(pairs_by_source_dataset, curr_pair)

    return pairs, pairs_by_transformation, pairs_by_source_dataset
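
# Assumed XML layout, reconstructed from the element accesses above (tag names
# are illustrative; only the positions and attribute names matter):
#
# <root>
#   <types>                            <!-- root[0], read by get_types -->
#     <type id="34">Transformation - Noun to Verb</type>
#   </types>
#   <simplifications>                  <!-- root[1], read by get_pairs -->
#     <simplification type="34" origin="itwiki">
#       <before>raw text</before>
#       <after>simplified text</after>
#     </simplification>
#   </simplifications>
# </root>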


def random_split(pairs, training_ratio):
    """ Randomly split pairs into train/val/test subsets.

    Args:
        pairs (:obj:`list`): List of pairs (:obj:`dict`) without any ordering.
        training_ratio (:obj:`float`): Ratio of training data (0 < training_ratio < 1).

    Returns:
        train (:obj:`list`): Training set.
        val (:obj:`list`): Validation set.
        test (:obj:`list`): Testing set.

    """
    random.shuffle(pairs)
    size = len(pairs)
    train_limit = int(size * training_ratio)
    val_limit = train_limit + int(size * (1 - training_ratio) / 2)

    train = pairs[:train_limit]
    val = pairs[train_limit:val_limit]
    test = pairs[val_limit:]

    return train, val, test
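
# Worked example: with 1,000 pairs and training_ratio=0.7, train_limit = 700
# and val_limit = 700 + int(1000 * 0.3 / 2) = 850, so the split is
# 700 train / 150 val / 150 test.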



def challenge_seen_unseen_transformation_split(pairs_by_transformation, training_ratio):
    """ Split pairs such that evaluation can be done on both seen and unseen
        transformations, for a more challenging robustness/generalization evaluation.

    Args:
        pairs_by_transformation (:obj:`dict`): Pairs organized by their transformation ID.
        training_ratio (:obj:`float`): Ratio of training data (0 < training_ratio < 1).

    Returns:
        train (:obj:`list`): Training set.
        val (:obj:`list`): Validation set.
        seen_transformations_test (:obj:`list`): Seen transformations testing set.
        unseen_transformations_test (:obj:`list`): Unseen transformations testing set.

    """
    # TODO: transformations are hard-coded for now --> add argument in parser to specify them.
    seen_transformations_ids = [1, 2, 3, 11, 13, 21, 23, 31, 33, 34, 37]
    unseen_transformations_ids = [12, 22, 32, 35, 36]

    train = []
    val = []
    seen_transformations_test = []
    unseen_transformations_test = []

    for transf_id in seen_transformations_ids:
        curr_len = len(pairs_by_transformation[transf_id])
        train_limit = int(curr_len * training_ratio)
        val_limit = int(curr_len * (training_ratio + (1 - training_ratio) / 2.0))

        train += pairs_by_transformation[transf_id][:train_limit]
        val += pairs_by_transformation[transf_id][train_limit:val_limit]
        seen_transformations_test += pairs_by_transformation[transf_id][val_limit:]

    for transf_id in unseen_transformations_ids:
        unseen_transformations_test += pairs_by_transformation[transf_id]

    return train, val, seen_transformations_test, unseen_transformations_test
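
# Worked example: for a seen transformation with 100 pairs and
# training_ratio=0.7, train_limit = 70 and val_limit = int(100 * 0.85) = 85,
# giving a 70/15/15 per-transformation split; pairs of unseen transformations
# go entirely to the unseen test set.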


def challenge_seen_unseen_source_dataset_split(pairs_by_source_dataset, training_ratio):
    """ Split pairs such that evaluation can be done on both seen and unseen
        source datasets, for a more challenging robustness/generalization evaluation.

    Args:
        pairs_by_source_dataset (:obj:`dict`): Pairs organized by their source dataset.
        training_ratio (:obj:`float`): Ratio of training data (0 < training_ratio < 1).

    Returns:
        train (:obj:`list`): Training set.
        val (:obj:`list`): Validation set.
        seen_source_test (:obj:`list`): Seen source dataset testing set.
        unseen_source_test (:obj:`list`): Unseen source dataset testing set.

    """
    # TODO: source datasets for training are hard-coded --> add argument in parser.
    seen_source = ['itwiki']    # semi-supervised
    unseen_source = ['tn']      # manually annotated

    train = []
    val = []
    seen_source_test = []
    unseen_source_test = []

    for source in seen_source:
        random.shuffle(pairs_by_source_dataset[source])
        curr_len = len(pairs_by_source_dataset[source])
        train_limit = int(curr_len * training_ratio)
        val_limit = train_limit + int(curr_len * (1 - training_ratio) / 2.0)

        train += pairs_by_source_dataset[source][:train_limit]
        val += pairs_by_source_dataset[source][train_limit:val_limit]
        seen_source_test += pairs_by_source_dataset[source][val_limit:]

    for source in unseen_source:
        unseen_source_test += pairs_by_source_dataset[source]

    return train, val, seen_source_test, unseen_source_test


def split(args):
    """ Split the XML file to produce the specified subsets according to args.

    Args:
        args (:obj:`argparse.Namespace`): Parsed arguments.

    Returns:
        None

    """
    xml = load_xml_file(args.data_path)
    # Derive the corpus version from the file name. This assumes a
    # two-character version tag sits right before the '.xml' extension.
    version = args.data_path[-6:-4]

    transformations_types = get_types(xml)
    pairs, pairs_by_transformation, pairs_by_source_dataset = get_pairs(xml, transformations_types)

    if args.split_criteria == 'random':
        train, val, test = random_split(pairs, args.training_ratio)
        os.makedirs(f'{args.out_dir}/{version}/random_split/', exist_ok=True)
        save_jsonl(train, f'{args.out_dir}/{version}/random_split/train.jsonl')
        save_jsonl(val, f'{args.out_dir}/{version}/random_split/val.jsonl')
        save_jsonl(test, f'{args.out_dir}/{version}/random_split/test.jsonl')

    elif args.split_criteria == 'transformations':
        seen_transformations_train, seen_transformations_val, seen_transformations_test, unseen_transformations_test = \
            challenge_seen_unseen_transformation_split(pairs_by_transformation, args.training_ratio)
        os.makedirs(f'{args.out_dir}/{version}/transformations_split/', exist_ok=True)
        save_jsonl(seen_transformations_train, f'{args.out_dir}/{version}/transformations_split/train.jsonl')
        save_jsonl(seen_transformations_val, f'{args.out_dir}/{version}/transformations_split/val.jsonl')
        save_jsonl(seen_transformations_test, f'{args.out_dir}/{version}/transformations_split/seen_transformations_test.jsonl')
        save_jsonl(unseen_transformations_test, f'{args.out_dir}/{version}/transformations_split/unseen_transformations_test.jsonl')

    elif args.split_criteria == 'source_dataset':
        itwiki_train, itwiki_val, itwiki_test, tn_test = \
            challenge_seen_unseen_source_dataset_split(pairs_by_source_dataset, args.training_ratio)
        os.makedirs(f'{args.out_dir}/{version}/source_dataset_split/', exist_ok=True)
        save_jsonl(itwiki_train, f'{args.out_dir}/{version}/source_dataset_split/itwiki_train.jsonl')
        save_jsonl(itwiki_val, f'{args.out_dir}/{version}/source_dataset_split/itwiki_val.jsonl')
        save_jsonl(itwiki_test, f'{args.out_dir}/{version}/source_dataset_split/itwiki_test.jsonl')
        save_jsonl(tn_test, f'{args.out_dir}/{version}/source_dataset_split/tn_test.jsonl')
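
# Resulting layout (illustrative, assuming --split_criteria random and a file
# name yielding version 'v1'):
#   <out_dir>/v1/random_split/train.jsonl
#   <out_dir>/v1/random_split/val.jsonl
#   <out_dir>/v1/random_split/test.jsonl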


if __name__ == '__main__':

    parser = argparse.ArgumentParser(description="Creating Train/Val/Test files")

    parser.add_argument('--data_path', type=str, required=True, help='path to (single) data file')
    parser.add_argument('--out_dir', type=str, required=True, help='output dir to store files')
    parser.add_argument('--training_ratio', type=float, required=True,
                        help='training ratio (e.g. 0.7); the remainder is divided EQUALLY between val and test')
    parser.add_argument('--split_criteria', type=str, required=True,
                        choices=['random', 'transformations', 'source_dataset'], help='split criteria')

    args = parser.parse_args()
    split(args)

    sys.exit(0)