Sebastien Montella committed
Commit
06ba0d0
1 Parent(s): 5351dd0

adding raw xml + json splits + scripts

.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.json filter=lfs diff=lfs merge=lfs -text
raw/simpitiki-v1.xml ADDED
The diff for this file is too large to render. See raw diff
raw/simpitiki-v2.xml ADDED
The diff for this file is too large to render. See raw diff
simpitiki.py ADDED
@@ -0,0 +1,278 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """SIMPITIKI: a Simplification corpus for Italian."""
+
+
+ import json
+ import os
+
+ import datasets
+
+ _CITATION = """\
+ @article{tonelli2016simpitiki,
+   title={SIMPITIKI: a Simplification corpus for Italian},
+   author={Tonelli, Sara and Aprosio, Alessio Palmero and Saltori, Francesca},
+   journal={Proceedings of CLiC-it},
+   year={2016}
+ }
+ """
+
+ _DESCRIPTION = """\
+ SIMPITIKI is a Simplification corpus for Italian and it consists of two sets of simplified pairs: the first one is harvested from the Italian Wikipedia in a semi-automatic way; the second one is manually annotated sentence-by-sentence from documents in the administrative domain.
+ """
+
+ _HOMEPAGE = "https://github.com/dhfbk/simpitiki"
+
+ _LICENSE = "CC-BY 4.0"
+
+ _URLs = {
+     "v1": {
+         "random": {
+             "train": "./v1/random_split/train.json",
+             "val": "./v1/random_split/val.json",
+             "test": "./v1/random_split/test.json"
+         },
+         "transformations": {
+             "train": "./v1/transformations_split/train.json",
+             "val": "./v1/transformations_split/val.json",
+             "seen_transformations_test": "./v1/transformations_split/seen_transformations_test.json",
+             "unseen_transformations_test": "./v1/transformations_split/unseen_transformations_test.json"
+         },
+         "source_dataset": {
+             "itwiki_train": "./v1/source_dataset_split/itwiki_train.json",
+             "itwiki_val": "./v1/source_dataset_split/itwiki_val.json",
+             "itwiki_test": "./v1/source_dataset_split/itwiki_test.json",
+             "tn_test": "./v1/source_dataset_split/tn_test.json"
+         }
+     },
+     "v2": {
+         "random": {
+             "train": "./v2/random_split/train.json",
+             "val": "./v2/random_split/val.json",
+             "test": "./v2/random_split/test.json"
+         },
+         "transformations": {
+             "train": "./v2/transformations_split/train.json",
+             "val": "./v2/transformations_split/val.json",
+             "seen_transformations_test": "./v2/transformations_split/seen_transformations_test.json",
+             "unseen_transformations_test": "./v2/transformations_split/unseen_transformations_test.json"
+         },
+         "source_dataset": {
+             "itwiki_train": "./v2/source_dataset_split/itwiki_train.json",
+             "itwiki_val": "./v2/source_dataset_split/itwiki_val.json",
+             "itwiki_test": "./v2/source_dataset_split/itwiki_test.json",
+             "tn_test": "./v2/source_dataset_split/tn_test.json"
+         }
+     }
+ }
+
+
+ class SIMPITIKI(datasets.GeneratorBasedBuilder):
+     """SIMPITIKI: complex-simplified sentence pairs for Italian text simplification."""
+
+     VERSION_1 = datasets.Version("1.0.0")
+     VERSION_2 = datasets.Version("2.0.0")
+
+     # This is an example of a dataset with multiple configurations.
+     # If you don't want/need to define several sub-sets in your dataset,
+     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
+
+     # If you need to make complex sub-parts in the datasets with configurable options
+     # you can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
+     # BUILDER_CONFIG_CLASS = MyBuilderConfig
+
+     # You will be able to load one or the other configuration in the following list with
+     # data = datasets.load_dataset('my_dataset', 'v1')
+     # data = datasets.load_dataset('my_dataset', 'v2')
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="v1", version=VERSION_1, description="First version"),
+         datasets.BuilderConfig(name="v2", version=VERSION_2, description="Second version with better sentence boundaries."),
+     ]
+
+     DEFAULT_CONFIG_NAME = "v2"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
+
+     def _info(self):
+         # This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
+         features = datasets.Features(
+             {
+                 "text": datasets.Value("string"),
+                 "simplified_text": datasets.Value("string"),
+                 "transformation_type": datasets.Value("string"),
+                 "source_dataset": datasets.Value("string"),
+                 "gem_id": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage=_HOMEPAGE,
+             # License for the dataset if available
+             license=_LICENSE,
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
+         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
+
+         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
+         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
+         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
+
+         my_urls = _URLs[self.config.name]
+         downloaded_files = dl_manager.download_and_extract(my_urls)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": downloaded_files['random']['train'],
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": downloaded_files['random']['val'],
+                     "split": "val",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": downloaded_files['random']['test'],
+                     "split": "test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name='challenge_seen_transformations_train',
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": downloaded_files['transformations']['train'],
+                     "split": "challenge_seen_transformations_train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name='challenge_seen_transformations_val',
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": downloaded_files['transformations']['val'],
+                     "split": "challenge_seen_transformations_val",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name='challenge_seen_transformations_test',
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": downloaded_files['transformations']['seen_transformations_test'],
+                     "split": "challenge_seen_transformations_test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name='challenge_unseen_transformations_test',
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": downloaded_files['transformations']['unseen_transformations_test'],
+                     "split": "challenge_unseen_transformations_test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name='challenge_itwiki_train',
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": downloaded_files['source_dataset']['itwiki_train'],
+                     "split": "challenge_itwiki_train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name='challenge_itwiki_val',
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": downloaded_files['source_dataset']['itwiki_val'],
+                     "split": "challenge_itwiki_val",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name='challenge_itwiki_test',
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": downloaded_files['source_dataset']['itwiki_test'],
+                     "split": "challenge_itwiki_test",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name='challenge_tn_test',
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": downloaded_files['source_dataset']['tn_test'],
+                     "split": "challenge_tn_test",
+                 },
+             ),
+         ]
+
+     def _generate_examples(
+         self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+     ):
+         """Yields examples as (key, example) tuples."""
+         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
+         # The `key` is here for legacy reasons (tfds) and is not important in itself.
+
+         with open(filepath, encoding="utf-8") as f:
+             data = json.load(f)
+             for id_, row in enumerate(data):
+                 yield id_, {
+                     "text": row["text"],
+                     "simplified_text": row["simplified_text"],
+                     "transformation_type": row["transformation_type"],
+                     "source_dataset": row["source_dataset"],
+                     "gem_id": f"gem-SIMPITIKI-{split}-{id_}",
+                 }
+
+
+ if __name__ == '__main__':
+     dataset = SIMPITIKI()
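With this loading script in place, each configuration and challenge split can be loaded through the datasets library. A minimal usage sketch (the path 'simpitiki' stands in for the actual script or repository path, which is an assumption here and not fixed by this commit):

from datasets import load_dataset
data = load_dataset("simpitiki", "v2")  # "v1" is the other available configuration
print(data["train"][0]["text"], "->", data["train"][0]["simplified_text"])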
train_val_test_split.py ADDED
@@ -0,0 +1,343 @@
+ # -*- coding: utf-8 -*-
+
+ import os
+ import sys
+ import json
+ import random
+ import argparse
+ import xml.etree.ElementTree as ET
+
+ def load_xml_file(path):
+     """ Load an XML file.
+
+     Args:
+         path (:obj:`str`): Path where the XML file is stored.
+
+     Returns:
+         tree (:obj:`ElementTree`): Parsed tree structure of the XML file.
+
+     """
+
+     tree = ET.parse(path)
+
+     return tree
+
+
+ def save_json(data, path):
+     """ Save data as a JSON file.
+
+     Args:
+         data (:obj:`list`): Data to store as JSON.
+         path (:obj:`str`): Path where data will be stored.
+
+     Returns:
+         None
+
+     """
+
+     with open(path, 'w', encoding='utf-8') as f:
+         json.dump(data, f)
+
+
+ def get_types(xml):
+     """ Extract the different transformation types.
+
+     Args:
+         xml (:obj:`ElementTree`): XML as tree.
+
+     Returns:
+         transformations_types (:obj:`dict`): Mapping of transformation ids to their type.
+
+     Example:
+         >>> assert transformations_types[34] == 'Transformation - Noun to Verb'  # True
+
+     """
+
+     transformations_types = {}
+     root = xml.getroot()
+     types = root[0]
+     for curr_type in types:
+         transformations_types[int(curr_type.attrib['id'])] = curr_type.text
+
+     return transformations_types
+
+
+ def create_pair(text, simplified_text, transformation_id, transformation_type, source_dataset):
+     """ Instantiate a pair from the given parameters.
+
+     Args:
+         text (:obj:`str`): Raw text.
+         simplified_text (:obj:`str`): Simplified text.
+         transformation_id (:obj:`int`): Transformation ID.
+         transformation_type (:obj:`str`): Transformation category.
+         source_dataset (:obj:`str`): Source dataset from which the pair comes.
+
+     Returns:
+         pair (:obj:`dict`): Complex-Simplified text pair with corresponding information.
+
+     """
+     pair = {}
+     pair['transformation_id'] = transformation_id
+     pair['transformation_type'] = transformation_type
+     pair['source_dataset'] = source_dataset
+     pair['text'] = text
+     pair['simplified_text'] = simplified_text
+     return pair
+
+
+ def fill_pairs_by_transformation(pairs_by_transformation, pair):
+     """ Add a pair to the corresponding entry of the pairs_by_transformation dict.
+
+     Args:
+         pairs_by_transformation (:obj:`dict`): Dictionary where pairs are organized by their
+                                                transformation types.
+         pair (:obj:`dict`): Complex-Simplified text pair with corresponding information.
+
+     Returns:
+         None
+
+     """
+
+     transformation_id = pair['transformation_id']
+     if transformation_id not in pairs_by_transformation.keys():
+         pairs_by_transformation[transformation_id] = [pair]
+     else:
+         pairs_by_transformation[transformation_id].append(pair)
+
+
+ def fill_pairs_by_source_dataset(pairs_by_source_dataset, pair):
+     """ Add a pair to the corresponding entry of the pairs_by_source_dataset dict.
+
+     Args:
+         pairs_by_source_dataset (:obj:`dict`): Dictionary where pairs are organized by their
+                                                source dataset.
+         pair (:obj:`dict`): Complex-Simplified text pair with corresponding information.
+
+     Returns:
+         None
+
+     """
+
+     source_dataset = pair['source_dataset']
+
+     if source_dataset not in pairs_by_source_dataset.keys():
+         pairs_by_source_dataset[source_dataset] = [pair]
+     else:
+         pairs_by_source_dataset[source_dataset].append(pair)
+
+
+ def get_pairs(xml, types):
+     """ This function returns Complex-Simplified pairs from the XML.
+
+     Args:
+         xml (:obj:`ElementTree`): XML simplification pairs as a tree structure.
+         types (:obj:`dict`): Mapping of transformation IDs to their transformation category.
+
+     Returns:
+         pairs (:obj:`list`): List of pairs (:obj:`dict`) without any ordering.
+         pairs_by_transformation (:obj:`dict`): Pairs clustered by their transformation type.
+         pairs_by_source_dataset (:obj:`dict`): Pairs clustered by their source dataset.
+
+     """
+     root = xml.getroot()
+     simplifications = root[1]
+
+     pairs = []
+     pairs_by_transformation = {}
+     pairs_by_source_dataset = {}
+     for simplification in simplifications:
+         transformation_id = int(simplification.attrib['type'])
+         source = simplification.attrib['origin']
+         raw_text = simplification[0].text
+         simplified_text = simplification[1].text
+
+         curr_pair = create_pair(raw_text, simplified_text, transformation_id, types[transformation_id], source)
+
+         pairs.append(curr_pair)
+
+         fill_pairs_by_transformation(pairs_by_transformation, curr_pair)
+         fill_pairs_by_source_dataset(pairs_by_source_dataset, curr_pair)
+
+     return pairs, pairs_by_transformation, pairs_by_source_dataset
+
+
+ def random_split(pairs, training_ratio):
+     """ This function randomly splits pairs into train/val/test subsets.
+
+     Args:
+         pairs (:obj:`list`): List of pairs (:obj:`dict`) without any ordering.
+         training_ratio (:obj:`float`): Ratio of training data (0 < training_ratio < 1).
+
+     Returns:
+         train (:obj:`list`): Training set
+         validation (:obj:`list`): Validation set
+         test (:obj:`list`): Testing set
+
+     """
+
+     random.shuffle(pairs)
+     size = len(pairs)
+     train_limit = int(size * training_ratio)
+     val_limit = train_limit + int(size * (1 - training_ratio) / 2)
+
+     train = pairs[:train_limit]
+     val = pairs[train_limit:val_limit]
+     test = pairs[val_limit:]
+
+     return train, val, test
+
+
+ def challenge_seen_unseen_transformation_split(pairs_by_transformation, training_ratio):
+     """ This function splits pairs s.t. evaluation can be done on seen and unseen
+         transformations for more challenging robustness/generalization evaluation.
+
+     Args:
+         pairs_by_transformation (:obj:`dict`): Pairs organized by their transformation ID.
+         training_ratio (:obj:`float`): Ratio of training data (0 < training_ratio < 1).
+
+     Returns:
+         train (:obj:`list`): Training set
+         validation (:obj:`list`): Validation set
+         seen_transformations_test (:obj:`list`): Seen transformations testing set
+         unseen_transformations_test (:obj:`list`): Unseen transformations testing set
+
+     """
+
+     # TODO transformations are hard-coded for now --> add argument in parser to specify them.
+     seen_transformations_ids = [1, 2, 3, 11, 13, 23, 31, 32, 33, 34, 37]
+     unseen_transformations_ids = [12, 22, 32, 35, 36]
+
+     train = []
+     val = []
+     seen_transformations_test = []
+     unseen_transformations_test = []
+
+     for transf_id in seen_transformations_ids:
+         curr_len = len(pairs_by_transformation[transf_id])
+         train_limit = int(curr_len * training_ratio)
+         val_limit = train_limit + int(curr_len * (1 - training_ratio) / 2.0)
+
+         train += pairs_by_transformation[transf_id][:train_limit]
+         val += pairs_by_transformation[transf_id][train_limit:val_limit]
+         seen_transformations_test += pairs_by_transformation[transf_id][val_limit:]
+
+     for transf_id in unseen_transformations_ids:
+         unseen_transformations_test += pairs_by_transformation[transf_id]
+
+     return train, val, seen_transformations_test, unseen_transformations_test
+
+
+ def challenge_seen_unseen_source_dataset_split(pairs_by_source_dataset, training_ratio):
+     """ This function splits pairs s.t. evaluation can be done on seen and unseen
+         source datasets for more challenging robustness/generalization evaluation.
+
+     Args:
+         pairs_by_source_dataset (:obj:`dict`): Pairs organized by their source dataset.
+         training_ratio (:obj:`float`): Ratio of training data (0 < training_ratio < 1).
+
+     Returns:
+         train (:obj:`list`): Training set
+         validation (:obj:`list`): Validation set
+         seen_source_test (:obj:`list`): Seen source dataset testing set
+         unseen_source_test (:obj:`list`): Unseen source dataset testing set
+
+     """
+
+     # TODO source dataset for training hard-coded --> add argument in parser.
+     seen_source = ['itwiki']  # semi-supervised
+     unseen_source = ['tn']  # manually annotated
+
+     train = []
+     val = []
+     seen_source_test = []
+     unseen_source_test = []
+
+     for source in seen_source:
+         random.shuffle(pairs_by_source_dataset[source])
+         curr_len = len(pairs_by_source_dataset[source])
+         train_limit = int(curr_len * training_ratio)
+         val_limit = train_limit + int(curr_len * (1 - training_ratio) / 2.0)
+
+         train += pairs_by_source_dataset[source][:train_limit]
+         val += pairs_by_source_dataset[source][train_limit:val_limit]
+         seen_source_test += pairs_by_source_dataset[source][val_limit:]
+
+     for source in unseen_source:
+         unseen_source_test += pairs_by_source_dataset[source]
+
+     return train, val, seen_source_test, unseen_source_test
+
+
+ def split(args):
+     """ This function splits the XML file to produce the specified subsets according to args.
+
+     Args:
+         args (:obj:`argparse.Namespace`): Parsed arguments.
+
+     Returns:
+         None
+
+     """
+     xml = load_xml_file(args.data_path)
+     version = args.data_path[-6:-4]  # e.g. 'raw/simpitiki-v1.xml' -> 'v1'
+
+     transformations_types = get_types(xml)
+     pairs, pairs_by_transformation, pairs_by_source_dataset = get_pairs(xml, transformations_types)
+
+     if args.split_criteria == 'random':
+         train, val, test = random_split(pairs, args.training_ratio)
+         os.makedirs(f'{args.out_dir}/{version}/random_split/', exist_ok=True)
+         save_json(train, f'{args.out_dir}/{version}/random_split/train.json')
+         save_json(val, f'{args.out_dir}/{version}/random_split/val.json')
+         save_json(test, f'{args.out_dir}/{version}/random_split/test.json')
+
+     elif args.split_criteria == 'transformations':
+         seen_transformations_train, seen_transformations_val, seen_transformations_test, unseen_transformations_test = challenge_seen_unseen_transformation_split(pairs_by_transformation, args.training_ratio)
+
+         os.makedirs(f'{args.out_dir}/{version}/transformations_split/', exist_ok=True)
+         save_json(seen_transformations_train, f'{args.out_dir}/{version}/transformations_split/train.json')
+         save_json(seen_transformations_val, f'{args.out_dir}/{version}/transformations_split/val.json')
+         save_json(seen_transformations_test, f'{args.out_dir}/{version}/transformations_split/seen_transformations_test.json')
+         save_json(unseen_transformations_test, f'{args.out_dir}/{version}/transformations_split/unseen_transformations_test.json')
+
+     elif args.split_criteria == 'source_dataset':
+         itwiki_train, itwiki_val, itwiki_test, tn_test = challenge_seen_unseen_source_dataset_split(pairs_by_source_dataset, args.training_ratio)
+
+         os.makedirs(f'{args.out_dir}/{version}/source_dataset_split/', exist_ok=True)
+         save_json(itwiki_train, f'{args.out_dir}/{version}/source_dataset_split/itwiki_train.json')
+         save_json(itwiki_val, f'{args.out_dir}/{version}/source_dataset_split/itwiki_val.json')
+         save_json(itwiki_test, f'{args.out_dir}/{version}/source_dataset_split/itwiki_test.json')
+         save_json(tn_test, f'{args.out_dir}/{version}/source_dataset_split/tn_test.json')
+
+
+ if __name__ == '__main__':
+
+     parser = argparse.ArgumentParser(description="Creating Train/Val/Test files")
+
+     parser.add_argument('--data_path', type=str, required=True, help='path to (single) data file')
+     parser.add_argument('--out_dir', type=str, required=True, help='output dir to store files')
+     parser.add_argument('--training_ratio', type=float, required=True, help='training ratio (e.g. 0.8); the remainder is split equally between val and test')
+     parser.add_argument('--split_criteria', type=str, required=True, choices=['random', 'transformations', 'source_dataset'], help='split criteria')
+
+     args = parser.parse_args()
+     split(args)
+
+     sys.exit(0)
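The JSON splits committed below were presumably produced by running this script once per criterion over the raw XML. An example invocation (the 0.8 ratio and the output directory are assumptions, not recorded in this commit):

python train_val_test_split.py --data_path raw/simpitiki-v2.xml --out_dir . --training_ratio 0.8 --split_criteria random
python train_val_test_split.py --data_path raw/simpitiki-v2.xml --out_dir . --training_ratio 0.8 --split_criteria transformations
python train_val_test_split.py --data_path raw/simpitiki-v2.xml --out_dir . --training_ratio 0.8 --split_criteria source_dataset

The version is read from the --data_path filename (simpitiki-v2.xml gives v2), so the same commands with raw/simpitiki-v1.xml populate the v1/ directories.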
v1/random_split/test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5dac082e3d051d78b25937282edc951ead893ec2a41e87430da684cb7d524990
+ size 299984
v1/random_split/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0cc8b506b54eb812364998689e181575d52df09fc23b67f5054875e3bf288478
+ size 1430831
v1/random_split/val.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25256b0fc7d889fa6b25bac2a865e470929919c196cd3be8575b42b1402fdcf8
+ size 245148
v1/source_dataset_split/itwiki_test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:69f4035eb57152e71f2f8b667d4ef88e0e2bb7a956829823bfc5a6f51ac900f4
+ size 279736
v1/source_dataset_split/itwiki_train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a34e6a2092856be8dde443891faf53cac526866e77814d65a148b192288d23ce
+ size 1096254
v1/source_dataset_split/itwiki_val.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61a87de9dcee3a0c0838722b6c2e4df972f200369947ea3b1db6ce0b640bb4eb
+ size 244635
v1/source_dataset_split/tn_test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a001c79926f5594e71d28003ce907f4916ffb0e1e489fb388465d717ab51c92
+ size 355338
v1/transformations_split/seen_transformations_test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:047cdb0092a33bb9b6e09632e4e065f184ee9d1f0be28e6430f8a8af1e1024aa
+ size 509377
v1/transformations_split/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:afddddba2efb73de0b8575cc4b2481e5c50a43e1870a4a54515d636685482393
+ size 902444
v1/transformations_split/unseen_transformations_test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f832e8d7d939245a8a3e6dc6531fe24b9d44f7064d7f7e4e8f260c1b352b8ed
+ size 497149
v1/transformations_split/val.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9655310c9ec3aa5ff52ef8cc3df0bd0c420c932d1af21ebeb1b075e70cba60b0
+ size 446973
v2/random_split/test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e65e3eb598fbb694ceef759bdefc1c6e3957b9af1520b1e964203177ddbd859
+ size 142960
v2/random_split/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3282e371402269ffc06ee0e5e761ee9365c281f1cc8c855f726129643a7d4141
+ size 663238
v2/random_split/val.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a83774f26390e04688c5b1f99b35321d621adfd78d3f2ae65a55896367e3e2b7
+ size 129446
v2/source_dataset_split/itwiki_test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91bc8e5fb2dace099eb75780e8630125f2fedfbe397f507313eb5fdb4ca29942
+ size 87192
v2/source_dataset_split/itwiki_train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32d3f71c35b9b81c434eb52b66449a2e616fd38eeb9ceedc0e950a4af767328e
+ size 405402
v2/source_dataset_split/itwiki_val.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21a06b3ba2aa1e9cf8602e1d39f44a43e908b8fb9692181bfd5db180e65db8e7
+ size 87712
v2/source_dataset_split/tn_test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a001c79926f5594e71d28003ce907f4916ffb0e1e489fb388465d717ab51c92
+ size 355338
v2/transformations_split/seen_transformations_test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72476c69784cb698d3d2bff5d86e790ae1fe7f8ca576d28fa22653cbb839129c
+ size 168164
v2/transformations_split/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ffa6e04415c4bca8f457eb61b2fc817a6f74b161c962693b651bb53df11da6f
+ size 547046
v2/transformations_split/unseen_transformations_test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7da8a84d6a2ab6e1fee954932014adefedd97be471ec1f792a95fe3488debbb
+ size 270500
v2/transformations_split/val.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92edca1d8e3d12cee4bc69a47823315dad1fd2c6d924b6635dc38daa8f8f3762
+ size 179045