# SIMPITIKI / train_val_test_split.py
# -*- coding: utf-8 -*-
import os
import sys
import json
import random
import argparse
import xml.etree.ElementTree as ET


def load_xml_file(path):
""" Load an xml file.
Args:
path (:obj:`float`): Path where the XML file is stored.
Returns:
tree (:obj:`ElementTree`): Parsed tree structure of the XML file.
"""
tree = ET.parse(path)
return tree


def save_json(data, path):
    """ Save data as a JSON file.
Args:
data (:obj:`list`): Data to store as json.
path (:obj:`str`): Path where data will be stored.
Returns:
None
"""
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(data, f)


def save_jsonl(data, path):
    """ Save data as a JSON Lines file (one JSON object per line).
Args:
data (:obj:`list`): Data to store as json.
path (:obj:`str`): Path where data will be stored.
Returns:
None
"""
    with open(path, 'w', encoding='utf-8') as f:
for sample_dict in data:
f.write(json.dumps(sample_dict))
f.write('\n')
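
# Each line of the resulting JSONL file is one standalone JSON object, e.g.
# (illustrative values):
#   {"transformation_id": 31, "text": "...", "simplified_text": "..."}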


def get_types(xml):
""" Extract the different transformation types.
Args:
xml (:obj:`ElementTree`): XML as tree.
Returns:
        transformations_types (:obj:`dict`): Mapping of transformation IDs to their type.
    Example:
        >>> assert transformations_types[34] == 'Transformation - Noun to Verb'  # True
    """
transformations_types = {}
root = xml.getroot()
types = root[0]
for curr_type in types:
transformations_types[int(curr_type.attrib['id'])] = curr_type.text
return transformations_types


def create_pair(text, simplified_text, transformation_id, transformation_type, source_dataset):
    """ Instantiate a pair from the given parameters.
Args:
text (:obj:`str`): Raw text.
simplified_text (:obj:`str`): Simplified text.
        transformation_id (:obj:`int`): Transformation ID.
        transformation_type (:obj:`str`): Transformation category.
        source_dataset (:obj:`str`): Source dataset from which the pair comes.
Returns:
pair (:obj:`dict`): Complex-Simplified Text pair with corresponding information.
"""
pair = {}
pair['transformation_id'] = transformation_id
pair['transformation_type'] = transformation_type
pair['source_dataset'] = source_dataset
pair['text'] = text
pair['simplified_text'] = simplified_text
return pair
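
# Example of a returned pair (illustrative values; 'Transformation - Noun to Verb' is the
# type shown in the get_types docstring, 'itwiki' one of the source datasets used below):
#   {'transformation_id': 34, 'transformation_type': 'Transformation - Noun to Verb',
#    'source_dataset': 'itwiki', 'text': '...', 'simplified_text': '...'}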


def fill_pairs_by_transformation(pairs_by_transformation, pair):
    """ This function adds a pair to the pairs_by_transformation dict (into the
    field corresponding to its transformation ID).
    Args:
        pairs_by_transformation (:obj:`dict`): Dictionary where pairs are organized by their
            transformation ID.
pair (:obj:`dict`): Complex-Simplified Text pair with corresponding information.
Returns:
None
"""
transformation_id = pair['transformation_id']
    if transformation_id not in pairs_by_transformation:
pairs_by_transformation[transformation_id] = [pair]
else:
pairs_by_transformation[transformation_id].append(pair)


def fill_pairs_by_source_dataset(pairs_by_source_dataset, pair):
    """ This function adds a pair to the pairs_by_source_dataset dict (into the
    field corresponding to its source dataset).
    Args:
        pairs_by_source_dataset (:obj:`dict`): Dictionary where pairs are organized by their
            source dataset.
pair (:obj:`dict`): Complex-Simplified Text pair with corresponding information.
Returns:
None
"""
source_dataset = pair['source_dataset']
    if source_dataset not in pairs_by_source_dataset:
pairs_by_source_dataset[source_dataset] = [pair]
else:
pairs_by_source_dataset[source_dataset].append(pair)


def get_pairs(xml, types):
""" This function returns Complex-Simplified pairs from XML.
Args:
xml (:obj:`ElementTree`): XML simplifications pairs as tree structure.
        types (:obj:`dict`): Mapping of transformation IDs to their transformation category.
Returns:
pairs (:obj:`list`): List of pairs (:obj:`dict`) without any ordering.
pairs_by_transformation (:obj:`dict`): Pairs clustered by their transformation type.
pairs_by_source_dataset (:obj:`dict`): Pairs clustered by their source dataset.
"""
root = xml.getroot()
simplifications = root[1]
pairs = []
pairs_by_transformation = {}
pairs_by_source_dataset = {}
for simplification in simplifications:
transformation_id = int(simplification.attrib['type'])
source = simplification.attrib['origin']
raw_text = simplification[0].text
simplified_text = simplification[1].text
curr_pair = create_pair(raw_text, simplified_text, transformation_id, types[transformation_id], source)
pairs.append(curr_pair)
fill_pairs_by_transformation(pairs_by_transformation, curr_pair)
fill_pairs_by_source_dataset(pairs_by_source_dataset, curr_pair)
return pairs, pairs_by_transformation, pairs_by_source_dataset
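
# XML layout assumed by get_pairs (reconstructed from the positional accesses above;
# tag names are never checked, children are indexed by position):
#   root[0] -> transformation types, one child per type, with an 'id' attribute and
#              the type name as text
#   root[1] -> simplifications, one child per pair, with 'type' and 'origin' attributes
#              and two children holding the raw and the simplified text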


def random_split(pairs, training_ratio):
""" This function randomly splits pairs as train/val/test subsets.
Args:
pairs (:obj:`list`): List of pairs (:obj:`dict`) without any ordering.
training_ratio (:obj:`float`): Ratio of training data (0<training_ratio<1).
Returns:
train (:obj:`list`): Training set
validation (:obj:`list`): Validation set
test (:obj:`list`): Testing set
"""
random.shuffle(pairs)
size = len(pairs)
train_limit = int(size * training_ratio)
val_limit = train_limit + int(size*(1-training_ratio)/2)
train = pairs[:train_limit]
val = pairs[train_limit:val_limit]
test = pairs[val_limit:]
return train, val, test
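
# Example (illustrative sizes): with 1000 pairs and training_ratio=0.7, random_split
# yields 700 train, 150 val, and 150 test pairs.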


def challenge_seen_unseen_transformation_split(pairs_by_transformation, training_ratio):
""" This function splits pairs s.t. evaluation can be done on seen and unseen
transformations for more challenging robustness/generalization evaluation.
Args:
pairs_by_transformation (:obj:`dict`): Pairs organized by their transformation ID.
training_ratio (:obj:`float`): Ratio of training data (0<training_ratio<1).
Returns:
train (:obj:`list`): Training set
validation (:obj:`list`): Validation set
seen_transformations_test (:obj:`list`): Seen Transformations testing set
unseen_transformations_test (:obj:`list`): Unseen transformations testing set
"""
# TODO transformations are hard-coded for now --> add argument in parser to specify them.
seen_transformations_ids = [1, 2, 3, 11, 13, 21, 23, 31, 33, 34, 37]
unseen_transformations_ids = [12, 22, 32, 35, 36]
train = []
val = []
seen_transformations_test = []
unseen_transformations_test = []
for transf_id in seen_transformations_ids:
curr_len = len(pairs_by_transformation[transf_id])
train_limit = int(curr_len * training_ratio)
val_limit = int(curr_len * (training_ratio + (1-training_ratio)/2.0))
train += pairs_by_transformation[transf_id][:train_limit]
val += pairs_by_transformation[transf_id][train_limit:val_limit]
seen_transformations_test += pairs_by_transformation[transf_id][val_limit:]
for transf_id in unseen_transformations_ids:
unseen_transformations_test += pairs_by_transformation[transf_id]
return train, val, seen_transformations_test, unseen_transformations_test
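
# Example (illustrative sizes): for a seen transformation with 100 pairs and
# training_ratio=0.7, train_limit = 70 and val_limit = int(100 * 0.85) = 85, so that
# type contributes 70 train, 15 val, and 15 seen-test pairs; pairs of the unseen
# transformations all go to the unseen test set.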


def challenge_seen_unseen_source_dataset_split(pairs_by_source_dataset, training_ratio):
    """ This function splits pairs s.t. evaluation can be done on seen and unseen
    source datasets for more challenging robustness/generalization evaluation.
Args:
pairs_by_source_dataset (:obj:`dict`): Pairs organized by their source dataset.
training_ratio (:obj:`float`): Ratio of training data (0<training_ratio<1).
Returns:
train (:obj:`list`): Training set
validation (:obj:`list`): Validation set
seen_source_test (:obj:`list`): Seen source dataset testing set
unseen_source_test (:obj:`list`): Unseen source dataset testing set
"""
# TODO source dataset for training hard-coded --> add argument in parser.
seen_source = ['itwiki'] # semi-supervised
unseen_source = ['tn'] # manually annotated
train = []
val = []
seen_source_test = []
unseen_source_test = []
for source in seen_source:
random.shuffle(pairs_by_source_dataset[source])
curr_len = len(pairs_by_source_dataset[source])
train_limit = int(curr_len * training_ratio)
val_limit = train_limit + int(curr_len * (1-training_ratio)/2.0)
train += pairs_by_source_dataset[source][:train_limit]
val += pairs_by_source_dataset[source][train_limit:val_limit]
seen_source_test += pairs_by_source_dataset[source][val_limit:]
for source in unseen_source:
unseen_source_test += pairs_by_source_dataset[source]
return train, val, seen_source_test, unseen_source_test
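
# Note: the 'itwiki' (semi-supervised) pairs are shuffled and split into train/val/test,
# while every 'tn' (manually annotated) pair is held out as the unseen-source test set.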


def split(args):
"""This function splits the XML file to produce specified subsets according to args.
Args:
args (:obj:`argparse.Namespace`): Parsed arguments.
Returns:
None
"""
xml = load_xml_file(args.data_path)
    version = args.data_path[-6:-4]  # assumes a filename ending in e.g. 'v2.xml', yielding 'v2'
transformations_types = get_types(xml)
pairs, pairs_by_transformation, pairs_by_source_dataset = get_pairs(xml, transformations_types)
if args.split_criteria == 'random':
train, val, test = random_split(pairs, args.training_ratio)
os.makedirs(f'{args.out_dir}/{version}/random_split/', exist_ok=True)
save_jsonl(train, f'{args.out_dir}/{version}/random_split/train.jsonl')
save_jsonl(val, f'{args.out_dir}/{version}/random_split/val.jsonl')
save_jsonl(test, f'{args.out_dir}/{version}/random_split/test.jsonl')
elif args.split_criteria == 'transformations':
seen_transformations_train, seen_transformations_val, seen_transformations_test, unseen_transformations_test = challenge_seen_unseen_transformation_split(pairs_by_transformation, args.training_ratio)
os.makedirs(f'{args.out_dir}/{version}/transformations_split/', exist_ok=True)
save_jsonl(seen_transformations_train, f'{args.out_dir}/{version}/transformations_split/train.jsonl')
save_jsonl(seen_transformations_val, f'{args.out_dir}/{version}/transformations_split/val.jsonl')
save_jsonl(seen_transformations_test, f'{args.out_dir}/{version}/transformations_split/seen_transformations_test.jsonl')
save_jsonl(unseen_transformations_test, f'{args.out_dir}/{version}/transformations_split/unseen_transformations_test.jsonl')
elif args.split_criteria == 'source_dataset':
itwiki_train, itwiki_val, itwiki_test, tn_test = challenge_seen_unseen_source_dataset_split(pairs_by_source_dataset, args.training_ratio)
os.makedirs(f'{args.out_dir}/{version}/source_dataset_split/', exist_ok=True)
save_jsonl(itwiki_train, f'{args.out_dir}/{version}/source_dataset_split/itwiki_train.jsonl')
save_jsonl(itwiki_val, f'{args.out_dir}/{version}/source_dataset_split/itwiki_val.jsonl')
save_jsonl(itwiki_test, f'{args.out_dir}/{version}/source_dataset_split/itwiki_test.jsonl')
save_jsonl(tn_test, f'{args.out_dir}/{version}/source_dataset_split/tn_test.jsonl')


if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Creating Train/Val/Test files")
parser.add_argument('--data_path', type=str, required=True, help='path to (single) data file')
parser.add_argument('--out_dir', type=str, required=True, help='output dir to store files')
    parser.add_argument('--training_ratio', type=float, required=True, help='training ratio (e.g. 0.7); the remainder is split equally between val and test')
parser.add_argument('--split_criteria', type=str, required=True, choices=['random', 'transformations', 'source_dataset'], help='split criteria')
args = parser.parse_args()
split(args)
sys.exit(0)
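
# Example invocation (hypothetical paths; the data file name should end in '<version>.xml',
# e.g. 'simpitiki-v2.xml', since `version` is sliced from the last characters of the path):
#   python train_val_test_split.py --data_path data/simpitiki-v2.xml \
#       --out_dir splits --training_ratio 0.7 --split_criteria transformations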