"""HuggingFace `datasets` loading script for the IK NLP '22 PESTYLE dataset.

The data (restricted access) consists of TSV files with per-sentence
translation/post-editing records and translator behavioral measurements.
"""

import os

import datasets
import pandas as pd

_CITATION = """No citation information available."""

_DESCRIPTION = """\
This dataset contains a sample of sentences taken from the FLORES-101 dataset that were either
translated from scratch or post-edited from an existing automatic translation by three human
translators. Translation were performed for the English-Italian language pair, and translators'
behavioral data (keystrokes, pauses, editing times) were collected using the PET platform.
"""

_HOMEPAGE = "https://www.rug.nl/masters/information-science/?lang=en"

_LICENSE = "Sharing and publishing of the data is not allowed at the moment."

# Every test split reads the same TSV file; the splits differ only in which
# columns are masked out (see _DICT_FIELDS below).
_SPLITS = {
    "train": os.path.join("IK_NLP_22_PESTYLE", "train.tsv"),
    "test_mask_subject": os.path.join("IK_NLP_22_PESTYLE", "test.tsv"),
    "test_mask_modality": os.path.join("IK_NLP_22_PESTYLE", "test.tsv"),
    "test_mask_time": os.path.join("IK_NLP_22_PESTYLE", "test.tsv"),
}

_ALL_FIELDS = [
    # identifiers and texts
    "item_id", "subject_id", "modality", "src_text", "mt_text", "tgt_text",
    # editing time and keystroke counts
    "edit_time", "k_total", "k_letter", "k_digit", "k_white", "k_symbol",
    "k_nav", "k_erase", "k_copy", "k_cut", "k_paste",
    # pause statistics
    "n_pause_geq_300", "len_pause_geq_300",
    "n_pause_geq_1000", "len_pause_geq_1000",
    # edit-operation counts and quality metrics
    "num_annotations", "n_insert", "n_delete", "n_substitute", "n_shift",
    "bleu", "chrf", "ter", "aligned_edit",
]

# Field subsets for the masked test configurations: each hides the columns
# that would reveal the quantity the split is meant to predict.
_FIELDS_MASK_SUBJECT = [f for f in _ALL_FIELDS if f != "subject_id"]

_FIELDS_MASK_MODALITY = [
    f
    for f in _ALL_FIELDS
    if f not in [
        "modality", "mt_text", "n_insert", "n_delete", "n_substitute",
        "n_shift", "ter", "bleu", "chrf", "aligned_edit",
    ]
]

_FIELDS_MASK_TIME = [
    f
    for f in _ALL_FIELDS
    if f not in [
        "edit_time", "n_pause_geq_300", "len_pause_geq_300",
        "n_pause_geq_1000", "len_pause_geq_1000",
    ]
]

# Maps each split name to the columns it exposes.
_DICT_FIELDS = {
    "train": _ALL_FIELDS,
    "test_mask_subject": _FIELDS_MASK_SUBJECT,
    "test_mask_modality": _FIELDS_MASK_MODALITY,
    "test_mask_time": _FIELDS_MASK_TIME,
}


class IkNlp22PEStyleConfig(datasets.BuilderConfig):
    """BuilderConfig for the IK NLP '22 HT-Style Dataset."""

    def __init__(
        self,
        features,
        **kwargs,
    ):
        """
        Args:
            features: `list[string]`, list of the features that will appear in the
                feature dict. Should not include "label".
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features


class IkNlp22PEStyle(datasets.GeneratorBasedBuilder):
    """Builder yielding one example per (sentence, translator) record."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        IkNlp22PEStyleConfig(
            name="main",
            features=_ALL_FIELDS,
        ),
    ]

    DEFAULT_CONFIG_NAME = "main"

    @property
    def manual_download_instructions(self):
        # Spaces between the concatenated fragments are required so the
        # rendered message does not run sentences together.
        return (
            "The access to the data is restricted to students of the IK MSc NLP 2022 course "
            "working on a related project. "
            "To load the data using this dataset, download and extract the IK_NLP_22_PESTYLE "
            "folder you were provided upon selecting the final project. "
            "After extracting it, the folder (referred to as root) must contain a "
            "IK_NLP_22_PESTYLE subfolder, containing train.tsv and test.tsv files. "
            "Then, load the dataset with: "
            "`datasets.load_dataset('GroNLP/ik-nlp-22_pestyle', 'main', data_dir='path/to/root/folder')`"
        )

    def _info(self):
        # Default every declared feature to int32, then override the columns
        # that are strings or floats.
        features = {feature: datasets.Value("int32") for feature in self.config.features}
        features["subject_id"] = datasets.Value("string")
        features["modality"] = datasets.Value("string")
        features["src_text"] = datasets.Value("string")
        features["mt_text"] = datasets.Value("string")
        features["tgt_text"] = datasets.Value("string")
        features["aligned_edit"] = datasets.Value("string")
        features["edit_time"] = datasets.Value("float32")
        features["bleu"] = datasets.Value("float32")
        features["chrf"] = datasets.Value("float32")
        features["ter"] = datasets.Value("float32")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(features),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        Raises:
            FileNotFoundError: if the manually downloaded data directory is missing.
        """
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                "{} does not exist. Make sure you insert the unzipped IK_NLP_22_PESTYLE dir via "
                "`datasets.load_dataset('GroNLP/ik-nlp-22_pestyle', data_dir=...)` "
                "Manual download instructions: {}".format(
                    data_dir, self.manual_download_instructions
                )
            )
        return [
            datasets.SplitGenerator(
                name=name,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, path),
                    "fields": _DICT_FIELDS[name],
                },
            )
            for name, path in _SPLITS.items()
        ]

    def _generate_examples(self, filepath: str, fields):
        """Yields examples as (key, example) tuples.

        Args:
            filepath: path to the split's TSV file.
            fields: columns to keep for this split (see _DICT_FIELDS).
        """
        # The data files are tab-separated (.tsv); pandas' default comma
        # separator would collapse each row into a single column.
        data = pd.read_csv(filepath, sep="\t")
        data = data[fields]
        for id_, row in data.iterrows():
            yield id_, row.to_dict()