Tasks: Text Classification
Sub-tasks: multi-class-classification
Size: 100K<n<1M
ArXiv: 2305.04582
Tags: relation extraction
License: LDC

# coding=utf-8
# Copyright 2022 The current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The MultiTACRED Relation Classification dataset in various languages."""

import itertools
import json
import os

import datasets

_CITATION = """\
@inproceedings{hennig-etal-2023-multitacred,
    title = "MultiTACRED: A Multilingual Version of the TAC Relation Extraction Dataset",
    author = "Hennig, Leonhard and Thomas, Philippe and Möller, Sebastian",
    booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
}
@inproceedings{zhang-etal-2017-position,
    title = "Position-aware Attention and Supervised Data Improve Slot Filling",
    author = "Zhang, Yuhao and
      Zhong, Victor and
      Chen, Danqi and
      Angeli, Gabor and
      Manning, Christopher D.",
    booktitle = "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
    month = sep,
    year = "2017",
    address = "Copenhagen, Denmark",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/D17-1004",
    doi = "10.18653/v1/D17-1004",
    pages = "35--45",
}
@inproceedings{alt-etal-2020-tacred,
    title = "{TACRED} Revisited: A Thorough Evaluation of the {TACRED} Relation Extraction Task",
    author = "Alt, Christoph and
      Gabryszak, Aleksandra and
      Hennig, Leonhard",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.acl-main.142",
    doi = "10.18653/v1/2020.acl-main.142",
    pages = "1558--1569",
}
@inproceedings{DBLP:conf/aaai/StoicaPP21,
    author = {George Stoica and
      Emmanouil Antonios Platanios and
      Barnab{\'{a}}s P{\'{o}}czos},
    title = {Re-TACRED: Addressing Shortcomings of the {TACRED} Dataset},
    booktitle = {Thirty-Fifth {AAAI} Conference on Artificial Intelligence, {AAAI}
      2021, Thirty-Third Conference on Innovative Applications of Artificial
      Intelligence, {IAAI} 2021, The Eleventh Symposium on Educational Advances
      in Artificial Intelligence, {EAAI} 2021, Virtual Event, February 2-9,
      2021},
    pages = {13843--13850},
    publisher = {{AAAI} Press},
    year = {2021},
    url = {https://ojs.aaai.org/index.php/AAAI/article/view/17631},
}
"""

_DESCRIPTION = """\
MultiTACRED is a multilingual version of the large-scale TAC Relation Extraction Dataset
(https://nlp.stanford.edu/projects/tacred). It covers 12 typologically diverse languages from 9 language families,
and was created by the Speech & Language Technology group of DFKI (https://www.dfki.de/slt) by machine-translating the
instances of the original TACRED dataset and automatically projecting their entity annotations. For details of the
original TACRED's data collection and annotation process, see the Stanford paper (https://aclanthology.org/D17-1004/).

Translations are syntactically validated by checking the correctness of the XML tag markup. Any translations with an
invalid tag structure, e.g. missing or invalid head or tail tag pairs, are discarded (on average, 2.3% of the
instances). The languages covered are: Arabic, Chinese, Finnish, French, German, Hindi, Hungarian, Japanese, Polish,
Russian, Spanish, and Turkish. The intended use is supervised relation classification, and the intended audience is
researchers. Please see our ACL paper (https://arxiv.org/abs/2305.04582) for full details.

NOTE: This DatasetReader supports a reduced version of the original TACRED JSON format with the following change:
- Removed fields: stanford_pos, stanford_ner, stanford_head, stanford_deprel, docid

The motivation for this is to support additional languages for which these fields were not required or available.
The reader expects a configuration name that specifies the variant (original, revisited or retacred) and the
language (as a two-letter ISO 639-1 code).

The DatasetReader changes the offsets of the following fields to conform with standard Python usage (see
_generate_examples()):
- subj_end to subj_end + 1 (makes the end offset exclusive)
- obj_end to obj_end + 1 (makes the end offset exclusive)

NOTE 2: The MultiTACRED dataset offers an additional 'split', namely the backtranslated test data (translated to a
target language and then back to English). To access this split, use dataset['backtranslated_test'].

You can find the TACRED dataset reader for the English version of the dataset at
https://huggingface.co/datasets/DFKI-SLT/tacred.
"""

_HOMEPAGE = "https://github.com/DFKI-NLP/MultiTACRED"

_LICENSE = "LDC"

_URL = "https://catalog.ldc.upenn.edu/LDC2024T09"

# The HuggingFace datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in the `_split_generators` method).
_PATCH_URLs = {
    "dev": "https://raw.githubusercontent.com/DFKI-NLP/tacrev/master/patch/dev_patch.json",
    "test": "https://raw.githubusercontent.com/DFKI-NLP/tacrev/master/patch/test_patch.json",
}
_RETACRED_PATCH_URLs = {
    "train": "https://raw.githubusercontent.com/gstoica27/Re-TACRED/master/Re-TACRED/train_id2label.json",
    "dev": "https://raw.githubusercontent.com/gstoica27/Re-TACRED/master/Re-TACRED/dev_id2label.json",
    "test": "https://raw.githubusercontent.com/gstoica27/Re-TACRED/master/Re-TACRED/test_id2label.json",
}

_BACKTRANSLATION_TEST_SPLIT = "backtranslated_test"
_RETACRED = "retacred"
_REVISITED = "revisited"
_ORIGINAL = "original"

_VERSION = datasets.Version("1.1.0")

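# ISO 639-1 codes of the twelve translation target languages: Arabic (ar), German (de),
# Spanish (es), Finnish (fi), French (fr), Hindi (hi), Hungarian (hu), Japanese (ja),
# Polish (pl), Russian (ru), Turkish (tr), and Chinese (zh).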
_LANGS = [
    "ar",
    "de",
    "es",
    "fi",
    "fr",
    "hi",
    "hu",
    "ja",
    "pl",
    "ru",
    "tr",
    "zh",
]

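# The 42 relation labels of the original TACRED: 41 relation types plus "no_relation".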
_CLASS_LABELS = [
    "no_relation",
    "org:alternate_names",
    "org:city_of_headquarters",
    "org:country_of_headquarters",
    "org:dissolved",
    "org:founded",
    "org:founded_by",
    "org:member_of",
    "org:members",
    "org:number_of_employees/members",
    "org:parents",
    "org:political/religious_affiliation",
    "org:shareholders",
    "org:stateorprovince_of_headquarters",
    "org:subsidiaries",
    "org:top_members/employees",
    "org:website",
    "per:age",
    "per:alternate_names",
    "per:cause_of_death",
    "per:charges",
    "per:children",
    "per:cities_of_residence",
    "per:city_of_birth",
    "per:city_of_death",
    "per:countries_of_residence",
    "per:country_of_birth",
    "per:country_of_death",
    "per:date_of_birth",
    "per:date_of_death",
    "per:employee_of",
    "per:origin",
    "per:other_family",
    "per:parents",
    "per:religion",
    "per:schools_attended",
    "per:siblings",
    "per:spouse",
    "per:stateorprovince_of_birth",
    "per:stateorprovince_of_death",
    "per:stateorprovinces_of_residence",
    "per:title",
]

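# The 40 relation labels of Re-TACRED: 39 relation types plus "no_relation".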
_RETACRED_CLASS_LABELS = [
    "no_relation",
    "org:alternate_names",
    "org:city_of_branch",
    "org:country_of_branch",
    "org:dissolved",
    "org:founded",
    "org:founded_by",
    "org:member_of",
    "org:members",
    "org:number_of_employees/members",
    "org:political/religious_affiliation",
    "org:shareholders",
    "org:stateorprovince_of_branch",
    "org:top_members/employees",
    "org:website",
    "per:age",
    "per:cause_of_death",
    "per:charges",
    "per:children",
    "per:cities_of_residence",
    "per:city_of_birth",
    "per:city_of_death",
    "per:countries_of_residence",
    "per:country_of_birth",
    "per:country_of_death",
    "per:date_of_birth",
    "per:date_of_death",
    "per:employee_of",
    "per:identity",
    "per:origin",
    "per:other_family",
    "per:parents",
    "per:religion",
    "per:schools_attended",
    "per:siblings",
    "per:spouse",
    "per:stateorprovince_of_birth",
    "per:stateorprovince_of_death",
    "per:stateorprovinces_of_residence",
    "per:title",
]

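# Entity (NER) types that may appear as subj_type/obj_type in the TACRED annotations.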
_NER_CLASS_LABELS = [
    "LOCATION",
    "ORGANIZATION",
    "PERSON",
    "DATE",
    "MONEY",
    "PERCENT",
    "TIME",
    "CAUSE_OF_DEATH",
    "CITY",
    "COUNTRY",
    "CRIMINAL_CHARGE",
    "EMAIL",
    "HANDLE",
    "IDEOLOGY",
    "NATIONALITY",
    "RELIGION",
    "STATE_OR_PROVINCE",
    "TITLE",
    "URL",
    "NUMBER",
    "ORDINAL",
    "MISC",
    "DURATION",
    "O",
]

_DESC_TEXTS = {
    _ORIGINAL: "The original TACRED.",
    _REVISITED: "TACRED Revisited (corrected labels for the 5k most challenging examples in the dev and test splits).",
    _RETACRED: "Re-TACRED, i.e. relabeled TACRED (corrected labels for all splits, pruned).",
}


def convert_ptb_token(token: str) -> str:
    """Convert PTB-escaped bracket tokens back to their literal characters."""
    return {
        "-lrb-": "(",
        "-rrb-": ")",
        "-lsb-": "[",
        "-rsb-": "]",
        "-lcb-": "{",
        "-rcb-": "}",
    }.get(token.lower(), token)
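
# For example, convert_ptb_token("-LRB-") returns "(" -- the lookup is case-insensitive --
# while any token without a PTB bracket escape is returned unchanged.
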
class MultiTacredConfig(datasets.BuilderConfig):
    """BuilderConfig for MultiTacred."""

    def __init__(self, label_variant, language, **kwargs):
        """BuilderConfig for MultiTacred.

        Args:
            label_variant: `string`, source of labels, i.e. 'original', 'revisited' or 'retacred'
            language: `string`, 2-letter ISO 639-1 language code
            **kwargs: keyword arguments forwarded to super.
        """
        super(MultiTacredConfig, self).__init__(version=_VERSION, **kwargs)
        self.language = language
        self.label_variant = label_variant


class MultiTacred(datasets.GeneratorBasedBuilder):
    """MultiTACRED is a multilingual version of the large-scale TAC Relation Extraction Dataset (LDC2018T24)."""

    BUILDER_CONFIGS = [
        MultiTacredConfig(
            name=f"{label_variant}-{language}",
            language=language,
            label_variant=label_variant,
            description=f"{_DESC_TEXTS[label_variant]} Examples in language '{language}'.",
        )
        for (language, label_variant) in itertools.product(_LANGS, [_ORIGINAL, _REVISITED, _RETACRED])
    ]
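    # itertools.product above yields 36 configurations (12 languages x 3 label variants),
    # named "<label_variant>-<language>", e.g. "original-de", "revisited-ar" or "retacred-zh".
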
    @property
    def manual_download_instructions(self):
        return (
            "To use MultiTACRED you have to download it manually. "
            "It is available via the LDC at https://catalog.ldc.upenn.edu/LDC2024T09. "
            "Please extract all files into one folder and load a language with: "
            "`datasets.load_dataset('DFKI-SLT/multitacred', name='variant-language', data_dir='path/to/folder/folder_name')`."
        )

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "token": datasets.Sequence(datasets.Value("string")),
                "subj_start": datasets.Value("int32"),
                "subj_end": datasets.Value("int32"),
                "subj_type": datasets.ClassLabel(names=_NER_CLASS_LABELS),
                "obj_start": datasets.Value("int32"),
                "obj_end": datasets.Value("int32"),
                "obj_type": datasets.ClassLabel(names=_NER_CLASS_LABELS),
                "relation": datasets.ClassLabel(
                    names=_RETACRED_CLASS_LABELS if self.config.label_variant == _RETACRED else _CLASS_LABELS
                ),
            }
        )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types.
            # The features are defined above because the relation labels differ between label variants.
            features=features,
            # If there's a common (input, target) tuple from the features, specify them here.
            # They'll be used if as_supervised=True in builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration
        # selected by the user is in self.config.name.
        # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs.
        # It can accept any type or nested list/dict and will give back the same structure with the URLs
        # replaced with paths to local files. By default, archives are extracted and a path to a cached
        # folder where they were extracted is returned instead of the archive.
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                "{} does not exist. Make sure you insert a manual dir via `datasets.load_dataset("
                "'DFKI-SLT/multitacred', name=..., data_dir=...)` that includes the unzipped files from the "
                "MULTITACRED_LDC zip. Manual download instructions: {}".format(
                    data_dir, self.manual_download_instructions
                )
            )
        patch_files = {}
        if self.config.label_variant == _REVISITED:
            patch_files = dl_manager.download_and_extract(_PATCH_URLs)
        elif self.config.label_variant == _RETACRED:
            patch_files = dl_manager.download_and_extract(_RETACRED_PATCH_URLs)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, self.config.language, f"train_{self.config.language}.json"),
                    "patch_filepath": patch_files.get("train"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, self.config.language, f"test_{self.config.language}.json"),
                    "patch_filepath": patch_files.get("test"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, self.config.language, f"dev_{self.config.language}.json"),
                    "patch_filepath": patch_files.get("dev"),
                },
            ),
            datasets.SplitGenerator(
                name=_BACKTRANSLATION_TEST_SPLIT,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, self.config.language, f"test_en_{self.config.language}_bt.json"),
                    "patch_filepath": patch_files.get("test"),
                },
            ),
        ]

    def _generate_examples(self, filepath, patch_filepath):
        """Yields examples."""
        # This method receives as arguments the `gen_kwargs` defined in the `_split_generators` method.
        # It is in charge of opening the given file and yielding (key, example) tuples from the dataset.
        # The key is not important; it is only kept for legacy reasons (from tfds).
        patch_examples = {}
        if patch_filepath is not None:
            with open(patch_filepath, encoding="utf-8") as f:
                if self.config.label_variant == _REVISITED:
                    patch_examples = {example["id"]: example for example in json.load(f)}
                elif self.config.label_variant == _RETACRED:
                    patch_examples = {_id: {"id": _id, "relation": label} for _id, label in json.load(f).items()}
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
            for example in data:
                id_ = example["id"]
                if id_ in patch_examples:
                    example.update(patch_examples[id_])
                elif self.config.label_variant == _RETACRED:
                    # Re-TACRED was pruned; skip the example if its id is not in patch_examples.
                    continue
                yield id_, {
                    "id": example["id"],
                    "token": [convert_ptb_token(token) for token in example["token"]],
                    "subj_start": example["subj_start"],
                    "subj_end": example["subj_end"] + 1,  # make end offset exclusive
                    "subj_type": example["subj_type"],
                    "obj_start": example["obj_start"],
                    "obj_end": example["obj_end"] + 1,  # make end offset exclusive
                    "obj_type": example["obj_type"],
                    "relation": example["relation"],
                }
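
# Example usage (a sketch -- the config name and data_dir below are placeholders;
# point data_dir at the folder containing the extracted LDC2024T09 files):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("DFKI-SLT/multitacred", name="original-de", data_dir="path/to/multitacred")
#   example = ds["train"][0]
#   # End offsets are exclusive after loading, so Python slicing recovers the entity mention:
#   subj_tokens = example["token"][example["subj_start"]:example["subj_end"]]
#   # The backtranslated test data is exposed as an extra split:
#   bt = ds["backtranslated_test"]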