"""Loading script for the lexical relation classification benchmarks used by RelBERT.

Each configuration corresponds to one benchmark (BLESS, CogALexV, K&H+N,
ROOT09, EVALution); examples are (head, tail, relation) string triples read
from per-split JSONL files hosted on the Hugging Face Hub.
"""
import json

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """[Lexical Relation Classification](https://aclanthology.org/P19-1169/)"""
_NAME = "lexical_relation_classification"
_VERSION = "1.0.0"
_CITATION = """
@inproceedings{wang-etal-2019-spherere,
    title = "{S}phere{RE}: Distinguishing Lexical Relations with Hyperspherical Relation Embeddings",
    author = "Wang, Chengyu and He, Xiaofeng and Zhou, Aoying",
    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/P19-1169",
    doi = "10.18653/v1/P19-1169",
    pages = "1727--1737",
    abstract = "Lexical relations describe how meanings of terms relate to each other. Typical examples include hypernymy, synonymy, meronymy, etc. Automatic distinction of lexical relations is vital for NLP applications, and also challenging due to the lack of contextual signals to discriminate between such relations. In this work, we present a neural representation learning model to distinguish lexical relations among term pairs based on Hyperspherical Relation Embeddings (SphereRE). Rather than learning embeddings for individual terms, the model learns representations of relation triples by mapping them to the hyperspherical embedding space, where relation triples of different lexical relations are well separated. 
Experiments over several benchmarks confirm SphereRE outperforms state-of-the-arts.",
}
"""
_HOME_PAGE = "https://github.com/asahi417/relbert"
_URL = f'https://huggingface.co/datasets/relbert/{_NAME}/raw/main/dataset'
# Per-split source files. NOTE: CogALexV ships no validation split, hence its
# absence from the VALIDATION mapping; `_split_generators` skips missing splits.
_URLS = {
    str(datasets.Split.TRAIN): {
        'BLESS': [f'{_URL}/BLESS/train.jsonl'],
        'CogALexV': [f'{_URL}/CogALexV/train.jsonl'],
        'K&H+N': [f'{_URL}/K&H+N/train.jsonl'],
        'ROOT09': [f'{_URL}/ROOT09/train.jsonl'],
        'EVALution': [f'{_URL}/EVALution/train.jsonl'],
    },
    str(datasets.Split.TEST): {
        'BLESS': [f'{_URL}/BLESS/test.jsonl'],
        'CogALexV': [f'{_URL}/CogALexV/test.jsonl'],
        'K&H+N': [f'{_URL}/K&H+N/test.jsonl'],
        'ROOT09': [f'{_URL}/ROOT09/test.jsonl'],
        'EVALution': [f'{_URL}/EVALution/test.jsonl'],
    },
    str(datasets.Split.VALIDATION): {
        'BLESS': [f'{_URL}/BLESS/val.jsonl'],
        'K&H+N': [f'{_URL}/K&H+N/val.jsonl'],
        'ROOT09': [f'{_URL}/ROOT09/val.jsonl'],
        'EVALution': [f'{_URL}/EVALution/val.jsonl'],
    }
}
# Benchmark names, taken from the first split's mapping (all splits share keys
# except for CogALexV's missing validation set).
_DATASET = list(next(iter(_URLS.values())).keys())


class LexicalRelationClassificationConfig(datasets.BuilderConfig):
    """BuilderConfig for one lexical-relation benchmark."""

    def __init__(self, **kwargs):
        """BuilderConfig.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(LexicalRelationClassificationConfig, self).__init__(**kwargs)


class LexicalRelationClassification(datasets.GeneratorBasedBuilder):
    """Dataset builder: one config per benchmark, yielding string triples."""

    # Fix: _VERSION was defined at module level but never attached to the builder.
    VERSION = datasets.Version(_VERSION)
    BUILDER_CONFIGS = [
        LexicalRelationClassificationConfig(name=i, description=f"Dataset {i}")
        for i in sorted(_DATASET)
    ]

    def _info(self):
        """Return dataset metadata: three string features per example."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "head": datasets.Value("string"),
                    "tail": datasets.Value("string"),
                    "relation": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            # Fix: _CITATION was defined at module level but never passed through.
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download this config's files and emit one generator per available split."""
        downloaded_file = dl_manager.download_and_extract(
            {k: v[self.config.name] for k, v in _URLS.items() if self.config.name in v}
        )
        return [
            datasets.SplitGenerator(name=k, gen_kwargs={"filepaths": downloaded_file[k]})
            for k, v in _URLS.items()
            if self.config.name in v  # skip splits the benchmark does not provide
        ]

    def _generate_examples(self, filepaths):
        """Yield (key, example) pairs by streaming each JSONL file line by line."""
        _key = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            with open(filepath, encoding="utf-8") as f:
                # Stream instead of slurping the whole file; skip blank or
                # whitespace-only lines (the latter would crash json.loads).
                for line in f:
                    line = line.strip()
                    if not line:
                        continue
                    yield _key, json.loads(line)
                    _key += 1