"""HuggingFace `datasets` loading script for the Metaphors and Analogies benchmark."""

import json
from itertools import chain  # NOTE(review): unused here, but kept — file-level import may be relied on elsewhere

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """[Unified Benchmark for Metaphor Identification]"""
_NAME = "Metaphors_and_Analogies"
_VERSION = "1.0.0"
_CITATION = ""
_HOME_PAGE = "https://github.com/Mionies/metaphors_and_analogies_datasets"
# _URL = f'https://huggingface.co/datasets/Joanne/{_NAME}/raw/main/dataset'
# _URL = f'https://huggingface.co/datasets/Joanne/{_NAME}/resolve/main'
_URL = "."

############# META DATA ####################################################

_CARDILLO_DESCRIPTION = '''
520 sentence pairs of the form A is a B. The pairs share the same 'B' concept.
B is composed with 2 different A concept to form one literal sentence and one
metaphorical sentence.
'''
_CARDILLO_CITATION = '''
'''
_CARDILLO_PAPER_LINK = '''
https://link.springer.com/article/10.3758/s13428-016-0717-1
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2952404/
'''

#############################################

_JANKOWIAC_DESCRIPTION = '''
120 triples of sentences labelled
Example sentences :
- This banana is like a harbour - anomaly
- A house is a harbour - metaphor
- This area is a harbour - literal
'''
_JANKOWIAC_CITATION = '''
'''
_JANKOWIAC_PAPER_LINK = '''https://link-springer-com.abc.cardiff.ac.uk/article/10.1007/s10936-020-09695-7
'''

#############################################

_SAT_DESCRIPTION = '''
One stem pair and 5 different possible answer pairs.
Only one pair of the five forms a correct proportional analogy with the stam pair.
Example:
- stem : callow - experience
Multiple choices :
- painting - artist
- heavy - weight
- barren - fruit
- gift - donor
- measure - ounce
- answer : barren - fruit
'''
_SAT_CITATION = '''
'''
_SAT_LINK = '''
'''

##############################################

_GREEN_DESCRIPTION = '''
40 proportional analogies quadruples : 40 stem pairs, 40 near analogies of
those pairs, 40 far analogies, and 40 non analogical pairs.
'''
_GREEN_CITATION = '''
Green, A. E., Kraemer, D. J. M., Fugelsang, J., Gray, J. R., & Dunbar, K. (2010).
Connecting Long Distance: Semantic Distance in Analogical Reasoning Modulates
Frontopolar Cortex Activity. Cerebral Cortex, 10, 70-76.
'''
_GREEN_LINK = '''
'''

#####################################################

_KMIECIK_DESCRIPTION = '''
List of quadruples with the labels 'analogy': TRUE/FALSE, 'near_analogy : 'true/false'.
'''
_KMIECIK_CITATION = '''
Kmiecik, M. J., Brisson, R. J., & Morrison, R. G. (2019). The time course of
semantic and relational processing during verbal analogical reasoning.
Brain and Cognition, 129, 25-34.'''
_KMIECIK_LINK = '''
'''

############################## DATASETS NAMES & CONFIG ######################################################

# Configuration families; every configuration in a family shares one feature schema.
_PAIR_CL_CONFIG1 = [
    'Pairs_Cardillo_random_split',
    'Pairs_Jankowiac_random_split',
    'Pairs_Cardillo_lexical_split',
    'Pairs_Jankowiac_lexical_split',
]
_QUAD_CL_CONFIG2 = [
    'Quadruples_Kmiecik_random_split',
    'Quadruples_Green_lexical_split',
    'Quadruples_SAT_MET_lexical_split',
    'Quadruples_SAT_MET_FILTERED_lexical_split',
    'Quadruples_Kmiecik_lexical_split_on_CD',
    'Quadruples_Kmiecik_lexical_split_on_AB',
    'Quadruples_SAT_MET_random_split',
    'Quadruples_SAT_MET_FILTERED_random_split',
    'Quadruples_Green_random_split',
]
_PAIR_SET_CONFIG3 = ['Pairs_Cardillo_set', 'Pairs_Jankowiac_set']
_QUAD_SET_CONFIG4 = ['Quadruples_Green_set', 'Quadruples_SAT_MET_set', 'Quadruples_SAT_MET_FILTERED_set']

# Feature names exposed by each family.
_CONFIG1 = ["corpus", "id", "set_id", "label", "sentence", "A", "B", "A_position", "B_position", "5-folds"]
_CONFIG2 = ["corpus", "id", "set_id", "label", "AB", "CD", "5-folds"]
_CONFIG3 = ["corpus", "id", "pair_ids", "labels", "sentences", "A_positions", "B_positions", "answer", "stem", "5-folds"]
_CONFIG4 = ["corpus", "id", "pair_ids", "labels", "answer", "stem", "pairs", "5-folds"]

_DATASET = _PAIR_CL_CONFIG1 + _QUAD_CL_CONFIG2 + _PAIR_SET_CONFIG3 + _QUAD_SET_CONFIG4

# Map each configuration name to its feature-name list.
_DFEAT = {}
for _family, _family_features in (
    (_PAIR_CL_CONFIG1, _CONFIG1),
    (_QUAD_CL_CONFIG2, _CONFIG2),
    (_PAIR_SET_CONFIG3, _CONFIG3),
    (_QUAD_SET_CONFIG4, _CONFIG4),
):
    for _cfg_name in _family:
        _DFEAT[_cfg_name] = _family_features

################## SPLITS & PATHS ############################################
# One jsonl file per (configuration, split), all following the same path
# pattern.  The previous hand-written table duplicated the 'Pairs_Cardillo_set'
# train entry and carried a stray 'Pairs_Cardillo_orig_set' validation entry
# that no configuration referenced; deriving the table from _DATASET removes
# both defects and keeps the three splits consistent.
_URLS = {
    str(_split): {
        _dataset: [f'{_URL}/hf_datasets/{_dataset}/{_split}.jsonl']
        for _dataset in _DATASET
    }
    for _split in (datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST)
}

############################################################################################


class MetAnConfig(datasets.BuilderConfig):
    """BuilderConfig for one Metaphors and Analogies dataset configuration."""

    def __init__(self, features, citation, **kwargs):
        """BuilderConfig.

        Args:
            features: list of feature names exposed by this configuration.
            citation: citation string for the underlying corpus.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version(_VERSION), **kwargs)
        self.features = features
        self.citation = citation


class MetAn(datasets.GeneratorBasedBuilder):
    """The Metaphors and Analogies Datasets."""

    B1 = [
        MetAnConfig(
            name=x,
            description=f"Dataset {x} : a dataset for classification of pairs in A is-a B constructions",
            citation="",
            features=_CONFIG1,
        )
        for x in _PAIR_CL_CONFIG1
    ]
    B2 = [
        MetAnConfig(
            name=x,
            description=f"Dataset {x} : a dataset for classification of quadruples in A is to B what C is to D constructions",
            citation="",
            features=_CONFIG2,
        )
        for x in _QUAD_CL_CONFIG2
    ]
    B3 = [
        MetAnConfig(
            name=x,
            description=f"Dataset {x} : a dataset for set of pairs of the form A is-a B, with exactly one metaphorical instance among them",
            citation="",
            features=_CONFIG3,
        )
        for x in _PAIR_SET_CONFIG3
    ]
    B4 = [
        MetAnConfig(
            name=x,
            description=f"Dataset : {x} : a dataset with sets of quadruples ",
            citation="",
            features=_CONFIG4,
        )
        for x in _QUAD_SET_CONFIG4
    ]
    BUILDER_CONFIGS = B1 + B2 + B3 + B4

    def _info(self):
        """Return the DatasetInfo for the active configuration.

        The catalogue below lists every feature used by any configuration;
        the active configuration selects its subset through _DFEAT.
        """
        features_all = {
            "id": datasets.Value("string"),
            "set_id": datasets.Value("int32"),
            "pair_ids": datasets.Sequence(datasets.Value("string")),
            "corpus": datasets.Value("string"),
            "label": datasets.Value("int32"),
            "labels": datasets.Sequence(datasets.Value("int32")),
            "A": datasets.Value("string"),
            "B": datasets.Value("string"),
            "A_position": datasets.Sequence(datasets.Value("int32")),
            "B_position": datasets.Sequence(datasets.Value("int32")),
            "A_positions": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
            "B_positions": datasets.Sequence(datasets.Sequence(datasets.Value("int32"))),
            "AB": datasets.Sequence(datasets.Value("string")),
            "CD": datasets.Sequence(datasets.Value("string")),
            "stem": datasets.Value("string"),
            "answer": datasets.Value("int32"),
            "sentence": datasets.Value("string"),
            "sentences": datasets.Sequence(datasets.Value("string")),
            "pairs": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
            "5-folds": datasets.Sequence(datasets.Value("string")),
        }
        features = {feature: features_all[feature] for feature in _DFEAT[self.config.name]}
        return datasets.DatasetInfo(
            description=_DESCRIPTION + self.config.description,
            features=datasets.Features(features),
            homepage=_HOME_PAGE,  # was defined but never used before
            citation=self.config.citation,  # every config currently passes "" — same value as before
        )

    def _split_generators(self, dl_manager):
        """Download this configuration's files and declare one generator per split."""
        # Restrict the URL table to the splits that define this configuration.
        target_urls = {split: table[self.config.name] for split, table in _URLS.items() if self.config.name in table}
        downloaded = dl_manager.download_and_extract(target_urls)
        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": paths})
            for split, paths in downloaded.items()
        ]

    def _generate_examples(self, filepaths):
        """Yield (key, example) pairs parsed from the jsonl split files."""
        key = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            with open(filepath, encoding="utf-8") as f:
                # Stream line by line instead of reading the whole file;
                # blank lines (including a trailing newline) are skipped.
                for line in f:
                    if line.strip():
                        yield key, json.loads(line)
                        key += 1