wiki-entity-similarity / wiki-entity-similarity.py
Exr0n's picture
use correct name for int
aed46a0 verified
raw
history blame
No virus
4.71 kB
import datasets
from dataclasses import dataclass
import csv
_DESCRIPTION = '''WES: Learning Semantic Similarity from 6M Names for 1M Entities'''
_CITE = '''\
@inproceedings{exr0n2022WES
author={Exr0n},
title={WES: Learning Semantic Similarity from 6M Names for 1M Entities},
year={2022}
}
'''
_HUGGINGFACE_REPO = "https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/"
@dataclass
class WikiEntitySimilarityConfig(datasets.BuilderConfig):
"""BuilderConfig for CSV."""
year: int = None
type: str = None
threshhold: int = None
# path: str = None
class WikiEntitySimilarity(datasets.GeneratorBasedBuilder):
"""WES: Learning semantic similarity from 6M names for 1M entities"""
BUILDER_CONFIG_CLASS = WikiEntitySimilarityConfig
BUILDER_CONFIGS = [
WikiEntitySimilarityConfig(
name='2018thresh5corpus',
description='raw link corpus (all true): min 5 inbound links, lowest quality',
year=2018,
type='corpus',
threshhold=5,
# path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/link_synonyms-2018-thresh_5.csv"
),
WikiEntitySimilarityConfig(
name='2018thresh10corpus',
description='raw link corpus (all true): min 10 inbound links, medium quality',
year=2018,
type='corpus',
threshhold=10,
# path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/link_synonyms-2018-thresh_10.csv"
),
WikiEntitySimilarityConfig(
name='2018thresh20corpus',
description='raw link corpus (all true): min 20 inbound links, high quality',
year=2018,
type='corpus',
threshhold=20,
# path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/link_synonyms-2018-thresh_20.csv"
),
WikiEntitySimilarityConfig(
name='2018thresh5pairs',
description='training pairs based on min 5 inbound links, lowest quality',
year=2018,
type='pairs',
threshhold=5,
# path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/2018thresh5"
),
WikiEntitySimilarityConfig(
name='2018thresh10pairs',
description='training pairs based on min 10 inbound links, medium quality',
year=2018,
type='pairs',
threshhold=10,
# path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/2018thresh10"
),
WikiEntitySimilarityConfig(
name='2018thresh20pairs',
description='training pairs based on min 20 inbound links, high quality',
year=2018,
type='pairs',
threshhold=20,
# path="https://huggingface.co/datasets/Exr0n/wiki-entity-similarity/resolve/main/2018thresh20"
),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
'article': datasets.Value('string'),
'link_text': datasets.Value('string'),
'is_same': datasets.Value('uint8'),
}
),
citation=_CITE,
homepage="https://github.com/Exr0nProjects/wiki-entity-similarity",
)
def _split_generators(self, dl_manager):
path = _HUGGINGFACE_REPO + f"{self.config.year}thresh{self.config.threshhold}"
if self.config.type == 'corpus':
filepath = dl_manager.download(path + 'corpus.csv')
return [ datasets.SplitGenerator(name=datasets.Split.TRAIN,
gen_kwargs={ 'path': filepath }) ]
elif self.config.type == 'pairs':
ret = []
for n, e in zip(['train', 'dev', 'test'],
[datasets.Split.TRAIN,
datasets.Split.VALIDATION,
datasets.Split.TEST]):
fp = dl_manager.download(path + n + '.csv')
ret.append( datasets.SplitGenerator(name=e, gen_kwargs={ 'path': fp }) )
return ret
else:
raise ValueError(f"invalid dataset type '{self.config.type}', expected 'corpus' for raw links or 'pairs' for trainable pairs with negative examples")
def _generate_examples(self, path):
with open(path, 'r') as rf:
reader = csv.DictReader(rf)
for i, row in enumerate(reader):
yield i, row