Languages: English
Multilinguality: monolingual
Size Categories: unknown
Language Creators: machine-generated
Annotations Creators: expert-generated
Source Datasets: original
Tags: word-segmentation
License:
dev_stanford.py
"""Dev-Stanford dataset by Çelebi et al.."""
import datasets
_CITATION = """
@article{celebi2018segmenting,
title={Segmenting hashtags and analyzing their grammatical structure},
author={Celebi, Arda and {\"O}zg{\"u}r, Arzucan},
journal={Journal of the Association for Information Science and Technology},
volume={69},
number={5},
pages={675--686},
year={2018},
publisher={Wiley Online Library}
}
"""
_DESCRIPTION = """
1000 hashtags manually segmented by Çelebi et al. for development purposes,
randomly selected from the Stanford Sentiment Tweet Corpus by Sentiment140.
"""
_URLS = {
    "dev": "https://raw.githubusercontent.com/ardax/hashtag-segmentor/master/Dev-Stanford"
}
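# The raw Dev-Stanford file lists one hashtag per line, already segmented
# with spaces (e.g. a hypothetical line "happy new year" corresponds to the
# hashtag "happynewyear"); _generate_examples below relies on this format.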
class DevStanford(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Dev-Stanford hashtag segmentation data."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "hashtag": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://tabilab.cmpe.boun.edu.tr/projects/hashtag_segmentation/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)
        # The dataset ships as a single file, exposed here as the validation split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"]},
            )
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                # Each line holds one space-segmented hashtag; removing the
                # spaces recovers the original unsegmented hashtag.
                yield idx, {
                    "index": idx,
                    "hashtag": line.strip().replace(" ", ""),
                    "segmentation": line.strip(),
                }
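
As a minimal usage sketch (the Hub repo id "ruanchaves/dev_stanford" is an assumption based on this page's owner and file name), the validation split defined above can be loaded with the standard datasets API:

from datasets import load_dataset

# "ruanchaves/dev_stanford" is an assumed repo id; adjust it if the script
# lives under a different namespace.
dataset = load_dataset("ruanchaves/dev_stanford")
print(dataset["validation"][0])
# Each example follows the features declared in _info(); the values shown
# here are illustrative:
# {"index": 0, "hashtag": "happynewyear", "segmentation": "happy new year"}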