Multilinguality: multilingual
Size Categories: unknown
Language Creators: machine-generated
Annotations Creators: machine-generated
Source Datasets: original
ArXiv: 2201.06741
License:
"""HashSet dataset.""" | |
import datasets | |
import pandas as pd | |
_CITATION = """ | |
@article{kodali2022hashset, | |
title={HashSet--A Dataset For Hashtag Segmentation}, | |
author={Kodali, Prashant and Bhatnagar, Akshala and Ahuja, Naman and Shrivastava, Manish and Kumaraguru, Ponnurangam}, | |
journal={arXiv preprint arXiv:2201.06741}, | |
year={2022} | |
} | |
""" | |
_DESCRIPTION = """
HashSet is a new dataset consisting of 1.9k manually annotated and 3.3M loosely supervised tweets for testing the
efficiency of hashtag segmentation models. We compare state-of-the-art hashtag segmentation models on HashSet and other
baseline datasets (STAN and BOUN). We compare and analyse the results across the datasets to argue that HashSet can act
as a good benchmark for hashtag segmentation tasks.
HashSet Distant: 3.3M loosely collected camel-cased hashtags, each paired with its segmentation.
HashSet Distant Sampled: a sample of 20,000 camel-cased hashtags drawn from the HashSet Distant dataset.
"""
_URL = "https://raw.githubusercontent.com/prashantkodali/HashSet/master/datasets/hashset/HashSet-Distant-sampled.csv" | |
class HashSetDistantSampled(datasets.GeneratorBasedBuilder): | |
VERSION = datasets.Version("1.0.0") | |
def _info(self): | |
return datasets.DatasetInfo( | |
description=_DESCRIPTION, | |
features=datasets.Features( | |
{ | |
"index": datasets.Value("int32"), | |
"hashtag": datasets.Value("string"), | |
"segmentation": datasets.Value("string") | |
} | |
), | |
supervised_keys=None, | |
homepage="https://github.com/prashantkodali/HashSet/", | |
citation=_CITATION, | |
) | |
    def _split_generators(self, dl_manager):
        # The sampled subset is distributed as a single CSV file; expose it as the "test" split.
        downloaded_files = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files}),
        ]
    def _generate_examples(self, filepath):
        # Read the CSV with pandas and map its columns ("Unnamed: 0.1",
        # "Unsegmented_hashtag", "Segmented_hashtag") onto the declared features.
        records = pd.read_csv(filepath).to_dict("records")
        for idx, row in enumerate(records):
            yield idx, {
                "index": row["Unnamed: 0.1"],
                "hashtag": row["Unsegmented_hashtag"],
                "segmentation": row["Segmented_hashtag"],
            }
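

# Minimal usage sketch, for reference only. It assumes this script has been saved
# locally as "hashset_distant_sampled.py" (the filename is an assumption, not part
# of the repository), and the commented outputs are illustrative.
#
# from datasets import load_dataset
#
# ds = load_dataset("hashset_distant_sampled.py", split="test")
# print(ds.features)  # index: int32, hashtag: string, segmentation: string
# print(ds[0])        # first hashtag/segmentation pair from the sampled CSV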