Languages:
English
Multilinguality:
monolingual
Size Categories:
unknown
Language Creators:
machine-generated
Annotations Creators:
expert-generated
Source Datasets:
original
ArXiv:
1501.03210
Tags:
word-segmentation
License:
unknown
"""STAN small dataset by Bansal et al..""" | |
import datasets | |
import pandas as pd | |
_CITATION = """ | |
@misc{bansal2015deep, | |
title={Towards Deep Semantic Analysis Of Hashtags}, | |
author={Piyush Bansal and Romil Bansal and Vasudeva Varma}, | |
year={2015}, | |
eprint={1501.03210}, | |
archivePrefix={arXiv}, | |
primaryClass={cs.IR} | |
} | |
""" | |
_DESCRIPTION = """
Manually Annotated Stanford Sentiment Analysis Dataset by Bansal et al.
"""

_URLS = {
    "test": "https://github.com/prashantkodali/HashSet/raw/master/datasets/stan-small-bansal_et_al.pkl"
}
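
# Note (inferred from how _generate_examples reads the file below, not stated in the
# source): the pickled file is expected to hold a pandas DataFrame with at least a
# "hashtags" column (the raw hashtag) and a "goldtruths" column (a list of gold
# segmentations for that hashtag).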

class StanSmall(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "hashtag": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                    "alternatives": datasets.Sequence(
                        {
                            "segmentation": datasets.Value("string")
                        }
                    )
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/mounicam/hashtag_master",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]
    def _generate_examples(self, filepath):

        def get_segmentation(row):
            # Recover a segmentation that keeps the hashtag's original casing:
            # walk the hashtag and the first gold segmentation in parallel and
            # copy the word boundaries (spaces) from the gold string.
            needle = row["hashtags"]
            haystack = row["goldtruths"][0].strip()
            output = ""
            iterator = iter(haystack)
            for char in needle:
                output += char
                while True:
                    try:
                        next_char = next(iterator)
                        if next_char.lower() == char.lower():
                            break
                        elif next_char.isspace():
                            # Move the space in front of the character just appended.
                            output = output[0:-1] + next_char + output[-1]
                    except StopIteration:
                        break
            return output

        def get_alternatives(row, segmentation):
            # Deduplicated gold segmentations, minus the one already used as the
            # primary segmentation.
            alts = list(set([x.strip() for x in row["goldtruths"]]))
            alts = [x for x in alts if x != segmentation]
            alts = [{"segmentation": x} for x in alts]
            return alts
        with open(filepath, 'rb') as f:
            try:
                import pickle
                records = pickle.load(f)
            except ValueError:
                # Python < 3.8 cannot read protocol-5 pickles; rewind and retry
                # with the pickle5 backport.
                try:
                    import pickle5 as pickle
                    f.seek(0)
                    records = pickle.load(f)
                except ModuleNotFoundError:
                    raise ImportError(
                        "To be able to use stan_small, you need to install the "
                        "following dependency: pickle5. You can install it with "
                        "'pip install pickle5', for instance."
                    )

        records = records.to_dict("records")
        for idx, row in enumerate(records):
            segmentation = get_segmentation(row)
            alternatives = get_alternatives(row, segmentation)
            yield idx, {
                "index": idx,
                "hashtag": row["hashtags"],
                "segmentation": segmentation,
                "alternatives": alternatives
            }
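
For local testing, a script like this can be loaded directly with datasets.load_dataset. The snippet below is a minimal usage sketch, not part of the loading script itself; "path/to/stan_small.py" is a placeholder for wherever this file (or the corresponding Hub dataset repository) actually lives.

# Minimal usage sketch (assumption: "path/to/stan_small.py" stands in for the
# real location of this loading script or its Hub repository id).
from datasets import load_dataset

dataset = load_dataset("path/to/stan_small.py", split="test")
example = dataset[0]
print(example["hashtag"])       # raw, unsegmented hashtag
print(example["segmentation"])  # primary gold segmentation with spaces inserted
print(example["alternatives"])  # remaining gold segmentations, if any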