Languages: English
Multilinguality: monolingual
Size Categories: unknown
Language Creators: machine-generated
Annotations Creators: expert-generated
Source Datasets: original
License:
stan_large / stan_large.py
"""STAN large dataset"""
from multiprocessing.sharedctypes import Value
import datasets
import pandas as pd
import ast

_CITATION = """
@inproceedings{maddela-etal-2019-multi,
    title = "Multi-task Pairwise Neural Ranking for Hashtag Segmentation",
    author = "Maddela, Mounica and
      Xu, Wei and
      Preo{\c{t}}iuc-Pietro, Daniel",
    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/P19-1242",
    doi = "10.18653/v1/P19-1242",
    pages = "2538--2549",
    abstract = "Hashtags are often employed on social media and beyond to add metadata to a textual utterance with the goal of increasing discoverability, aiding search, or providing additional semantics. However, the semantic content of hashtags is not straightforward to infer as these represent ad-hoc conventions which frequently include multiple words joined together and can include abbreviations and unorthodox spellings. We build a dataset of 12,594 hashtags split into individual segments and propose a set of approaches for hashtag segmentation by framing it as a pairwise ranking problem between candidate segmentations. Our novel neural approaches demonstrate 24.6{\%} error reduction in hashtag segmentation accuracy compared to the current state-of-the-art method. Finally, we demonstrate that a deeper understanding of hashtag semantics obtained through segmentation is useful for downstream applications such as sentiment analysis, for which we achieved a 2.6{\%} increase in average recall on the SemEval 2017 sentiment analysis dataset.",
}
"""

_DESCRIPTION = """
The description below was taken from the paper "Multi-task Pairwise Neural Ranking for Hashtag Segmentation"
by Maddela et al.:

"STAN large, our new expert curated dataset, which includes all 12,594 unique English hashtags and their
associated tweets from the same Stanford dataset.

STAN small is the most commonly used dataset in previous work. However, after reexamination, we found annotation
errors in 6.8% of the hashtags in this dataset, which is significant given that the error rate of the state-of-the-art
models is only around 10%. Most of the errors were related to named entities. For example, #lionhead,
which refers to the “Lionhead” video game company, was labeled as “lion head”.

We therefore constructed the STAN large dataset of 12,594 hashtags with additional quality control for human annotations."
"""
_URLS = {
    "train": "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/stan_large_train.csv",
    "dev": "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/stan_large_dev.csv",
    "test": "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/stan_large_test.csv",
}
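
# Each split is a CSV file with a "hashtags" column (the unsegmented hashtag)
# and a "goldtruths" column holding the string representation of a Python list
# of gold segmentations; it is parsed in _generate_examples with
# ast.literal_eval.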


class StanLarge(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "hashtag": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                    "alternatives": datasets.Sequence(
                        {"segmentation": datasets.Value("string")}
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/mounicam/hashtag_master",
            citation=_CITATION,
        )
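
    # Note on the schema: when examples are read back through the datasets
    # library, a Sequence of dicts such as "alternatives" is exposed as a dict
    # of lists, i.e. {"segmentation": ["...", ...]}.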

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        def get_segmentation(row):
            # Rebuild the canonical segmentation with the original casing of
            # the hashtag: walk through the first gold truth and splice its
            # spaces into the hashtag at the matching (case-insensitive)
            # character positions.
            needle = row["hashtags"]
            haystack = row["goldtruths"][0].strip()
            output = ""
            iterator = iter(haystack)
            for char in needle:
                output += char
                while True:
                    try:
                        next_char = next(iterator)
                        if next_char.lower() == char.lower():
                            break
                        elif next_char.isspace():
                            # Insert the space just before the character that
                            # was appended last.
                            output = output[:-1] + next_char + output[-1]
                    except StopIteration:
                        break
            return output
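
        # Illustration, using the paper's "#lionhead" example from the
        # description above: needle "lionhead" with first gold truth
        # "Lion Head" produces "lion head" -- the spaces come from the gold
        # truth, the letter casing from the hashtag itself.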

        def get_alternatives(row, segmentation):
            # Keep every distinct gold truth except the canonical segmentation.
            # Sorting makes the output deterministic; plain set iteration
            # order can vary between runs.
            alts = sorted({x.strip() for x in row["goldtruths"]})
            alts = [x for x in alts if x != segmentation]
            return [{"segmentation": x} for x in alts]

        records = pd.read_csv(filepath).to_dict("records")
        # "goldtruths" is stored in the CSV as the string representation of a
        # Python list, so it has to be parsed with ast.literal_eval.
        records = [{"hashtags": row["hashtags"], "goldtruths": ast.literal_eval(row["goldtruths"])} for row in records]
        for idx, row in enumerate(records):
            segmentation = get_segmentation(row)
            alternatives = get_alternatives(row, segmentation)
            yield idx, {
                "index": idx,
                "hashtag": row["hashtags"],
                "segmentation": segmentation,
                "alternatives": alternatives,
            }
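

# A minimal usage sketch: the repository id "ruanchaves/stan_large" is an
# assumption inferred from the page header above, not something this script
# defines.
#
#     from datasets import load_dataset
#
#     stan_large = load_dataset("ruanchaves/stan_large")
#     example = stan_large["train"][0]
#     # example["hashtag"] is the raw hashtag, example["segmentation"] the
#     # canonical gold segmentation, and example["alternatives"] the remaining
#     # gold segmentations, exposed as {"segmentation": [...]}.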