Dataset Preview Go to dataset viewer
index (int)hashtag (string)segmentation (string)alternatives (json)
0
eclipse
eclipse
{ "segmentation": [ "Eclipse" ] }
1
k
k
{ "segmentation": [ "K" ] }
2
pmqs
pmqs
{ "segmentation": [] }
3
stupidaccident
stupid accident
{ "segmentation": [] }
4
addictivetvshows
addictive tv shows
{ "segmentation": [ "Addictive TV Shows" ] }
5
TCM
TCM
{ "segmentation": [ "tcm" ] }
6
WakingUpTooEarlyFail
Waking Up Too Early Fail
{ "segmentation": [ "waking up too early fail" ] }
7
NYC
NYC
{ "segmentation": [] }
8
helpme
help me
{ "segmentation": [] }
9
slimnas
slimnas
{ "segmentation": [ "Slimnas" ] }
10
twistory
twistory
{ "segmentation": [] }
11
e3xbox
e3 xbox
{ "segmentation": [ "E3 Xbox" ] }
12
sadalcholics
sad alcholics
{ "segmentation": [] }
13
Throwdown
Throw down
{ "segmentation": [ "Throwdown", "throw down", "throwdown" ] }
14
carelessmistakeI
careless mistake I
{ "segmentation": [] }
15
marsarmy
mars army
{ "segmentation": [] }
16
socStardom3
socStardom3
{ "segmentation": [ "soc stardom 3" ] }
17
myparentscookandfreezeitforme
my parents cook and freeze it for me
{ "segmentation": [] }
18
MyFirstTime
My First Time
{ "segmentation": [ "my first time" ] }
19
podstock09
podstock 09
{ "segmentation": [ "podstock09" ] }
20
FreeBSD
FreeBSD
{ "segmentation": [ "Free BSD" ] }
21
mtv
mtv
{ "segmentation": [ "MTV" ] }
22
groezrock
groezrock
{ "segmentation": [ "groez rock" ] }
23
Moose
Moose
{ "segmentation": [ "moose" ] }
24
phenway
phenway
{ "segmentation": [] }
25
AWESOME
AWESOME
{ "segmentation": [ "awesome" ] }
26
caughtinthemiddle
caught in the middle
{ "segmentation": [] }
27
TechFellow
Tech Fellow
{ "segmentation": [] }
28
tommybaby09
tommy baby 09
{ "segmentation": [] }
29
guru
guru
{ "segmentation": [] }
30
silverlight
silverlight
{ "segmentation": [ "Silverlight" ] }
31
newmoon
new moon
{ "segmentation": [ "New moon", "New Moon" ] }
32
GuiltyPleasures
Guilty Pleasures
{ "segmentation": [ "guilty pleasures" ] }
33
stopniley
stop niley
{ "segmentation": [] }
34
journalist
journalist
{ "segmentation": [] }
35
trvsdjam
trvs djam
{ "segmentation": [ "TRVS DJAM" ] }
36
nhl
nhl
{ "segmentation": [ "NHL" ] }
37
matchmaker
matchmaker
{ "segmentation": [ "Matchmaker", "match maker" ] }
38
CoD5
CoD5
{ "segmentation": [ "CoD 5" ] }
39
twitteradio
twitteradio
{ "segmentation": [] }
40
Sitecore
Sitecore
{ "segmentation": [] }
41
retard
retard
{ "segmentation": [] }
42
OrlandoMagic
Orlando Magic
{ "segmentation": [] }
43
Twittarrr
Twittarrr
{ "segmentation": [] }
44
TiVo
TiVo
{ "segmentation": [] }
45
CloudViews
Cloud Views
{ "segmentation": [] }
46
reinvention
reinvention
{ "segmentation": [] }
47
sqr
sqr
{ "segmentation": [] }
48
constipation
constipation
{ "segmentation": [] }
49
OWF
OWF
{ "segmentation": [ "owf" ] }
50
cue
cue
{ "segmentation": [] }
51
blogroll
blogroll
{ "segmentation": [ "Blogroll" ] }
52
grilling
grilling
{ "segmentation": [] }
53
honestsundays
honest sundays
{ "segmentation": [] }
54
TheRescue
The Rescue
{ "segmentation": [ "the rescue" ] }
55
followers
followers
{ "segmentation": [] }
56
OneNote
OneNote
{ "segmentation": [ "One Note", "one note" ] }
57
tehran
tehran
{ "segmentation": [] }
58
HeHas2muchSwagga
He Has 2 much Swagga
{ "segmentation": [ "He Has 2 much swagga" ] }
59
comedianmedicines
comedian medicines
{ "segmentation": [] }
60
cempaka_STW
cempaka _ STW
{ "segmentation": [] }
61
abff
abff
{ "segmentation": [ "ABFF" ] }
62
Susan
Susan
{ "segmentation": [] }
63
portugal
portugal
{ "segmentation": [ "Portugal" ] }
64
artists
artists
{ "segmentation": [] }
65
TechPB
TechPB
{ "segmentation": [] }
66
Torturewood
Torture wood
{ "segmentation": [ "torturewood", "Torture Wood" ] }
67
chatfail
chat fail
{ "segmentation": [] }
68
downtime
down time
{ "segmentation": [] }
69
Jetblue
Jetblue
{ "segmentation": [] }
70
DSAS09
DSAS 09
{ "segmentation": [] }
71
tweetaporsms
tweet a por sms
{ "segmentation": [ "Tweet a por sms" ] }
72
ahsunday
ah sunday
{ "segmentation": [] }
73
phrases
phrases
{ "segmentation": [] }
74
mozmae
mozmae
{ "segmentation": [] }
75
alltimeblow
all time blow
{ "segmentation": [] }
76
CantonFirstFri
Canton First Fri
{ "segmentation": [ "Canton first fri" ] }
77
rats
rats
{ "segmentation": [] }
78
thingsthathurtthemost
things that hurt the most
{ "segmentation": [] }
79
fridayfollow
friday follow
{ "segmentation": [] }
80
sosickofhockeytoo
so sick of hockey too
{ "segmentation": [] }
81
Worms
Worms
{ "segmentation": [ "worms" ] }
82
nomaintenance
no maintenance
{ "segmentation": [] }
83
ISC09
ISC 09
{ "segmentation": [] }
84
killmenow
kill me now
{ "segmentation": [] }
85
fakeceleb
fake celeb
{ "segmentation": [ "Fake Celeb" ] }
86
IndiaGovt
India Govt
{ "segmentation": [ "india govt" ] }
87
SUPAFLYCREDIBLE
SUPA FLY CREDIBLE
{ "segmentation": [ "SUPAFLYCREDIBLE", "Supafly credible", "SUPAFLY CREDIBLE" ] }
88
youregreat
youre great
{ "segmentation": [ "you re great" ] }
89
musiclover
music lover
{ "segmentation": [] }
90
mscoscon
mscoscon
{ "segmentation": [ "MSCOSCON" ] }
91
GoWings
Go Wings
{ "segmentation": [] }
92
dvd
dvd
{ "segmentation": [] }
93
ultra
ultra
{ "segmentation": [] }
94
linguistics
linguistics
{ "segmentation": [ "Linguistics" ] }
95
scramblesunday
scramble sunday
{ "segmentation": [] }
96
GuidingLight
Guiding Light
{ "segmentation": [ "guiding light" ] }
97
funnelcake
funnel cake
{ "segmentation": [] }
98
thankyouGod
thank you God
{ "segmentation": [] }
99
hdd
hdd
{ "segmentation": [ "HDD" ] }
End of preview (truncated to 100 rows)

Dataset Card for STAN Large

Dataset Summary

The description below was taken from the paper "Multi-task Pairwise Neural Ranking for Hashtag Segmentation" by Maddela et al.

"STAN large, our new expert curated dataset, which includes all 12,594 unique English hashtags and their associated tweets from the same Stanford dataset.

STAN small is the most commonly used dataset in previous work. However, after reexamination, we found annotation errors in 6.8% of the hashtags in this dataset, which is significant given that the error rate of the state-of-the-art models is only around 10%. Most of the errors were related to named entities. For example, #lionhead, which refers to the “Lionhead” video game company, was labeled as “lion head”.

We therefore constructed the STAN large dataset of 12,594 hashtags with additional quality control for human annotations."

Languages

English

Dataset Structure

Data Instances

{
    "index": 15,
    "hashtag": "PokemonPlatinum",
    "segmentation": "Pokemon Platinum",
    "alternatives": {
        "segmentation": [
            "Pokemon platinum"
        ]
    }
}

Data Fields

  • index: a numerical index.
  • hashtag: the original hashtag.
  • segmentation: the gold segmentation for the hashtag.
  • alternatives: other segmentations that are also accepted as a gold segmentation.

Although segmentation has exactly the same characters as hashtag except for the spaces, the segmentations inside alternatives may have characters corrected to uppercase.

Dataset Creation

  • All hashtag segmentation and identifier splitting datasets on this profile have the same basic fields: hashtag and segmentation or identifier and segmentation.

  • The only difference between hashtag and segmentation, or between identifier and segmentation, is the whitespace characters. Spell checking, expanding abbreviations, and correcting characters to uppercase go into other fields.

  • There is always whitespace between an alphanumeric character and a sequence of any special characters (such as _, :, ~).

  • If there are any annotations for named entity recognition and other token classification tasks, they are given in a spans field.

Additional Information

Citation Information

@inproceedings{maddela-etal-2019-multi,
    title = "Multi-task Pairwise Neural Ranking for Hashtag Segmentation",
    author = "Maddela, Mounica  and
      Xu, Wei  and
      Preo{\c{t}}iuc-Pietro, Daniel",
    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/P19-1242",
    doi = "10.18653/v1/P19-1242",
    pages = "2538--2549",
    abstract = "Hashtags are often employed on social media and beyond to add metadata to a textual utterance with the goal of increasing discoverability, aiding search, or providing additional semantics. However, the semantic content of hashtags is not straightforward to infer as these represent ad-hoc conventions which frequently include multiple words joined together and can include abbreviations and unorthodox spellings. We build a dataset of 12,594 hashtags split into individual segments and propose a set of approaches for hashtag segmentation by framing it as a pairwise ranking problem between candidate segmentations. Our novel neural approaches demonstrate 24.6{\%} error reduction in hashtag segmentation accuracy compared to the current state-of-the-art method. Finally, we demonstrate that a deeper understanding of hashtag semantics obtained through segmentation is useful for downstream applications such as sentiment analysis, for which we achieved a 2.6{\%} increase in average recall on the SemEval 2017 sentiment analysis dataset.",
}

Contributions

This dataset was added by @ruanchaves while developing the hashformers library.

Edit dataset card
Evaluate models HF Leaderboard