"""HashSet dataset."""

import datasets
import pandas as pd

_CITATION = """
@article{kodali2022hashset,
  title={HashSet--A Dataset For Hashtag Segmentation},
  author={Kodali, Prashant and Bhatnagar, Akshala and Ahuja, Naman and Shrivastava, Manish and Kumaraguru, Ponnurangam},
  journal={arXiv preprint arXiv:2201.06741},
  year={2022}
}
"""

_DESCRIPTION = """
HashSet is a new dataset consisting of 1.9k manually annotated and 3.3M loosely supervised tweets for testing the
efficiency of hashtag segmentation models. We compare state-of-the-art hashtag segmentation models on HashSet and other
baseline datasets (STAN and BOUN). We compare and analyse the results across the datasets to argue that HashSet can act
as a good benchmark for hashtag segmentation tasks.

HashSet Distant: 3.3M loosely collected camel-cased hashtags, each paired with its segmentation.
"""
_URL = "https://raw.githubusercontent.com/prashantkodali/HashSet/master/datasets/hashset/HashSet-Distant.csv"


class HashSetDistant(datasets.GeneratorBasedBuilder):
    """Loader for the distant (loosely supervised) split of HashSet."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "hashtag": datasets.Value("string"),
                    "segmentation": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/prashantkodali/HashSet/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The dataset ships as a single CSV, exposed here as the test split.
        downloaded_file = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_file}),
        ]

    def _generate_examples(self, filepath):
        # Read the CSV and yield one example per row.
        records = pd.read_csv(filepath).to_dict("records")
        for idx, row in enumerate(records):
            yield idx, {
                # "Unnamed: 0.1" is the unlabeled index column of the source CSV.
                "index": row["Unnamed: 0.1"],
                "hashtag": row["Unsegmented_hashtag"],
                "segmentation": row["Segmented_hashtag"],
            }
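

# Minimal usage sketch (an illustration, not part of the original script): a
# script-based builder like this one can be loaded by pointing
# `datasets.load_dataset` at the script file. The filename
# "hashset_distant.py" below is an assumption; substitute the actual path to
# this file.
if __name__ == "__main__":
    dataset = datasets.load_dataset("hashset_distant.py", split="test")
    print(dataset[0])  # {"index": ..., "hashtag": ..., "segmentation": ...}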