ruanchaves committed
Commit c7b324e
1 Parent(s): db3249f

Upload hashset_distant.py

Files changed (1)
  1. hashset_distant.py +57 -0
hashset_distant.py ADDED
@@ -0,0 +1,57 @@
+ """HashSet dataset."""
+
+ import datasets
+ import pandas as pd
+
+ _CITATION = """
+ @article{kodali2022hashset,
+   title={HashSet--A Dataset For Hashtag Segmentation},
+   author={Kodali, Prashant and Bhatnagar, Akshala and Ahuja, Naman and Shrivastava, Manish and Kumaraguru, Ponnurangam},
+   journal={arXiv preprint arXiv:2201.06741},
+   year={2022}
+ }
+ """
+
+ _DESCRIPTION = """
+ HashSet is a new dataset consisting of 1.9k manually annotated and 3.3M loosely supervised tweets for testing the
+ efficiency of hashtag segmentation models. We compare state-of-the-art hashtag segmentation models on HashSet and
+ other baseline datasets (STAN and BOUN). We compare and analyse the results across the datasets to argue that
+ HashSet can act as a good benchmark for hashtag segmentation tasks.
+
+ HashSet Distant: 3.3M loosely collected camel-cased hashtags, each paired with its segmentation.
+ """
+
+ _URL = "https://raw.githubusercontent.com/prashantkodali/HashSet/master/datasets/hashset/HashSet-Distant.csv"
+
+
+ class HashSetDistant(datasets.GeneratorBasedBuilder):
+     """Builder for the distantly supervised portion of HashSet."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "index": datasets.Value("int32"),
+                     "hashtag": datasets.Value("string"),
+                     "segmentation": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/prashantkodali/HashSet/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         downloaded_files = dl_manager.download(_URL)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files}),
+         ]
+
+     def _generate_examples(self, filepath):
+         records = pd.read_csv(filepath).to_dict("records")
+         for idx, row in enumerate(records):
+             yield idx, {
+                 # The CSV carries pandas-style unnamed index columns;
+                 # "Unnamed: 0.1" holds the original record index.
+                 "index": row["Unnamed: 0.1"],
+                 "hashtag": row["Unsegmented_hashtag"],
+                 "segmentation": row["Segmented_hashtag"],
+             }
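
For reference, a minimal usage sketch for the uploaded script. The repository id "ruanchaves/hashset_distant" is an assumption based on the committer's username, and the trust_remote_code flag is only needed on datasets releases that gate script-backed loaders; adjust both to your setup.

# Minimal usage sketch (assumptions: the script lives in a Hub repo named
# "ruanchaves/hashset_distant"; your installed datasets version still
# supports script-backed datasets, possibly behind trust_remote_code=True).
from datasets import load_dataset

dataset = load_dataset("ruanchaves/hashset_distant", trust_remote_code=True)

# The script defines a single TEST split, so all records land there.
test = dataset["test"]
print(test.features)  # index: int32, hashtag: string, segmentation: string
print(test[0])

Because _split_generators only yields a datasets.Split.TEST generator, there is no train/validation split; the distant data is intended as an evaluation benchmark, so consumers who want training data must subsample or resplit it themselves.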