Languages:
English
Multilinguality:
monolingual
Size Categories:
unknown
Language Creators:
machine-generated
Annotations Creators:
expert-generated
Source Datasets:
original
Tags:
word-segmentation
License:
ruanchaves committed
Commit 31c49b7
1 Parent(s): fc7e6ce

Upload boun.py

Files changed (1)
  1. boun.py +62 -0
boun.py ADDED
@@ -0,0 +1,62 @@
"""BOUN hashtag segmentation dataset (Dev-BOUN and Test-BOUN) by Celebi and Özgür."""

import datasets

_CITATION = """
@article{celebi2018segmenting,
    title={Segmenting hashtags and analyzing their grammatical structure},
    author={Celebi, Arda and {\"O}zg{\"u}r, Arzucan},
    journal={Journal of the Association for Information Science and Technology},
    volume={69},
    number={5},
    pages={675--686},
    year={2018},
    publisher={Wiley Online Library}
}
"""

_DESCRIPTION = """
Dev-BOUN is a development set of 500 manually segmented hashtags and Test-BOUN is a test set of
another 500 manually segmented hashtags. Both are drawn from tweets about movies, TV shows,
popular people, sports teams, etc.
"""

_URLS = {
    "dev": "https://tabilab.cmpe.boun.edu.tr/projects/hashtag_segmentation/Dev-BOUN",
    "test": "https://tabilab.cmpe.boun.edu.tr/projects/hashtag_segmentation/Test-BOUN",
}


class Boun(datasets.GeneratorBasedBuilder):
    """Builder exposing Dev-BOUN as the validation split and Test-BOUN as the test split."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "hashtag": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/mounicam/hashtag_master",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        # Each line of the source file is one manually segmented hashtag;
        # removing the spaces recovers the original unsegmented hashtag.
        with open(filepath, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                yield idx, {
                    "index": idx,
                    "hashtag": line.strip().replace(" ", ""),
                    "segmentation": line.strip(),
                }
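
For reference, once the script is on the Hub it can be consumed through the standard `datasets` API. A minimal usage sketch, assuming the repository id is `ruanchaves/boun` (inferred from the committer's namespace; adjust it to wherever boun.py is actually hosted):

from datasets import load_dataset

# "ruanchaves/boun" is an assumed repo id, not confirmed by the commit itself.
dataset = load_dataset("ruanchaves/boun")

# The script defines only validation (Dev-BOUN) and test (Test-BOUN) splits.
print(dataset["validation"][0])
# Each example pairs the space-free hashtag with its gold segmentation,
# e.g. {"index": ..., "hashtag": "...", "segmentation": "..."}

Keeping both the unsegmented hashtag and its segmentation in every row means the same split serves directly as model input and as the evaluation reference.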