Languages: English
Multilinguality: monolingual
Size Categories: unknown
Language Creators: machine-generated
Annotations Creators: expert-generated
Source Datasets: original
Tags: word-segmentation
License:
ruanchaves committed on
Commit a262af0
1 Parent(s): e49e1e7

Upload dev_stanford.py

Files changed (1):
  1. dev_stanford.py +59 -0
dev_stanford.py ADDED
@@ -0,0 +1,59 @@
+"""Dev-Stanford: 1000 hashtags manually segmented by Çelebi et al."""
+
+import datasets
+
+_CITATION = """
+@article{celebi2018segmenting,
+  title={Segmenting hashtags and analyzing their grammatical structure},
+  author={Celebi, Arda and {\"O}zg{\"u}r, Arzucan},
+  journal={Journal of the Association for Information Science and Technology},
+  volume={69},
+  number={5},
+  pages={675--686},
+  year={2018},
+  publisher={Wiley Online Library}
+}
+"""
+
+_DESCRIPTION = """
+1000 hashtags manually segmented by Çelebi et al. for development purposes,
+randomly selected from the Stanford Sentiment Tweet Corpus by Sentiment140.
+"""
+_URLS = {
+    "dev": "https://tabilab.cmpe.boun.edu.tr/projects/hashtag_segmentation/Dev-Stanford"
+}
+
+class DevStanford(datasets.GeneratorBasedBuilder):
+    """Hashtag segmentation dataset exposing a single validation split."""
+    VERSION = datasets.Version("1.0.0")
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "index": datasets.Value("int32"),
+                    "hashtag": datasets.Value("string"),
+                    "segmentation": datasets.Value("string"),
+                }
+            ),
+            supervised_keys=None,
+            homepage="https://tabilab.cmpe.boun.edu.tr/projects/hashtag_segmentation/",
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        downloaded_files = dl_manager.download(_URLS)
+        return [
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]})
+        ]
+
+    def _generate_examples(self, filepath):
+        # One gold segmentation per line; removing the spaces recovers the raw hashtag.
+        with open(filepath, "r", encoding="utf-8") as f:
+            for idx, line in enumerate(f):
+                yield idx, {
+                    "index": idx,
+                    "hashtag": line.strip().replace(" ", ""),
+                    "segmentation": line.strip(),
+                }
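
For reference, a script like this is consumed through the standard datasets API. A minimal usage sketch, assuming the script lives in a Hub repository with an id such as "ruanchaves/dev_stanford" (the repo id is an assumption; a local path to dev_stanford.py works the same way):

from datasets import load_dataset

# Hypothetical repo id; substitute a local path to dev_stanford.py if needed.
dataset = load_dataset("ruanchaves/dev_stanford")

# The script defines only a validation split of (index, hashtag, segmentation) rows.
for example in dataset["validation"].select(range(3)):
    print(example["hashtag"], "->", example["segmentation"])

Exposing only a VALIDATION split mirrors the source data: Dev-Stanford is a development set, not training material.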