Languages: code
Multilinguality: monolingual
Size Categories: unknown
Language Creators: machine-generated
Annotations Creators: expert-generated
Source Datasets: original
Tags: word-segmentation
License:

ruanchaves committed
Commit d0c5bd7
1 Parent(s): 2a5c0e6

Upload binkley.py

Files changed (1)
  1. binkley.py +86 -0
binkley.py ADDED
@@ -0,0 +1,86 @@
+ """BT11"""
+
+ import datasets
+ import pandas as pd
+ from collections import deque
+
+ _CITATION = """
+ @inproceedings{li2018helpful,
+ title={Helpful or Not? An investigation on the feasibility of identifier splitting via CNN-BiLSTM-CRF.},
+ author={Li, Jiechu and Du, Qingfeng and Shi, Kun and He, Yu and Wang, Xin and Xu, Jincheng},
+ booktitle={SEKE},
+ pages={175--174},
+ year={2018}
+ }
+ """
+
+ _DESCRIPTION = """
+ In programming languages, identifiers are tokens (also called symbols) which name language entities.
+ Some of the kinds of entities an identifier might denote include variables, types, labels, subroutines, and packages.
+
+ Binkley is a dataset for identifier segmentation,
+ i.e., the task of adding spaces between the words in an identifier.
+ """
+ _URL = "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/binkley.csv"
+
+ class Binkley(datasets.GeneratorBasedBuilder):
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "index": datasets.Value("int32"),
+                     "identifier": datasets.Value("string"),
+                     "segmentation": datasets.Value("string")
+                 }
+             ),
+             supervised_keys=None,
+             homepage="",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         downloaded_files = dl_manager.download(_URL)
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files}),
+         ]
+
+     def _generate_examples(self, filepath):
+
+         def get_segmentation(needle, haystack, sep="-"):
+             output = haystack
+             needle = needle.lower()
+             haystack = haystack.lower()
+             counter = 0
+             pos = deque()
+             iterator = iter(haystack)
+             for char in needle:
+                 if char == sep:
+                     pos.appendleft(counter)
+                     continue
+                 while True:
+                     try:
+                         next_char = next(iterator)
+                         counter += 1
+                         if next_char == char:
+                             break
+                     except StopIteration:
+                         break
+             while pos:
+                 next_pos = pos.popleft()
+                 output = output[:next_pos] + " " + output[next_pos:]
+             return output
+
+         df = pd.read_csv(filepath, header=None)[[0, 1]]
+         df = df.dropna()
+         records = df.to_dict("records")
+
+         for idx, item in enumerate(records):
+             yield idx, {
+                 "index": idx,
+                 "identifier": item[0],
+                 "segmentation": get_segmentation(item[1], item[0])
+             }
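
The heavy lifting in this script is `get_segmentation`: it scans the raw identifier (`haystack`) once while walking the hyphen-separated gold segmentation (`needle`), records the character offset at which each separator falls, and then inserts spaces at those offsets from right to left, so that earlier splices never shift the remaining positions. A standalone sketch of the same logic as committed above; the example identifiers are made up for illustration, the dataset's actual rows come from binkley.csv:

from collections import deque

def get_segmentation(needle, haystack, sep="-"):
    output = haystack
    needle = needle.lower()
    haystack = haystack.lower()
    counter = 0
    pos = deque()  # separator offsets, kept in descending order via appendleft
    iterator = iter(haystack)
    for char in needle:
        if char == sep:
            # Record how many identifier characters precede this separator.
            pos.appendleft(counter)
            continue
        # Advance through the identifier until this needle character matches.
        while True:
            try:
                next_char = next(iterator)
                counter += 1
                if next_char == char:
                    break
            except StopIteration:
                break
    # Splice spaces back-to-front so earlier offsets stay valid.
    while pos:
        next_pos = pos.popleft()
        output = output[:next_pos] + " " + output[next_pos:]
    return output

print(get_segmentation("set-value", "setValue"))         # set Value
print(get_segmentation("get-user-name", "getUserName"))  # get User Name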
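
Once the commit lands, the dataset should be loadable through the standard `datasets` API. A hedged sketch: the hub path `ruanchaves/binkley` is inferred from the committer and file names, and the printed record is illustrative rather than an actual row:

from datasets import load_dataset

# Hub path inferred from the committer and file names; adjust if the repo differs.
# Newer versions of `datasets` may also require trust_remote_code=True for
# script-based datasets such as this one.
dataset = load_dataset("ruanchaves/binkley", split="test")

print(dataset[0])
# Illustrative shape of a record:
# {'index': 0, 'identifier': 'setValue', 'segmentation': 'set Value'}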