Languages: code
Multilinguality: monolingual
Size Categories: unknown
Language Creators: machine-generated
Annotations Creators: expert-generated
Source Datasets: original
Tags: word-segmentation
License:

"""Binkley"""

import datasets
import pandas as pd
from collections import deque

_CITATION = """

@inproceedings{inproceedings,

author = {Lawrie, Dawn and Binkley, David and Morrell, Christopher},

year = {2010},

month = {11},

pages = {3 - 12},

title = {Normalizing Source Code Vocabulary},

journal = {Proceedings - Working Conference on Reverse Engineering, WCRE},

doi = {10.1109/WCRE.2010.10}

}

"""

_DESCRIPTION = """

In programming languages, identifiers are tokens (also called symbols) which name language entities.

Some of the kinds of entities an identifier might denote include variables, types, labels, subroutines, and packages.



Binkley is a dataset for identifier segmentation, 

i.e. the task of adding spaces between the words on a identifier.

"""
_URL = "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/binkley.csv"

class Binkley(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "identifier": datasets.Value("string"),
                    "segmentation": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The data ships as a single CSV file, exposed here as a test-only split.
        downloaded_files = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files}),
        ]

    def _generate_examples(self, filepath):

        def get_segmentation(needle, haystack, sep="-"):
            # `needle` is the gold segmentation with word boundaries marked by
            # `sep`; `haystack` is the original identifier. Returns `haystack`
            # with a space inserted at each boundary, preserving its casing.
            output = haystack
            needle = needle.lower()
            haystack = haystack.lower()
            counter = 0
            pos = deque()
            iterator = iter(haystack)
            for char in needle:
                # A separator consumes no haystack character: just record the
                # current offset as a word boundary.
                if char == sep:
                    pos.appendleft(counter)
                    continue
                # Otherwise, advance through the haystack until the current
                # needle character is matched, counting consumed characters.
                while True:
                    try:
                        next_char = next(iterator)
                        counter += 1
                        if next_char == char:
                            break
                    except StopIteration:
                        break
            # Boundaries were collected with appendleft, so popleft yields them
            # rightmost-first; inserting spaces right to left keeps the earlier
            # offsets valid.
            while pos:
                next_pos = pos.popleft()
                output = output[:next_pos] + " " + output[next_pos:]
            return output
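        # Illustrative trace (hypothetical input, not drawn from the dataset):
        # get_segmentation("set-value", "setValue") records a boundary at
        # offset 3 and returns "set Value".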

        # The CSV has no header row; column 0 holds the raw identifier and
        # column 1 its separator-marked segmentation.
        df = pd.read_csv(filepath, header=None)[[0, 1]]
        df = df.dropna()
        records = df.to_dict("records")

        for idx, item in enumerate(records):
            yield idx, {
                "index": idx,
                "identifier": item[0],
                "segmentation": get_segmentation(item[1], item[0])
            }
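
A minimal usage sketch, assuming this script is saved locally as binkley.py (a hypothetical path) and a datasets release that still supports script-based loading:

from datasets import load_dataset

# Load the single test split through the local loading script.
dataset = load_dataset("binkley.py", split="test")

# Each record follows the schema declared in _info().
print(dataset[0])
# e.g. {"index": 0, "identifier": "...", "segmentation": "..."}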