"""BT11"""

from collections import deque

import datasets
import pandas as pd

_CITATION = """

@inproceedings{li2018helpful,

  title={Helpful or Not? An investigation on the feasibility of identifier splitting via CNN-BiLSTM-CRF.},

  author={Li, Jiechu and Du, Qingfeng and Shi, Kun and He, Yu and Wang, Xin and Xu, Jincheng},

  booktitle={SEKE},

  pages={175--174},

  year={2018}

}

"""

_DESCRIPTION = """

In programming languages, identifiers are tokens (also called symbols) which name language entities.

Some of the kinds of entities an identifier might denote include variables, types, labels, subroutines, and packages.



BT11 is a dataset for identifier segmentation, 

i.e. the task of adding spaces between the words in an identifier.

"""
_URL = "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/bt11.csv"

class BT11(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "identifier": datasets.Value("string"),
                    "segmentation": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The dataset ships as a single CSV file, exposed here as the test split.
        downloaded_files = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files}),
        ]

    def _generate_examples(self, filepath):

        def get_segmentation(needle, haystack, sep="-"):
            """Project the gold segmentation onto the original identifier.

            `needle` is the gold segmentation with `sep` between words
            (e.g. "set-max-value"); `haystack` is the original identifier
            with its casing preserved (e.g. "setMaxValue"). Each separator
            in `needle` is mapped to a character offset in `haystack` and a
            space is inserted there, giving e.g. "set Max Value".
            """
            output = haystack
            needle = needle.lower()
            haystack = haystack.lower()
            counter = 0
            # Offsets in `haystack` where a space must be inserted,
            # most recent (right-most) offset first.
            pos = deque()
            iterator = iter(haystack)
            for char in needle:
                if char == sep:
                    pos.appendleft(counter)
                    continue
                # Advance through `haystack` until the current character of
                # `needle` is matched, counting how many characters were consumed.
                while True:
                    try:
                        next_char = next(iterator)
                        counter += 1
                        if next_char == char:
                            break
                    except StopIteration:
                        break
            # Insert spaces from right to left so earlier offsets stay valid.
            while pos:
                next_pos = pos.popleft()
                output = output[:next_pos] + " " + output[next_pos:]
            return output

        # The CSV has no header row: column 0 holds the original identifier and
        # column 1 its gold segmentation, with "-" separating the words.
        df = pd.read_csv(filepath, header=None)[[0, 1]]
        df = df.dropna()
        records = df.to_dict("records")

        for idx, item in enumerate(records):
            yield idx, {
                "index": idx,
                "identifier": item[0],
                "segmentation": get_segmentation(item[1], item[0])
            }
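
# A minimal usage sketch (assumptions: this file is saved locally as "bt11.py" and
# a version of the `datasets` library that still supports script-based loading is
# installed):
#
#     dataset = datasets.load_dataset("bt11.py", split="test")
#     print(dataset[0])
#     # -> {"index": 0, "identifier": ..., "segmentation": ...}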