"""The Loyola University of Delaware Identifier Splitting Oracle"""

import datasets
from collections import deque

_CITATION = """

@article{hill2014empirical,

  title={An empirical study of identifier splitting techniques},

  author={Hill, Emily and Binkley, David and Lawrie, Dawn and Pollock, Lori and Vijay-Shanker, K},

  journal={Empirical Software Engineering},

  volume={19},

  number={6},

  pages={1754--1780},

  year={2014},

  publisher={Springer}

}

"""

_DESCRIPTION = """

In programming languages, identifiers are tokens (also called symbols) which name language entities.

Some of the kinds of entities an identifier might denote include variables, types, labels, subroutines, and packages.



The Loyola University of Delaware Identifier Splitting Oracle is a dataset for identifier segmentation, 

i.e. the task of adding spaces between the words on a identifier.

"""
_URL = "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/loyola-udelaware-identifier-splitting-oracle.txt"


class Loyola(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "identifier": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                    "language": datasets.Value("string"),
                    "source": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            homepage="http://www.cs.loyola.edu/~binkley/ludiso/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files}),
        ]

    def _generate_examples(self, filepath):

        def get_segmentation(needle, haystack, sep="-"):
            """Project the split points of `needle` (the oracle segmentation, with
            words separated by `sep`) onto `haystack` (the raw identifier), and
            return the identifier with spaces inserted at those points."""
            # Walk the haystack in lockstep with the needle, recording the
            # haystack position of each separator character in the needle.
            counter = 0
            pos = deque()
            iterator = iter(haystack)
            for char in needle:
                if char == sep:
                    pos.appendleft(counter)
                    continue
                while True:
                    try:
                        next_char = next(iterator)
                        counter += 1
                        if next_char == char:
                            break
                    except StopIteration:
                        break

            # Insert spaces right-to-left (popleft yields the largest position
            # first), so earlier positions remain valid after each insertion.
            output = haystack

            while pos:
                next_pos = pos.popleft()
                output = output[:next_pos] + " " + output[next_pos:]

            # Second pass: also record a split wherever a non-alphanumeric,
            # non-space character (e.g. "_") is followed by an alphanumeric one.
            pos = deque()
            previous = output[0]
            for index, char in enumerate(output[1:]):
                if (not previous.isalnum() and not previous.isspace()) and char.isalnum():
                    pos.appendleft(index + 1)
                previous = char

            while pos:
                next_pos = pos.popleft()
                output = output[:next_pos] + " " + output[next_pos:]

            return output
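
        # Worked example (hypothetical values, not taken from the oracle file):
        # get_segmentation("foo-bar", "fooBar") records the haystack position of
        # the "-" and returns "foo Bar"; the second pass additionally inserts a
        # space between a non-alphanumeric, non-space character and a following
        # alphanumeric one, e.g. "foo_bar" -> "foo_ bar".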

        # Each non-empty line of the oracle file is a space-separated record;
        # the fields used here are the identifier (1), the programming language
        # (2), the source (3), and the oracle split with "-" separators (4).
        with open(filepath, 'r') as f:
            records = f.read().split("\n")
            records = [x for x in records if x]
            records = [x.split(" ") for x in records]

        for idx, item in enumerate(records):
            yield idx, {
                "index": idx,
                "identifier": item[1],
                "segmentation": get_segmentation(item[4], item[1]),
                "language": item[2],
                "source": item[3],
            }
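

# Minimal usage sketch (assumption: this script is saved locally, e.g. as
# "loyola.py"; depending on the installed `datasets` version, loading a local
# script may also require passing `trust_remote_code=True`):
#
#   import datasets
#   dataset = datasets.load_dataset("loyola.py", split="test")
#   print(dataset[0])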