"""Lynx"""

import datasets

_CITATION = """

@inproceedings{li2018helpful,

  title={Helpful or Not? An investigation on the feasibility of identifier splitting via CNN-BiLSTM-CRF.},

  author={Li, Jiechu and Du, Qingfeng and Shi, Kun and He, Yu and Wang, Xin and Xu, Jincheng},

  booktitle={SEKE},

  pages={175--174},

  year={2018}

}

"""

_DESCRIPTION = """

In programming languages, identifiers are tokens (also called symbols) which name language entities.

Some of the kinds of entities an identifier might denote include variables, types, labels, subroutines, and packages.



Lynx is a dataset for identifier segmentation, 

i.e. the task of adding spaces between the words on a identifier.

"""
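
# Illustrative example of the task (the strings below are assumptions for
# illustration, not entries from lynx.txt): the identifier "substr" with
# expansion "sub string" has the segmentation "sub str", i.e. the expansion's
# word boundaries projected back onto the identifier's own characters.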
_URL = "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/lynx.txt"

class Lynx(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "identifier": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                    "expansion": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files}),
        ]

    def _generate_examples(self, filepath):

        def get_segmentation(needle, haystack):
            # Project the whitespace of `haystack` (the expansion) back onto
            # the characters of `needle` (the identifier): scan the expansion
            # once, copying each identifier character when found and keeping
            # any whitespace met along the way; other characters are skipped.
            output = ""
            haystack = iter(haystack)
            for char in needle:
                while True:
                    try:
                        next_char = next(haystack)
                        if next_char == char:
                            output += next_char
                            break
                        elif next_char.isspace():
                            output += next_char
                    except StopIteration:
                        # Expansion exhausted: stop searching for this character.
                        break
            return output
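
        # A worked example: get_segmentation("substr", "sub string") returns
        # "sub str". The space before "string" is preserved, while expansion
        # characters with no counterpart in the identifier are skipped.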

        with open(filepath, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                # Each line has the form "identifier:expansion"; split on the
                # first colon only, in case the expansion contains one itself.
                identifier, expansion = line.split(":", 1)
                identifier = identifier.strip()
                expansion = expansion.strip()
                yield idx, {
                    "index": idx,
                    "identifier": identifier,
                    "segmentation": get_segmentation(identifier, expansion),
                    "expansion": expansion
                }
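
# A minimal usage sketch, assuming this script is saved locally as "lynx.py"
# (the filename is an assumption, not part of the original file):
#
#     import datasets
#     dataset = datasets.load_dataset("lynx.py", split="test")
#     print(dataset[0])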