|
"""BT11"""
|
|
|
|
import datasets
|
|
import pandas as pd
|
|
from collections import deque
|
|
|
|
_CITATION = """
|
|
@inproceedings{li2018helpful,
|
|
title={Helpful or Not? An investigation on the feasibility of identifier splitting via CNN-BiLSTM-CRF.},
|
|
author={Li, Jiechu and Du, Qingfeng and Shi, Kun and He, Yu and Wang, Xin and Xu, Jincheng},
|
|
booktitle={SEKE},
|
|
pages={175--174},
|
|
year={2018}
|
|
}
|
|
"""
|
|
|
|
# Human-readable summary shown on the dataset page.
# Fix: "between the words on a identifier" -> "between the words in an identifier".
_DESCRIPTION = """
In programming languages, identifiers are tokens (also called symbols) which name language entities.
Some of the kinds of entities an identifier might denote include variables, types, labels, subroutines, and packages.

BT11 is a dataset for identifier segmentation,
i.e. the task of adding spaces between the words in an identifier.
"""

# Raw CSV with one (identifier, gold segmentation) pair per row.
_URL = "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/bt11.csv"
|
|
|
|
class BT11(datasets.GeneratorBasedBuilder):
    """Loader for BT11, an identifier-segmentation benchmark.

    Each example pairs a source-code identifier with the same identifier
    split into words by spaces.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        # One record = (running index, raw identifier, space-separated form).
        features = datasets.Features(
            {
                "index": datasets.Value("int32"),
                "identifier": datasets.Value("string"),
                "segmentation": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # The corpus ships as a single CSV file; it is exposed as one TEST split.
        path = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": path}),
        ]

    def _generate_examples(self, filepath):

        def get_segmentation(needle, haystack, sep="-"):
            """Project the `sep` boundaries of `needle` onto `haystack` as spaces.

            `needle` is the gold segmentation (words joined by `sep`) and
            `haystack` the raw identifier.  Matching is case-insensitive;
            the original casing of `haystack` is preserved in the output.
            """
            result = haystack
            pattern = needle.lower()
            scanned = iter(haystack.lower())
            consumed = 0
            cut_points = []
            for ch in pattern:
                if ch == sep:
                    # Record where in the identifier this word boundary falls.
                    cut_points.append(consumed)
                else:
                    # Advance through the identifier until this character is
                    # matched (stops silently if the identifier runs out).
                    for candidate in scanned:
                        consumed += 1
                        if candidate == ch:
                            break
            # Insert from the rightmost boundary first so that earlier
            # offsets remain valid while the string grows.
            for point in reversed(cut_points):
                result = result[:point] + " " + result[point:]
            return result

        # Column 0 = identifier, column 1 = hyphen-separated segmentation;
        # rows with missing values are dropped.
        frame = pd.read_csv(filepath, header=None)[[0, 1]].dropna()
        for idx, row in enumerate(frame.to_dict("records")):
            yield idx, {
                "index": idx,
                "identifier": row[0],
                "segmentation": get_segmentation(row[1], row[0]),
            }