---
language:
- code
multilinguality:
- monolingual
size_categories:
- unknown
language_creators:
- machine-generated
annotations_creators:
- expert-generated
source_datasets:
- original
tags:
- word-segmentation
license: unknown
---
"""Lynx""" | |
from collections import deque

import datasets
import pandas as pd
_CITATION = """ | |
@inproceedings{li2018helpful, | |
title={Helpful or Not? An investigation on the feasibility of identifier splitting via CNN-BiLSTM-CRF.}, | |
author={Li, Jiechu and Du, Qingfeng and Shi, Kun and He, Yu and Wang, Xin and Xu, Jincheng}, | |
booktitle={SEKE}, | |
pages={175--174}, | |
year={2018} | |
} | |
""" | |
_DESCRIPTION = """ | |
In programming languages, identifiers are tokens (also called symbols) which name language entities. | |
Some of the kinds of entities an identifier might denote include variables, types, labels, subroutines, and packages. | |
Lynx is a dataset for identifier segmentation, | |
i.e. the task of adding spaces between the words on a identifier. | |
""" | |
_URL = "https://raw.githubusercontent.com/ruanchaves/hashformers/master/datasets/lynx.txt" | |
class Lynx(datasets.GeneratorBasedBuilder):
    """Dataset builder for Lynx, an identifier-segmentation corpus."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return dataset metadata: features, description, and citation."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "index": datasets.Value("int32"),
                    "identifier": datasets.Value("string"),
                    "segmentation": datasets.Value("string"),
                    "expansion": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the raw file; the corpus ships as a single test split."""
        downloaded_files = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from ``identifier:expansion`` lines.

        The segmentation string is recovered by scanning the expansion for
        the identifier's characters in order, keeping any whitespace found
        between matches — that whitespace marks the word boundaries inside
        the identifier.
        """

        def get_segmentation(needle, haystack):
            # Walk `haystack` (the expansion) once with a shared iterator,
            # copying each character of `needle` (the identifier) as it is
            # matched and preserving whitespace encountered between matches.
            output = []
            chars = iter(haystack)
            for char in needle:
                # A plain for-loop over the iterator replaces the original
                # while/try: it ends silently if the haystack is exhausted
                # before `char` is found (same StopIteration semantics).
                for next_char in chars:
                    if next_char == char:
                        output.append(next_char)
                        break
                    if next_char.isspace():
                        output.append(next_char)
            return "".join(output)

        with open(filepath, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                # Split on the first colon only, so expansions that
                # themselves contain ':' are not truncated.
                fields = line.split(":", 1)
                if len(fields) < 2:
                    # Skip blank or malformed lines instead of raising
                    # IndexError; example keys keep their enumerate index.
                    continue
                identifier = fields[0].strip()
                expansion = fields[1].strip()
                yield idx, {
                    "index": idx,
                    "identifier": identifier,
                    "segmentation": get_segmentation(identifier, expansion),
                    "expansion": expansion,
                }