Datasets:

Modalities:
Text
Languages:
English
ArXiv:
Tags:
code
Libraries:
Datasets
License:
bc-transcoder / bc-transcoder.py
gabeorlanski's picture
Update bc-transcoder.py
92f2381
raw
history blame contribute delete
No virus
4.34 kB
import json
import datasets
# Short human-readable summary shown on the dataset hub page.
_DESCRIPTION = """The Transcoder dataset in BabelCode format. Currently supports translation from C++ and Python."""

# Raw JSONL export of the Transcoder questions from the BabelCode repository.
_URL = "https://raw.githubusercontent.com/google-research/babelcode/main/data/hf_datasets/transcoder.jsonl"

# Target languages BabelCode can emit; each gets its own builder config below,
# alongside the catch-all "all" config.
_LANGUAGES = {
    "C++",
    "CSharp",
    "Dart",
    "Go",
    "Haskell",
    "Java",
    "Javascript",
    "Julia",
    "Kotlin",
    "Lua",
    "PHP",
    "Python",
    "R",
    "Rust",
    "Scala",
    "TypeScript",
}

# BibTeX for the BabelCode paper and the original Transcoder paper.
# NOTE: fixed author-name typo "Rishah" -> "Rishabh" (Rishabh Singh).
_CITATION = """\
@article{orlanski2023measuring,
title={Measuring The Impact Of Programming Language Distribution},
author={Orlanski, Gabriel and Xiao, Kefan and Garcia, Xavier and Hui, Jeffrey and Howland, Joshua and Malmaud, Jonathan and Austin, Jacob and Singh, Rishabh and Catasta, Michele},
journal={arXiv preprint arXiv:2302.01973},
year={2023}
}
@article{roziere2020unsupervised,
title={Unsupervised translation of programming languages},
author={Roziere, Baptiste and Lachaux, Marie-Anne and Chanussot, Lowik and Lample, Guillaume},
journal={Advances in Neural Information Processing Systems},
volume={33},
year={2020}
}"""

_HOMEPAGE = "https://github.com/google-research/babelcode"
_LICENSE = "CC-BY-4.0"
_VERSION = "1.0.0"

# Raw-record fields dropped before yielding examples (not exposed as features).
_KEYS_REMOVE = {"text", "signature_with_docstring"}

# Raw-record fields moved into the nested `question_info` feature.
_QUESTION_INFO_KEYS = {
    "entry_fn_name",
    "entry_cls_name",
    "test_code",
    "test_list",
    "test_case_ids",
    "commands",
    "timeouts",
    "extension",
}
class BCTranscoder(datasets.GeneratorBasedBuilder):
    """BC-Transcoder: the Transcoder translation benchmark in BabelCode format."""

    VERSION = datasets.Version(_VERSION)

    # One config per target language plus an "all" config covering every language.
    # sorted() makes the config order deterministic: iterating the _LANGUAGES set
    # directly varies across interpreter runs due to hash randomization.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="all",
            version=datasets.Version(_VERSION),
            description=_DESCRIPTION,
        ),
    ] + [
        datasets.BuilderConfig(
            name=lang,
            version=datasets.Version(_VERSION),
            description=_DESCRIPTION + f" Examples are only in {lang}.",
        )
        for lang in sorted(_LANGUAGES)
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """Returns the DatasetInfo (features, description, citation) for this config."""
        # Most question_info fields are plain strings; these three are lists and
        # get explicit Sequence types below.
        list_keys = ["timeouts", "commands", "test_case_ids"]
        question_info_type = {
            k: datasets.Value(dtype="string")
            for k in _QUESTION_INFO_KEYS
            if k not in list_keys
        }
        question_info_type["test_case_ids"] = datasets.Sequence(
            datasets.Value("string")
        )
        # Each command is itself a list of argv strings, hence the nested Sequence.
        question_info_type["commands"] = datasets.Sequence(
            datasets.Sequence(datasets.Value("string"))
        )
        question_info_type["timeouts"] = datasets.Sequence(datasets.Value("int32"))

        features = datasets.Features({
            "qid": datasets.Value("string"),
            "title": datasets.Value("string"),
            "language": datasets.Value("string"),
            "signature": datasets.Value("string"),
            "arguments": datasets.Sequence(datasets.Value("string")),
            "source_py": datasets.Value("string"),
            "source_cpp": datasets.Value("string"),
            "question_info": datasets.Features(question_info_type),
        })

        # Language-specific configs note the restriction in their description.
        description = _DESCRIPTION
        if self.config.name != "all":
            description = _DESCRIPTION + f" Examples are only in {self.config.name}."
        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators; evaluation-only, so a single TEST split."""
        data_dir = dl_manager.download_and_extract(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": data_dir},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields (id, example) pairs parsed from the downloaded JSONL file."""
        with open(filepath, encoding="utf-8") as file:
            id_ = 0
            for line in file:
                if not line.strip():
                    continue  # skip blank lines
                d = json.loads(line)
                # Language-specific configs keep only matching rows.
                if self.config.name != "all" and d["language"] != self.config.name:
                    continue
                # Move the execution metadata into the nested question_info dict.
                question_info = {k: d.pop(k) for k in _QUESTION_INFO_KEYS}
                # test_list is arbitrary JSON; serialize it so the corresponding
                # feature can stay a plain string (see _info).
                question_info["test_list"] = json.dumps(question_info["test_list"])
                d["question_info"] = question_info
                d["source_py"] = d.pop("solution_python")
                d["source_cpp"] = d.pop("solution_cpp")
                # default=None keeps this robust if an upstream export already
                # omits one of the dropped fields (the bare pop raised KeyError).
                for k in _KEYS_REMOVE:
                    d.pop(k, None)
                yield id_, d
                id_ += 1