Tasks: Text2Text Generation
Modalities: Text
Languages: code
Size: 1K - 10K
ArXiv: arXiv:2207.14255
Tags: code-generation
License: MIT
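The benchmark ships as a datasets loading script (shown below) with four infilling configurations. A minimal loading sketch, assuming the script is saved locally as human_eval_infilling.py (the filename is an assumption; point load_dataset at the actual script path or hub repository id). Depending on the installed datasets version, script-based loading may require trust_remote_code=True, and the newest releases may not support loading scripts at all:

import datasets

# Load the single-line infilling configuration; "human_eval_infilling.py" is an
# assumed local filename for the loading script shown below.
ds = datasets.load_dataset(
    "human_eval_infilling.py",
    "HumanEval-SingleLineInfilling",
    split="test",
    trust_remote_code=True,  # needed on datasets >= 2.16; drop on older releases
)

example = ds[0]
print(example["task_id"])   # HumanEval task identifier
print(example["prompt"])    # code before the masked region (prefix)
print(example["suffix"])    # code after the masked region
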
import json

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@article{bavarian2022efficient,
  title={Efficient Training of Language Models to Fill in the Middle},
  author={Bavarian, Mohammad and Jun, Heewoo and Tezak, Nikolas and Schulman, John and McLeavey, Christine and Tworek, Jerry and Chen, Mark},
  journal={arXiv preprint arXiv:2207.14255},
  year={2022}
}
"""

_DESCRIPTION = """\
An evaluation benchmark for code infilling tasks, built on the HumanEval code-generation dataset.
"""

_SUBSETS = ["MultiLineInfilling", "SingleLineInfilling", "RandomSpanInfilling", "RandomSpanInfillingLight"]


class HumanevalConfig(datasets.BuilderConfig):
    """BuilderConfig for a single HumanEval infilling subset."""

    def __init__(self, subset, **kwargs):
        self.subset = subset
        # Expose each subset under a "HumanEval-<subset>" config name,
        # e.g. "HumanEval-SingleLineInfilling".
        kwargs["name"] = f"HumanEval-{subset}"
        super(HumanevalConfig, self).__init__(**kwargs)


class MultiPLE(datasets.GeneratorBasedBuilder):
    """Dataset builder for the HumanEval infilling benchmark."""

    BUILDER_CONFIG_CLASS = HumanevalConfig
    BUILDER_CONFIGS = [
        HumanevalConfig(subset=subset, version=datasets.Version("1.0.0"))
        for subset in _SUBSETS
    ]
    DEFAULT_CONFIG_NAME = "HumanEval-SingleLineInfilling"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            license="MIT",
            features=datasets.Features(
                {
                    "task_id": datasets.Value(dtype="string"),
                    "entry_point": datasets.Value(dtype="string"),
                    "prompt": datasets.Value(dtype="string"),
                    "suffix": datasets.Value(dtype="string"),
                    "canonical_solution": datasets.Value(dtype="string"),
                    "test": datasets.Value(dtype="string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/openai/human-eval-infilling",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        # Each config corresponds to one JSONL file in the repository's data/ directory.
        files = dl_manager.download(f"data/{self.config.name}.jsonl")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": files},
            )
        ]

    def _generate_examples(self, filepath):
        # One JSON object per line; the line index serves as the example key.
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                yield idx, json.loads(line)
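
Each record pairs the surrounding context (prompt and suffix) with a reference infill (canonical_solution) and a HumanEval-style unit-test string (test, entry_point). Below is a hedged sketch of assembling a candidate infill into a runnable program for functional-correctness checking; the check(entry_point) call convention is an assumption carried over from the original HumanEval harness, and the assembled program should only ever be executed in a sandbox:

def build_test_program(example: dict, infill: str) -> str:
    """Concatenate prefix, candidate infill, suffix, and the unit tests into
    one program string, intended to be run in a sandboxed subprocess."""
    return (
        example["prompt"]      # code before the masked span
        + infill               # model-generated (or canonical) middle
        + example["suffix"]    # code after the masked span
        + "\n"
        + example["test"]      # assumed to define check(candidate), HumanEval-style
        + "\n"
        + f"check({example['entry_point']})\n"
    )

# Sanity check: the canonical solution should pass its own tests.
# program = build_test_program(example, example["canonical_solution"])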