"""HuggingFace `datasets` loading script for the MultiPL-E benchmark.

MultiPL-E translates the OpenAI "HumanEval" Python benchmark into 18 other
programming languages.  Each builder config is a ``{language}-{variation}``
pair (e.g. ``cpp-reworded``) and downloads the matching prompt file from the
MultiPL-E GitHub repository, pinned to a fixed commit for reproducibility.
"""

import json

import datasets
from pathlib import Path

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@misc{multipl-e,
  doi = {10.48550/ARXIV.2208.08227},
  url = {https://arxiv.org/abs/2208.08227},
  author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and Feldman, Molly Q and Guha, Arjun and Greenberg, Michael and Jangda, Abhinav},
  title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18 Programming Languages},
  publisher = {arXiv},
  year = {2022},
}
"""

_DESCRIPTION = """\
MultiPL-E is a dataset for evaluating large language models for code \
generation that supports 18 programming languages. It takes the OpenAI \
"HumanEval" Python benchmarks and uses little compilers to translate them \
to other languages. It is easy to add support for new languages and benchmarks.
"""

# Target languages (file-extension style identifiers used in prompt filenames).
_LANGUAGES = [
    "cpp", "cs", "d", "go", "java", "jl", "js", "lua", "php", "pl",
    "py", "r", "rb", "rkt", "rs", "scala", "sh", "swift", "ts",
]

# Prompt-translation variations; see the MultiPL-E paper for their semantics.
_VARIATIONS = ["keep", "transform", "reworded", "remove"]


class MultiPLEBuilderConfig(datasets.BuilderConfig):
    """BuilderConfig for one (language, prompt-variation) pair.

    Args:
        language: one of ``_LANGUAGES``.
        variation: one of ``_VARIATIONS``.
    """

    def __init__(self, language, variation, **kwargs):
        self.language = language
        self.variation = variation
        # The config name (e.g. "cpp-reworded") doubles as the basename of
        # the prompt JSON file downloaded in `_split_generators`.
        kwargs["name"] = f"{language}-{variation}"
        super().__init__(**kwargs)


class MultiPLE(datasets.GeneratorBasedBuilder):
    """Builder exposing every language/variation combination as a config."""

    BUILDER_CONFIG_CLASS = MultiPLEBuilderConfig

    BUILDER_CONFIGS = [
        MultiPLEBuilderConfig(
            language=language,
            variation=variation,
            version=datasets.Version("1.0.0"),
        )
        for language in _LANGUAGES
        for variation in _VARIATIONS
    ]

    DEFAULT_CONFIG_NAME = "cpp-reworded"

    def _info(self):
        """Describe the dataset: one flat record per benchmark problem."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            license="MIT",
            features=datasets.Features(
                {
                    "name": datasets.Value("string"),
                    "language": datasets.Value("string"),
                    "prompt": datasets.Value("string"),
                    "doctests": datasets.Value("string"),
                    "original": datasets.Value("string"),
                    "prompt_terminology": datasets.Value("string"),
                    "tests": datasets.Value("string"),
                    "stop_tokens": datasets.features.Sequence(
                        datasets.Value("string")
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://nuprl.github.io/MultiPL-E/",
            citation=_CITATION,
            task_templates=[],
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download the prompt file for this config; everything is TEST split.

        The URL is pinned to a specific MultiPL-E commit so the benchmark
        content cannot silently change underneath consumers.
        """
        files = dl_manager.download(
            f"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/{self.config.name}.json"
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": files,
                },
            )
        ]

    def _generate_examples(self, filepath):
        """Yield (index, record) pairs from the downloaded JSON list."""
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for id_, row in enumerate(data):
            yield id_, row