diff --git a/.gitattributes b/.gitattributes
deleted file mode 100644
index 91382cc7fb6f9003e1f7ed36187d86d531dbead1..0000000000000000000000000000000000000000
--- a/.gitattributes
+++ /dev/null
@@ -1,51 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.npy filter=lfs diff=lfs merge=lfs -text
-*.npz filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pickle filter=lfs diff=lfs merge=lfs -text
-*.pkl filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zst filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
-*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index 225fc6f665041d5b55e2f00b3a1e275e9b0317ac..0000000000000000000000000000000000000000
--- a/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/__pycache__
diff --git a/MultiPL-E.py b/MultiPL-E.py
deleted file mode 100644
index 5e26ffbe0708debc427cc068c7d5483a3d1172fa..0000000000000000000000000000000000000000
--- a/MultiPL-E.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import json
-import datasets
-from pathlib import Path
-
-logger = datasets.logging.get_logger(__name__)
-
-_CITATION = """\
-@misc{multipl-e,
-  doi = {10.48550/ARXIV.2208.08227},
-  url = {https://arxiv.org/abs/2208.08227},
-  author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and
-  Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and
-  Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and
-  Feldman, Molly Q and Guha, Arjun and
-  Greenberg, Michael and Jangda, Abhinav},
-  title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18
-  Programming Languages},
-  publisher = {arXiv},
-  year = {2022},
-}
-"""
-
-_DESCRIPTION = """\
-MultiPL-E is a dataset for evaluating large language models for code \
-generation that supports 18 programming languages. It takes the OpenAI \
-"HumanEval" and the MBPP Python benchmarks and uses little compilers to \
-translate them to other languages. It is easy to add support for new languages \
-and benchmarks.
-"""
-
-_SRCDATA = [ "humaneval", "mbpp" ]
-
-_LANGUAGES = [
-    "cpp", "cs", "d", "go", "java", "jl", "js", "lua", "php", "pl", "py", "r",
-    "rb", "rkt", "rs", "scala", "sh", "swift", "ts"
-]
-
-_VARIATIONS = [ "keep", "transform", "reworded", "remove" ]
-
-class MultiPLEBuilderConfig(datasets.BuilderConfig):
-    """BuilderConfig for MultiPLEBuilderConfig."""
-
-    def __init__(
-        self,
-        srcdata,
-        language,
-        variation,
-        **kwargs,
-    ):
-        self.language = language
-        self.variation = variation
-        self.srcdata = srcdata
-        name = f"{srcdata}-{language}"
-        if variation != "reworded":
-            name = f"{name}-{variation}"
-        kwargs["name"] = name
-        super(MultiPLEBuilderConfig, self).__init__(**kwargs)
-
-def _is_interesting(srcdata: str, variation: str):
-    if srcdata == "humaneval":
-        return True
-    if srcdata == "mbpp":
-        # MBPP does not have doctests, so these are the only interesting
-        # variations
-        return variation in [ "keep", "reworded" ]
-
-class MultiPLE(datasets.GeneratorBasedBuilder):
-    BUILDER_CONFIG_CLASS = MultiPLEBuilderConfig
-
-    BUILDER_CONFIGS = [
-        MultiPLEBuilderConfig(
-            srcdata=srcdata,
-            language=language,
-            variation=variation,
-            version=datasets.Version("2.0.0"))
-        for srcdata in _SRCDATA
-        for language in _LANGUAGES
-        for variation in _VARIATIONS
-        if _is_interesting(srcdata, variation)
-    ]
-
-    DEFAULT_CONFIG_NAME = "humaneval-cpp"
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            license="MIT",
-            features=datasets.Features({
-                "name": datasets.Value("string"),
-                "language": datasets.Value("string"),
-                "prompt": datasets.Value("string"),
-                "doctests": datasets.Value("string"),
-                "original": datasets.Value("string"),
-                "prompt_terminology": datasets.Value("string"),
-                "tests": datasets.Value("string"),
-                "stop_tokens": datasets.features.Sequence(datasets.Value("string")),
-            }),
-            supervised_keys=None,
-            homepage="https://nuprl.github.io/MultiPL-E/",
-            citation=_CITATION,
-            task_templates=[]
-        )
-
-    def _split_generators(self, dl_manager: datasets.DownloadManager):
-        files = dl_manager.download(
-            f"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/{self.config.srcdata}-{self.config.language}-{self.config.variation}.json"
-        )
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepath": files,
-                }
-            )
-        ]
-
-    def _generate_examples(self, filepath):
-        with open(filepath, encoding="utf-8") as f:
-            data = json.load(f)
-            for id_, row in enumerate(data):
-                yield id_, row
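For context before the next hunk: the deleted builder above names each config `{srcdata}-{language}` and appends the variation only when it is not `reworded`, so `humaneval-cpp` is the reworded HumanEval-to-C++ set. Below is a minimal sketch of consuming one such config with the `datasets` library; the hub id `nuprl/MultiPL-E` is an assumption inferred from the project homepage, not something stated in this diff.

```python
from datasets import load_dataset

# "humaneval-cpp" = HumanEval translated to C++, "reworded" variation
# (the variation suffix is dropped for "reworded" in MultiPLEBuilderConfig).
problems = load_dataset("nuprl/MultiPL-E", "humaneval-cpp", split="test")

for problem in problems:
    print(problem["name"], problem["language"], problem["stop_tokens"])
```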
diff --git a/README.md b/README.md
deleted file mode 100644
index 9a5fadb7d769b66f3bf8663c2ea770e43e179c9c..0000000000000000000000000000000000000000
--- a/README.md
+++ /dev/null
@@ -1,107 +0,0 @@
----
-annotations_creators:
-- machine-generated
-language:
-- en
-language_creators:
-- machine-generated
-- expert-generated
-license:
-- mit
-multilinguality:
-- monolingual
-pretty_name: MultiPLE-E
-size_categories:
-- 1K<n<10K
-```python
-def stop_at_stop_token(decoded_string, problem):
-    min_stop_index = len(decoded_string)
-    for stop_token in problem["stop_tokens"]:
-        stop_index = decoded_string.find(stop_token)
-        if stop_index != -1 and stop_index > len(problem["prompt"]) and stop_index < min_stop_index:
-            min_stop_index = stop_index
-    return decoded_string[:min_stop_index]
-
-for problem in problems["test"]:
-    input_ids = tokenizer(
-        problem["prompt"],
-        return_tensors="pt",
-    ).input_ids.cuda()
-    generated_ids = model.generate(
-        input_ids, max_length=512, pad_token_id=tokenizer.eos_token_id + 2
-    )
-    truncated_string = stop_at_stop_token(tokenizer.decode(generated_ids[0]), problem)
-    filename = problem["name"] + "." + LANG
-    with open(filename, "w") as f:
-        print(f"Created {filename}")
-        f.write(truncated_string)
-        f.write("\n")
-        f.write(problem["tests"])
-```
\ No newline at end of file
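To make the truncation rule concrete: `stop_at_stop_token` cuts the decoded sample at the earliest stop token that occurs after the prompt, and the `> len(problem["prompt"])` guard ignores stop tokens that appear inside the prompt itself. A toy run with hypothetical Lua-style values (the helper is restated so the snippet runs on its own; none of the values come from the dataset):

```python
def stop_at_stop_token(decoded_string, problem):
    # Same logic as the README helper above.
    min_stop_index = len(decoded_string)
    for stop_token in problem["stop_tokens"]:
        stop_index = decoded_string.find(stop_token)
        if stop_index != -1 and stop_index > len(problem["prompt"]) and stop_index < min_stop_index:
            min_stop_index = stop_index
    return decoded_string[:min_stop_index]

problem = {
    "prompt": "-- add two numbers\nlocal function add(a, b)\n",
    "stop_tokens": ["\nfunction", "\nlocal", "\n--"],
}
decoded = problem["prompt"] + "  return a + b\nend\n\nfunction scratch()\n  return 0\nend\n"
# "\nlocal" occurs inside the prompt and is ignored; the "\nfunction" after
# the body wins, so the stray second function is dropped.
print(stop_at_stop_token(decoded, problem))
# -- add two numbers
# local function add(a, b)
#   return a + b
# end
```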
diff --git a/dataset_infos.json b/dataset_infos.json
deleted file mode 100644
index b6cbd4fc0d63650dd03f04a369aec1b8136d4ef4..0000000000000000000000000000000000000000
--- a/dataset_infos.json
+++ /dev/null
@@ -1 +0,0 @@
-{"cpp-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 217792, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-keep.json": {"num_bytes": 248493, "checksum": "56d81141f7b29c237796e14173b8e2884e97d27a8d57c3644a237c09f59227b4"}}, "download_size": 248493, "post_processing_size": null, "dataset_size": 217792, "size_in_bytes": 466285}, "cpp-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239517, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-transform.json": {"num_bytes": 270773, "checksum": "cb154fc45bef323590b79bb70c14aba4bad59b6a2180615d8937485d41a93d1e"}}, "download_size": 270773, "post_processing_size": null, "dataset_size": 239517, "size_in_bytes": 510290}, "cpp-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239767, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-reworded.json": {"num_bytes": 271023, "checksum": "ac639faf8c79348712cb2cd1d95df135a226a49006461245acf810039b9420ce"}}, "download_size": 271023, "post_processing_size": null, "dataset_size": 239767, "size_in_bytes": 510790}, "cpp-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 198566, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-remove.json": {"num_bytes": 227555, "checksum": "729a5a6e1d68668554f77de56ef17b44eab57beea03f2fb920c075cb4f6a905f"}}, "download_size": 227555, "post_processing_size": null, "dataset_size": 198566, "size_in_bytes": 426121}, "cs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259874, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-keep.json": {"num_bytes": 291137, "checksum": "db62ab52665a2742d0bef4de662ca187a703227083881177dad4f2712da5199a"}}, "download_size": 291137, "post_processing_size": null, "dataset_size": 259874, "size_in_bytes": 551011}, "cs-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283738, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-transform.json": {"num_bytes": 315563, "checksum": "505f4892388ede789dd09a256c3dbc801549c8d1d372fa60b4db339fe09d6319"}}, "download_size": 315563, "post_processing_size": null, "dataset_size": 283738, "size_in_bytes": 599301}, "cs-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283673, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-reworded.json": {"num_bytes": 315498, "checksum": "0304b710180681c9a68fe97684a87e71ab35aec9f229fd1d592e0b0ea698d8c2"}}, "download_size": 315498, "post_processing_size": null, "dataset_size": 283673, "size_in_bytes": 599171}, "cs-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 237663, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-remove.json": {"num_bytes": 267251, "checksum": "8e2295c157152f2105d805dc06b26ab91e31000cdc8710f31e693bc65de1b753"}}, "download_size": 267251, "post_processing_size": null, "dataset_size": 237663, "size_in_bytes": 504914}, "d-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 175592, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-keep.json": {"num_bytes": 209568, "checksum": "e34578f5aabf7a3664eee62f77b00cc908c3db8a6a7aeb071965de247f9750e7"}}, "download_size": 209568, "post_processing_size": null, "dataset_size": 175592, "size_in_bytes": 385160}, "d-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181121, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-transform.json": {"num_bytes": 215649, "checksum": "a9d182b3a60e4f951e2235f2a4157b91f518623b6ae21260e1d5d6703cf77a78"}}, "download_size": 215649, "post_processing_size": null, "dataset_size": 181121, "size_in_bytes": 396770}, "d-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181296, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-reworded.json": {"num_bytes": 215824, "checksum": "6a021fd31c45c3f68742f7d60d27082d45d17229daae221d46c70ace9d61bc2b"}}, "download_size": 215824, "post_processing_size": null, "dataset_size": 181296, "size_in_bytes": 397120}, "d-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 157938, "num_examples": 153, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-remove.json": {"num_bytes": 190211, "checksum": "9a36e460e3f0e7fcb92fa6d9f1da5e9d62cf5ee6787af73468bb2a54dada295a"}}, "download_size": 190211, "post_processing_size": null, "dataset_size": 157938, "size_in_bytes": 348149}, "go-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 241130, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-keep.json": {"num_bytes": 280424, "checksum": "6de07406cbf81f3a6d0199ec9fc85eaf78a20d9954f8f3ea22e7d1b2fa9a92b6"}}, "download_size": 280424, "post_processing_size": null, "dataset_size": 241130, "size_in_bytes": 521554}, "go-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247448, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-transform.json": {"num_bytes": 287275, "checksum": "084a15fb951dd89dc33a06cf49acaf2610ee0e2de0c9f8d1325b08a4a88b2ebc"}}, "download_size": 287275, "post_processing_size": null, "dataset_size": 247448, "size_in_bytes": 534723}, "go-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247354, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-reworded.json": {"num_bytes": 287181, "checksum": "b5fee01832bc349cab80f50aa68ec6e8df37cf054457ccfd0333229acae60b08"}}, "download_size": 287181, "post_processing_size": null, "dataset_size": 247354, "size_in_bytes": 534535}, "go-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 221519, "num_examples": 151, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-remove.json": {"num_bytes": 258980, "checksum": "e4bbf884adf71965e8b0978ff20ff779de60f50bd7da8912b620b713de3bc376"}}, "download_size": 258980, "post_processing_size": null, "dataset_size": 221519, "size_in_bytes": 480499}, "java-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259836, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-keep.json": {"num_bytes": 291099, "checksum": "7bf1559d86c8a92fd15b4ed812d885c99c50551f392b2ad816a8e7060527e89c"}}, "download_size": 291099, "post_processing_size": null, "dataset_size": 259836, "size_in_bytes": 550935}, "java-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 286548, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-transform.json": {"num_bytes": 318373, "checksum": "b5da36d56612e80384d9e6a46407241934730d3ba5bca98c5e7ccfb112f9d628"}}, "download_size": 318373, "post_processing_size": null, "dataset_size": 286548, "size_in_bytes": 604921}, "java-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 288031, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-reworded.json": {"num_bytes": 319856, "checksum": "893dabdd6b521f3e05ab84748cd27a1e6debbe9400478c8ca889953940145ca1"}}, "download_size": 319856, "post_processing_size": null, "dataset_size": 288031, "size_in_bytes": 607887}, "java-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 237672, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-remove.json": {"num_bytes": 267260, "checksum": "a6c69545169e760eb802d953af94dde684146430b281d43ffa98f72f1416a34d"}}, "download_size": 267260, "post_processing_size": null, "dataset_size": 237672, "size_in_bytes": 504932}, "jl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 163708, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-keep.json": {"num_bytes": 198696, "checksum": "7fa3f79aa3d56fadae3414684f0f102f87d529099d84a6f5d30a652714419d7b"}}, "download_size": 198696, "post_processing_size": null, "dataset_size": 163708, "size_in_bytes": 362404}, "jl-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 167969, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-transform.json": {"num_bytes": 203514, "checksum": "255731ab55a8eb128bcf6b3ececbd0dcd5fcb087753b830f148788c53ebfee8e"}}, "download_size": 203514, "post_processing_size": null, "dataset_size": 167969, "size_in_bytes": 371483}, "jl-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168251, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-reworded.json": {"num_bytes": 203796, "checksum": "ceef60793f1d2c97d96df7e8ef54695a17a6a1d47a11e4c9c7a202c50300aff3"}}, "download_size": 203796, "post_processing_size": null, "dataset_size": 168251, "size_in_bytes": 372047}, "jl-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 145913, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-remove.json": {"num_bytes": 179158, "checksum": "221e77ae9a1c3c3ab95d0c5010b119f9fd6f1fea9afaa79e5cf033f9a62e9d11"}}, "download_size": 179158, "post_processing_size": null, "dataset_size": 145913, "size_in_bytes": 325071}, "js-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177635, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-keep.json": {"num_bytes": 211822, "checksum": "02e56da39247f31c4f399a62210fdbe97bb45f6ec239140c3985432b72485bf2"}}, "download_size": 211822, "post_processing_size": null, "dataset_size": 177635, "size_in_bytes": 389457}, "js-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181987, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-transform.json": {"num_bytes": 216729, "checksum": "d90db81d52580d6d21cca9b16662fdac11b4ff5f2b50521652014c3c4d66b9c0"}}, "download_size": 216729, "post_processing_size": null, "dataset_size": 181987, "size_in_bytes": 398716}, "js-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182171, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-reworded.json": {"num_bytes": 216913, "checksum": "ed2aa0a25d0fd9dd963668079e334d88acd8caf0bf020a33964f7cd4700eb670"}}, "download_size": 216913, "post_processing_size": null, "dataset_size": 182171, "size_in_bytes": 399084}, "js-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158619, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-remove.json": {"num_bytes": 191028, "checksum": "8b0d17122dac1a1efef793d71e73473892aba8c8ebf8bf2238e4be8f7cd2685d"}}, "download_size": 191028, "post_processing_size": null, "dataset_size": 158619, "size_in_bytes": 349647}, "lua-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 180398, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-keep.json": {"num_bytes": 212511, "checksum": "fb7466e8b89c92fab70dbd7f0074972cf0c6e970f94f7203c4fa01797af59e67"}}, "download_size": 212511, "post_processing_size": null, "dataset_size": 180398, "size_in_bytes": 392909}, "lua-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184763, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-transform.json": {"num_bytes": 216595, "checksum": "fba904e9325bb59360bb4e583f796bce78587695db92c6a4b4145a6bbb8778df"}}, "download_size": 216595, "post_processing_size": null, "dataset_size": 184763, "size_in_bytes": 401358}, "lua-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184853, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-reworded.json": {"num_bytes": 216685, "checksum": "54b8881bd6d2ba52b1d2e77388f20429edd60e705cf2c8cc87c58db966ceb2ff"}}, "download_size": 216685, "post_processing_size": null, "dataset_size": 184853, "size_in_bytes": 401538}, "lua-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 161339, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-remove.json": {"num_bytes": 191690, "checksum": "e12d5519c6f740d9341136043e93f42986a13b7f00a64c393592bca83400f45e"}}, "download_size": 191690, "post_processing_size": null, "dataset_size": 161339, "size_in_bytes": 353029}, "php-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 219526, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-keep.json": {"num_bytes": 256134, "checksum": "6e8bbef0effb50396b752e4e2ee3cd42e9f1edcf253e684dffe0d60efd447af4"}}, "download_size": 256134, "post_processing_size": null, "dataset_size": 219526, "size_in_bytes": 475660}, "php-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225575, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-transform.json": {"num_bytes": 262738, "checksum": "113c46223db9f1235ba2f0a390a0f01a9775400a671537e70755ea471e99088c"}}, "download_size": 262738, "post_processing_size": null, "dataset_size": 225575, "size_in_bytes": 488313}, "php-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225730, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-reworded.json": {"num_bytes": 262893, "checksum": "a27a5093957369e68f16ec973cc3fe16a400a6b1e0efa2469ef607ea5529b176"}}, "download_size": 262893, "post_processing_size": null, "dataset_size": 225730, "size_in_bytes": 488623}, "php-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 200047, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-remove.json": {"num_bytes": 234848, "checksum": "3b13b33434a08c9bcff8db2a72e3ec89c85a794b8c1ca576a10614693d3b27b0"}}, "download_size": 234848, "post_processing_size": null, "dataset_size": 200047, "size_in_bytes": 434895}, "pl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239874, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-keep.json": {"num_bytes": 279351, "checksum": "116f82cec38a8a9f38bd14bbd9348d18f13879a98c293c7ce9ff38829da8bf3f"}}, "download_size": 279351, "post_processing_size": null, "dataset_size": 239874, "size_in_bytes": 519225}, "pl-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243611, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-transform.json": {"num_bytes": 283767, "checksum": "552decb4ad799ae7204b0434600d0a7b1b2136dc34dbaa1a3e6ca7acb681173e"}}, "download_size": 283767, "post_processing_size": null, "dataset_size": 243611, "size_in_bytes": 527378}, "pl-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243661, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-reworded.json": {"num_bytes": 283817, "checksum": "52010c713c3cb0ee07b691f0c04be20baf35223019bc8dfeb08720b82fd8ce58"}}, "download_size": 283817, "post_processing_size": null, "dataset_size": 243661, "size_in_bytes": 527478}, "pl-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 220817, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-remove.json": {"num_bytes": 258463, "checksum": "94723d826be5a900f975ffd97039dba9de878945f6d81fa0a59bdebed5c87ef6"}}, "download_size": 258463, "post_processing_size": null, "dataset_size": 220817, "size_in_bytes": 479280}, "py-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 173537, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-keep.json": {"num_bytes": 207009, "checksum": "c583508bfd9ca7f7d8730f7cf618cd5d0fb4d2000f48d39d5311b4eeb06fb6a3"}}, "download_size": 207009, "post_processing_size": null, "dataset_size": 173537, "size_in_bytes": 380546}, "py-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-transform.json": {"num_bytes": 210975, "checksum": "9518a25d142569e8adf490d2cf6ed0df3ed16663991f73900d8477152f9a00c3"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "py-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-reworded.json": {"num_bytes": 210975, "checksum": "56360077d2f35ca58965a85084205b31d4c296563d3fd93f1248bca308535f7f"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "py-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 155389, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-remove.json": {"num_bytes": 187068, "checksum": "491dc22f69bd7e4098c9b927addec8a3f9e7f0a7f93bac655bdc4440c26008a1"}}, "download_size": 187068, "post_processing_size": null, "dataset_size": 155389, "size_in_bytes": 342457}, "r-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186803, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-keep.json": {"num_bytes": 215857, "checksum": "efd573dd3afcf7e6bdbea508dda54067e73777fc0d2e9e6570a52dfda63aa0fa"}}, "download_size": 215857, "post_processing_size": null, "dataset_size": 186803, "size_in_bytes": 402660}, "r-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191732, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-transform.json": {"num_bytes": 220505, "checksum": "5a7b5f28ae59eec006d012623f594c9143fe9854487bd98817ed075d4d2abb97"}}, "download_size": 220505, "post_processing_size": null, "dataset_size": 191732, "size_in_bytes": 412237}, "r-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191747, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-reworded.json": {"num_bytes": 220520, "checksum": "7d4063b824313d807dc8901bf86aab318b6a905549a2229fa9fdf286a526f215"}}, "download_size": 220520, "post_processing_size": null, "dataset_size": 191747, "size_in_bytes": 412267}, "r-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168422, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-remove.json": {"num_bytes": 195771, "checksum": "32085e69d9f3975f38ce336e8e90b34124b19b8d581cdf7d0c5c902c14d6f012"}}, "download_size": 195771, "post_processing_size": null, "dataset_size": 168422, "size_in_bytes": 364193}, "rb-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181999, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-keep.json": {"num_bytes": 216186, "checksum": "d8e86b7408460ff14841666c7514971db6092cdd1b5565d629bf908a71046ba1"}}, "download_size": 216186, "post_processing_size": null, "dataset_size": 181999, "size_in_bytes": 398185}, "rb-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188317, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-transform.json": {"num_bytes": 223059, "checksum": "b53abcc9538e2c743d5bfc0e86f18e0832e6ec0dbd611a98566b05950436d31c"}}, "download_size": 223059, "post_processing_size": null, "dataset_size": 188317, "size_in_bytes": 411376}, "rb-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188457, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-reworded.json": {"num_bytes": 223199, "checksum": "17d1d757c496a5230aacc106a6e61146cb8d8c29f5c9de9c3cd1000e7123b9ad"}}, "download_size": 223199, "post_processing_size": null, "dataset_size": 188457, "size_in_bytes": 411656}, "rb-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 163569, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-remove.json": {"num_bytes": 195978, "checksum": "02488606f2897203cf131aeb57eec365b93ecb0e7dd7a73d048890f0fd060e72"}}, "download_size": 195978, "post_processing_size": null, "dataset_size": 163569, "size_in_bytes": 359547}, "rkt-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177757, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-keep.json": {"num_bytes": 212266, "checksum": "7086c9ca18882c7f0a18a4b46dfe84c0b5293b69a4c9d8964ad72a797ad72871"}}, "download_size": 212266, "post_processing_size": null, "dataset_size": 177757, "size_in_bytes": 390023}, "rkt-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182937, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-transform.json": {"num_bytes": 218001, "checksum": "360afce46e550266f91f096d22e8a5e31e3b7f234c1d465a45c72a82ef2bda17"}}, "download_size": 218001, "post_processing_size": null, "dataset_size": 182937, "size_in_bytes": 400938}, "rkt-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182754, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-reworded.json": {"num_bytes": 217818, "checksum": "6d399f13b03d66d107c56736285bddd09c4be707a7bfba5d3865c964ea467d8a"}}, "download_size": 217818, "post_processing_size": null, "dataset_size": 182754, "size_in_bytes": 400572}, "rkt-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158729, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-remove.json": {"num_bytes": 191454, "checksum": "4b9e8bd27090d5d21882ac505f579d0825b079af5769c3ca9d8e7585e0e7005a"}}, "download_size": 191454, "post_processing_size": null, "dataset_size": 158729, "size_in_bytes": 350183}, "rs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177191, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-keep.json": {"num_bytes": 206604, "checksum": "d5960e79973aea8bc30d276d5aa8c2750d336b80ff26be4ecc93495a77fd597b"}}, "download_size": 206604, "post_processing_size": null, "dataset_size": 177191, "size_in_bytes": 383795}, "rs-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188587, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-transform.json": {"num_bytes": 218555, "checksum": "1cd4f2931c17a8d9ee3aa8e646b818f2f2d5981b252639ff723d34ea5a13f973"}}, "download_size": 218555, "post_processing_size": null, "dataset_size": 188587, "size_in_bytes": 407142}, "rs-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188841, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-reworded.json": {"num_bytes": 218809, "checksum": "78d55aaa02b3faf1b0005b1b3757364274adebd294ee2281653230ebd829b594"}}, "download_size": 218809, "post_processing_size": null, "dataset_size": 188841, "size_in_bytes": 407650}, "rs-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158191, "num_examples": 153, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-remove.json": {"num_bytes": 185991, "checksum": "064b21353df32e13ad02e7bf68b9a977f78000b632b73828487f5d47a0a9c610"}}, "download_size": 185991, "post_processing_size": null, "dataset_size": 158191, "size_in_bytes": 344182}, "scala-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 222118, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-keep.json": {"num_bytes": 253027, "checksum": "eb90cccebedf54864fa5fe487141d5467962aecd05d1eee25403a0369e6ffde6"}}, "download_size": 253027, "post_processing_size": null, "dataset_size": 222118, "size_in_bytes": 475145}, "scala-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240540, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-transform.json": {"num_bytes": 272012, "checksum": "48669c1583008ffdd607006c3d4d0df65c0be452b1b7fa5429d15b4739495b34"}}, "download_size": 272012, "post_processing_size": null, "dataset_size": 240540, "size_in_bytes": 512552}, "scala-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240466, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-reworded.json": {"num_bytes": 271938, "checksum": "06b28cd512364d4b69a1ff5bfc61b7db620fb21dd73aff0c15db5a547879d38a"}}, "download_size": 271938, "post_processing_size": null, "dataset_size": 240466, "size_in_bytes": 512404}, "scala-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 200261, "num_examples": 157, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-remove.json": {"num_bytes": 229477, "checksum": "1fc1cc45643a50b0a54e467506582d72c8a7ff1124d07502599f6d16cb51fa93"}}, "download_size": 229477, "post_processing_size": null, "dataset_size": 200261, "size_in_bytes": 429738}, "sh-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158460, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-keep.json": {"num_bytes": 193268, "checksum": "4f7240af8ed75b8448061713aa5e92352119b8db4618f0da4378ecd78478d81a"}}, "download_size": 193268, "post_processing_size": null, "dataset_size": 158460, "size_in_bytes": 351728}, "sh-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164552, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-transform.json": {"num_bytes": 201631, "checksum": "961c6ce6bf00bb9422c809065fc185da86fb5eadf2d87a40f29f63b855fc032e"}}, "download_size": 201631, "post_processing_size": null, "dataset_size": 164552, "size_in_bytes": 366183}, "sh-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164521, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-reworded.json": {"num_bytes": 201600, "checksum": "9f1e19a95aa83cf8ef4a9a23acbd3a1cee176ec13e049f57ade645126ca56ad8"}}, "download_size": 201600, "post_processing_size": null, "dataset_size": 164521, "size_in_bytes": 366121}, "sh-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 140720, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-remove.json": {"num_bytes": 173767, "checksum": "0e3e37a23e2a2183ead389b70d46a487a31a96e82de8cc3fb1bf7f43d2ae00d9"}}, "download_size": 173767, "post_processing_size": null, "dataset_size": 140720, "size_in_bytes": 314487}, "swift-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 201798, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-keep.json": {"num_bytes": 233903, "checksum": "2f47aae44c26a505bce9a7c456377c015ddb35952017f626cac03c0cd6655642"}}, "download_size": 233903, "post_processing_size": null, "dataset_size": 201798, "size_in_bytes": 435701}, "swift-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204760, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-transform.json": {"num_bytes": 236660, "checksum": "c0b76d009ffc75e26040f13c511e78bdfdb4fafe7743fbc2b1315173e638c438"}}, "download_size": 236660, "post_processing_size": null, "dataset_size": 204760, "size_in_bytes": 441420}, "swift-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204920, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-reworded.json": {"num_bytes": 236820, "checksum": "193c6907ee55129c7ce823ad9162e9a52f0c0f1657220e6a329718385d31c969"}}, "download_size": 236820, "post_processing_size": null, "dataset_size": 204920, "size_in_bytes": 441740}, "swift-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181681, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-remove.json": {"num_bytes": 212047, "checksum": "9c5aadcab3e2bed9592808321c2f5abbf18c257b71b329bc41689c4a54972ead"}}, "download_size": 212047, "post_processing_size": null, "dataset_size": 181681, "size_in_bytes": 393728}, "ts-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts-keep", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181763, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-keep.json": {"num_bytes": 215589, "checksum": "bea4e1776118c9bb9f3211deeaa6ce03dde208031b8d90f533f7d5b1d7bb5830"}}, "download_size": 215589, "post_processing_size": null, "dataset_size": 181763, "size_in_bytes": 397352}, "ts-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts-transform", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186037, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-transform.json": {"num_bytes": 220423, "checksum": "6081b604f3673a39bd5e8fc68a67977a3855f477cdfc1431a6cf0e2fb0be00bf"}}, "download_size": 220423, "post_processing_size": null, "dataset_size": 186037, "size_in_bytes": 406460}, "ts-reworded": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts-reworded", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186215, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-reworded.json": {"num_bytes": 220601, "checksum": "e64fa52e9e95e4daa62a9e8162b4ba1a6ec3e2881a7968ba4a69eaa3d8ba61e3"}}, "download_size": 220601, "post_processing_size": null, "dataset_size": 186215, "size_in_bytes": 406816}, "ts-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts-remove", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 162881, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-remove.json": {"num_bytes": 194985, "checksum": "7a98910e983f01a13325280b3d9d383bbd1454eced4b5b08b4f7da9daf781f32"}}, "download_size": 194985, "post_processing_size": null, "dataset_size": 162881, "size_in_bytes": 357866}, "cpp": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cpp", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239767, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cpp-reworded.json": {"num_bytes": 271023, "checksum": "ac639faf8c79348712cb2cd1d95df135a226a49006461245acf810039b9420ce"}}, "download_size": 271023, "post_processing_size": null, "dataset_size": 239767, "size_in_bytes": 510790}, "cs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "cs", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283673, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/cs-reworded.json": {"num_bytes": 315498, "checksum": "0304b710180681c9a68fe97684a87e71ab35aec9f229fd1d592e0b0ea698d8c2"}}, "download_size": 315498, "post_processing_size": null, "dataset_size": 283673, "size_in_bytes": 599171}, "d": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "d", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181296, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/d-reworded.json": {"num_bytes": 215824, "checksum": "6a021fd31c45c3f68742f7d60d27082d45d17229daae221d46c70ace9d61bc2b"}}, "download_size": 215824, "post_processing_size": null, "dataset_size": 181296, "size_in_bytes": 397120}, "go": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "go", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247354, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/go-reworded.json": {"num_bytes": 287181, "checksum": "b5fee01832bc349cab80f50aa68ec6e8df37cf054457ccfd0333229acae60b08"}}, "download_size": 287181, "post_processing_size": null, "dataset_size": 247354, "size_in_bytes": 534535}, "java": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "java", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 288031, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/java-reworded.json": {"num_bytes": 319856, "checksum": "893dabdd6b521f3e05ab84748cd27a1e6debbe9400478c8ca889953940145ca1"}}, "download_size": 319856, "post_processing_size": null, "dataset_size": 288031, "size_in_bytes": 607887}, "jl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "jl", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168251, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/jl-reworded.json": {"num_bytes": 203796, "checksum": "ceef60793f1d2c97d96df7e8ef54695a17a6a1d47a11e4c9c7a202c50300aff3"}}, "download_size": 203796, "post_processing_size": null, "dataset_size": 168251, "size_in_bytes": 372047}, "js": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "js", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182171, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/js-reworded.json": {"num_bytes": 216913, "checksum": "ed2aa0a25d0fd9dd963668079e334d88acd8caf0bf020a33964f7cd4700eb670"}}, "download_size": 216913, "post_processing_size": null, "dataset_size": 182171, "size_in_bytes": 399084}, "lua": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "lua", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184853, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/lua-reworded.json": {"num_bytes": 216685, "checksum": "54b8881bd6d2ba52b1d2e77388f20429edd60e705cf2c8cc87c58db966ceb2ff"}}, "download_size": 216685, "post_processing_size": null, "dataset_size": 184853, "size_in_bytes": 401538}, "php": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "php", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225730, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/php-reworded.json": {"num_bytes": 262893, "checksum": "a27a5093957369e68f16ec973cc3fe16a400a6b1e0efa2469ef607ea5529b176"}}, "download_size": 262893, "post_processing_size": null, "dataset_size": 225730, "size_in_bytes": 488623}, "pl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "pl", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243661, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/pl-reworded.json": {"num_bytes": 283817, "checksum": "52010c713c3cb0ee07b691f0c04be20baf35223019bc8dfeb08720b82fd8ce58"}}, "download_size": 283817, "post_processing_size": null, "dataset_size": 243661, "size_in_bytes": 527478}, "py": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "py", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/py-reworded.json": {"num_bytes": 210975, "checksum": "56360077d2f35ca58965a85084205b31d4c296563d3fd93f1248bca308535f7f"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "r": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "r", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191747, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/r-reworded.json": {"num_bytes": 220520, "checksum": "7d4063b824313d807dc8901bf86aab318b6a905549a2229fa9fdf286a526f215"}}, "download_size": 220520, "post_processing_size": null, "dataset_size": 191747, "size_in_bytes": 412267}, "rb": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rb", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188457, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rb-reworded.json": {"num_bytes": 223199, "checksum": "17d1d757c496a5230aacc106a6e61146cb8d8c29f5c9de9c3cd1000e7123b9ad"}}, "download_size": 223199, "post_processing_size": null, "dataset_size": 188457, "size_in_bytes": 411656}, "rkt": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rkt", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182754, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rkt-reworded.json": {"num_bytes": 217818, "checksum": "6d399f13b03d66d107c56736285bddd09c4be707a7bfba5d3865c964ea467d8a"}}, "download_size": 217818, "post_processing_size": null, "dataset_size": 182754, "size_in_bytes": 400572}, "rs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "rs", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188841, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/rs-reworded.json": {"num_bytes": 218809, "checksum": "78d55aaa02b3faf1b0005b1b3757364274adebd294ee2281653230ebd829b594"}}, "download_size": 218809, "post_processing_size": null, "dataset_size": 188841, "size_in_bytes": 407650}, "scala": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "scala", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240466, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/scala-reworded.json": {"num_bytes": 271938, "checksum": "06b28cd512364d4b69a1ff5bfc61b7db620fb21dd73aff0c15db5a547879d38a"}}, "download_size": 271938, "post_processing_size": null, "dataset_size": 240466, "size_in_bytes": 512404}, "sh": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "sh", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164521, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/sh-reworded.json": {"num_bytes": 201600, "checksum": "9f1e19a95aa83cf8ef4a9a23acbd3a1cee176ec13e049f57ade645126ca56ad8"}}, "download_size": 201600, "post_processing_size": null, "dataset_size": 164521, "size_in_bytes": 366121}, "swift": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "swift", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204920, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/swift-reworded.json": {"num_bytes": 236820, "checksum": "193c6907ee55129c7ce823ad9162e9a52f0c0f1657220e6a329718385d31c969"}}, "download_size": 236820, "post_processing_size": null, "dataset_size": 204920, "size_in_bytes": 441740}, "ts": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "ts", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186215, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/375e903198713b7f5faa95a4047c6928cf7348f9/prompts/ts-reworded.json": {"num_bytes": 220601, "checksum": "e64fa52e9e95e4daa62a9e8162b4ba1a6ec3e2881a7968ba4a69eaa3d8ba61e3"}}, "download_size": 220601, "post_processing_size": null, "dataset_size": 186215, "size_in_bytes": 406816}, "humaneval-cpp-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cpp-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 217792, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cpp-keep.json": {"num_bytes": 248493, "checksum": "56d81141f7b29c237796e14173b8e2884e97d27a8d57c3644a237c09f59227b4"}}, "download_size": 248493, "post_processing_size": null, "dataset_size": 217792, "size_in_bytes": 466285}, "humaneval-cpp-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cpp-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239517, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cpp-transform.json": {"num_bytes": 270773, "checksum": "cb154fc45bef323590b79bb70c14aba4bad59b6a2180615d8937485d41a93d1e"}}, "download_size": 270773, "post_processing_size": null, "dataset_size": 239517, "size_in_bytes": 510290}, "humaneval-cpp": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cpp", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239767, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cpp-reworded.json": {"num_bytes": 271023, "checksum": "ac639faf8c79348712cb2cd1d95df135a226a49006461245acf810039b9420ce"}}, "download_size": 271023, "post_processing_size": null, "dataset_size": 239767, "size_in_bytes": 510790}, "humaneval-cpp-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cpp-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 198566, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cpp-remove.json": {"num_bytes": 227555, "checksum": "729a5a6e1d68668554f77de56ef17b44eab57beea03f2fb920c075cb4f6a905f"}}, "download_size": 227555, "post_processing_size": null, "dataset_size": 198566, "size_in_bytes": 426121}, "humaneval-cs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cs-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259874, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cs-keep.json": {"num_bytes": 291137, "checksum": "db62ab52665a2742d0bef4de662ca187a703227083881177dad4f2712da5199a"}}, "download_size": 291137, "post_processing_size": null, "dataset_size": 259874, "size_in_bytes": 551011}, "humaneval-cs-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cs-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283738, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cs-transform.json": {"num_bytes": 315563, "checksum": "505f4892388ede789dd09a256c3dbc801549c8d1d372fa60b4db339fe09d6319"}}, "download_size": 315563, "post_processing_size": null, "dataset_size": 283738, "size_in_bytes": 599301}, "humaneval-cs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cs", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 283673, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cs-reworded.json": {"num_bytes": 315498, "checksum": "0304b710180681c9a68fe97684a87e71ab35aec9f229fd1d592e0b0ea698d8c2"}}, "download_size": 315498, "post_processing_size": null, "dataset_size": 283673, "size_in_bytes": 599171}, "humaneval-cs-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-cs-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 237663, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-cs-remove.json": {"num_bytes": 267251, "checksum": "8e2295c157152f2105d805dc06b26ab91e31000cdc8710f31e693bc65de1b753"}}, "download_size": 267251, "post_processing_size": null, "dataset_size": 237663, "size_in_bytes": 504914}, "humaneval-d-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-d-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 175592, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-d-keep.json": {"num_bytes": 209568, "checksum": "e34578f5aabf7a3664eee62f77b00cc908c3db8a6a7aeb071965de247f9750e7"}}, "download_size": 209568, "post_processing_size": null, "dataset_size": 175592, "size_in_bytes": 385160}, "humaneval-d-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-d-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181121, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-d-transform.json": {"num_bytes": 215649, "checksum": "a9d182b3a60e4f951e2235f2a4157b91f518623b6ae21260e1d5d6703cf77a78"}}, "download_size": 215649, "post_processing_size": null, "dataset_size": 181121, "size_in_bytes": 396770}, "humaneval-d": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-d", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181296, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-d-reworded.json": {"num_bytes": 215824, "checksum": "d75ed19dd0c27be82add466bb84cba11f2f7421f3c334402ca5b70f7c2112d5c"}}, "download_size": 215824, "post_processing_size": null, "dataset_size": 181296, "size_in_bytes": 397120}, "humaneval-d-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-d-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 157938, "num_examples": 153, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-d-remove.json": {"num_bytes": 190211, "checksum": "9a36e460e3f0e7fcb92fa6d9f1da5e9d62cf5ee6787af73468bb2a54dada295a"}}, "download_size": 190211, "post_processing_size": null, "dataset_size": 157938, "size_in_bytes": 348149}, "humaneval-go-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-go-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 241130, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-go-keep.json": {"num_bytes": 280424, "checksum": "6de07406cbf81f3a6d0199ec9fc85eaf78a20d9954f8f3ea22e7d1b2fa9a92b6"}}, "download_size": 280424, "post_processing_size": null, "dataset_size": 241130, "size_in_bytes": 521554}, "humaneval-go-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-go-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247448, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-go-transform.json": {"num_bytes": 287275, "checksum": "084a15fb951dd89dc33a06cf49acaf2610ee0e2de0c9f8d1325b08a4a88b2ebc"}}, "download_size": 287275, "post_processing_size": null, "dataset_size": 247448, "size_in_bytes": 534723}, "humaneval-go": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-go", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 247354, "num_examples": 154, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-go-reworded.json": {"num_bytes": 287181, "checksum": "b5fee01832bc349cab80f50aa68ec6e8df37cf054457ccfd0333229acae60b08"}}, "download_size": 287181, "post_processing_size": null, "dataset_size": 247354, "size_in_bytes": 534535}, "humaneval-go-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-go-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 221519, "num_examples": 151, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-go-remove.json": {"num_bytes": 258980, "checksum": "e4bbf884adf71965e8b0978ff20ff779de60f50bd7da8912b620b713de3bc376"}}, "download_size": 258980, "post_processing_size": null, "dataset_size": 221519, "size_in_bytes": 480499}, "humaneval-java-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-java-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 259836, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-java-keep.json": {"num_bytes": 291099, "checksum": "7bf1559d86c8a92fd15b4ed812d885c99c50551f392b2ad816a8e7060527e89c"}}, "download_size": 291099, "post_processing_size": null, "dataset_size": 259836, "size_in_bytes": 550935}, "humaneval-java-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-java-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 286548, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-java-transform.json": {"num_bytes": 318373, "checksum": "b5da36d56612e80384d9e6a46407241934730d3ba5bca98c5e7ccfb112f9d628"}}, "download_size": 318373, "post_processing_size": null, "dataset_size": 286548, "size_in_bytes": 604921}, "humaneval-java": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-java", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 288031, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-java-reworded.json": {"num_bytes": 319856, "checksum": "893dabdd6b521f3e05ab84748cd27a1e6debbe9400478c8ca889953940145ca1"}}, "download_size": 319856, "post_processing_size": null, "dataset_size": 288031, "size_in_bytes": 607887}, "humaneval-java-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-java-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 237672, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-java-remove.json": {"num_bytes": 267260, "checksum": "a6c69545169e760eb802d953af94dde684146430b281d43ffa98f72f1416a34d"}}, "download_size": 267260, "post_processing_size": null, "dataset_size": 237672, "size_in_bytes": 504932}, "humaneval-jl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-jl-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 163708, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-jl-keep.json": {"num_bytes": 198696, "checksum": "7fa3f79aa3d56fadae3414684f0f102f87d529099d84a6f5d30a652714419d7b"}}, "download_size": 198696, "post_processing_size": null, "dataset_size": 163708, "size_in_bytes": 362404}, "humaneval-jl-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-jl-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 167969, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-jl-transform.json": {"num_bytes": 203514, "checksum": "255731ab55a8eb128bcf6b3ececbd0dcd5fcb087753b830f148788c53ebfee8e"}}, "download_size": 203514, "post_processing_size": null, "dataset_size": 167969, "size_in_bytes": 371483}, "humaneval-jl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-jl", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168251, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-jl-reworded.json": {"num_bytes": 203796, "checksum": "ceef60793f1d2c97d96df7e8ef54695a17a6a1d47a11e4c9c7a202c50300aff3"}}, "download_size": 203796, "post_processing_size": null, "dataset_size": 168251, "size_in_bytes": 372047}, "humaneval-jl-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-jl-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 145913, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-jl-remove.json": {"num_bytes": 179158, "checksum": "221e77ae9a1c3c3ab95d0c5010b119f9fd6f1fea9afaa79e5cf033f9a62e9d11"}}, "download_size": 179158, "post_processing_size": null, "dataset_size": 145913, "size_in_bytes": 325071}, "humaneval-js-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-js-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177635, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-js-keep.json": {"num_bytes": 211822, "checksum": "02e56da39247f31c4f399a62210fdbe97bb45f6ec239140c3985432b72485bf2"}}, "download_size": 211822, "post_processing_size": null, "dataset_size": 177635, "size_in_bytes": 389457}, "humaneval-js-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-js-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181987, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-js-transform.json": {"num_bytes": 216729, "checksum": "d90db81d52580d6d21cca9b16662fdac11b4ff5f2b50521652014c3c4d66b9c0"}}, "download_size": 216729, "post_processing_size": null, "dataset_size": 181987, "size_in_bytes": 398716}, "humaneval-js": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-js", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182171, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-js-reworded.json": {"num_bytes": 216913, "checksum": "ed2aa0a25d0fd9dd963668079e334d88acd8caf0bf020a33964f7cd4700eb670"}}, "download_size": 216913, "post_processing_size": null, "dataset_size": 182171, "size_in_bytes": 399084}, "humaneval-js-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-js-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158619, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-js-remove.json": {"num_bytes": 191028, "checksum": "8b0d17122dac1a1efef793d71e73473892aba8c8ebf8bf2238e4be8f7cd2685d"}}, "download_size": 191028, "post_processing_size": null, "dataset_size": 158619, "size_in_bytes": 349647}, "humaneval-lua-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-lua-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 180398, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-lua-keep.json": {"num_bytes": 212511, "checksum": "fb7466e8b89c92fab70dbd7f0074972cf0c6e970f94f7203c4fa01797af59e67"}}, "download_size": 212511, "post_processing_size": null, "dataset_size": 180398, "size_in_bytes": 392909}, "humaneval-lua-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-lua-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184763, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-lua-transform.json": {"num_bytes": 216595, "checksum": "fba904e9325bb59360bb4e583f796bce78587695db92c6a4b4145a6bbb8778df"}}, "download_size": 216595, "post_processing_size": null, "dataset_size": 184763, "size_in_bytes": 401358}, "humaneval-lua": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-lua", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 184853, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-lua-reworded.json": {"num_bytes": 216685, "checksum": "54b8881bd6d2ba52b1d2e77388f20429edd60e705cf2c8cc87c58db966ceb2ff"}}, "download_size": 216685, "post_processing_size": null, "dataset_size": 184853, "size_in_bytes": 401538}, "humaneval-lua-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-lua-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 161339, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-lua-remove.json": {"num_bytes": 191690, "checksum": "e12d5519c6f740d9341136043e93f42986a13b7f00a64c393592bca83400f45e"}}, "download_size": 191690, "post_processing_size": null, "dataset_size": 161339, "size_in_bytes": 353029}, "humaneval-php-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-php-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 219526, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-php-keep.json": {"num_bytes": 256134, "checksum": "6e8bbef0effb50396b752e4e2ee3cd42e9f1edcf253e684dffe0d60efd447af4"}}, "download_size": 256134, "post_processing_size": null, "dataset_size": 219526, "size_in_bytes": 475660}, "humaneval-php-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-php-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225575, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-php-transform.json": {"num_bytes": 262738, "checksum": "113c46223db9f1235ba2f0a390a0f01a9775400a671537e70755ea471e99088c"}}, "download_size": 262738, "post_processing_size": null, "dataset_size": 225575, "size_in_bytes": 488313}, "humaneval-php": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-php", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 225730, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-php-reworded.json": {"num_bytes": 262893, "checksum": "a27a5093957369e68f16ec973cc3fe16a400a6b1e0efa2469ef607ea5529b176"}}, "download_size": 262893, "post_processing_size": null, "dataset_size": 225730, "size_in_bytes": 488623}, "humaneval-php-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-php-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 200047, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-php-remove.json": {"num_bytes": 234848, "checksum": "3b13b33434a08c9bcff8db2a72e3ec89c85a794b8c1ca576a10614693d3b27b0"}}, "download_size": 234848, "post_processing_size": null, "dataset_size": 200047, "size_in_bytes": 434895}, "humaneval-pl-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-pl-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 239874, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-pl-keep.json": {"num_bytes": 279351, "checksum": "116f82cec38a8a9f38bd14bbd9348d18f13879a98c293c7ce9ff38829da8bf3f"}}, "download_size": 279351, "post_processing_size": null, "dataset_size": 239874, "size_in_bytes": 519225}, "humaneval-pl-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-pl-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243611, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-pl-transform.json": {"num_bytes": 283767, "checksum": "552decb4ad799ae7204b0434600d0a7b1b2136dc34dbaa1a3e6ca7acb681173e"}}, "download_size": 283767, "post_processing_size": null, "dataset_size": 243611, "size_in_bytes": 527378}, "humaneval-pl": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-pl", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 243661, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-pl-reworded.json": {"num_bytes": 283817, "checksum": "52010c713c3cb0ee07b691f0c04be20baf35223019bc8dfeb08720b82fd8ce58"}}, "download_size": 283817, "post_processing_size": null, "dataset_size": 243661, "size_in_bytes": 527478}, "humaneval-pl-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-pl-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 220817, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-pl-remove.json": {"num_bytes": 258463, "checksum": "94723d826be5a900f975ffd97039dba9de878945f6d81fa0a59bdebed5c87ef6"}}, "download_size": 258463, "post_processing_size": null, "dataset_size": 220817, "size_in_bytes": 479280}, "humaneval-py-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-py-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 173537, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-py-keep.json": {"num_bytes": 207009, "checksum": "c583508bfd9ca7f7d8730f7cf618cd5d0fb4d2000f48d39d5311b4eeb06fb6a3"}}, "download_size": 207009, "post_processing_size": null, "dataset_size": 173537, "size_in_bytes": 380546}, "humaneval-py-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-py-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-py-transform.json": {"num_bytes": 210975, "checksum": "9518a25d142569e8adf490d2cf6ed0df3ed16663991f73900d8477152f9a00c3"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "humaneval-py": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-py", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177787, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-py-reworded.json": {"num_bytes": 210975, "checksum": "56360077d2f35ca58965a85084205b31d4c296563d3fd93f1248bca308535f7f"}}, "download_size": 210975, "post_processing_size": null, "dataset_size": 177787, "size_in_bytes": 388762}, "humaneval-py-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-py-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 155389, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-py-remove.json": {"num_bytes": 187068, "checksum": "491dc22f69bd7e4098c9b927addec8a3f9e7f0a7f93bac655bdc4440c26008a1"}}, "download_size": 187068, "post_processing_size": null, "dataset_size": 155389, "size_in_bytes": 342457}, "humaneval-r-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-r-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186803, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-r-keep.json": {"num_bytes": 215857, "checksum": "efd573dd3afcf7e6bdbea508dda54067e73777fc0d2e9e6570a52dfda63aa0fa"}}, "download_size": 215857, "post_processing_size": null, "dataset_size": 186803, "size_in_bytes": 402660}, "humaneval-r-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-r-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191732, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-r-transform.json": {"num_bytes": 220505, "checksum": "5a7b5f28ae59eec006d012623f594c9143fe9854487bd98817ed075d4d2abb97"}}, "download_size": 220505, "post_processing_size": null, "dataset_size": 191732, "size_in_bytes": 412237}, "humaneval-r": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-r", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 191747, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-r-reworded.json": {"num_bytes": 220520, "checksum": "7d4063b824313d807dc8901bf86aab318b6a905549a2229fa9fdf286a526f215"}}, "download_size": 220520, "post_processing_size": null, "dataset_size": 191747, "size_in_bytes": 412267}, "humaneval-r-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-r-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 168422, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-r-remove.json": {"num_bytes": 195771, "checksum": "32085e69d9f3975f38ce336e8e90b34124b19b8d581cdf7d0c5c902c14d6f012"}}, "download_size": 195771, "post_processing_size": null, "dataset_size": 168422, "size_in_bytes": 364193}, "humaneval-rb-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rb-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181999, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rb-keep.json": {"num_bytes": 216186, "checksum": "d8e86b7408460ff14841666c7514971db6092cdd1b5565d629bf908a71046ba1"}}, "download_size": 216186, "post_processing_size": null, "dataset_size": 181999, "size_in_bytes": 398185}, "humaneval-rb-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rb-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188317, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rb-transform.json": {"num_bytes": 223059, "checksum": "b53abcc9538e2c743d5bfc0e86f18e0832e6ec0dbd611a98566b05950436d31c"}}, "download_size": 223059, "post_processing_size": null, "dataset_size": 188317, "size_in_bytes": 411376}, "humaneval-rb": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rb", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188457, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rb-reworded.json": {"num_bytes": 223199, "checksum": "17d1d757c496a5230aacc106a6e61146cb8d8c29f5c9de9c3cd1000e7123b9ad"}}, "download_size": 223199, "post_processing_size": null, "dataset_size": 188457, "size_in_bytes": 411656}, "humaneval-rb-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rb-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 163569, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rb-remove.json": {"num_bytes": 195978, "checksum": "02488606f2897203cf131aeb57eec365b93ecb0e7dd7a73d048890f0fd060e72"}}, "download_size": 195978, "post_processing_size": null, "dataset_size": 163569, "size_in_bytes": 359547}, "humaneval-rkt-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rkt-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177757, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rkt-keep.json": {"num_bytes": 212266, "checksum": "7086c9ca18882c7f0a18a4b46dfe84c0b5293b69a4c9d8964ad72a797ad72871"}}, "download_size": 212266, "post_processing_size": null, "dataset_size": 177757, "size_in_bytes": 390023}, "humaneval-rkt-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rkt-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182937, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rkt-transform.json": {"num_bytes": 218001, "checksum": "360afce46e550266f91f096d22e8a5e31e3b7f234c1d465a45c72a82ef2bda17"}}, "download_size": 218001, "post_processing_size": null, "dataset_size": 182937, "size_in_bytes": 400938}, "humaneval-rkt": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rkt", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 182754, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rkt-reworded.json": {"num_bytes": 217818, "checksum": "6d399f13b03d66d107c56736285bddd09c4be707a7bfba5d3865c964ea467d8a"}}, "download_size": 217818, "post_processing_size": null, "dataset_size": 182754, "size_in_bytes": 400572}, "humaneval-rkt-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rkt-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158729, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rkt-remove.json": {"num_bytes": 191454, "checksum": "4b9e8bd27090d5d21882ac505f579d0825b079af5769c3ca9d8e7585e0e7005a"}}, "download_size": 191454, "post_processing_size": null, "dataset_size": 158729, "size_in_bytes": 350183}, "humaneval-rs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rs-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177191, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rs-keep.json": {"num_bytes": 206604, "checksum": "d5960e79973aea8bc30d276d5aa8c2750d336b80ff26be4ecc93495a77fd597b"}}, "download_size": 206604, "post_processing_size": null, "dataset_size": 177191, "size_in_bytes": 383795}, "humaneval-rs-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rs-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188587, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rs-transform.json": {"num_bytes": 218555, "checksum": "1cd4f2931c17a8d9ee3aa8e646b818f2f2d5981b252639ff723d34ea5a13f973"}}, "download_size": 218555, "post_processing_size": null, "dataset_size": 188587, "size_in_bytes": 407142}, "humaneval-rs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rs", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 188841, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rs-reworded.json": {"num_bytes": 218809, "checksum": "78d55aaa02b3faf1b0005b1b3757364274adebd294ee2281653230ebd829b594"}}, "download_size": 218809, "post_processing_size": null, "dataset_size": 188841, "size_in_bytes": 407650}, "humaneval-rs-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-rs-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158191, "num_examples": 153, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-rs-remove.json": {"num_bytes": 185991, "checksum": "064b21353df32e13ad02e7bf68b9a977f78000b632b73828487f5d47a0a9c610"}}, "download_size": 185991, "post_processing_size": null, "dataset_size": 158191, "size_in_bytes": 344182}, "humaneval-scala-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-scala-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 222118, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-scala-keep.json": {"num_bytes": 253027, "checksum": "eb90cccebedf54864fa5fe487141d5467962aecd05d1eee25403a0369e6ffde6"}}, "download_size": 253027, "post_processing_size": null, "dataset_size": 222118, "size_in_bytes": 475145}, "humaneval-scala-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-scala-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240540, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-scala-transform.json": {"num_bytes": 272012, "checksum": "48669c1583008ffdd607006c3d4d0df65c0be452b1b7fa5429d15b4739495b34"}}, "download_size": 272012, "post_processing_size": null, "dataset_size": 240540, "size_in_bytes": 512552}, "humaneval-scala": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-scala", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 240466, "num_examples": 160, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-scala-reworded.json": {"num_bytes": 271938, "checksum": "06b28cd512364d4b69a1ff5bfc61b7db620fb21dd73aff0c15db5a547879d38a"}}, "download_size": 271938, "post_processing_size": null, "dataset_size": 240466, "size_in_bytes": 512404}, "humaneval-scala-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-scala-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 200261, "num_examples": 157, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-scala-remove.json": {"num_bytes": 229477, "checksum": "1fc1cc45643a50b0a54e467506582d72c8a7ff1124d07502599f6d16cb51fa93"}}, "download_size": 229477, "post_processing_size": null, "dataset_size": 200261, "size_in_bytes": 429738}, "humaneval-sh-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-sh-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 158460, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-sh-keep.json": {"num_bytes": 193268, "checksum": "4f7240af8ed75b8448061713aa5e92352119b8db4618f0da4378ecd78478d81a"}}, "download_size": 193268, "post_processing_size": null, "dataset_size": 158460, "size_in_bytes": 351728}, "humaneval-sh-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-sh-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164552, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-sh-transform.json": {"num_bytes": 201631, "checksum": "961c6ce6bf00bb9422c809065fc185da86fb5eadf2d87a40f29f63b855fc032e"}}, "download_size": 201631, "post_processing_size": null, "dataset_size": 164552, "size_in_bytes": 366183}, "humaneval-sh": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-sh", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 164521, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-sh-reworded.json": {"num_bytes": 201600, "checksum": "9f1e19a95aa83cf8ef4a9a23acbd3a1cee176ec13e049f57ade645126ca56ad8"}}, "download_size": 201600, "post_processing_size": null, "dataset_size": 164521, "size_in_bytes": 366121}, "humaneval-sh-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-sh-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 140720, "num_examples": 155, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-sh-remove.json": {"num_bytes": 173767, "checksum": "0e3e37a23e2a2183ead389b70d46a487a31a96e82de8cc3fb1bf7f43d2ae00d9"}}, "download_size": 173767, "post_processing_size": null, "dataset_size": 140720, "size_in_bytes": 314487}, "humaneval-swift-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-swift-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 201798, "num_examples": 161, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-swift-keep.json": {"num_bytes": 233903, "checksum": "2f47aae44c26a505bce9a7c456377c015ddb35952017f626cac03c0cd6655642"}}, "download_size": 233903, "post_processing_size": null, "dataset_size": 201798, "size_in_bytes": 435701}, "humaneval-swift-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-swift-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204760, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-swift-transform.json": {"num_bytes": 236660, "checksum": "c0b76d009ffc75e26040f13c511e78bdfdb4fafe7743fbc2b1315173e638c438"}}, "download_size": 236660, "post_processing_size": null, "dataset_size": 204760, "size_in_bytes": 441420}, "humaneval-swift": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-swift", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 204920, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-swift-reworded.json": {"num_bytes": 236820, "checksum": "193c6907ee55129c7ce823ad9162e9a52f0c0f1657220e6a329718385d31c969"}}, "download_size": 236820, "post_processing_size": null, "dataset_size": 204920, "size_in_bytes": 441740}, "humaneval-swift-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-swift-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181681, "num_examples": 158, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-swift-remove.json": {"num_bytes": 212047, "checksum": "9c5aadcab3e2bed9592808321c2f5abbf18c257b71b329bc41689c4a54972ead"}}, "download_size": 212047, "post_processing_size": null, "dataset_size": 181681, "size_in_bytes": 393728}, "humaneval-ts-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-ts-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 181763, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-ts-keep.json": {"num_bytes": 215589, "checksum": "bea4e1776118c9bb9f3211deeaa6ce03dde208031b8d90f533f7d5b1d7bb5830"}}, "download_size": 215589, "post_processing_size": null, "dataset_size": 181763, "size_in_bytes": 397352}, "humaneval-ts-transform": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-ts-transform", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186037, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-ts-transform.json": {"num_bytes": 220423, "checksum": "6081b604f3673a39bd5e8fc68a67977a3855f477cdfc1431a6cf0e2fb0be00bf"}}, "download_size": 220423, "post_processing_size": null, "dataset_size": 186037, "size_in_bytes": 406460}, "humaneval-ts": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-ts", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 186215, "num_examples": 159, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-ts-reworded.json": {"num_bytes": 220601, "checksum": "e64fa52e9e95e4daa62a9e8162b4ba1a6ec3e2881a7968ba4a69eaa3d8ba61e3"}}, "download_size": 220601, "post_processing_size": null, "dataset_size": 186215, "size_in_bytes": 406816}, "humaneval-ts-remove": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "humaneval-ts-remove", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 162881, "num_examples": 156, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/humaneval-ts-remove.json": {"num_bytes": 194985, "checksum": "7a98910e983f01a13325280b3d9d383bbd1454eced4b5b08b4f7da9daf781f32"}}, "download_size": 194985, "post_processing_size": null, "dataset_size": 162881, "size_in_bytes": 357866}, "mbpp-cpp-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-cpp-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 339440, "num_examples": 398, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-cpp-keep.json": {"num_bytes": 407399, "checksum": "e3c84df6f9a1a9a6229a0902b314a4eedb13fd4ea1841e1d6f5db22f41de961a"}}, "download_size": 407399, "post_processing_size": null, "dataset_size": 339440, "size_in_bytes": 746839}, "mbpp-cpp": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-cpp", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 341872, "num_examples": 398, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-cpp-reworded.json": {"num_bytes": 409831, "checksum": "080a9a1f1613dbb54d7e2e8d0e6bae938f9bc0b40aada117a50d25a394c04620"}}, "download_size": 409831, "post_processing_size": null, "dataset_size": 341872, "size_in_bytes": 751703}, "mbpp-cs-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-cs-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 382709, "num_examples": 373, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-cs-keep.json": {"num_bytes": 448784, "checksum": "7f622d149330cd461050e4a16a4b61c3503424db8d51fb10db8910855322cdcf"}}, "download_size": 448784, "post_processing_size": null, "dataset_size": 382709, "size_in_bytes": 831493}, "mbpp-cs": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-cs", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 384527, "num_examples": 373, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-cs-reworded.json": {"num_bytes": 450602, "checksum": "1b27e7a9d6c32cfda54b6ac046bd7337578d1691f8785a3ffb1652b21055e4ea"}}, "download_size": 450602, "post_processing_size": null, "dataset_size": 384527, "size_in_bytes": 835129}, "mbpp-d-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-d-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 177848, "num_examples": 292, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-d-keep.json": {"num_bytes": 235309, "checksum": "d62c83ece8b11edd841c40745fd205220406928c1adb8a169db3c76b9f41f55a"}}, "download_size": 235309, "post_processing_size": null, "dataset_size": 177848, "size_in_bytes": 413157}, "mbpp-d": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-d", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 179380, "num_examples": 292, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-d-reworded.json": {"num_bytes": 236841, "checksum": "aa0b4fdd91bc391a8c17d2a72323e7ac2e84fc4f668d113a14104ce94f92049e"}}, "download_size": 236841, "post_processing_size": null, "dataset_size": 179380, "size_in_bytes": 416221}, "mbpp-go-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n"

That description, the citation (Cassano et al., "A Scalable and Extensible Approach to Benchmarking NL2Code for 18 Programming Languages", arXiv 2022, doi:10.48550/ARXIV.2208.08227), the homepage https://nuprl.github.io/MultiPL-E/, the MIT license, builder_name "multi_pl-e", and version 2.0.0 are repeated verbatim in every mbpp-* config entry of this file. Each entry also shares the same feature schema and carries a single "test" split, with post_processed and supervised_keys null and task_templates empty:

    name, language, prompt, doctests, original, prompt_terminology, tests : string
    stop_tokens : sequence of string

Every prompt file is downloaded from
https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/,
the "-keep" configs being built from the *-keep.json files and the plain configs from the
*-reworded.json files. Only the fields below differ per config; download_size equals the prompt
file's num_bytes, size_in_bytes equals dataset_size + download_size, and all sizes are in bytes.

    config            examples  dataset_size  download_size  prompt file                sha256 checksum
    mbpp-go-keep           383        410162         499994  mbpp-go-keep.json          ed4b493959212b9bd9aa2b5eb2685dfaf296edf1a757289b34ea20d4502b9558
    mbpp-go                383        411839         501671  mbpp-go-reworded.json      48ff57086cc7b7cb1589307aa6016484c506780a961868ba7af074ef6ae1f34b
    mbpp-java-keep         373        380120         446183  mbpp-java-keep.json        1812b0ac0283138fa84b81ba7d2071f1b3c75dd068734cd860be34b3ac7bc8cc
    mbpp-java              373        384060         450123  mbpp-java-reworded.json    498180b596fa44f8ebf1019da00486b23c93dcc65f22984fda3f735a85b8db36
    mbpp-jl-keep           320        178552         241358  mbpp-jl-keep.json          072d310282ec3b836986c8bbb90d29923943775f12f4f47673c3d6fb05d57a6d
    mbpp-jl                320        180497         243303  mbpp-jl-reworded.json      fdf8f4fc01286c471a0de0f9ae7a49390a055dd9706de6a3a641b5b4b29b44b1
    mbpp-js-keep           401        257339         334884  mbpp-js-keep.json          d28b66a738f7a377bf9dd7639f144836fd801b24bb5b269dae210c41b7b0346e
    mbpp-js                401        259574         337119  mbpp-js-reworded.json      076ba2748c39911f1f14eac85082e83de8b9d80b601461f014a0171899e4e997
    mbpp-lua-keep          401        263370         337249  mbpp-lua-keep.json         fc8560820ec0bdfa59965bfa6ae17e6075060b28915f751d97c46b1ecb774f16
    mbpp-lua               401        265616         339495  mbpp-lua-reworded.json     226c6374ce87b97a5afdd9aa992a4964bbcd38e556ea40693ff99ded5d619fb9
    mbpp-php-keep          401        310609         391081  mbpp-php-keep.json         40f1c95a438297bc7c55a963000f68dd6abe1f491e8ff5598c189614f0a84a33
    mbpp-php               401        312951         393423  mbpp-php-reworded.json     aaaa1572a3a97c312926e14c775f74b7e8fd584e6deb87d7d25e14190a803b1d
    mbpp-pl-keep           400        322633         404809  mbpp-pl-keep.json          b3994de11a5d60f77d4da65e40d9f38f93494c6193ccc2898f8d62e8f3da08a8
    mbpp-pl                400        324803         406979  mbpp-pl-reworded.json      01813d0ff84fa70dd0f28469face716422ef16ffa2416cef5696111b928cbc61
    mbpp-py-keep           401        253380         331354  mbpp-py-keep.json          9ec370f2060aeb03ac1b5e5d9af3dad905a8028bd9eab9826d970798abd5f630
    mbpp-py                401        255385         333359  mbpp-py-reworded.json      81068ac397d354862ea01003b000564fbba68c61a318d6638ca6d78acf65c3af
    mbpp-r-keep            401        262111         328371  mbpp-r-keep.json           5699a80863ee284962010902886e7a72a5dfbafbe36de93e5c9dd50ae14aa83a
    mbpp-r                 401        263918         330178  mbpp-r-reworded.json       4f23b3da5ebb200ccee197a1f53962b2a441d5b27e3a956817163ff82f1946cf
    mbpp-rb-keep           401        267707         345252  mbpp-rb-keep.json          bea376b8c5f67ebdf1ad622aa4fff8b7887ff315709ab7501cc098095a79012e
    mbpp-rb                401        269877         347422  mbpp-rb-reworded.json      73d9c62fd2ecf61100140d8c1bf6f2de3d9662f4303a945170e087a77ac2f1bb
    mbpp-rkt-keep          401        262836         341183  mbpp-rkt-keep.json         6769cb46846884c53f72c1f559293ef35a5b8c7932a3996603bd4402c8902d7e
    mbpp-rkt               401        264723         343070  mbpp-rkt-reworded.json     5e4d7cc7fe4faa7ea409e421165c554bb280851514c7bf115551dca3703afa2d
    mbpp-rs-keep           289        166041         214015  mbpp-rs-keep.json          952e48ae0f414c42ee19ae92d6fb5f2bdf81c008a376103c8e51a2523f2325da
    mbpp-rs                289        167707         215681  mbpp-rs-reworded.json      964b7029c74bb3668501ec3dea666b917a2660d687016a714f93ae52d75273ba
    mbpp-scala-keep        398        315930         384809  mbpp-scala-keep.json       3bf7328b8e271fa80df710be3cb5d90cc787847102697e3d69e78ba75376ae16
    mbpp-scala             398        318164         387043  mbpp-scala-reworded.json   74d2555f838c250f0c666e097c7fd88d38ed3e54bf094a5e2fc56605070ebb31
    mbpp-sh-keep           383        214958         287141  mbpp-sh-keep.json          0f7e3598fc94b4b2e880d9d3ae634c2e4db2c73c2079d0909e5640177443519e
    mbpp-sh                383        216727         288910  mbpp-sh-reworded.json      d0f0dcb40bbc83da76881237182a7b03c6991ab11d774203d195ce9a95cb820e
    mbpp-swift-keep        324        246862         304578  mbpp-swift-keep.json       5e4561ea1a4b266b308fd275a3f0f439f5179a12797470771ac1b7d7fb185dca
    mbpp-swift             324        249075         306791  mbpp-swift-reworded.json   e6f59e481efd23f2957b1da6f423a357f390a663f878810c93488a6c7c8d49de
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-swift", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 249075, "num_examples": 324, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-swift-reworded.json": {"num_bytes": 306791, "checksum": "e6f59e481efd23f2957b1da6f423a357f390a663f878810c93488a6c7c8d49de"}}, "download_size": 306791, "post_processing_size": null, "dataset_size": 249075, "size_in_bytes": 555866}, "mbpp-ts-keep": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-ts-keep", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 211792, "num_examples": 322, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-ts-keep.json": {"num_bytes": 273416, "checksum": "81511f4dea1fbea124eaa8681924603079ba1161bd4c785b820f059f87f42489"}}, "download_size": 273416, "post_processing_size": null, "dataset_size": 211792, "size_in_bytes": 485208}, "mbpp-ts": {"description": "MultiPL-E is a dataset for evaluating large language models for code generation that supports 18 programming languages. It takes the OpenAI \"HumanEval\" and the MBPP Python benchmarks and uses little compilers to translate them to other languages. 
It is easy to add support for new languages and benchmarks.\n", "citation": "@misc{multipl-e,\n doi = {10.48550/ARXIV.2208.08227},\n url = {https://arxiv.org/abs/2208.08227},\n author = {Cassano, Federico and Gouwar, John and Nguyen, Daniel and\n Nguyen, Sydney and Phipps-Costin, Luna and Pinckney, Donald and \n Yee, Ming-Ho and Zi, Yangtian and Anderson, Carolyn Jane and \n Feldman, Molly Q and Guha, Arjun and \n Greenberg, Michael and Jangda, Abhinav},\n title = {A Scalable and Extensible Approach to Benchmarking NL2Code for 18\n Programming Languages},\n publisher = {arXiv},\n year = {2022},\n}\n", "homepage": "https://nuprl.github.io/MultiPL-E/", "license": "MIT", "features": {"name": {"dtype": "string", "id": null, "_type": "Value"}, "language": {"dtype": "string", "id": null, "_type": "Value"}, "prompt": {"dtype": "string", "id": null, "_type": "Value"}, "doctests": {"dtype": "string", "id": null, "_type": "Value"}, "original": {"dtype": "string", "id": null, "_type": "Value"}, "prompt_terminology": {"dtype": "string", "id": null, "_type": "Value"}, "tests": {"dtype": "string", "id": null, "_type": "Value"}, "stop_tokens": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": [], "builder_name": "multi_pl-e", "config_name": "mbpp-ts", "version": {"version_str": "2.0.0", "description": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 213578, "num_examples": 322, "dataset_name": "multi_pl-e"}}, "download_checksums": {"https://raw.githubusercontent.com/nuprl/MultiPL-E/1f21818a0f3265fd0a41c3954e30aab47f34063a/prompts/mbpp-ts-reworded.json": {"num_bytes": 275202, "checksum": "38fc7a95298f6f954a1c5cb905e4a6e2314b5f3dc066de6e52de729fbb7b4752"}}, "download_size": 275202, "post_processing_size": null, "dataset_size": 213578, "size_in_bytes": 488780}} \ No newline at end of file diff --git a/humaneval-cpp-keep/multi_pl-e-test.parquet b/humaneval-cpp-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..86b56e2453ce7c5804c000a6defa395d1b442e07 --- /dev/null +++ b/humaneval-cpp-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb17fb217f21716b22425d37c0f90d87426175dea415e33fac6e168ea7da0e9a +size 81078 diff --git a/humaneval-cpp-remove/multi_pl-e-test.parquet b/humaneval-cpp-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..21324a86010f13fd7313ba0c68ed946e816717e5 --- /dev/null +++ b/humaneval-cpp-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2dae7c92225011a2d5716026024834304fded129bf121e0bc6ce8e50174ca31 +size 69506 diff --git a/humaneval-cpp-transform/multi_pl-e-test.parquet b/humaneval-cpp-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..56fbbb757d7f6233edb48fa9a30e331f1f0eadcf --- /dev/null +++ b/humaneval-cpp-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f2285605c1ddcc3435f635e3d4d79abf34d3ecf3950382482e4e2cf22ca9f82 +size 83562 diff --git a/humaneval-cpp/multi_pl-e-test.parquet b/humaneval-cpp/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4287d24c3723bb56c4c9d5fd4c49cc042dcb5329 --- /dev/null +++ b/humaneval-cpp/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:18487b0d686ad732a5971c9299742101e34fbd472d0e3b05a7bd4a9836cfd00f +size 83532 diff --git a/humaneval-cs-keep/multi_pl-e-test.parquet b/humaneval-cs-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ce8308e557756d8ba3efba64881a0c816a4edd3e --- /dev/null +++ b/humaneval-cs-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ae3329d13bbd6b3d4d9bcbea89560be99a2cc7689d9e2c055153519e555f87a +size 80035 diff --git a/humaneval-cs-remove/multi_pl-e-test.parquet b/humaneval-cs-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8c696c45548ba6022f0b3af9662a17a6ed21a207 --- /dev/null +++ b/humaneval-cs-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30790095b47f8a9c9b2c923b16531a53d054f7ad59e951cff60ee6ca52ebd45f +size 68561 diff --git a/humaneval-cs-transform/multi_pl-e-test.parquet b/humaneval-cs-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a32b92d90371bd3ee3cd9e66222f016fbf13b449 --- /dev/null +++ b/humaneval-cs-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f087170cd4329fc9874ad76b700e6b31b208a87c55b7c4faeb632c50deef313d +size 82329 diff --git a/humaneval-cs/multi_pl-e-test.parquet b/humaneval-cs/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a773ae9e57c30476d21a5020d980c5fadaf4283f --- /dev/null +++ b/humaneval-cs/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdebc99f68fc48fef064f3fde1761fb96e448e9ce40ad4c29d9a19492563c99d +size 82288 diff --git a/humaneval-d-keep/multi_pl-e-test.parquet b/humaneval-d-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8390257105402fe6358365cf4fe602ee34139dd9 --- /dev/null +++ b/humaneval-d-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:435d06b8fab3f475b6cc8dd1bb273ad9e95957cc8d1bc2bca235611c408a22c0 +size 72164 diff --git a/humaneval-d-remove/multi_pl-e-test.parquet b/humaneval-d-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..75a4ef52bf5bc4ffe9141e50c214d61af9137c6f --- /dev/null +++ b/humaneval-d-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c487ce57d5886a8e6837362fdfb9f25949aeaf97685d5ca3026034161a92ce1c +size 61317 diff --git a/humaneval-d-transform/multi_pl-e-test.parquet b/humaneval-d-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a244f54a8bf9f231966983cb719cbaacc02fc3a2 --- /dev/null +++ b/humaneval-d-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dec899282b4267fd12883bbe7dac6081748e42fa0ac1fee2b8f0ec0b7e668e2 +size 72331 diff --git a/humaneval-d/multi_pl-e-test.parquet b/humaneval-d/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c46f990d54bba519d5a8536725854b050595c7d3 --- /dev/null +++ b/humaneval-d/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bd79bf3dfb7afe20a279d417f6ad7d1a3b12f8ef6a15e9d6092a8116c133be6 +size 72362 diff --git a/humaneval-go-keep/multi_pl-e-test.parquet 
b/humaneval-go-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9e6251474e72682a1f95c150bf023ba4a8a213ba --- /dev/null +++ b/humaneval-go-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b302d09020216e34e5d1b982f58909610121699f4d0a2431e9f834369c497adf +size 78374 diff --git a/humaneval-go-remove/multi_pl-e-test.parquet b/humaneval-go-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a3b9da99d05535a38657f1362789c39cd41008cc --- /dev/null +++ b/humaneval-go-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c17a8daa422e09fc371ff01a44b5edbb5b599e28cf4bdcd3a4996ed16a117b8f +size 67750 diff --git a/humaneval-go-transform/multi_pl-e-test.parquet b/humaneval-go-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5c6865847f6bc5faedd4d18b07b57f4025d7e0a9 --- /dev/null +++ b/humaneval-go-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae3213ef592540e86fe8b01c506fb8a737ebe0a08141afa60f1e6dfd267527c5 +size 78407 diff --git a/humaneval-go/multi_pl-e-test.parquet b/humaneval-go/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..660102d75c25d71fe445f9fe6c9268790578e0a0 --- /dev/null +++ b/humaneval-go/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c6460960de2a501db0bb28585e24ec50a24cfbded6f92a4b142f63df78735d4a +size 78345 diff --git a/humaneval-java-keep/multi_pl-e-test.parquet b/humaneval-java-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a0401a9d6a55ccb3b45f743949f737c01cf8e226 --- /dev/null +++ b/humaneval-java-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0224c2001633e8a6ac490f29f34af4c499a8a4d31cb45be91d6a301a9ed823b6 +size 83275 diff --git a/humaneval-java-remove/multi_pl-e-test.parquet b/humaneval-java-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a2a6e519763477699610eafce6e00d45093c90ed --- /dev/null +++ b/humaneval-java-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5c255a319548a40834739e064f0c8090b2e63fc0f938f7678c84f8985b52eb5 +size 71879 diff --git a/humaneval-java-transform/multi_pl-e-test.parquet b/humaneval-java-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e025a80e2b0eb4df9f9dd78bbe84b3c9d3b783ae --- /dev/null +++ b/humaneval-java-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6db4bf0fa90a38fc955931e194503d905a6a53bf8587467b51a7885a881b0810 +size 85967 diff --git a/humaneval-java/multi_pl-e-test.parquet b/humaneval-java/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..401fa98694bf97a815b9259cd74b038b40939254 --- /dev/null +++ b/humaneval-java/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:985abeab4db51cb59b8dd54a81b8ff742ca905cb0b34218a5ddd2f8ca76a47fe +size 86261 diff --git a/humaneval-jl-keep/multi_pl-e-test.parquet b/humaneval-jl-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5a52bde736fc6eaabf5578c43375ae9218ffc979 --- 
/dev/null +++ b/humaneval-jl-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08f260792503c0217a0dc9513eb045075622b4b18eac3b9b5147564f4ee84e12 +size 72294 diff --git a/humaneval-jl-remove/multi_pl-e-test.parquet b/humaneval-jl-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0fbe209f2b4bc57f5528287341055d4efee5b166 --- /dev/null +++ b/humaneval-jl-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe70bf808c3f6ca5e9ed6e3a0be1d762235ef11ee96ccd8ade09e1302f1c08f6 +size 61669 diff --git a/humaneval-jl-transform/multi_pl-e-test.parquet b/humaneval-jl-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4a6437e7b83524f62a69e9ef65fe4795d6c79ec3 --- /dev/null +++ b/humaneval-jl-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f82d0b1754ae87849c3495e5150d3923b45520223ab4d5a4082bd7313cb81eb +size 71743 diff --git a/humaneval-jl/multi_pl-e-test.parquet b/humaneval-jl/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..adc3e2a16513d72711a8954d6dc73ed51efd4533 --- /dev/null +++ b/humaneval-jl/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52bf409557759aa26184276f5f78c4bee00240a834bca66f33bafd2238326ad7 +size 71701 diff --git a/humaneval-js-keep/multi_pl-e-test.parquet b/humaneval-js-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..843e67b9a74c85193b1e357cc7f3294259abe593 --- /dev/null +++ b/humaneval-js-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d09b585c4aaad0011fe39f7591eb0db37ec4c648527440990134e76dd8c65395 +size 70987 diff --git a/humaneval-js-remove/multi_pl-e-test.parquet b/humaneval-js-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f46e2d6022acdd743bbb5f650613dd60d4c290e8 --- /dev/null +++ b/humaneval-js-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:260163d9f0d38df5fe849226d0e96c27c40a4787eb7f578619b728fd1ef13f75 +size 60378 diff --git a/humaneval-js-transform/multi_pl-e-test.parquet b/humaneval-js-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ae8cc68b9f4bcbdc35d49e4be005c283c27d318f --- /dev/null +++ b/humaneval-js-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab15718609e1afeb579b649b9f2acef11b95ed38b2252a5da63da758b73f320f +size 70431 diff --git a/humaneval-js/multi_pl-e-test.parquet b/humaneval-js/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..57bf39eaa0867d635cd5c88c54be0ecdd3fe280b --- /dev/null +++ b/humaneval-js/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7595ea764e487e466c64548ddef58eb03856d2a9f3e6bbbc4aadc6bb24ec6aa5 +size 70446 diff --git a/humaneval-lua-keep/multi_pl-e-test.parquet b/humaneval-lua-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..073c71fe5752edc5981aa551dcf4e377e2ebd828 --- /dev/null +++ b/humaneval-lua-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d624fdc30d6ebe4d9e1da47514b73dac2aec88b75f9eb31aaa7d87b8ea1d8f81 +size 71286 diff --git a/humaneval-lua-remove/multi_pl-e-test.parquet b/humaneval-lua-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8db4dcae829545baa6875561c26f071778512b8e --- /dev/null +++ b/humaneval-lua-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b983a13d4d033cda8423e3dd51d6f780a32876a270e1056f87fea68e720379e9 +size 60620 diff --git a/humaneval-lua-transform/multi_pl-e-test.parquet b/humaneval-lua-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d49cfd9d8c8fef0936cc9db893ba0ab50420f31e --- /dev/null +++ b/humaneval-lua-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:856b988a5a1746c0aaffd07a0a9c0757772dba3e234d7159d21f51f5c147a039 +size 70741 diff --git a/humaneval-lua/multi_pl-e-test.parquet b/humaneval-lua/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ac84c29909ad45dd410726438b7009b6f5d67745 --- /dev/null +++ b/humaneval-lua/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1552551e88d3c2b33116dfd3683c3d335d9c91eefc4741bed4d91f40013a2fd +size 70601 diff --git a/humaneval-php-keep/multi_pl-e-test.parquet b/humaneval-php-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..fb5265eea56a8fc5c7be27dcc2a6452750cf6573 --- /dev/null +++ b/humaneval-php-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27f69af68207e95dee9d973b2526f9c09c027af16572cf79c547b040fd2bc2d3 +size 75617 diff --git a/humaneval-php-remove/multi_pl-e-test.parquet b/humaneval-php-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..42acd4a53307dc083d69e777e9c3362c1756f1bb --- /dev/null +++ b/humaneval-php-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc655830533f65339d08d5940340b46a4717fcd549a4d976fa6732d62b2e18cd +size 64892 diff --git a/humaneval-php-transform/multi_pl-e-test.parquet b/humaneval-php-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..1e925e1adf3d3eecd485f5d25c0dbb439c3b43ba --- /dev/null +++ b/humaneval-php-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7bab486a8ab94809d52918b806d75f2bb2419d395818d9f8b7f8ca73955ba4c +size 75391 diff --git a/humaneval-php/multi_pl-e-test.parquet b/humaneval-php/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f990a0c089b164ee94361db1d43d5615003f49e7 --- /dev/null +++ b/humaneval-php/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45af418cca1f249d39e21f66f907b531182c754e28e557892008dbbd6dda3a79 +size 75516 diff --git a/humaneval-pl-keep/multi_pl-e-test.parquet b/humaneval-pl-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..bee956528473862a77ad8d0a207d44c1a1d1b36d --- /dev/null +++ b/humaneval-pl-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2be25e6710b2434e6e0118393d41f4a8edfe228af31308f4b548f87755c39830 +size 78424 diff --git a/humaneval-pl-remove/multi_pl-e-test.parquet 
b/humaneval-pl-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3b118631631ca22a17eb941f57aff53fba0c4003 --- /dev/null +++ b/humaneval-pl-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0690bdebf2a81fae63fd2ac01df28e84b1faba30855523111e7e8785ff73a2e0 +size 67318 diff --git a/humaneval-pl-transform/multi_pl-e-test.parquet b/humaneval-pl-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..851066ca75611a226831f30f085617933d07d489 --- /dev/null +++ b/humaneval-pl-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:076af41b3c2490e9eddc0f8ce64be8b7d023f34200f67ee7966eb528efe42fa6 +size 77618 diff --git a/humaneval-pl/multi_pl-e-test.parquet b/humaneval-pl/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..32e077287f6a3865f94169f6c2c5377ef96ed65b --- /dev/null +++ b/humaneval-pl/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e14b135752176dbf546b50c0e27ed432a714bbb9d71ea02ae543b59a3ee15afe +size 77628 diff --git a/humaneval-py-keep/multi_pl-e-test.parquet b/humaneval-py-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0dc4ad2b674f45bb377e6deb364445cbe3e719b2 --- /dev/null +++ b/humaneval-py-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd893b1662e145148abae4a37297b04a0b5559c7976081704428ec666594a914 +size 70135 diff --git a/humaneval-py-remove/multi_pl-e-test.parquet b/humaneval-py-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..e591eae07e746b2812c7a42877b79c56ba7a3276 --- /dev/null +++ b/humaneval-py-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afe6d716531382dfb4210fb6f52245611bcc90f5b6294585a3d05a053fa75828 +size 59419 diff --git a/humaneval-py-transform/multi_pl-e-test.parquet b/humaneval-py-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ae00fb0719d688b9cdcec8eafdede582a9d0b605 --- /dev/null +++ b/humaneval-py-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77ef53896c5d8859c58a95b0ac91228d27f7f1597381761881d8af01d4c521da +size 69819 diff --git a/humaneval-py/multi_pl-e-test.parquet b/humaneval-py/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..031432f64de063d0e15d81a33c7fa2ad86c2fee9 --- /dev/null +++ b/humaneval-py/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1aac59190587404c84f2b86db2584e94d2d03b79d5e49fd951c7659ef735b506 +size 69819 diff --git a/humaneval-r-keep/multi_pl-e-test.parquet b/humaneval-r-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3e31fca671267260cfa5fcec811ed7aa52538d38 --- /dev/null +++ b/humaneval-r-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27f2407961914f987a17917fe087a8ed799c5dc4118f86dbab0e0e1ee99bf10e +size 72705 diff --git a/humaneval-r-remove/multi_pl-e-test.parquet b/humaneval-r-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..45eb96df5a2c48a720f22df59e166b1a58055480 --- /dev/null +++ 
b/humaneval-r-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7db46a65810a5afe4ebe49923c964db8e47adde7e9c83147674fa2749bc3e5e6 +size 61889 diff --git a/humaneval-r-transform/multi_pl-e-test.parquet b/humaneval-r-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..25582516a512f375d8ab75749a51e492cbcad33b --- /dev/null +++ b/humaneval-r-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a99e8fdc95cc0e47ec0cb80c9393281492af89bd54ba49642054ff5399cf4afc +size 72363 diff --git a/humaneval-r/multi_pl-e-test.parquet b/humaneval-r/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..29b9a181ebc6f404ba28af45281ec19d7cac829f --- /dev/null +++ b/humaneval-r/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78b570d097d428e87475a92156caf83d251b2eb95c9a3df7e76e64325439a876 +size 72362 diff --git a/humaneval-rb-keep/multi_pl-e-test.parquet b/humaneval-rb-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f42290f7037f9082195343770f357a82f6917923 --- /dev/null +++ b/humaneval-rb-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7da24365b5bc56b65dff17a1f33e1aac04c8867156d2058ae7bda46ce269fa8d +size 73213 diff --git a/humaneval-rb-remove/multi_pl-e-test.parquet b/humaneval-rb-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d2c815eccaa63b1d923114faa0ee34f05032966b --- /dev/null +++ b/humaneval-rb-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:505cff8f4e827b91055581fd4e4d83b68cd4ccfe406e0567ca9a6918170529b3 +size 62844 diff --git a/humaneval-rb-transform/multi_pl-e-test.parquet b/humaneval-rb-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..df4260aa0d7c28e91096b6b5cd11b622f4855112 --- /dev/null +++ b/humaneval-rb-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02a376f757966de5feaec6dbad298e1a9ce6e02af27bcee877f32c7f13a73eeb +size 73177 diff --git a/humaneval-rb/multi_pl-e-test.parquet b/humaneval-rb/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cc8c5ed91fac67a1763dfc0051eae044b1c8f9fb --- /dev/null +++ b/humaneval-rb/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c558b9233dc14a6bb4a7e4604bda8ba0ad7705bac554f29a9a8cb4d47d198d77 +size 73274 diff --git a/humaneval-rkt-keep/multi_pl-e-test.parquet b/humaneval-rkt-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..7dfa6f9170d99f3db4e9ac6e64ab451ce986e830 --- /dev/null +++ b/humaneval-rkt-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6dad790c00d25005b0fac9339c2ffb3fcb25b2a8b6491949ffec2fa973572a3b +size 70643 diff --git a/humaneval-rkt-remove/multi_pl-e-test.parquet b/humaneval-rkt-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..26a937140f8163193a09f63f52e9608462a23400 --- /dev/null +++ b/humaneval-rkt-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f839741c910f1753ec336b5709e66d843f0b5bf123980ebaac0643065decb56 
+size 59725 diff --git a/humaneval-rkt-transform/multi_pl-e-test.parquet b/humaneval-rkt-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cb3543016d0589a3d5f7bbd228e75a5f4dcfae81 --- /dev/null +++ b/humaneval-rkt-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1811ef594b1fb10d71009be42a53942527ab86e81acda4215e657316e4d035a +size 70071 diff --git a/humaneval-rkt/multi_pl-e-test.parquet b/humaneval-rkt/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..64acb0a0997f3e0c0b7520979ccd6c99b6e5c75d --- /dev/null +++ b/humaneval-rkt/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ec9294ace12be799dfec3749824f23e5ea5f885f6575b410d4c204dbedddc64 +size 69922 diff --git a/humaneval-rs-keep/multi_pl-e-test.parquet b/humaneval-rs-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..753ce1ac38f0646065658c2a595850a553918c6f --- /dev/null +++ b/humaneval-rs-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee0213317fb204faaa70b5e3b1ed38c5467411970db80e289f44cf774ab1422c +size 74159 diff --git a/humaneval-rs-remove/multi_pl-e-test.parquet b/humaneval-rs-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..75f5b9b6a708b0952c51f7ff3c5947e3cb29244e --- /dev/null +++ b/humaneval-rs-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fa444ca368fb3b293928d0f3d68b9fa2dedaf755ef74a9655d7e50a56e20c81 +size 63197 diff --git a/humaneval-rs-transform/multi_pl-e-test.parquet b/humaneval-rs-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..18df920d5452f350441483bc49c233c5dbf914a3 --- /dev/null +++ b/humaneval-rs-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a76d31ac01b51b20cbd8a0360d31cd1f6c3eeede29b8725834bc74debc8d13a +size 75629 diff --git a/humaneval-rs/multi_pl-e-test.parquet b/humaneval-rs/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..031e2a78ec0625d6c9fba644a72ef690752c3cae --- /dev/null +++ b/humaneval-rs/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:277a19410c0d9710f44e205e455d2b3014787522516b05594decdb4d4671306b +size 75673 diff --git a/humaneval-scala-keep/multi_pl-e-test.parquet b/humaneval-scala-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f8a1db94b2c56fcc9f7eea295fd9d815b1844fb5 --- /dev/null +++ b/humaneval-scala-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e261812cb7134995c575bfc6d5a80c3d83bf3f39769d2f2d329a32d15cd8f911 +size 79521 diff --git a/humaneval-scala-remove/multi_pl-e-test.parquet b/humaneval-scala-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..811669ae06b9849509f54950db1d1ad5a18f72a2 --- /dev/null +++ b/humaneval-scala-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51524e581ad7754bc9b9b9575dc151b20da27c84e4d2309e878031cf0012c400 +size 67431 diff --git a/humaneval-scala-transform/multi_pl-e-test.parquet b/humaneval-scala-transform/multi_pl-e-test.parquet new file mode 100644 index 
0000000000000000000000000000000000000000..a65dc95af2de2747d772bd10fee1b8c6be37898f --- /dev/null +++ b/humaneval-scala-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49edf53493171b0b64512f0ede158bb27032b4fcf00215ee16c81ed45a539543 +size 81148 diff --git a/humaneval-scala/multi_pl-e-test.parquet b/humaneval-scala/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..048095cfb72387a2ed1df9a5e051740aaddc8a0d --- /dev/null +++ b/humaneval-scala/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fae585e5077752868961ea8aaaa4e45fb09e19978ba62ff5ae805bbed086c3e5 +size 81112 diff --git a/humaneval-sh-keep/multi_pl-e-test.parquet b/humaneval-sh-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..bd656cb1c417dc29a2cda928654845f2260dab6b --- /dev/null +++ b/humaneval-sh-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d90e0295018a1d4eccc3d6f38d74465740c85a4a684e278a6f1289aa3df89564 +size 67934 diff --git a/humaneval-sh-remove/multi_pl-e-test.parquet b/humaneval-sh-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..62185556817a8cc9bcdf19d4da14867264934df0 --- /dev/null +++ b/humaneval-sh-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec2abf2df61c7d053ec20dded68168146966a609b6686791337d58c79b3d4499 +size 57358 diff --git a/humaneval-sh-transform/multi_pl-e-test.parquet b/humaneval-sh-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..7b4bbd01c8e661e40a244434d36672af871c92f1 --- /dev/null +++ b/humaneval-sh-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ab90eaa79851056634bd7d44505a4751049b32e6353b788646d84f8ad9bf77a +size 67756 diff --git a/humaneval-sh/multi_pl-e-test.parquet b/humaneval-sh/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..50a5fb96a14469d8e8484a208a3777e54e122ab7 --- /dev/null +++ b/humaneval-sh/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3332c53b8b14ee77c4ea0cda8188d33a0f32f85132b7eac03d23f71b71c5746 +size 67707 diff --git a/humaneval-swift-keep/multi_pl-e-test.parquet b/humaneval-swift-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..01c1afd8f7944da9fa0e8253d0193d9b4c9abc88 --- /dev/null +++ b/humaneval-swift-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2028179b93e762a45ee512ca32556d4dfd123d1a97ada3fc1350f9fe5a7566bd +size 77907 diff --git a/humaneval-swift-remove/multi_pl-e-test.parquet b/humaneval-swift-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d611c6b52474cd189b454c26de8e86ff0cd6f265 --- /dev/null +++ b/humaneval-swift-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e2ce3968bce60aa955249177f1c0dbdf9ab1c5a75f7e91ed41f449844489921 +size 66781 diff --git a/humaneval-swift-transform/multi_pl-e-test.parquet b/humaneval-swift-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ce65a88b936f86e58e6f62bf7b8cd35fc9be813f --- /dev/null +++ 
b/humaneval-swift-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd6f25f1c37e054f315094e6ea476039c8dd7497efa2a41cefca7ad017f2dc6e +size 78470 diff --git a/humaneval-swift/multi_pl-e-test.parquet b/humaneval-swift/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..3c38388749e6e6f147468dddf8313f0afaa7c9fa --- /dev/null +++ b/humaneval-swift/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd5e5de9c0ec4dff28cbdf364ccfdfc8bcbd8bbb69aabe3b54930bc6e977da74 +size 78476 diff --git a/humaneval-ts-keep/multi_pl-e-test.parquet b/humaneval-ts-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..700581290a5f323a2f2d293aace916d126649cba --- /dev/null +++ b/humaneval-ts-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c49894ab0c8cea536a9bc6540507a420a722b1859907f372ada25638e499006 +size 71292 diff --git a/humaneval-ts-remove/multi_pl-e-test.parquet b/humaneval-ts-remove/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..02ee6ca74b3bef80ba45e5b9f73db917baec6a74 --- /dev/null +++ b/humaneval-ts-remove/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80ab90d294eb8df58e4cc350515a7d6f8b22c2b574a9f465d4a7f6ea58e4a844 +size 60651 diff --git a/humaneval-ts-transform/multi_pl-e-test.parquet b/humaneval-ts-transform/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8e4a0f68d9a823a6e78c9b5f5a2e7c7706537b76 --- /dev/null +++ b/humaneval-ts-transform/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bef35c1c7f7cbaa9bf570ae68473e41b4bfa3964e47a4eeaa4583cb69ac03a20 +size 70758 diff --git a/humaneval-ts/multi_pl-e-test.parquet b/humaneval-ts/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..a63aa55896c82e55a95cc70624130283b421897d --- /dev/null +++ b/humaneval-ts/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95215861d0ffad2e2058bfed8da6f4aa5f9658f599e2a926e0825e82dc2adeb2 +size 70767 diff --git a/mbpp-cpp-keep/multi_pl-e-test.parquet b/mbpp-cpp-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8b4a2cf29f603b2747514ef14cbb9865279811b8 --- /dev/null +++ b/mbpp-cpp-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:006181434fc37bbb38dbf76bf9b9d201f862be376480325041fe5a48c818bf64 +size 95701 diff --git a/mbpp-cpp/multi_pl-e-test.parquet b/mbpp-cpp/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cff8c4a8df1b00a9313e881cdc8c16293b7ea832 --- /dev/null +++ b/mbpp-cpp/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06ccd33600d38b8e4f305b6464c3bf1933dc8a483e2d6b54e2f643ec133b2476 +size 95730 diff --git a/mbpp-cs-keep/multi_pl-e-test.parquet b/mbpp-cs-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..44881a9929146e6c41c57ab09e47224331d0fd90 --- /dev/null +++ b/mbpp-cs-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b53b6abee0a44a2848580b8048ac3747089ed08fd91e3e946fc5f95afa1b992 +size 94058 diff --git a/mbpp-cs/multi_pl-e-test.parquet 
b/mbpp-cs/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f416f9bbe82e474b5c28ae00ce5dda2a36ad5ea5 --- /dev/null +++ b/mbpp-cs/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acc2417668157e9215688765585135fcc28ae1fa63554c5a8fefe59e2887f653 +size 93997 diff --git a/mbpp-d-keep/multi_pl-e-test.parquet b/mbpp-d-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5f35edb4296e4e864a3dc29fcd8bb65fbe8714c3 --- /dev/null +++ b/mbpp-d-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f205716d48cb43886db80dd72d33fafe4621a595b7480b717bcefc926c8f929 +size 59120 diff --git a/mbpp-d/multi_pl-e-test.parquet b/mbpp-d/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..4a9078df7d2b7387b8daab7f7e1df965ba4b7eda --- /dev/null +++ b/mbpp-d/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e763e6d94ed533044aa16132c210140d1c00991bc38f0cc7ddb9c48f20d970df +size 59229 diff --git a/mbpp-go-keep/multi_pl-e-test.parquet b/mbpp-go-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..de6f1235615be9b17519a0ace4fe96389e4b244d --- /dev/null +++ b/mbpp-go-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22b24bb290626044aea6f22e2efe2e0a83a1c8561b5f5889073448317e04ab6c +size 96780 diff --git a/mbpp-go/multi_pl-e-test.parquet b/mbpp-go/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..77c4f1b557e58d9996242af7bc4b903e446c21fb --- /dev/null +++ b/mbpp-go/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89805aa6d6d68268379b77b620397b6600a35c6dc300e3d362b2ff96b6ab8064 +size 96538 diff --git a/mbpp-java-keep/multi_pl-e-test.parquet b/mbpp-java-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..df97ce6252b003f92ca34e9e8b5b7115cf8b8d24 --- /dev/null +++ b/mbpp-java-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8f5191977bfdab4b6a32635b91d959d59641a4f459233e56643d4dda65426ce +size 93706 diff --git a/mbpp-java/multi_pl-e-test.parquet b/mbpp-java/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..5249ca30c46919858a884f728aa67b22b3498520 --- /dev/null +++ b/mbpp-java/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d38d753caeed06d920a149d7bacc89fcb6a6183b891ba6734e444c9763353549 +size 94196 diff --git a/mbpp-jl-keep/multi_pl-e-test.parquet b/mbpp-jl-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b94ff849b39c6f9730cc3bde76ec111cde784b35 --- /dev/null +++ b/mbpp-jl-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32eafccc1133562174a7e18811d10fef8fe0ca26cd18b23be5fe93fac248cafc +size 62029 diff --git a/mbpp-jl/multi_pl-e-test.parquet b/mbpp-jl/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ab49a56531413eb3bcf47f1b3e8b3b0363435543 --- /dev/null +++ b/mbpp-jl/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e1c4e4760eec419c06ca4e3a024c2e35a6c20f2e2e5fd887b8ac83c568ac777 +size 61970 diff --git 
a/mbpp-js-keep/multi_pl-e-test.parquet b/mbpp-js-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b8079507053d7b7f24e48a5aa6d2cb15aac9cf86 --- /dev/null +++ b/mbpp-js-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c768f75e2b3ed7bdbeb0d3f1ae5d4793a02f6a37d6f8a925eded54b05b7e6397 +size 78567 diff --git a/mbpp-js/multi_pl-e-test.parquet b/mbpp-js/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..73286cc5b79bfffa80c1d550b68aaf68ac7e737c --- /dev/null +++ b/mbpp-js/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d42c6f59c084fa7b3c7f0787641b0f76158ea7f03a6fbe7b52c46deb882f885 +size 79329 diff --git a/mbpp-lua-keep/multi_pl-e-test.parquet b/mbpp-lua-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..833ba1f2dec173bbc2dddcfeffc46ac3bcf03503 --- /dev/null +++ b/mbpp-lua-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8abd10310bd6d87726760626cbb2fab29a084bea0b21e6adbad1d3277f7ec5be +size 79093 diff --git a/mbpp-lua/multi_pl-e-test.parquet b/mbpp-lua/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..f5a5c86f75c53c57cbd2a326c347f1af45d7728d --- /dev/null +++ b/mbpp-lua/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2e5c26c082ed6bcb11c737517985a8da89597e955feeafe9b77a6f6069ea487 +size 79862 diff --git a/mbpp-php-keep/multi_pl-e-test.parquet b/mbpp-php-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..6b68ad0febb3636fd3f7e2a204562bbd99a1d8dd --- /dev/null +++ b/mbpp-php-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c39abb7e053b8b6e852ef7a730c697bdc7db9cfb734156015fb49a9aedba2b02 +size 82994 diff --git a/mbpp-php/multi_pl-e-test.parquet b/mbpp-php/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..45e4e3f2d78c8d5464f483bd9fb17513d4630191 --- /dev/null +++ b/mbpp-php/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d38b5af7b9db2ea834063f9832f36133193c6466898be283de34ce034e6d28b +size 83695 diff --git a/mbpp-pl-keep/multi_pl-e-test.parquet b/mbpp-pl-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9ac45cc0775d63137c5f717a35a2289e953e67ad --- /dev/null +++ b/mbpp-pl-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1faf3735d28266f4464897e3f2ca124a38255d304dfde47301e774c840c9bf57 +size 84330 diff --git a/mbpp-pl/multi_pl-e-test.parquet b/mbpp-pl/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..136bbbadd7ca1e9a031a1724d9e815f40c41e818 --- /dev/null +++ b/mbpp-pl/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:646627ebb99be82f01e3143798d60665b2c67b2b5607da0a005b477216249aa8 +size 85074 diff --git a/mbpp-py-keep/multi_pl-e-test.parquet b/mbpp-py-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..c3de3bd244b00e6886c05f143887df433e98d05f --- /dev/null +++ b/mbpp-py-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:841fcf5679cd75b1d976e8fd59e50413abb83d328b1a90c54e659eae505af8e4 +size 82517 diff --git a/mbpp-py/multi_pl-e-test.parquet b/mbpp-py/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..7a257e85c532bdb14d505b176cfe006084bbe818 --- /dev/null +++ b/mbpp-py/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd31ba93731490728707751d8c29a22ad77a474d8dd3dd5a5c8b9f2c729bca8d +size 82552 diff --git a/mbpp-r-keep/multi_pl-e-test.parquet b/mbpp-r-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..ccc319327c370cd2fd013b755e689057f5daeb86 --- /dev/null +++ b/mbpp-r-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99e38f95ffbec84c6c16320259aec97e1f588bd6fe1dfdd7cb0a2be3b494789d +size 80796 diff --git a/mbpp-r/multi_pl-e-test.parquet b/mbpp-r/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..05e8ebe69fc9eb9fd53c1d1f77107476f64afddc --- /dev/null +++ b/mbpp-r/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c461f6722737eae55299ea7cb70ec9cb713369e72095f9c23a05f53e2272a65 +size 80663 diff --git a/mbpp-rb-keep/multi_pl-e-test.parquet b/mbpp-rb-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..8b2b12ab509a6c083bab2f29b1a1f2c6a3f73cd5 --- /dev/null +++ b/mbpp-rb-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b783bca6a9e863b4f8bce86c91a497104f9bd288b898f6626329951640c8542 +size 84159 diff --git a/mbpp-rb/multi_pl-e-test.parquet b/mbpp-rb/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..27aa0d273ded6ef27dddd5a67299c1a393e6a6f1 --- /dev/null +++ b/mbpp-rb/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6c104011e1e202ae503d267a45638dad533d3295efb75c0197f83fd48ca6b6c +size 84966 diff --git a/mbpp-rkt-keep/multi_pl-e-test.parquet b/mbpp-rkt-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..23012fe612cf1f7e3a73a703045c9292bf176cd7 --- /dev/null +++ b/mbpp-rkt-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e74f5bf7b7a06cc1a92b6a36ddfaeaf72f27b3c0f20707af4657bf7efb51c3ce +size 78156 diff --git a/mbpp-rkt/multi_pl-e-test.parquet b/mbpp-rkt/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cd6e9552a3724271b22435be81ebf3892f7af89b --- /dev/null +++ b/mbpp-rkt/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48f35a96b0d304ebcb83cd31ada5a0457f5319ae40ce407a42494445a6a4eea7 +size 77964 diff --git a/mbpp-rs-keep/multi_pl-e-test.parquet b/mbpp-rs-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..0433416e8c9ee4cae8086faeda5d48ffa9785a46 --- /dev/null +++ b/mbpp-rs-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b12219640e07b48085e3fa8d00f546a809832299f55101c94a8d0045b16aa777 +size 57654 diff --git a/mbpp-rs/multi_pl-e-test.parquet b/mbpp-rs/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d5070b0ea7b800d0fd7a2858cdb6491b6e11079b --- /dev/null +++ b/mbpp-rs/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:6535943bedab0a5b5fa07dd05afc4560ee157d96b8a2ac39ea5883d2a30cf1dd +size 57633 diff --git a/mbpp-scala-keep/multi_pl-e-test.parquet b/mbpp-scala-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..d781e217b59ae0af06b15ea627e4680c74fd3c38 --- /dev/null +++ b/mbpp-scala-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a3a31a7d4b313da631930ff3f505a0ef194a9ee738a623a87e94dd02875184f +size 91926 diff --git a/mbpp-scala/multi_pl-e-test.parquet b/mbpp-scala/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cb75fe8e7c6c956e3fde56da979f13b5206ecec7 --- /dev/null +++ b/mbpp-scala/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19d4503d9d883b0194b552c7b42359b4eaa7c00d8afcfce7a07a63f00cb9aaed +size 91781 diff --git a/mbpp-sh-keep/multi_pl-e-test.parquet b/mbpp-sh-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2e83f9f377b962b9fe534e39151a58ee95e05416 --- /dev/null +++ b/mbpp-sh-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63e23d4806c9b7a39df71cfe2eb3a80ff726e2b05abac91c2b9e95c844091024 +size 70522 diff --git a/mbpp-sh/multi_pl-e-test.parquet b/mbpp-sh/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..fd1e8e0da974aa5bca2ed7f1e74f3bbf39674ad6 --- /dev/null +++ b/mbpp-sh/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f34504bb5bd71f79219d1cf910fc0520bbe624b244417170493fbe2ae846fe12 +size 70384 diff --git a/mbpp-swift-keep/multi_pl-e-test.parquet b/mbpp-swift-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..b4231c9d3e8b1db956cce36240b8a1f3b62311b6 --- /dev/null +++ b/mbpp-swift-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ae1d1d0208f9ae2c28245462c7be821e819e91cbb89f92d28ea5e7ee42e4dd7 +size 71904 diff --git a/mbpp-swift/multi_pl-e-test.parquet b/mbpp-swift/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..bbbba29db063d28e653a4d74f0afb4958e8ebfa0 --- /dev/null +++ b/mbpp-swift/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad64844dc85827729a7a804ae181ce34ae75396e484891ffb10fee0f4ac73e6d +size 71933 diff --git a/mbpp-ts-keep/multi_pl-e-test.parquet b/mbpp-ts-keep/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cd4edd3aea4bf0af5ff3448cfabb525051c339f8 --- /dev/null +++ b/mbpp-ts-keep/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf201372a059226c1e646d298c75332f26f8b1b785ec2c3ac38bc6fa5608a855 +size 64463 diff --git a/mbpp-ts/multi_pl-e-test.parquet b/mbpp-ts/multi_pl-e-test.parquet new file mode 100644 index 0000000000000000000000000000000000000000..80bc2968655d0c4297e31dd7e415aae1d74517ef --- /dev/null +++ b/mbpp-ts/multi_pl-e-test.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb3a06bc240e65a00807088da88081ed6dce632231581289f6830b5b5286d3fb +size 64446
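Each parquet file added above is committed as a Git LFS pointer: a three-line text stub (version, oid, size) that LFS-aware clients resolve to the actual blob, following the spec URL shown in every hunk. As a minimal sketch (not part of this commit), the following parses one of these pointers and checks a locally fetched blob against it; the file paths are placeholders:

import hashlib
from pathlib import Path

def parse_lfs_pointer(text: str) -> dict:
    # A pointer is "key value" lines, e.g. (verbatim from the first hunk above):
    #   version https://git-lfs.github.com/spec/v1
    #   oid sha256:fb17fb217f21716b22425d37c0f90d87426175dea415e33fac6e168ea7da0e9a
    #   size 81078
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    algo, digest = fields["oid"].split(":", 1)
    return {"algo": algo, "digest": digest, "size": int(fields["size"])}

def matches_pointer(blob_path: Path, pointer: dict) -> bool:
    # The blob matches when both its byte length and its digest agree.
    data = blob_path.read_bytes()
    return (len(data) == pointer["size"]
            and hashlib.new(pointer["algo"], data).hexdigest() == pointer["digest"])

# Placeholder paths: the pointer text as committed vs. the fetched parquet blob.
ptr = parse_lfs_pointer(Path("humaneval-cpp-keep/multi_pl-e-test.parquet").read_text())
print(matches_pointer(Path("downloaded.parquet"), ptr))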
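For orientation, the `dataset_infos.json` entries deleted earlier in this diff describe what these parquet files contain: one `test` split per config, with the features name, language, prompt, doctests, original, prompt_terminology, tests, and stop_tokens (a sequence of strings). The download URLs there also show the naming scheme: unsuffixed configs (e.g. `mbpp-sh`) were built from the `*-reworded.json` prompt files, while `-keep` configs came from `*-keep.json`, and `size_in_bytes` is simply `download_size + dataset_size` (e.g. 287141 + 214958 = 502099 for `mbpp-sh-keep`). A minimal usage sketch, under the assumptions that the dataset lives at the Hub id `nuprl/MultiPL-E` and that the installed `datasets` version reads the parquet layout added in this commit directly, without the deleted loading script:

from datasets import load_dataset

# Load one of the configs listed in the metadata above ("mbpp-ts" here).
ds = load_dataset("nuprl/MultiPL-E", "mbpp-ts", split="test")

# Each row exposes the declared features; prompt + tests together form a
# runnable check for a model completion.
row = ds[0]
print(row["name"], row["language"])
print(row["prompt"] + row["tests"])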